code | code_sememe | token_type | code_dependency
---|---|---|---
def append_cluster(self, cluster, data=None, canvas=0, marker='.', markersize=None, color=None):
"""!
@brief Appends cluster to canvas for drawing.
@param[in] cluster (list): Cluster that may consist of indexes of objects from the data or of the objects themselves.
@param[in] data (list): If defined, each element of the cluster is considered an index of an object from the data.
@param[in] canvas (uint): Index of the canvas that should be used for displaying the cluster.
@param[in] marker (string): Marker that is used for displaying objects from cluster on the canvas.
@param[in] markersize (uint): Size of marker.
@param[in] color (string): Color of marker.
@return Returns index of cluster descriptor on the canvas.
"""
if len(cluster) == 0:
return
if canvas > self.__number_canvases or canvas < 0:
raise ValueError("Canvas index '%d' is out of range [0; %d]." % (canvas, self.__number_canvases))
if color is None:
index_color = len(self.__canvas_clusters[canvas]) % len(color_list.TITLES)
color = color_list.TITLES[index_color]
added_canvas_descriptor = canvas_cluster_descr(cluster, data, marker, markersize, color)
self.__canvas_clusters[canvas].append( added_canvas_descriptor )
if data is None:
dimension = len(cluster[0])
if self.__canvas_dimensions[canvas] is None:
self.__canvas_dimensions[canvas] = dimension
elif self.__canvas_dimensions[canvas] != dimension:
raise ValueError("Only clusters with the same dimension of objects can be displayed on canvas.")
else:
dimension = len(data[0])
if self.__canvas_dimensions[canvas] is None:
self.__canvas_dimensions[canvas] = dimension
elif self.__canvas_dimensions[canvas] != dimension:
raise ValueError("Only clusters with the same dimension of objects can be displayed on canvas.")
if (dimension < 1) or (dimension > 3):
raise ValueError("Only objects with size dimension 1 (1D plot), 2 (2D plot) or 3 (3D plot) "
"can be displayed. For multi-dimensional data use 'cluster_visualizer_multidim'.")
if markersize is None:
if (dimension == 1) or (dimension == 2):
added_canvas_descriptor.markersize = self.__default_2d_marker_size
elif dimension == 3:
added_canvas_descriptor.markersize = self.__default_3d_marker_size
return len(self.__canvas_clusters[canvas]) - 1 | def function[append_cluster, parameter[self, cluster, data, canvas, marker, markersize, color]]:
constant[!
@brief Appends cluster to canvas for drawing.
@param[in] cluster (list): Cluster that may consist of indexes of objects from the data or of the objects themselves.
@param[in] data (list): If defined, each element of the cluster is considered an index of an object from the data.
@param[in] canvas (uint): Index of the canvas that should be used for displaying the cluster.
@param[in] marker (string): Marker that is used for displaying objects from cluster on the canvas.
@param[in] markersize (uint): Size of marker.
@param[in] color (string): Color of marker.
@return Returns index of cluster descriptor on the canvas.
]
if compare[call[name[len], parameter[name[cluster]]] equal[==] constant[0]] begin[:]
return[None]
if <ast.BoolOp object at 0x7da1b01d6230> begin[:]
<ast.Raise object at 0x7da1b01d6590>
if compare[name[color] is constant[None]] begin[:]
variable[index_color] assign[=] binary_operation[call[name[len], parameter[call[name[self].__canvas_clusters][name[canvas]]]] <ast.Mod object at 0x7da2590d6920> call[name[len], parameter[name[color_list].TITLES]]]
variable[color] assign[=] call[name[color_list].TITLES][name[index_color]]
variable[added_canvas_descriptor] assign[=] call[name[canvas_cluster_descr], parameter[name[cluster], name[data], name[marker], name[markersize], name[color]]]
call[call[name[self].__canvas_clusters][name[canvas]].append, parameter[name[added_canvas_descriptor]]]
if compare[name[data] is constant[None]] begin[:]
variable[dimension] assign[=] call[name[len], parameter[call[name[cluster]][constant[0]]]]
if compare[call[name[self].__canvas_dimensions][name[canvas]] is constant[None]] begin[:]
call[name[self].__canvas_dimensions][name[canvas]] assign[=] name[dimension]
if <ast.BoolOp object at 0x7da1b014c0d0> begin[:]
<ast.Raise object at 0x7da1b014ff70>
if compare[name[markersize] is constant[None]] begin[:]
if <ast.BoolOp object at 0x7da1b014fc70> begin[:]
name[added_canvas_descriptor].markersize assign[=] name[self].__default_2d_marker_size
return[binary_operation[call[name[len], parameter[call[name[self].__canvas_clusters][name[canvas]]]] - constant[1]]] | keyword[def] identifier[append_cluster] ( identifier[self] , identifier[cluster] , identifier[data] = keyword[None] , identifier[canvas] = literal[int] , identifier[marker] = literal[string] , identifier[markersize] = keyword[None] , identifier[color] = keyword[None] ):
literal[string]
keyword[if] identifier[len] ( identifier[cluster] )== literal[int] :
keyword[return]
keyword[if] identifier[canvas] > identifier[self] . identifier[__number_canvases] keyword[or] identifier[canvas] < literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] %( identifier[canvas] , identifier[self] . identifier[__number_canvases] ))
keyword[if] identifier[color] keyword[is] keyword[None] :
identifier[index_color] = identifier[len] ( identifier[self] . identifier[__canvas_clusters] [ identifier[canvas] ])% identifier[len] ( identifier[color_list] . identifier[TITLES] )
identifier[color] = identifier[color_list] . identifier[TITLES] [ identifier[index_color] ]
identifier[added_canvas_descriptor] = identifier[canvas_cluster_descr] ( identifier[cluster] , identifier[data] , identifier[marker] , identifier[markersize] , identifier[color] )
identifier[self] . identifier[__canvas_clusters] [ identifier[canvas] ]. identifier[append] ( identifier[added_canvas_descriptor] )
keyword[if] identifier[data] keyword[is] keyword[None] :
identifier[dimension] = identifier[len] ( identifier[cluster] [ literal[int] ])
keyword[if] identifier[self] . identifier[__canvas_dimensions] [ identifier[canvas] ] keyword[is] keyword[None] :
identifier[self] . identifier[__canvas_dimensions] [ identifier[canvas] ]= identifier[dimension]
keyword[elif] identifier[self] . identifier[__canvas_dimensions] [ identifier[canvas] ]!= identifier[dimension] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[else] :
identifier[dimension] = identifier[len] ( identifier[data] [ literal[int] ])
keyword[if] identifier[self] . identifier[__canvas_dimensions] [ identifier[canvas] ] keyword[is] keyword[None] :
identifier[self] . identifier[__canvas_dimensions] [ identifier[canvas] ]= identifier[dimension]
keyword[elif] identifier[self] . identifier[__canvas_dimensions] [ identifier[canvas] ]!= identifier[dimension] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] ( identifier[dimension] < literal[int] ) keyword[or] ( identifier[dimension] > literal[int] ):
keyword[raise] identifier[ValueError] ( literal[string]
literal[string] )
keyword[if] identifier[markersize] keyword[is] keyword[None] :
keyword[if] ( identifier[dimension] == literal[int] ) keyword[or] ( identifier[dimension] == literal[int] ):
identifier[added_canvas_descriptor] . identifier[markersize] = identifier[self] . identifier[__default_2d_marker_size]
keyword[elif] identifier[dimension] == literal[int] :
identifier[added_canvas_descriptor] . identifier[markersize] = identifier[self] . identifier[__default_3d_marker_size]
keyword[return] identifier[len] ( identifier[self] . identifier[__canvas_clusters] [ identifier[canvas] ])- literal[int] | def append_cluster(self, cluster, data=None, canvas=0, marker='.', markersize=None, color=None):
"""!
@brief Appends cluster to canvas for drawing.
@param[in] cluster (list): Cluster that may consist of indexes of objects from the data or of the objects themselves.
@param[in] data (list): If defined, each element of the cluster is considered an index of an object from the data.
@param[in] canvas (uint): Index of the canvas that should be used for displaying the cluster.
@param[in] marker (string): Marker that is used for displaying objects from cluster on the canvas.
@param[in] markersize (uint): Size of marker.
@param[in] color (string): Color of marker.
@return Returns index of cluster descriptor on the canvas.
"""
if len(cluster) == 0:
return # depends on [control=['if'], data=[]]
if canvas > self.__number_canvases or canvas < 0:
raise ValueError("Canvas index '%d' is out of range [0; %d]." % (canvas, self.__number_canvases)) # depends on [control=['if'], data=[]]
if color is None:
index_color = len(self.__canvas_clusters[canvas]) % len(color_list.TITLES)
color = color_list.TITLES[index_color] # depends on [control=['if'], data=['color']]
added_canvas_descriptor = canvas_cluster_descr(cluster, data, marker, markersize, color)
self.__canvas_clusters[canvas].append(added_canvas_descriptor)
if data is None:
dimension = len(cluster[0])
if self.__canvas_dimensions[canvas] is None:
self.__canvas_dimensions[canvas] = dimension # depends on [control=['if'], data=[]]
elif self.__canvas_dimensions[canvas] != dimension:
raise ValueError('Only clusters with the same dimension of objects can be displayed on canvas.') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
dimension = len(data[0])
if self.__canvas_dimensions[canvas] is None:
self.__canvas_dimensions[canvas] = dimension # depends on [control=['if'], data=[]]
elif self.__canvas_dimensions[canvas] != dimension:
raise ValueError('Only clusters with the same dimension of objects can be displayed on canvas.') # depends on [control=['if'], data=[]]
if dimension < 1 or dimension > 3:
raise ValueError("Only objects with size dimension 1 (1D plot), 2 (2D plot) or 3 (3D plot) can be displayed. For multi-dimensional data use 'cluster_visualizer_multidim'.") # depends on [control=['if'], data=[]]
if markersize is None:
if dimension == 1 or dimension == 2:
added_canvas_descriptor.markersize = self.__default_2d_marker_size # depends on [control=['if'], data=[]]
elif dimension == 3:
added_canvas_descriptor.markersize = self.__default_3d_marker_size # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
return len(self.__canvas_clusters[canvas]) - 1 |
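This method matches the signature of pyclustering's `cluster_visualizer.append_cluster`. A minimal usage sketch, assuming that library is installed (the sample data and clusters below are made up):

```python
# Usage sketch; assumes pyclustering provides the cluster_visualizer shown above.
from pyclustering.cluster import cluster_visualizer

data = [[1.0, 1.5], [1.2, 1.1], [5.0, 5.2], [5.1, 4.9]]
clusters = [[0, 1], [2, 3]]  # each cluster holds indexes into data

visualizer = cluster_visualizer()
for cluster in clusters:
    # Passing data means each cluster element is treated as an index into it.
    visualizer.append_cluster(cluster, data, marker='o')
visualizer.show()
```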
def plot_confusion_matrix(y_true, y_pred, labels=None, true_labels=None,
pred_labels=None, title=None, normalize=False,
hide_zeros=False, x_tick_rotation=0, ax=None,
figsize=None, cmap='Blues', title_fontsize="large",
text_fontsize="medium"):
"""Generates confusion matrix plot from predictions and true labels
Args:
y_true (array-like, shape (n_samples)):
Ground truth (correct) target values.
y_pred (array-like, shape (n_samples)):
Estimated targets as returned by a classifier.
labels (array-like, shape (n_classes), optional): List of labels to
index the matrix. This may be used to reorder or select a subset
of labels. If none is given, those that appear at least once in
``y_true`` or ``y_pred`` are used in sorted order. (new in v0.2.5)
true_labels (array-like, optional): The true labels to display.
If none is given, then all of the labels are used.
pred_labels (array-like, optional): The predicted labels to display.
If none is given, then all of the labels are used.
title (string, optional): Title of the generated plot. Defaults to
"Confusion Matrix" if `normalize` is False. Else, defaults to
"Normalized Confusion Matrix".
normalize (bool, optional): If True, normalizes the confusion matrix
before plotting. Defaults to False.
hide_zeros (bool, optional): If True, does not plot cells containing a
value of zero. Defaults to False.
x_tick_rotation (int, optional): Rotates x-axis tick labels by the
specified angle. This is useful in cases where there are numerous
categories and the labels overlap each other.
ax (:class:`matplotlib.axes.Axes`, optional): The axes upon which to
plot the curve. If None, the plot is drawn on a new set of axes.
figsize (2-tuple, optional): Tuple denoting figure size of the plot
e.g. (6, 6). Defaults to ``None``.
cmap (string or :class:`matplotlib.colors.Colormap` instance, optional):
Colormap used for plotting the projection. View Matplotlib Colormap
documentation for available options.
https://matplotlib.org/users/colormaps.html
title_fontsize (string or int, optional): Matplotlib-style fontsizes.
Use e.g. "small", "medium", "large" or integer-values. Defaults to
"large".
text_fontsize (string or int, optional): Matplotlib-style fontsizes.
Use e.g. "small", "medium", "large" or integer-values. Defaults to
"medium".
Returns:
ax (:class:`matplotlib.axes.Axes`): The axes on which the plot was
drawn.
Example:
>>> import scikitplot.plotters as skplt
>>> rf = RandomForestClassifier()
>>> rf = rf.fit(X_train, y_train)
>>> y_pred = rf.predict(X_test)
>>> skplt.plot_confusion_matrix(y_test, y_pred, normalize=True)
<matplotlib.axes._subplots.AxesSubplot object at 0x7fe967d64490>
>>> plt.show()
.. image:: _static/examples/plot_confusion_matrix.png
:align: center
:alt: Confusion matrix
"""
if ax is None:
fig, ax = plt.subplots(1, 1, figsize=figsize)
cm = confusion_matrix(y_true, y_pred, labels=labels)
if labels is None:
classes = unique_labels(y_true, y_pred)
else:
classes = np.asarray(labels)
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
cm = np.around(cm, decimals=2)
cm[np.isnan(cm)] = 0.0
if true_labels is None:
true_classes = classes
else:
validate_labels(classes, true_labels, "true_labels")
true_label_indexes = np.in1d(classes, true_labels)
true_classes = classes[true_label_indexes]
cm = cm[true_label_indexes]
if pred_labels is None:
pred_classes = classes
else:
validate_labels(classes, pred_labels, "pred_labels")
pred_label_indexes = np.in1d(classes, pred_labels)
pred_classes = classes[pred_label_indexes]
cm = cm[:, pred_label_indexes]
if title:
ax.set_title(title, fontsize=title_fontsize)
elif normalize:
ax.set_title('Normalized Confusion Matrix', fontsize=title_fontsize)
else:
ax.set_title('Confusion Matrix', fontsize=title_fontsize)
image = ax.imshow(cm, interpolation='nearest', cmap=plt.cm.get_cmap(cmap))
plt.colorbar(mappable=image)
x_tick_marks = np.arange(len(pred_classes))
y_tick_marks = np.arange(len(true_classes))
ax.set_xticks(x_tick_marks)
ax.set_xticklabels(pred_classes, fontsize=text_fontsize,
rotation=x_tick_rotation)
ax.set_yticks(y_tick_marks)
ax.set_yticklabels(true_classes, fontsize=text_fontsize)
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
if not (hide_zeros and cm[i, j] == 0):
ax.text(j, i, cm[i, j],
horizontalalignment="center",
verticalalignment="center",
fontsize=text_fontsize,
color="white" if cm[i, j] > thresh else "black")
ax.set_ylabel('True label', fontsize=text_fontsize)
ax.set_xlabel('Predicted label', fontsize=text_fontsize)
ax.grid('off')
return ax | def function[plot_confusion_matrix, parameter[y_true, y_pred, labels, true_labels, pred_labels, title, normalize, hide_zeros, x_tick_rotation, ax, figsize, cmap, title_fontsize, text_fontsize]]:
constant[Generates confusion matrix plot from predictions and true labels
Args:
y_true (array-like, shape (n_samples)):
Ground truth (correct) target values.
y_pred (array-like, shape (n_samples)):
Estimated targets as returned by a classifier.
labels (array-like, shape (n_classes), optional): List of labels to
index the matrix. This may be used to reorder or select a subset
of labels. If none is given, those that appear at least once in
``y_true`` or ``y_pred`` are used in sorted order. (new in v0.2.5)
true_labels (array-like, optional): The true labels to display.
If none is given, then all of the labels are used.
pred_labels (array-like, optional): The predicted labels to display.
If none is given, then all of the labels are used.
title (string, optional): Title of the generated plot. Defaults to
"Confusion Matrix" if `normalize` is False. Else, defaults to
"Normalized Confusion Matrix".
normalize (bool, optional): If True, normalizes the confusion matrix
before plotting. Defaults to False.
hide_zeros (bool, optional): If True, does not plot cells containing a
value of zero. Defaults to False.
x_tick_rotation (int, optional): Rotates x-axis tick labels by the
specified angle. This is useful in cases where there are numerous
categories and the labels overlap each other.
ax (:class:`matplotlib.axes.Axes`, optional): The axes upon which to
plot the curve. If None, the plot is drawn on a new set of axes.
figsize (2-tuple, optional): Tuple denoting figure size of the plot
e.g. (6, 6). Defaults to ``None``.
cmap (string or :class:`matplotlib.colors.Colormap` instance, optional):
Colormap used for plotting the projection. View Matplotlib Colormap
documentation for available options.
https://matplotlib.org/users/colormaps.html
title_fontsize (string or int, optional): Matplotlib-style fontsizes.
Use e.g. "small", "medium", "large" or integer-values. Defaults to
"large".
text_fontsize (string or int, optional): Matplotlib-style fontsizes.
Use e.g. "small", "medium", "large" or integer-values. Defaults to
"medium".
Returns:
ax (:class:`matplotlib.axes.Axes`): The axes on which the plot was
drawn.
Example:
>>> import scikitplot.plotters as skplt
>>> rf = RandomForestClassifier()
>>> rf = rf.fit(X_train, y_train)
>>> y_pred = rf.predict(X_test)
>>> skplt.plot_confusion_matrix(y_test, y_pred, normalize=True)
<matplotlib.axes._subplots.AxesSubplot object at 0x7fe967d64490>
>>> plt.show()
.. image:: _static/examples/plot_confusion_matrix.png
:align: center
:alt: Confusion matrix
]
if compare[name[ax] is constant[None]] begin[:]
<ast.Tuple object at 0x7da1b26af8b0> assign[=] call[name[plt].subplots, parameter[constant[1], constant[1]]]
variable[cm] assign[=] call[name[confusion_matrix], parameter[name[y_true], name[y_pred]]]
if compare[name[labels] is constant[None]] begin[:]
variable[classes] assign[=] call[name[unique_labels], parameter[name[y_true], name[y_pred]]]
if name[normalize] begin[:]
variable[cm] assign[=] binary_operation[call[name[cm].astype, parameter[constant[float]]] / call[call[name[cm].sum, parameter[]]][tuple[[<ast.Slice object at 0x7da1b26ad8a0>, <ast.Attribute object at 0x7da1b26aeaa0>]]]]
variable[cm] assign[=] call[name[np].around, parameter[name[cm]]]
call[name[cm]][call[name[np].isnan, parameter[name[cm]]]] assign[=] constant[0.0]
if compare[name[true_labels] is constant[None]] begin[:]
variable[true_classes] assign[=] name[classes]
if compare[name[pred_labels] is constant[None]] begin[:]
variable[pred_classes] assign[=] name[classes]
if name[title] begin[:]
call[name[ax].set_title, parameter[name[title]]]
variable[image] assign[=] call[name[ax].imshow, parameter[name[cm]]]
call[name[plt].colorbar, parameter[]]
variable[x_tick_marks] assign[=] call[name[np].arange, parameter[call[name[len], parameter[name[pred_classes]]]]]
variable[y_tick_marks] assign[=] call[name[np].arange, parameter[call[name[len], parameter[name[true_classes]]]]]
call[name[ax].set_xticks, parameter[name[x_tick_marks]]]
call[name[ax].set_xticklabels, parameter[name[pred_classes]]]
call[name[ax].set_yticks, parameter[name[y_tick_marks]]]
call[name[ax].set_yticklabels, parameter[name[true_classes]]]
variable[thresh] assign[=] binary_operation[call[name[cm].max, parameter[]] / constant[2.0]]
for taget[tuple[[<ast.Name object at 0x7da1b1645720>, <ast.Name object at 0x7da1b1645750>]]] in starred[call[name[itertools].product, parameter[call[name[range], parameter[call[name[cm].shape][constant[0]]]], call[name[range], parameter[call[name[cm].shape][constant[1]]]]]]] begin[:]
if <ast.UnaryOp object at 0x7da1b1647100> begin[:]
call[name[ax].text, parameter[name[j], name[i], call[name[cm]][tuple[[<ast.Name object at 0x7da1b1644a30>, <ast.Name object at 0x7da1b16449a0>]]]]]
call[name[ax].set_ylabel, parameter[constant[True label]]]
call[name[ax].set_xlabel, parameter[constant[Predicted label]]]
call[name[ax].grid, parameter[constant[off]]]
return[name[ax]] | keyword[def] identifier[plot_confusion_matrix] ( identifier[y_true] , identifier[y_pred] , identifier[labels] = keyword[None] , identifier[true_labels] = keyword[None] ,
identifier[pred_labels] = keyword[None] , identifier[title] = keyword[None] , identifier[normalize] = keyword[False] ,
identifier[hide_zeros] = keyword[False] , identifier[x_tick_rotation] = literal[int] , identifier[ax] = keyword[None] ,
identifier[figsize] = keyword[None] , identifier[cmap] = literal[string] , identifier[title_fontsize] = literal[string] ,
identifier[text_fontsize] = literal[string] ):
literal[string]
keyword[if] identifier[ax] keyword[is] keyword[None] :
identifier[fig] , identifier[ax] = identifier[plt] . identifier[subplots] ( literal[int] , literal[int] , identifier[figsize] = identifier[figsize] )
identifier[cm] = identifier[confusion_matrix] ( identifier[y_true] , identifier[y_pred] , identifier[labels] = identifier[labels] )
keyword[if] identifier[labels] keyword[is] keyword[None] :
identifier[classes] = identifier[unique_labels] ( identifier[y_true] , identifier[y_pred] )
keyword[else] :
identifier[classes] = identifier[np] . identifier[asarray] ( identifier[labels] )
keyword[if] identifier[normalize] :
identifier[cm] = identifier[cm] . identifier[astype] ( literal[string] )/ identifier[cm] . identifier[sum] ( identifier[axis] = literal[int] )[:, identifier[np] . identifier[newaxis] ]
identifier[cm] = identifier[np] . identifier[around] ( identifier[cm] , identifier[decimals] = literal[int] )
identifier[cm] [ identifier[np] . identifier[isnan] ( identifier[cm] )]= literal[int]
keyword[if] identifier[true_labels] keyword[is] keyword[None] :
identifier[true_classes] = identifier[classes]
keyword[else] :
identifier[validate_labels] ( identifier[classes] , identifier[true_labels] , literal[string] )
identifier[true_label_indexes] = identifier[np] . identifier[in1d] ( identifier[classes] , identifier[true_labels] )
identifier[true_classes] = identifier[classes] [ identifier[true_label_indexes] ]
identifier[cm] = identifier[cm] [ identifier[true_label_indexes] ]
keyword[if] identifier[pred_labels] keyword[is] keyword[None] :
identifier[pred_classes] = identifier[classes]
keyword[else] :
identifier[validate_labels] ( identifier[classes] , identifier[pred_labels] , literal[string] )
identifier[pred_label_indexes] = identifier[np] . identifier[in1d] ( identifier[classes] , identifier[pred_labels] )
identifier[pred_classes] = identifier[classes] [ identifier[pred_label_indexes] ]
identifier[cm] = identifier[cm] [:, identifier[pred_label_indexes] ]
keyword[if] identifier[title] :
identifier[ax] . identifier[set_title] ( identifier[title] , identifier[fontsize] = identifier[title_fontsize] )
keyword[elif] identifier[normalize] :
identifier[ax] . identifier[set_title] ( literal[string] , identifier[fontsize] = identifier[title_fontsize] )
keyword[else] :
identifier[ax] . identifier[set_title] ( literal[string] , identifier[fontsize] = identifier[title_fontsize] )
identifier[image] = identifier[ax] . identifier[imshow] ( identifier[cm] , identifier[interpolation] = literal[string] , identifier[cmap] = identifier[plt] . identifier[cm] . identifier[get_cmap] ( identifier[cmap] ))
identifier[plt] . identifier[colorbar] ( identifier[mappable] = identifier[image] )
identifier[x_tick_marks] = identifier[np] . identifier[arange] ( identifier[len] ( identifier[pred_classes] ))
identifier[y_tick_marks] = identifier[np] . identifier[arange] ( identifier[len] ( identifier[true_classes] ))
identifier[ax] . identifier[set_xticks] ( identifier[x_tick_marks] )
identifier[ax] . identifier[set_xticklabels] ( identifier[pred_classes] , identifier[fontsize] = identifier[text_fontsize] ,
identifier[rotation] = identifier[x_tick_rotation] )
identifier[ax] . identifier[set_yticks] ( identifier[y_tick_marks] )
identifier[ax] . identifier[set_yticklabels] ( identifier[true_classes] , identifier[fontsize] = identifier[text_fontsize] )
identifier[thresh] = identifier[cm] . identifier[max] ()/ literal[int]
keyword[for] identifier[i] , identifier[j] keyword[in] identifier[itertools] . identifier[product] ( identifier[range] ( identifier[cm] . identifier[shape] [ literal[int] ]), identifier[range] ( identifier[cm] . identifier[shape] [ literal[int] ])):
keyword[if] keyword[not] ( identifier[hide_zeros] keyword[and] identifier[cm] [ identifier[i] , identifier[j] ]== literal[int] ):
identifier[ax] . identifier[text] ( identifier[j] , identifier[i] , identifier[cm] [ identifier[i] , identifier[j] ],
identifier[horizontalalignment] = literal[string] ,
identifier[verticalalignment] = literal[string] ,
identifier[fontsize] = identifier[text_fontsize] ,
identifier[color] = literal[string] keyword[if] identifier[cm] [ identifier[i] , identifier[j] ]> identifier[thresh] keyword[else] literal[string] )
identifier[ax] . identifier[set_ylabel] ( literal[string] , identifier[fontsize] = identifier[text_fontsize] )
identifier[ax] . identifier[set_xlabel] ( literal[string] , identifier[fontsize] = identifier[text_fontsize] )
identifier[ax] . identifier[grid] ( literal[string] )
keyword[return] identifier[ax] | def plot_confusion_matrix(y_true, y_pred, labels=None, true_labels=None, pred_labels=None, title=None, normalize=False, hide_zeros=False, x_tick_rotation=0, ax=None, figsize=None, cmap='Blues', title_fontsize='large', text_fontsize='medium'):
"""Generates confusion matrix plot from predictions and true labels
Args:
y_true (array-like, shape (n_samples)):
Ground truth (correct) target values.
y_pred (array-like, shape (n_samples)):
Estimated targets as returned by a classifier.
labels (array-like, shape (n_classes), optional): List of labels to
index the matrix. This may be used to reorder or select a subset
of labels. If none is given, those that appear at least once in
``y_true`` or ``y_pred`` are used in sorted order. (new in v0.2.5)
true_labels (array-like, optional): The true labels to display.
If none is given, then all of the labels are used.
pred_labels (array-like, optional): The predicted labels to display.
If none is given, then all of the labels are used.
title (string, optional): Title of the generated plot. Defaults to
"Confusion Matrix" if `normalize` is False. Else, defaults to
"Normalized Confusion Matrix".
normalize (bool, optional): If True, normalizes the confusion matrix
before plotting. Defaults to False.
hide_zeros (bool, optional): If True, does not plot cells containing a
value of zero. Defaults to False.
x_tick_rotation (int, optional): Rotates x-axis tick labels by the
specified angle. This is useful in cases where there are numerous
categories and the labels overlap each other.
ax (:class:`matplotlib.axes.Axes`, optional): The axes upon which to
plot the curve. If None, the plot is drawn on a new set of axes.
figsize (2-tuple, optional): Tuple denoting figure size of the plot
e.g. (6, 6). Defaults to ``None``.
cmap (string or :class:`matplotlib.colors.Colormap` instance, optional):
Colormap used for plotting the projection. View Matplotlib Colormap
documentation for available options.
https://matplotlib.org/users/colormaps.html
title_fontsize (string or int, optional): Matplotlib-style fontsizes.
Use e.g. "small", "medium", "large" or integer-values. Defaults to
"large".
text_fontsize (string or int, optional): Matplotlib-style fontsizes.
Use e.g. "small", "medium", "large" or integer-values. Defaults to
"medium".
Returns:
ax (:class:`matplotlib.axes.Axes`): The axes on which the plot was
drawn.
Example:
>>> import scikitplot.plotters as skplt
>>> rf = RandomForestClassifier()
>>> rf = rf.fit(X_train, y_train)
>>> y_pred = rf.predict(X_test)
>>> skplt.plot_confusion_matrix(y_test, y_pred, normalize=True)
<matplotlib.axes._subplots.AxesSubplot object at 0x7fe967d64490>
>>> plt.show()
.. image:: _static/examples/plot_confusion_matrix.png
:align: center
:alt: Confusion matrix
"""
if ax is None:
(fig, ax) = plt.subplots(1, 1, figsize=figsize) # depends on [control=['if'], data=['ax']]
cm = confusion_matrix(y_true, y_pred, labels=labels)
if labels is None:
classes = unique_labels(y_true, y_pred) # depends on [control=['if'], data=[]]
else:
classes = np.asarray(labels)
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
cm = np.around(cm, decimals=2)
cm[np.isnan(cm)] = 0.0 # depends on [control=['if'], data=[]]
if true_labels is None:
true_classes = classes # depends on [control=['if'], data=[]]
else:
validate_labels(classes, true_labels, 'true_labels')
true_label_indexes = np.in1d(classes, true_labels)
true_classes = classes[true_label_indexes]
cm = cm[true_label_indexes]
if pred_labels is None:
pred_classes = classes # depends on [control=['if'], data=[]]
else:
validate_labels(classes, pred_labels, 'pred_labels')
pred_label_indexes = np.in1d(classes, pred_labels)
pred_classes = classes[pred_label_indexes]
cm = cm[:, pred_label_indexes]
if title:
ax.set_title(title, fontsize=title_fontsize) # depends on [control=['if'], data=[]]
elif normalize:
ax.set_title('Normalized Confusion Matrix', fontsize=title_fontsize) # depends on [control=['if'], data=[]]
else:
ax.set_title('Confusion Matrix', fontsize=title_fontsize)
image = ax.imshow(cm, interpolation='nearest', cmap=plt.cm.get_cmap(cmap))
plt.colorbar(mappable=image)
x_tick_marks = np.arange(len(pred_classes))
y_tick_marks = np.arange(len(true_classes))
ax.set_xticks(x_tick_marks)
ax.set_xticklabels(pred_classes, fontsize=text_fontsize, rotation=x_tick_rotation)
ax.set_yticks(y_tick_marks)
ax.set_yticklabels(true_classes, fontsize=text_fontsize)
thresh = cm.max() / 2.0
for (i, j) in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
if not (hide_zeros and cm[i, j] == 0):
ax.text(j, i, cm[i, j], horizontalalignment='center', verticalalignment='center', fontsize=text_fontsize, color='white' if cm[i, j] > thresh else 'black') # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
ax.set_ylabel('True label', fontsize=text_fontsize)
ax.set_xlabel('Predicted label', fontsize=text_fontsize)
ax.grid('off')
return ax |
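The docstring above already shows basic usage; a second sketch exercising the `true_labels` subset and `normalize` options (the toy labels are made up):

```python
import matplotlib.pyplot as plt
import scikitplot.plotters as skplt  # import path taken from the docstring example

y_true = [0, 1, 2, 2, 1, 0, 2]  # made-up toy labels
y_pred = [0, 2, 2, 2, 1, 0, 1]

# Draw only the rows for true classes 1 and 2, normalized per row.
skplt.plot_confusion_matrix(y_true, y_pred, true_labels=[1, 2], normalize=True)
plt.show()
```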
def directed_account(transition, direction, mechanisms=False, purviews=False,
allow_neg=False):
"""Return the set of all |CausalLinks| of the specified direction."""
if mechanisms is False:
mechanisms = utils.powerset(transition.mechanism_indices(direction),
nonempty=True)
links = [
transition.find_causal_link(direction, mechanism, purviews=purviews,
allow_neg=allow_neg)
for mechanism in mechanisms]
# Filter out causal links with zero alpha
return DirectedAccount(filter(None, links)) | def function[directed_account, parameter[transition, direction, mechanisms, purviews, allow_neg]]:
constant[Return the set of all |CausalLinks| of the specified direction.]
if compare[name[mechanisms] is constant[False]] begin[:]
variable[mechanisms] assign[=] call[name[utils].powerset, parameter[call[name[transition].mechanism_indices, parameter[name[direction]]]]]
variable[links] assign[=] <ast.ListComp object at 0x7da18ede6950>
return[call[name[DirectedAccount], parameter[call[name[filter], parameter[constant[None], name[links]]]]]] | keyword[def] identifier[directed_account] ( identifier[transition] , identifier[direction] , identifier[mechanisms] = keyword[False] , identifier[purviews] = keyword[False] ,
identifier[allow_neg] = keyword[False] ):
literal[string]
keyword[if] identifier[mechanisms] keyword[is] keyword[False] :
identifier[mechanisms] = identifier[utils] . identifier[powerset] ( identifier[transition] . identifier[mechanism_indices] ( identifier[direction] ),
identifier[nonempty] = keyword[True] )
identifier[links] =[
identifier[transition] . identifier[find_causal_link] ( identifier[direction] , identifier[mechanism] , identifier[purviews] = identifier[purviews] ,
identifier[allow_neg] = identifier[allow_neg] )
keyword[for] identifier[mechanism] keyword[in] identifier[mechanisms] ]
keyword[return] identifier[DirectedAccount] ( identifier[filter] ( keyword[None] , identifier[links] )) | def directed_account(transition, direction, mechanisms=False, purviews=False, allow_neg=False):
"""Return the set of all |CausalLinks| of the specified direction."""
if mechanisms is False:
mechanisms = utils.powerset(transition.mechanism_indices(direction), nonempty=True) # depends on [control=['if'], data=['mechanisms']]
links = [transition.find_causal_link(direction, mechanism, purviews=purviews, allow_neg=allow_neg) for mechanism in mechanisms]
# Filter out causal links with zero alpha
return DirectedAccount(filter(None, links)) |
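The closing `filter(None, links)` assumes that zero-alpha links are falsy (or that `find_causal_link` can return a falsy result); the idiom itself works like this with stand-in values:

```python
# Stand-ins for causal links; falsy entries model zero-alpha links.
links = ['link_a', None, 'link_b', None]
kept = list(filter(None, links))  # drops every falsy element
print(kept)  # ['link_a', 'link_b']
```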
def to_array(self):
"""Returns a 1-dimensional |numpy| |numpy.ndarray| with thirteen
entries: the first six define the start date, the next six define
the end date, and the last entry gives the step size in seconds.
"""
values = numpy.empty(13, dtype=float)
values[:6] = self.firstdate.to_array()
values[6:12] = self.lastdate.to_array()
values[12] = self.stepsize.seconds
return values | def function[to_array, parameter[self]]:
constant[Returns a 1-dimensional |numpy| |numpy.ndarray| with thirteen
entries: the first six define the start date, the next six define
the end date, and the last entry gives the step size in seconds.
]
variable[values] assign[=] call[name[numpy].empty, parameter[constant[13]]]
call[name[values]][<ast.Slice object at 0x7da20e960760>] assign[=] call[name[self].firstdate.to_array, parameter[]]
call[name[values]][<ast.Slice object at 0x7da20e961e40>] assign[=] call[name[self].lastdate.to_array, parameter[]]
call[name[values]][constant[12]] assign[=] name[self].stepsize.seconds
return[name[values]] | keyword[def] identifier[to_array] ( identifier[self] ):
literal[string]
identifier[values] = identifier[numpy] . identifier[empty] ( literal[int] , identifier[dtype] = identifier[float] )
identifier[values] [: literal[int] ]= identifier[self] . identifier[firstdate] . identifier[to_array] ()
identifier[values] [ literal[int] : literal[int] ]= identifier[self] . identifier[lastdate] . identifier[to_array] ()
identifier[values] [ literal[int] ]= identifier[self] . identifier[stepsize] . identifier[seconds]
keyword[return] identifier[values] | def to_array(self):
"""Returns a 1-dimensional |numpy| |numpy.ndarray| with thirteen
entries: the first six define the start date, the next six define
the end date, and the last entry gives the step size in seconds.
"""
values = numpy.empty(13, dtype=float)
values[:6] = self.firstdate.to_array()
values[6:12] = self.lastdate.to_array()
values[12] = self.stepsize.seconds
return values |
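A sketch of unpacking the returned layout; the name `tg` is a hypothetical object exposing the method above:

```python
values = tg.to_array()     # tg: hypothetical timegrid-like object
first_date = values[:6]    # six fields describing the start date
last_date = values[6:12]   # six fields describing the end date
step_seconds = values[12]  # step size in seconds
```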
def update(self, campaign_id, budget, nick=None):
'''xxxxx.xxxxx.campaign.budget.update
===================================
Update the daily budget limit of a campaign.'''
request = TOPRequest('xxxxx.xxxxx.campaign.budget.update')
request['campaign_id'] = campaign_id
request['budget'] = budget
if nick!=None: request['nick'] = nick
self.create(self.execute(request), fields=['success','result','success','result_code','result_message'], models={'result':CampaignBudget})
return self.result | def function[update, parameter[self, campaign_id, budget, nick]]:
constant[xxxxx.xxxxx.campaign.budget.update
===================================
Update the daily budget limit of a campaign.]
variable[request] assign[=] call[name[TOPRequest], parameter[constant[xxxxx.xxxxx.campaign.budget.update]]]
call[name[request]][constant[campaign_id]] assign[=] name[campaign_id]
call[name[request]][constant[budget]] assign[=] name[budget]
if compare[name[nick] not_equal[!=] constant[None]] begin[:]
call[name[request]][constant[nick]] assign[=] name[nick]
call[name[self].create, parameter[call[name[self].execute, parameter[name[request]]]]]
return[name[self].result] | keyword[def] identifier[update] ( identifier[self] , identifier[campaign_id] , identifier[budget] , identifier[nick] = keyword[None] ):
literal[string]
identifier[request] = identifier[TOPRequest] ( literal[string] )
identifier[request] [ literal[string] ]= identifier[campaign_id]
identifier[request] [ literal[string] ]= identifier[budget]
keyword[if] identifier[nick] != keyword[None] : identifier[request] [ literal[string] ]= identifier[nick]
identifier[self] . identifier[create] ( identifier[self] . identifier[execute] ( identifier[request] ), identifier[fields] =[ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ], identifier[models] ={ literal[string] : identifier[CampaignBudget] })
keyword[return] identifier[self] . identifier[result] | def update(self, campaign_id, budget, nick=None):
"""xxxxx.xxxxx.campaign.budget.update
===================================
Update the daily budget limit of a campaign."""
request = TOPRequest('xxxxx.xxxxx.campaign.budget.update')
request['campaign_id'] = campaign_id
request['budget'] = budget
if nick != None:
request['nick'] = nick # depends on [control=['if'], data=['nick']]
self.create(self.execute(request), fields=['success', 'result', 'success', 'result_code', 'result_message'], models={'result': CampaignBudget})
return self.result |
def authorized(validator):
"""Decorate a RequestHandler or method to require that a request is authorized
If decorating a coroutine, make sure the coroutine decorator is first,
e.g.::
class Handler(tornado.web.RequestHandler):
@authorized(validator)
@coroutine
def get(self):
pass
:param validator: a coroutine that will authorize the user associated with the token and return True/False
"""
def _authorized_decorator(method):
@gen.coroutine
def wrapper(self, *args, **kwargs):
token = _get_token(self.request)
authorized = yield validator(token, **kwargs)
if not authorized:
message = 'Token is not authorised for this action: {}'.format(token)
logging.warning(message)
raise HTTPError(403, message)
result = method(self, *args, **kwargs)
if isinstance(result, Future):
result = yield result
raise gen.Return(result)
return wrapper
return _authorized_decorator | def function[authorized, parameter[validator]]:
constant[Decorate a RequestHandler or method to require that a request is authorized
If decorating a coroutine, make sure the coroutine decorator is first,
e.g.::
class Handler(tornado.web.RequestHandler):
@authorized(validator)
@coroutine
def get(self):
pass
:param validator: a coroutine that will authorize the user associated with the token and return True/False
]
def function[_authorized_decorator, parameter[method]]:
def function[wrapper, parameter[self]]:
variable[token] assign[=] call[name[_get_token], parameter[name[self].request]]
variable[authorized] assign[=] <ast.Yield object at 0x7da1b09ec2b0>
if <ast.UnaryOp object at 0x7da1b09ee590> begin[:]
variable[message] assign[=] call[constant[Token is not authorised for this action: {}].format, parameter[name[token]]]
call[name[logging].warning, parameter[name[message]]]
<ast.Raise object at 0x7da1b09ede70>
variable[result] assign[=] call[name[method], parameter[name[self], <ast.Starred object at 0x7da1b09eee00>]]
if call[name[isinstance], parameter[name[result], name[Future]]] begin[:]
variable[result] assign[=] <ast.Yield object at 0x7da1b09ec490>
<ast.Raise object at 0x7da1b09d1b10>
return[name[wrapper]]
return[name[_authorized_decorator]] | keyword[def] identifier[authorized] ( identifier[validator] ):
literal[string]
keyword[def] identifier[_authorized_decorator] ( identifier[method] ):
@ identifier[gen] . identifier[coroutine]
keyword[def] identifier[wrapper] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ):
identifier[token] = identifier[_get_token] ( identifier[self] . identifier[request] )
identifier[authorized] = keyword[yield] identifier[validator] ( identifier[token] ,** identifier[kwargs] )
keyword[if] keyword[not] identifier[authorized] :
identifier[message] = literal[string] . identifier[format] ( identifier[token] )
identifier[logging] . identifier[warning] ( identifier[message] )
keyword[raise] identifier[HTTPError] ( literal[int] , identifier[message] )
identifier[result] = identifier[method] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] )
keyword[if] identifier[isinstance] ( identifier[result] , identifier[Future] ):
identifier[result] = keyword[yield] identifier[result]
keyword[raise] identifier[gen] . identifier[Return] ( identifier[result] )
keyword[return] identifier[wrapper]
keyword[return] identifier[_authorized_decorator] | def authorized(validator):
"""Decorate a RequestHandler or method to require that a request is authorized
If decorating a coroutine, make sure the coroutine decorator is first,
e.g.::
class Handler(tornado.web.RequestHandler):
@authorized(validator)
@coroutine
def get(self):
pass
:param validator: a coroutine that will authorize the user associated with the token and return True/False
"""
def _authorized_decorator(method):
@gen.coroutine
def wrapper(self, *args, **kwargs):
token = _get_token(self.request)
authorized = (yield validator(token, **kwargs))
if not authorized:
message = 'Token is not authorised for this action: {}'.format(token)
logging.warning(message)
raise HTTPError(403, message) # depends on [control=['if'], data=[]]
result = method(self, *args, **kwargs)
if isinstance(result, Future):
result = (yield result) # depends on [control=['if'], data=[]]
raise gen.Return(result)
return wrapper
return _authorized_decorator |
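The docstring only requires that `validator` be a coroutine returning True/False; a minimal sketch of one (the token set and handler body are invented):

```python
import tornado.web
from tornado import gen

TRUSTED_TOKENS = {'s3cret'}  # hypothetical token store

@gen.coroutine
def validator(token, **kwargs):
    # A real validator would consult a database or auth service here.
    raise gen.Return(token in TRUSTED_TOKENS)

class Handler(tornado.web.RequestHandler):
    @authorized(validator)
    @gen.coroutine
    def get(self):
        self.write('ok')
```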
def prepend_global_admin_user(other_users, server):
"""
When making lists of administrative users -- e.g., seeding a new server --
it's useful to put the credentials supplied on the command line at the head
of the queue.
"""
cred0 = get_global_login_user(server, "admin")
if cred0 and cred0["username"] and cred0["password"]:
log_verbose("Seeding : CRED0 to the front of the line!")
return [cred0] + other_users if other_users else [cred0]
else:
return other_users | def function[prepend_global_admin_user, parameter[other_users, server]]:
constant[
When making lists of administrative users -- e.g., seeding a new server --
it's useful to put the credentials supplied on the command line at the head
of the queue.
]
variable[cred0] assign[=] call[name[get_global_login_user], parameter[name[server], constant[admin]]]
if <ast.BoolOp object at 0x7da1b28440d0> begin[:]
call[name[log_verbose], parameter[constant[Seeding : CRED0 to the front of the line!]]]
return[<ast.IfExp object at 0x7da1b2846920>] | keyword[def] identifier[prepend_global_admin_user] ( identifier[other_users] , identifier[server] ):
literal[string]
identifier[cred0] = identifier[get_global_login_user] ( identifier[server] , literal[string] )
keyword[if] identifier[cred0] keyword[and] identifier[cred0] [ literal[string] ] keyword[and] identifier[cred0] [ literal[string] ]:
identifier[log_verbose] ( literal[string] )
keyword[return] [ identifier[cred0] ]+ identifier[other_users] keyword[if] identifier[other_users] keyword[else] [ identifier[cred0] ]
keyword[else] :
keyword[return] identifier[other_users] | def prepend_global_admin_user(other_users, server):
"""
When making lists of administrative users -- e.g., seeding a new server --
it's useful to put the credentials supplied on the command line at the head
of the queue.
"""
cred0 = get_global_login_user(server, 'admin')
if cred0 and cred0['username'] and cred0['password']:
log_verbose('Seeding : CRED0 to the front of the line!')
return [cred0] + other_users if other_users else [cred0] # depends on [control=['if'], data=[]]
else:
return other_users |
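A quick illustration of the ordering with made-up credential dicts:

```python
cred0 = {'username': 'admin', 'password': 'hunter2'}   # made-up credentials
other_users = [{'username': 'svc', 'password': 'pw'}]

# Mirrors the return expression above: cred0 lands at the head of the list.
seeded = [cred0] + other_users if other_users else [cred0]
print(seeded[0]['username'])  # admin
```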
def configure_db(app):
"""
0.10 is the first version of ARA that ships with a stable database schema.
We can identify a database that originates from before this by checking if
there is an alembic revision available.
If there is no alembic revision available, assume we are running the first
revision which contains the latest state of the database prior to this.
"""
models.db.init_app(app)
log = logging.getLogger('ara.webapp.configure_db')
log.debug('Setting up database...')
if app.config.get('ARA_AUTOCREATE_DATABASE'):
with app.app_context():
migrations = app.config['DB_MIGRATIONS']
flask_migrate.Migrate(app, models.db, directory=migrations)
config = app.extensions['migrate'].migrate.get_config(migrations)
# Verify if the database tables have been created at all
inspector = Inspector.from_engine(models.db.engine)
if len(inspector.get_table_names()) == 0:
log.info('Initializing new DB from scratch')
flask_migrate.upgrade(directory=migrations)
# Get current alembic head revision
script = ScriptDirectory.from_config(config)
head = script.get_current_head()
# Get current revision, if available
connection = models.db.engine.connect()
context = MigrationContext.configure(connection)
current = context.get_current_revision()
if not current:
log.info('Unstable DB schema, stamping original revision')
flask_migrate.stamp(directory=migrations,
revision='da9459a1f71c')
if head != current:
log.info('DB schema out of date, upgrading')
flask_migrate.upgrade(directory=migrations) | def function[configure_db, parameter[app]]:
constant[
0.10 is the first version of ARA that ships with a stable database schema.
We can identify a database that originates from before this by checking if
there is an alembic revision available.
If there is no alembic revision available, assume we are running the first
revision which contains the latest state of the database prior to this.
]
call[name[models].db.init_app, parameter[name[app]]]
variable[log] assign[=] call[name[logging].getLogger, parameter[constant[ara.webapp.configure_db]]]
call[name[log].debug, parameter[constant[Setting up database...]]]
if call[name[app].config.get, parameter[constant[ARA_AUTOCREATE_DATABASE]]] begin[:]
with call[name[app].app_context, parameter[]] begin[:]
variable[migrations] assign[=] call[name[app].config][constant[DB_MIGRATIONS]]
call[name[flask_migrate].Migrate, parameter[name[app], name[models].db]]
variable[config] assign[=] call[call[name[app].extensions][constant[migrate]].migrate.get_config, parameter[name[migrations]]]
variable[inspector] assign[=] call[name[Inspector].from_engine, parameter[name[models].db.engine]]
if compare[call[name[len], parameter[call[name[inspector].get_table_names, parameter[]]]] equal[==] constant[0]] begin[:]
call[name[log].info, parameter[constant[Initializing new DB from scratch]]]
call[name[flask_migrate].upgrade, parameter[]]
variable[script] assign[=] call[name[ScriptDirectory].from_config, parameter[name[config]]]
variable[head] assign[=] call[name[script].get_current_head, parameter[]]
variable[connection] assign[=] call[name[models].db.engine.connect, parameter[]]
variable[context] assign[=] call[name[MigrationContext].configure, parameter[name[connection]]]
variable[current] assign[=] call[name[context].get_current_revision, parameter[]]
if <ast.UnaryOp object at 0x7da1b17b7550> begin[:]
call[name[log].info, parameter[constant[Unstable DB schema, stamping original revision]]]
call[name[flask_migrate].stamp, parameter[]]
if compare[name[head] not_equal[!=] name[current]] begin[:]
call[name[log].info, parameter[constant[DB schema out of date, upgrading]]]
call[name[flask_migrate].upgrade, parameter[]] | keyword[def] identifier[configure_db] ( identifier[app] ):
literal[string]
identifier[models] . identifier[db] . identifier[init_app] ( identifier[app] )
identifier[log] = identifier[logging] . identifier[getLogger] ( literal[string] )
identifier[log] . identifier[debug] ( literal[string] )
keyword[if] identifier[app] . identifier[config] . identifier[get] ( literal[string] ):
keyword[with] identifier[app] . identifier[app_context] ():
identifier[migrations] = identifier[app] . identifier[config] [ literal[string] ]
identifier[flask_migrate] . identifier[Migrate] ( identifier[app] , identifier[models] . identifier[db] , identifier[directory] = identifier[migrations] )
identifier[config] = identifier[app] . identifier[extensions] [ literal[string] ]. identifier[migrate] . identifier[get_config] ( identifier[migrations] )
identifier[inspector] = identifier[Inspector] . identifier[from_engine] ( identifier[models] . identifier[db] . identifier[engine] )
keyword[if] identifier[len] ( identifier[inspector] . identifier[get_table_names] ())== literal[int] :
identifier[log] . identifier[info] ( literal[string] )
identifier[flask_migrate] . identifier[upgrade] ( identifier[directory] = identifier[migrations] )
identifier[script] = identifier[ScriptDirectory] . identifier[from_config] ( identifier[config] )
identifier[head] = identifier[script] . identifier[get_current_head] ()
identifier[connection] = identifier[models] . identifier[db] . identifier[engine] . identifier[connect] ()
identifier[context] = identifier[MigrationContext] . identifier[configure] ( identifier[connection] )
identifier[current] = identifier[context] . identifier[get_current_revision] ()
keyword[if] keyword[not] identifier[current] :
identifier[log] . identifier[info] ( literal[string] )
identifier[flask_migrate] . identifier[stamp] ( identifier[directory] = identifier[migrations] ,
identifier[revision] = literal[string] )
keyword[if] identifier[head] != identifier[current] :
identifier[log] . identifier[info] ( literal[string] )
identifier[flask_migrate] . identifier[upgrade] ( identifier[directory] = identifier[migrations] ) | def configure_db(app):
"""
0.10 is the first version of ARA that ships with a stable database schema.
We can identify a database that originates from before this by checking if
there is an alembic revision available.
If there is no alembic revision available, assume we are running the first
revision which contains the latest state of the database prior to this.
"""
models.db.init_app(app)
log = logging.getLogger('ara.webapp.configure_db')
log.debug('Setting up database...')
if app.config.get('ARA_AUTOCREATE_DATABASE'):
with app.app_context():
migrations = app.config['DB_MIGRATIONS']
flask_migrate.Migrate(app, models.db, directory=migrations)
config = app.extensions['migrate'].migrate.get_config(migrations)
# Verify if the database tables have been created at all
inspector = Inspector.from_engine(models.db.engine)
if len(inspector.get_table_names()) == 0:
log.info('Initializing new DB from scratch')
flask_migrate.upgrade(directory=migrations) # depends on [control=['if'], data=[]]
# Get current alembic head revision
script = ScriptDirectory.from_config(config)
head = script.get_current_head()
# Get current revision, if available
connection = models.db.engine.connect()
context = MigrationContext.configure(connection)
current = context.get_current_revision()
if not current:
log.info('Unstable DB schema, stamping original revision')
flask_migrate.stamp(directory=migrations, revision='da9459a1f71c') # depends on [control=['if'], data=[]]
if head != current:
log.info('DB schema out of date, upgrading')
flask_migrate.upgrade(directory=migrations) # depends on [control=['if'], data=[]] # depends on [control=['with'], data=[]] # depends on [control=['if'], data=[]] |
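Outside Flask-Migrate, the same head-versus-current comparison can be sketched directly with alembic; the engine URL and migrations path below are placeholders:

```python
from alembic.config import Config
from alembic.migration import MigrationContext
from alembic.script import ScriptDirectory
from sqlalchemy import create_engine

config = Config()
config.set_main_option('script_location', 'migrations')  # placeholder path
head = ScriptDirectory.from_config(config).get_current_head()

engine = create_engine('sqlite:///example.db')  # placeholder URL
with engine.connect() as connection:
    current = MigrationContext.configure(connection).get_current_revision()

if current is None:
    print('unstamped database')
elif current != head:
    print('schema out of date, upgrade needed')
```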
def config_to_string(config):
"""Nice output string for the config, which is a nested defaultdict.
Args:
config (defaultdict(defaultdict)): The configuration information.
Returns:
str: A human-readable output string detailing the contents of the config.
"""
output = []
for section, section_content in config.items():
output.append("[{}]".format(section))
for option, option_value in section_content.items():
output.append("{} = {}".format(option, option_value))
return "\n".join(output) | def function[config_to_string, parameter[config]]:
constant[Nice output string for the config, which is a nested defaultdict.
Args:
config (defaultdict(defaultdict)): The configuration information.
Returns:
str: A human-readable output string detailing the contents of the config.
]
variable[output] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da1b1365db0>, <ast.Name object at 0x7da1b1366590>]]] in starred[call[name[config].items, parameter[]]] begin[:]
call[name[output].append, parameter[call[constant[[{}]].format, parameter[name[section]]]]]
for taget[tuple[[<ast.Name object at 0x7da1b1366770>, <ast.Name object at 0x7da1b13647c0>]]] in starred[call[name[section_content].items, parameter[]]] begin[:]
call[name[output].append, parameter[call[constant[{} = {}].format, parameter[name[option], name[option_value]]]]]
return[call[constant[
].join, parameter[name[output]]]] | keyword[def] identifier[config_to_string] ( identifier[config] ):
literal[string]
identifier[output] =[]
keyword[for] identifier[section] , identifier[section_content] keyword[in] identifier[config] . identifier[items] ():
identifier[output] . identifier[append] ( literal[string] . identifier[format] ( identifier[section] ))
keyword[for] identifier[option] , identifier[option_value] keyword[in] identifier[section_content] . identifier[items] ():
identifier[output] . identifier[append] ( literal[string] . identifier[format] ( identifier[option] , identifier[option_value] ))
keyword[return] literal[string] . identifier[join] ( identifier[output] ) | def config_to_string(config):
"""Nice output string for the config, which is a nested defaultdict.
Args:
config (defaultdict(defaultdict)): The configuration information.
Returns:
str: A human-readable output string detailing the contents of the config.
"""
output = []
for (section, section_content) in config.items():
output.append('[{}]'.format(section))
for (option, option_value) in section_content.items():
output.append('{} = {}'.format(option, option_value)) # depends on [control=['for'], data=[]] # depends on [control=['for'], data=[]]
return '\n'.join(output) |
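For example, with the nested mapping the docstring describes:

```python
from collections import defaultdict

config = defaultdict(dict)  # any nested mapping with .items() works
config['server']['host'] = 'localhost'
config['server']['port'] = 8080

print(config_to_string(config))
# [server]
# host = localhost
# port = 8080
```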
def map_across_blocks(self, map_func):
"""Applies `map_func` to every partition.
Args:
map_func: The function to apply.
Returns:
A new BaseFrameManager object, the type of object that called this.
"""
preprocessed_map_func = self.preprocess_func(map_func)
new_partitions = np.array(
[
[part.apply(preprocessed_map_func) for part in row_of_parts]
for row_of_parts in self.partitions
]
)
return self.__constructor__(new_partitions) | def function[map_across_blocks, parameter[self, map_func]]:
constant[Applies `map_func` to every partition.
Args:
map_func: The function to apply.
Returns:
A new BaseFrameManager object, the type of object that called this.
]
variable[preprocessed_map_func] assign[=] call[name[self].preprocess_func, parameter[name[map_func]]]
variable[new_partitions] assign[=] call[name[np].array, parameter[<ast.ListComp object at 0x7da2046237f0>]]
return[call[name[self].__constructor__, parameter[name[new_partitions]]]] | keyword[def] identifier[map_across_blocks] ( identifier[self] , identifier[map_func] ):
literal[string]
identifier[preprocessed_map_func] = identifier[self] . identifier[preprocess_func] ( identifier[map_func] )
identifier[new_partitions] = identifier[np] . identifier[array] (
[
[ identifier[part] . identifier[apply] ( identifier[preprocessed_map_func] ) keyword[for] identifier[part] keyword[in] identifier[row_of_parts] ]
keyword[for] identifier[row_of_parts] keyword[in] identifier[self] . identifier[partitions]
]
)
keyword[return] identifier[self] . identifier[__constructor__] ( identifier[new_partitions] ) | def map_across_blocks(self, map_func):
"""Applies `map_func` to every partition.
Args:
map_func: The function to apply.
Returns:
A new BaseFrameManager object, the type of object that called this.
"""
preprocessed_map_func = self.preprocess_func(map_func)
new_partitions = np.array([[part.apply(preprocessed_map_func) for part in row_of_parts] for row_of_parts in self.partitions])
return self.__constructor__(new_partitions) |
def translate_url(context, language):
"""
Translates the current URL for the given language code, eg:
{% translate_url de %}
"""
try:
request = context["request"]
except KeyError:
return ""
view = resolve(request.path)
current_language = translation.get_language()
translation.activate(language)
try:
url = reverse(view.func, args=view.args, kwargs=view.kwargs)
except NoReverseMatch:
try:
url_name = (view.url_name if not view.namespace
else '%s:%s' % (view.namespace, view.url_name))
url = reverse(url_name, args=view.args, kwargs=view.kwargs)
except NoReverseMatch:
url_name = "admin:" + view.url_name
url = reverse(url_name, args=view.args, kwargs=view.kwargs)
translation.activate(current_language)
if context['request'].META["QUERY_STRING"]:
url += "?" + context['request'].META["QUERY_STRING"]
return url | def function[translate_url, parameter[context, language]]:
constant[
Translates the current URL for the given language code, eg:
{% translate_url de %}
]
<ast.Try object at 0x7da18bc70fd0>
variable[view] assign[=] call[name[resolve], parameter[name[request].path]]
variable[current_language] assign[=] call[name[translation].get_language, parameter[]]
call[name[translation].activate, parameter[name[language]]]
<ast.Try object at 0x7da18c4ccdf0>
call[name[translation].activate, parameter[name[current_language]]]
if call[call[name[context]][constant[request]].META][constant[QUERY_STRING]] begin[:]
<ast.AugAssign object at 0x7da18c4ccbb0>
return[name[url]] | keyword[def] identifier[translate_url] ( identifier[context] , identifier[language] ):
literal[string]
keyword[try] :
identifier[request] = identifier[context] [ literal[string] ]
keyword[except] identifier[KeyError] :
keyword[return] literal[string]
identifier[view] = identifier[resolve] ( identifier[request] . identifier[path] )
identifier[current_language] = identifier[translation] . identifier[get_language] ()
identifier[translation] . identifier[activate] ( identifier[language] )
keyword[try] :
identifier[url] = identifier[reverse] ( identifier[view] . identifier[func] , identifier[args] = identifier[view] . identifier[args] , identifier[kwargs] = identifier[view] . identifier[kwargs] )
keyword[except] identifier[NoReverseMatch] :
keyword[try] :
identifier[url_name] =( identifier[view] . identifier[url_name] keyword[if] keyword[not] identifier[view] . identifier[namespace]
keyword[else] literal[string] %( identifier[view] . identifier[namespace] , identifier[view] . identifier[url_name] ))
identifier[url] = identifier[reverse] ( identifier[url_name] , identifier[args] = identifier[view] . identifier[args] , identifier[kwargs] = identifier[view] . identifier[kwargs] )
keyword[except] identifier[NoReverseMatch] :
identifier[url_name] = literal[string] + identifier[view] . identifier[url_name]
identifier[url] = identifier[reverse] ( identifier[url_name] , identifier[args] = identifier[view] . identifier[args] , identifier[kwargs] = identifier[view] . identifier[kwargs] )
identifier[translation] . identifier[activate] ( identifier[current_language] )
keyword[if] identifier[context] [ literal[string] ]. identifier[META] [ literal[string] ]:
identifier[url] += literal[string] + identifier[context] [ literal[string] ]. identifier[META] [ literal[string] ]
keyword[return] identifier[url] | def translate_url(context, language):
"""
    Translates the current URL for the given language code, e.g.:
{% translate_url de %}
"""
try:
request = context['request'] # depends on [control=['try'], data=[]]
except KeyError:
return '' # depends on [control=['except'], data=[]]
view = resolve(request.path)
current_language = translation.get_language()
translation.activate(language)
try:
url = reverse(view.func, args=view.args, kwargs=view.kwargs) # depends on [control=['try'], data=[]]
except NoReverseMatch:
try:
url_name = view.url_name if not view.namespace else '%s:%s' % (view.namespace, view.url_name)
url = reverse(url_name, args=view.args, kwargs=view.kwargs) # depends on [control=['try'], data=[]]
except NoReverseMatch:
url_name = 'admin:' + view.url_name
url = reverse(url_name, args=view.args, kwargs=view.kwargs) # depends on [control=['except'], data=[]] # depends on [control=['except'], data=[]]
translation.activate(current_language)
if context['request'].META['QUERY_STRING']:
url += '?' + context['request'].META['QUERY_STRING'] # depends on [control=['if'], data=[]]
return url |
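The tag above hinges on an activate/translate/restore dance around django.utils.translation; this sketch isolates that pattern with a fake translation object (an illustrative stand-in, since running the real tag needs a Django project). Unlike the tag, the sketch restores the language in a finally block, which is the safer variant.

class _Translation:
    # Illustrative stand-in for django.utils.translation.
    def __init__(self):
        self._lang = "en"

    def get_language(self):
        return self._lang

    def activate(self, lang):
        self._lang = lang

translation = _Translation()

def in_language(lang, func):
    # Switch the active language, run func, then restore the previous one.
    previous = translation.get_language()
    translation.activate(lang)
    try:
        return func()
    finally:
        translation.activate(previous)

print(in_language("de", translation.get_language))  # de
print(translation.get_language())                   # en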
def WriteClientActionRequests(self, requests):
"""Writes messages that should go to the client to the db."""
for r in requests:
req_dict = self.flow_requests.get((r.client_id, r.flow_id), {})
if r.request_id not in req_dict:
      request_keys = [(r.client_id, r.flow_id, r.request_id) for r in requests]
raise db.AtLeastOneUnknownRequestError(request_keys)
for r in requests:
request_key = (r.client_id, r.flow_id, r.request_id)
self.client_action_requests[request_key] = r | def function[WriteClientActionRequests, parameter[self, requests]]:
constant[Writes messages destined for the client to the db.]
for taget[name[r]] in starred[name[requests]] begin[:]
variable[req_dict] assign[=] call[name[self].flow_requests.get, parameter[tuple[[<ast.Attribute object at 0x7da1b1b86b90>, <ast.Attribute object at 0x7da1b1b84e50>]], dictionary[[], []]]]
if compare[name[r].request_id <ast.NotIn object at 0x7da2590d7190> name[req_dict]] begin[:]
variable[request_keys] assign[=] <ast.ListComp object at 0x7da1b1b874f0>
<ast.Raise object at 0x7da1b1b0f790>
for taget[name[r]] in starred[name[requests]] begin[:]
variable[request_key] assign[=] tuple[[<ast.Attribute object at 0x7da1b1b0f7f0>, <ast.Attribute object at 0x7da1b1b0db70>, <ast.Attribute object at 0x7da1b1b84910>]]
call[name[self].client_action_requests][name[request_key]] assign[=] name[r] | keyword[def] identifier[WriteClientActionRequests] ( identifier[self] , identifier[requests] ):
literal[string]
keyword[for] identifier[r] keyword[in] identifier[requests] :
identifier[req_dict] = identifier[self] . identifier[flow_requests] . identifier[get] (( identifier[r] . identifier[client_id] , identifier[r] . identifier[flow_id] ),{})
keyword[if] identifier[r] . identifier[request_id] keyword[not] keyword[in] identifier[req_dict] :
identifier[request_keys] =[( identifier[r] . identifier[client_id] , identifier[r] . identifier[flow_id] , identifier[r] . identifier[request_id] ) keyword[for] identifier[r] keyword[in] identifier[requests]
]
keyword[raise] identifier[db] . identifier[AtLeastOneUnknownRequestError] ( identifier[request_keys] )
keyword[for] identifier[r] keyword[in] identifier[requests] :
identifier[request_key] =( identifier[r] . identifier[client_id] , identifier[r] . identifier[flow_id] , identifier[r] . identifier[request_id] )
identifier[self] . identifier[client_action_requests] [ identifier[request_key] ]= identifier[r] | def WriteClientActionRequests(self, requests):
"""Writes messages that should go to the client to the db."""
for r in requests:
req_dict = self.flow_requests.get((r.client_id, r.flow_id), {})
if r.request_id not in req_dict:
request_keys = [(r.client_id, r.flow_id, r.request_id) for r in requests]
raise db.AtLeastOneUnknownRequestError(request_keys) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['r']]
for r in requests:
request_key = (r.client_id, r.flow_id, r.request_id)
self.client_action_requests[request_key] = r # depends on [control=['for'], data=['r']] |
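A self-contained sketch of the validate-then-write batch semantics above, with plain dicts and a namedtuple standing in for the real db types; all names here are illustrative. The comprehension variable is renamed to q to avoid shadowing the loop variable, a small readability tweak over the original.

import collections

Req = collections.namedtuple("Req", "client_id flow_id request_id")

class AtLeastOneUnknownRequestError(Exception):
    pass

class FakeDB:
    def __init__(self):
        self.flow_requests = {}          # (client_id, flow_id) -> {request_id: req}
        self.client_action_requests = {}

    def write_client_action_requests(self, requests):
        # First pass: reject the whole batch if any request id is unknown.
        for r in requests:
            known = self.flow_requests.get((r.client_id, r.flow_id), {})
            if r.request_id not in known:
                raise AtLeastOneUnknownRequestError(
                    [(q.client_id, q.flow_id, q.request_id) for q in requests])
        # Second pass: store every request under its full key.
        for r in requests:
            self.client_action_requests[(r.client_id, r.flow_id, r.request_id)] = r

db = FakeDB()
db.flow_requests[("C.1", "F.1")] = {1: object()}
db.write_client_action_requests([Req("C.1", "F.1", 1)])      # accepted
try:
    db.write_client_action_requests([Req("C.1", "F.1", 2)])  # unknown id -> rejected
except AtLeastOneUnknownRequestError as e:
    print("rejected:", e)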
def registerEventHandlers(self):
"""
Registers the up and down handlers.
        Also schedules a function every 1/60th of a second, causing pyglet to redraw your window at 60 fps.
"""
# Crouch/fly down
self.peng.keybinds.add(self.peng.cfg["controls.controls.crouch"],"peng3d:actor.%s.player.controls.crouch"%self.actor.uuid,self.on_crouch_down,False)
# Jump/fly up
self.peng.keybinds.add(self.peng.cfg["controls.controls.jump"],"peng3d:actor.%s.player.controls.jump"%self.actor.uuid,self.on_jump_down,False)
pyglet.clock.schedule_interval(self.update,1.0/60) | def function[registerEventHandlers, parameter[self]]:
constant[
Registers the up and down handlers.
    Also schedules a function every 1/60th of a second, causing pyglet to redraw your window at 60 fps.
]
call[name[self].peng.keybinds.add, parameter[call[name[self].peng.cfg][constant[controls.controls.crouch]], binary_operation[constant[peng3d:actor.%s.player.controls.crouch] <ast.Mod object at 0x7da2590d6920> name[self].actor.uuid], name[self].on_crouch_down, constant[False]]]
call[name[self].peng.keybinds.add, parameter[call[name[self].peng.cfg][constant[controls.controls.jump]], binary_operation[constant[peng3d:actor.%s.player.controls.jump] <ast.Mod object at 0x7da2590d6920> name[self].actor.uuid], name[self].on_jump_down, constant[False]]]
call[name[pyglet].clock.schedule_interval, parameter[name[self].update, binary_operation[constant[1.0] / constant[60]]]] | keyword[def] identifier[registerEventHandlers] ( identifier[self] ):
literal[string]
identifier[self] . identifier[peng] . identifier[keybinds] . identifier[add] ( identifier[self] . identifier[peng] . identifier[cfg] [ literal[string] ], literal[string] % identifier[self] . identifier[actor] . identifier[uuid] , identifier[self] . identifier[on_crouch_down] , keyword[False] )
identifier[self] . identifier[peng] . identifier[keybinds] . identifier[add] ( identifier[self] . identifier[peng] . identifier[cfg] [ literal[string] ], literal[string] % identifier[self] . identifier[actor] . identifier[uuid] , identifier[self] . identifier[on_jump_down] , keyword[False] )
identifier[pyglet] . identifier[clock] . identifier[schedule_interval] ( identifier[self] . identifier[update] , literal[int] / literal[int] ) | def registerEventHandlers(self):
"""
Registers the up and down handlers.
    Also schedules a function every 1/60th of a second, causing pyglet to redraw your window at 60 fps.
"""
# Crouch/fly down
self.peng.keybinds.add(self.peng.cfg['controls.controls.crouch'], 'peng3d:actor.%s.player.controls.crouch' % self.actor.uuid, self.on_crouch_down, False)
# Jump/fly up
self.peng.keybinds.add(self.peng.cfg['controls.controls.jump'], 'peng3d:actor.%s.player.controls.jump' % self.actor.uuid, self.on_jump_down, False)
pyglet.clock.schedule_interval(self.update, 1.0 / 60) |
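A tiny runnable sketch of the scheduling half of the handler above, assuming pyglet is installed; it shows that pyglet.clock.schedule_interval delivers the elapsed dt to the callback roughly 60 times per second.

import pyglet

def update(dt):
    # dt is the elapsed wall-clock time since the previous call.
    print("tick after %.4f s" % dt)

pyglet.clock.schedule_interval(update, 1.0 / 60)
pyglet.app.run()  # enters the event loop; interrupt to stop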
def get_opener(self, name):
"""Retrieve an opener for the given protocol
        :param name: name of the opener to retrieve
:type name: string
:raises NoOpenerError: if no opener has been registered of that name
"""
if name not in self.registry:
raise NoOpenerError("No opener for %s" % name)
index = self.registry[name]
return self.openers[index] | def function[get_opener, parameter[self, name]]:
constant[Retrieve an opener for the given protocol
    :param name: name of the opener to retrieve
:type name: string
:raises NoOpenerError: if no opener has been registered of that name
]
if compare[name[name] <ast.NotIn object at 0x7da2590d7190> name[self].registry] begin[:]
<ast.Raise object at 0x7da1b1f45030>
variable[index] assign[=] call[name[self].registry][name[name]]
return[call[name[self].openers][name[index]]] | keyword[def] identifier[get_opener] ( identifier[self] , identifier[name] ):
literal[string]
keyword[if] identifier[name] keyword[not] keyword[in] identifier[self] . identifier[registry] :
keyword[raise] identifier[NoOpenerError] ( literal[string] % identifier[name] )
identifier[index] = identifier[self] . identifier[registry] [ identifier[name] ]
keyword[return] identifier[self] . identifier[openers] [ identifier[index] ] | def get_opener(self, name):
"""Retrieve an opener for the given protocol
        :param name: name of the opener to retrieve
:type name: string
:raises NoOpenerError: if no opener has been registered of that name
"""
if name not in self.registry:
raise NoOpenerError('No opener for %s' % name) # depends on [control=['if'], data=['name']]
index = self.registry[name]
return self.openers[index] |
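A minimal registry sketch matching the lookup above; NoOpenerError and the register method are assumptions about the surrounding class, shown only to make the index-based indirection concrete.

class NoOpenerError(Exception):
    pass

class OpenerRegistry:
    def __init__(self):
        self.registry = {}  # protocol name -> index into self.openers
        self.openers = []

    def register(self, name, opener):
        self.registry[name] = len(self.openers)
        self.openers.append(opener)

    def get_opener(self, name):
        if name not in self.registry:
            raise NoOpenerError("No opener for %s" % name)
        return self.openers[self.registry[name]]

reg = OpenerRegistry()
reg.register("http", "http-opener")
print(reg.get_opener("http"))  # http-opener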
def assemble_chain(leaf, store):
"""Assemble the trust chain.
    This assembly method uses the certificates' subject and issuer common names and
should be used for informational purposes only. It does *not*
cryptographically verify the chain!
:param OpenSSL.crypto.X509 leaf: The leaf certificate from which to build the
chain.
:param list[OpenSSL.crypto.X509] store: A list of certificates to use to
resolve the chain.
:return: The trust chain.
:rtype: list[OpenSSL.crypto.X509]
"""
store_dict = {}
for cert in store:
store_dict[cert.get_subject().CN] = cert
chain = [leaf]
current = leaf
try:
while current.get_issuer().CN != current.get_subject().CN:
chain.append(store_dict[current.get_issuer().CN])
current = store_dict[current.get_issuer().CN]
except KeyError:
invalid = crypto.X509()
patch_certificate(invalid)
invalid.set_subject(current.get_issuer())
chain.append(invalid)
chain.reverse()
return chain | def function[assemble_chain, parameter[leaf, store]]:
constant[Assemble the trust chain.
    This assembly method uses the certificates' subject and issuer common names and
should be used for informational purposes only. It does *not*
cryptographically verify the chain!
:param OpenSSL.crypto.X509 leaf: The leaf certificate from which to build the
chain.
:param list[OpenSSL.crypto.X509] store: A list of certificates to use to
resolve the chain.
:return: The trust chain.
:rtype: list[OpenSSL.crypto.X509]
]
variable[store_dict] assign[=] dictionary[[], []]
for taget[name[cert]] in starred[name[store]] begin[:]
call[name[store_dict]][call[name[cert].get_subject, parameter[]].CN] assign[=] name[cert]
variable[chain] assign[=] list[[<ast.Name object at 0x7da1b1ddb610>]]
variable[current] assign[=] name[leaf]
<ast.Try object at 0x7da1b1ddb3d0>
call[name[chain].reverse, parameter[]]
return[name[chain]] | keyword[def] identifier[assemble_chain] ( identifier[leaf] , identifier[store] ):
literal[string]
identifier[store_dict] ={}
keyword[for] identifier[cert] keyword[in] identifier[store] :
identifier[store_dict] [ identifier[cert] . identifier[get_subject] (). identifier[CN] ]= identifier[cert]
identifier[chain] =[ identifier[leaf] ]
identifier[current] = identifier[leaf]
keyword[try] :
keyword[while] identifier[current] . identifier[get_issuer] (). identifier[CN] != identifier[current] . identifier[get_subject] (). identifier[CN] :
identifier[chain] . identifier[append] ( identifier[store_dict] [ identifier[current] . identifier[get_issuer] (). identifier[CN] ])
identifier[current] = identifier[store_dict] [ identifier[current] . identifier[get_issuer] (). identifier[CN] ]
keyword[except] identifier[KeyError] :
identifier[invalid] = identifier[crypto] . identifier[X509] ()
identifier[patch_certificate] ( identifier[invalid] )
identifier[invalid] . identifier[set_subject] ( identifier[current] . identifier[get_issuer] ())
identifier[chain] . identifier[append] ( identifier[invalid] )
identifier[chain] . identifier[reverse] ()
keyword[return] identifier[chain] | def assemble_chain(leaf, store):
"""Assemble the trust chain.
    This assembly method uses the certificates' subject and issuer common names and
should be used for informational purposes only. It does *not*
cryptographically verify the chain!
:param OpenSSL.crypto.X509 leaf: The leaf certificate from which to build the
chain.
:param list[OpenSSL.crypto.X509] store: A list of certificates to use to
resolve the chain.
:return: The trust chain.
:rtype: list[OpenSSL.crypto.X509]
"""
store_dict = {}
for cert in store:
store_dict[cert.get_subject().CN] = cert # depends on [control=['for'], data=['cert']]
chain = [leaf]
current = leaf
try:
while current.get_issuer().CN != current.get_subject().CN:
chain.append(store_dict[current.get_issuer().CN])
current = store_dict[current.get_issuer().CN] # depends on [control=['while'], data=[]] # depends on [control=['try'], data=[]]
except KeyError:
invalid = crypto.X509()
patch_certificate(invalid)
invalid.set_subject(current.get_issuer())
chain.append(invalid) # depends on [control=['except'], data=[]]
chain.reverse()
return chain |
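The chain walk above is "follow the issuer CN until a self-signed certificate"; this sketch reproduces it with plain dicts instead of OpenSSL.crypto.X509 objects, including the placeholder appended when the issuer is missing from the store. The dict shape is illustrative.

def assemble(leaf, store):
    by_subject = {c["subject"]: c for c in store}
    chain, current = [leaf], leaf
    while current["issuer"] != current["subject"]:
        issuer = by_subject.get(current["issuer"])
        if issuer is None:
            # Mirror the KeyError branch above: append a placeholder issuer.
            chain.append({"subject": current["issuer"], "issuer": current["issuer"]})
            break
        chain.append(issuer)
        current = issuer
    chain.reverse()
    return chain

root = {"subject": "Root", "issuer": "Root"}
inter = {"subject": "Inter", "issuer": "Root"}
leaf = {"subject": "leaf.example", "issuer": "Inter"}
print([c["subject"] for c in assemble(leaf, [root, inter])])
# ['Root', 'Inter', 'leaf.example']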
def scrub_output_pre_save(model, **kwargs):
"""scrub output before saving notebooks"""
# only run on notebooks
if model['type'] != 'notebook':
return
# only run on nbformat v4
if model['content']['nbformat'] != 4:
return
for cell in model['content']['cells']:
if cell['cell_type'] != 'code':
continue
cell['outputs'] = []
cell['execution_count'] = None | def function[scrub_output_pre_save, parameter[model]]:
constant[scrub output before saving notebooks]
if compare[call[name[model]][constant[type]] not_equal[!=] constant[notebook]] begin[:]
return[None]
if compare[call[call[name[model]][constant[content]]][constant[nbformat]] not_equal[!=] constant[4]] begin[:]
return[None]
for taget[name[cell]] in starred[call[call[name[model]][constant[content]]][constant[cells]]] begin[:]
if compare[call[name[cell]][constant[cell_type]] not_equal[!=] constant[code]] begin[:]
continue
call[name[cell]][constant[outputs]] assign[=] list[[]]
call[name[cell]][constant[execution_count]] assign[=] constant[None] | keyword[def] identifier[scrub_output_pre_save] ( identifier[model] ,** identifier[kwargs] ):
literal[string]
keyword[if] identifier[model] [ literal[string] ]!= literal[string] :
keyword[return]
keyword[if] identifier[model] [ literal[string] ][ literal[string] ]!= literal[int] :
keyword[return]
keyword[for] identifier[cell] keyword[in] identifier[model] [ literal[string] ][ literal[string] ]:
keyword[if] identifier[cell] [ literal[string] ]!= literal[string] :
keyword[continue]
identifier[cell] [ literal[string] ]=[]
identifier[cell] [ literal[string] ]= keyword[None] | def scrub_output_pre_save(model, **kwargs):
"""scrub output before saving notebooks"""
# only run on notebooks
if model['type'] != 'notebook':
return # depends on [control=['if'], data=[]]
# only run on nbformat v4
if model['content']['nbformat'] != 4:
return # depends on [control=['if'], data=[]]
for cell in model['content']['cells']:
if cell['cell_type'] != 'code':
continue # depends on [control=['if'], data=[]]
cell['outputs'] = []
cell['execution_count'] = None # depends on [control=['for'], data=['cell']] |
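A quick check of the pre-save hook above on a hand-built nbformat-v4 model dict (with scrub_output_pre_save from the row above in scope); the cell contents are made up for the example.

model = {
    'type': 'notebook',
    'content': {
        'nbformat': 4,
        'cells': [
            {'cell_type': 'markdown', 'source': '# title'},
            {'cell_type': 'code', 'source': '1+1',
             'outputs': [{'data': {'text/plain': '2'}}], 'execution_count': 3},
        ],
    },
}
scrub_output_pre_save(model)
print(model['content']['cells'][1]['outputs'])          # []
print(model['content']['cells'][1]['execution_count'])  # None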
def modify_item(self, item_uri, metadata):
"""Modify the metadata on an item
"""
md = json.dumps({'metadata': metadata})
response = self.api_request(item_uri, method='PUT', data=md)
return self.__check_success(response) | def function[modify_item, parameter[self, item_uri, metadata]]:
constant[Modify the metadata on an item
]
variable[md] assign[=] call[name[json].dumps, parameter[dictionary[[<ast.Constant object at 0x7da1b222fa00>], [<ast.Name object at 0x7da1b222ddb0>]]]]
variable[response] assign[=] call[name[self].api_request, parameter[name[item_uri]]]
return[call[name[self].__check_success, parameter[name[response]]]] | keyword[def] identifier[modify_item] ( identifier[self] , identifier[item_uri] , identifier[metadata] ):
literal[string]
identifier[md] = identifier[json] . identifier[dumps] ({ literal[string] : identifier[metadata] })
identifier[response] = identifier[self] . identifier[api_request] ( identifier[item_uri] , identifier[method] = literal[string] , identifier[data] = identifier[md] )
keyword[return] identifier[self] . identifier[__check_success] ( identifier[response] ) | def modify_item(self, item_uri, metadata):
"""Modify the metadata on an item
"""
md = json.dumps({'metadata': metadata})
response = self.api_request(item_uri, method='PUT', data=md)
return self.__check_success(response) |
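A sketch of the PUT round-trip modify_item performs; _FakeClient stubs api_request and the success check, which in the real client wrap the HTTP layer, so everything here is illustrative.

import json

class _FakeClient:
    def api_request(self, uri, method="GET", data=None):
        # Stand-in for the real HTTP call: just echo and pretend success.
        print(method, uri, data)
        return {"success": True}

    def _check_success(self, response):
        return response.get("success", False)

    def modify_item(self, item_uri, metadata):
        md = json.dumps({"metadata": metadata})
        return self._check_success(
            self.api_request(item_uri, method="PUT", data=md))

print(_FakeClient().modify_item("/item/42", {"title": "new"}))  # True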
def secret_absent(name, namespace='default', **kwargs):
'''
Ensures that the named secret is absent from the given namespace.
name
The name of the secret
namespace
The name of the namespace
'''
ret = {'name': name,
'changes': {},
'result': False,
'comment': ''}
secret = __salt__['kubernetes.show_secret'](name, namespace, **kwargs)
if secret is None:
ret['result'] = True if not __opts__['test'] else None
ret['comment'] = 'The secret does not exist'
return ret
if __opts__['test']:
ret['comment'] = 'The secret is going to be deleted'
ret['result'] = None
return ret
__salt__['kubernetes.delete_secret'](name, namespace, **kwargs)
    # As of kubernetes 1.6.4, the server doesn't set a code when deleting a secret.
    # The kubernetes module will raise an exception if the kubernetes
    # server returns an error
ret['result'] = True
ret['changes'] = {
'kubernetes.secret': {
'new': 'absent', 'old': 'present'}}
ret['comment'] = 'Secret deleted'
return ret | def function[secret_absent, parameter[name, namespace]]:
constant[
Ensures that the named secret is absent from the given namespace.
name
The name of the secret
namespace
The name of the namespace
]
variable[ret] assign[=] dictionary[[<ast.Constant object at 0x7da2044c2740>, <ast.Constant object at 0x7da2044c2650>, <ast.Constant object at 0x7da2044c11e0>, <ast.Constant object at 0x7da2044c2d40>], [<ast.Name object at 0x7da2044c1090>, <ast.Dict object at 0x7da2044c0dc0>, <ast.Constant object at 0x7da2044c3520>, <ast.Constant object at 0x7da2044c2a40>]]
variable[secret] assign[=] call[call[name[__salt__]][constant[kubernetes.show_secret]], parameter[name[name], name[namespace]]]
if compare[name[secret] is constant[None]] begin[:]
call[name[ret]][constant[result]] assign[=] <ast.IfExp object at 0x7da2044c1450>
call[name[ret]][constant[comment]] assign[=] constant[The secret does not exist]
return[name[ret]]
if call[name[__opts__]][constant[test]] begin[:]
call[name[ret]][constant[comment]] assign[=] constant[The secret is going to be deleted]
call[name[ret]][constant[result]] assign[=] constant[None]
return[name[ret]]
call[call[name[__salt__]][constant[kubernetes.delete_secret]], parameter[name[name], name[namespace]]]
call[name[ret]][constant[result]] assign[=] constant[True]
call[name[ret]][constant[changes]] assign[=] dictionary[[<ast.Constant object at 0x7da2044c2890>], [<ast.Dict object at 0x7da2044c22c0>]]
call[name[ret]][constant[comment]] assign[=] constant[Secret deleted]
return[name[ret]] | keyword[def] identifier[secret_absent] ( identifier[name] , identifier[namespace] = literal[string] ,** identifier[kwargs] ):
literal[string]
identifier[ret] ={ literal[string] : identifier[name] ,
literal[string] :{},
literal[string] : keyword[False] ,
literal[string] : literal[string] }
identifier[secret] = identifier[__salt__] [ literal[string] ]( identifier[name] , identifier[namespace] ,** identifier[kwargs] )
keyword[if] identifier[secret] keyword[is] keyword[None] :
identifier[ret] [ literal[string] ]= keyword[True] keyword[if] keyword[not] identifier[__opts__] [ literal[string] ] keyword[else] keyword[None]
identifier[ret] [ literal[string] ]= literal[string]
keyword[return] identifier[ret]
keyword[if] identifier[__opts__] [ literal[string] ]:
identifier[ret] [ literal[string] ]= literal[string]
identifier[ret] [ literal[string] ]= keyword[None]
keyword[return] identifier[ret]
identifier[__salt__] [ literal[string] ]( identifier[name] , identifier[namespace] ,** identifier[kwargs] )
identifier[ret] [ literal[string] ]= keyword[True]
identifier[ret] [ literal[string] ]={
literal[string] :{
literal[string] : literal[string] , literal[string] : literal[string] }}
identifier[ret] [ literal[string] ]= literal[string]
keyword[return] identifier[ret] | def secret_absent(name, namespace='default', **kwargs):
"""
Ensures that the named secret is absent from the given namespace.
name
The name of the secret
namespace
The name of the namespace
"""
ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''}
secret = __salt__['kubernetes.show_secret'](name, namespace, **kwargs)
if secret is None:
ret['result'] = True if not __opts__['test'] else None
ret['comment'] = 'The secret does not exist'
return ret # depends on [control=['if'], data=[]]
if __opts__['test']:
ret['comment'] = 'The secret is going to be deleted'
ret['result'] = None
return ret # depends on [control=['if'], data=[]]
__salt__['kubernetes.delete_secret'](name, namespace, **kwargs)
    # As of kubernetes 1.6.4, the server doesn't set a code when deleting a secret.
    # The kubernetes module will raise an exception if the kubernetes
    # server returns an error
ret['result'] = True
ret['changes'] = {'kubernetes.secret': {'new': 'absent', 'old': 'present'}}
ret['comment'] = 'Secret deleted'
return ret |
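With the state function above in scope, its outcomes can be exercised by emulating the __salt__ and __opts__ globals that Salt normally injects into state modules; the lambdas below are illustrative stand-ins for the kubernetes execution module.

# Illustrative stand-ins for the globals Salt injects into state modules.
__salt__ = {
    'kubernetes.show_secret': lambda name, namespace, **kw: {'metadata': {'name': name}},
    'kubernetes.delete_secret': lambda name, namespace, **kw: None,
}
__opts__ = {'test': False}

ret = secret_absent('api-token', 'default')
print(ret['result'], '-', ret['comment'])  # True - Secret deleted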
def create_rpc_request_header(self):
'''Creates and serializes a delimited RpcRequestHeaderProto message.'''
rpcheader = RpcRequestHeaderProto()
rpcheader.rpcKind = 2 # rpcheaderproto.RpcKindProto.Value('RPC_PROTOCOL_BUFFER')
rpcheader.rpcOp = 0 # rpcheaderproto.RpcPayloadOperationProto.Value('RPC_FINAL_PACKET')
rpcheader.callId = self.call_id
rpcheader.retryCount = -1
rpcheader.clientId = self.client_id[0:16]
if self.call_id == -3:
self.call_id = 0
else:
self.call_id += 1
# Serialize delimited
s_rpcHeader = rpcheader.SerializeToString()
log_protobuf_message("RpcRequestHeaderProto (len: %d)" % (len(s_rpcHeader)), rpcheader)
return s_rpcHeader | def function[create_rpc_request_header, parameter[self]]:
constant[Creates and serializes a delimited RpcRequestHeaderProto message.]
variable[rpcheader] assign[=] call[name[RpcRequestHeaderProto], parameter[]]
name[rpcheader].rpcKind assign[=] constant[2]
name[rpcheader].rpcOp assign[=] constant[0]
name[rpcheader].callId assign[=] name[self].call_id
name[rpcheader].retryCount assign[=] <ast.UnaryOp object at 0x7da1b08d5510>
name[rpcheader].clientId assign[=] call[name[self].client_id][<ast.Slice object at 0x7da1b08bd0c0>]
if compare[name[self].call_id equal[==] <ast.UnaryOp object at 0x7da1b08be740>] begin[:]
name[self].call_id assign[=] constant[0]
variable[s_rpcHeader] assign[=] call[name[rpcheader].SerializeToString, parameter[]]
call[name[log_protobuf_message], parameter[binary_operation[constant[RpcRequestHeaderProto (len: %d)] <ast.Mod object at 0x7da2590d6920> call[name[len], parameter[name[s_rpcHeader]]]], name[rpcheader]]]
return[name[s_rpcHeader]] | keyword[def] identifier[create_rpc_request_header] ( identifier[self] ):
literal[string]
identifier[rpcheader] = identifier[RpcRequestHeaderProto] ()
identifier[rpcheader] . identifier[rpcKind] = literal[int]
identifier[rpcheader] . identifier[rpcOp] = literal[int]
identifier[rpcheader] . identifier[callId] = identifier[self] . identifier[call_id]
identifier[rpcheader] . identifier[retryCount] =- literal[int]
identifier[rpcheader] . identifier[clientId] = identifier[self] . identifier[client_id] [ literal[int] : literal[int] ]
keyword[if] identifier[self] . identifier[call_id] ==- literal[int] :
identifier[self] . identifier[call_id] = literal[int]
keyword[else] :
identifier[self] . identifier[call_id] += literal[int]
identifier[s_rpcHeader] = identifier[rpcheader] . identifier[SerializeToString] ()
identifier[log_protobuf_message] ( literal[string] %( identifier[len] ( identifier[s_rpcHeader] )), identifier[rpcheader] )
keyword[return] identifier[s_rpcHeader] | def create_rpc_request_header(self):
"""Creates and serializes a delimited RpcRequestHeaderProto message."""
rpcheader = RpcRequestHeaderProto()
rpcheader.rpcKind = 2 # rpcheaderproto.RpcKindProto.Value('RPC_PROTOCOL_BUFFER')
rpcheader.rpcOp = 0 # rpcheaderproto.RpcPayloadOperationProto.Value('RPC_FINAL_PACKET')
rpcheader.callId = self.call_id
rpcheader.retryCount = -1
rpcheader.clientId = self.client_id[0:16]
if self.call_id == -3:
self.call_id = 0 # depends on [control=['if'], data=[]]
else:
self.call_id += 1
# Serialize delimited
s_rpcHeader = rpcheader.SerializeToString()
log_protobuf_message('RpcRequestHeaderProto (len: %d)' % len(s_rpcHeader), rpcheader)
return s_rpcHeader |
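The header builder above carries a small call-id state machine: the first header goes out with the handshake id -3, after which ids count up from 0. This sketch isolates just that logic; the class name is illustrative.

class CallIds:
    HANDSHAKE = -3

    def __init__(self):
        self.call_id = self.HANDSHAKE

    def next_id(self):
        # Return the id to stamp on this header, then advance the counter.
        current = self.call_id
        if self.call_id == self.HANDSHAKE:
            self.call_id = 0
        else:
            self.call_id += 1
        return current

ids = CallIds()
print([ids.next_id() for _ in range(4)])  # [-3, 0, 1, 2]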
def pid(self, value):
"""Process ID setter."""
self.bytearray[self._get_slicers(0)] = bytearray(c_int32(value or 0)) | def function[pid, parameter[self, value]]:
constant[Process ID setter.]
call[name[self].bytearray][call[name[self]._get_slicers, parameter[constant[0]]]] assign[=] call[name[bytearray], parameter[call[name[c_int32], parameter[<ast.BoolOp object at 0x7da1b2637340>]]]] | keyword[def] identifier[pid] ( identifier[self] , identifier[value] ):
literal[string]
identifier[self] . identifier[bytearray] [ identifier[self] . identifier[_get_slicers] ( literal[int] )]= identifier[bytearray] ( identifier[c_int32] ( identifier[value] keyword[or] literal[int] )) | def pid(self, value):
"""Process ID setter."""
self.bytearray[self._get_slicers(0)] = bytearray(c_int32(value or 0)) |
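The setter above packs the pid as a native-endian 32-bit integer into a shared bytearray; this standalone snippet shows the ctypes step on a throwaway buffer with made-up values.

from ctypes import c_int32

buf = bytearray(16)
pid = 4242
buf[0:4] = bytearray(c_int32(pid))  # ctypes objects expose the buffer protocol
print(list(buf[0:4]))               # [146, 16, 0, 0] on little-endian hosts
print(int.from_bytes(bytes(buf[0:4]), 'little', signed=True))  # 4242 on little-endian hosts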
def delete_as(access_token, subscription_id, resource_group, as_name):
'''Delete availability set.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
resource_group (str): Azure resource group name.
as_name (str): Name of the availability set.
Returns:
HTTP response.
'''
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/resourceGroups/', resource_group,
'/providers/Microsoft.Compute/availabilitySets/', as_name,
'?api-version=', COMP_API])
return do_delete(endpoint, access_token) | def function[delete_as, parameter[access_token, subscription_id, resource_group, as_name]]:
constant[Delete availability set.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
resource_group (str): Azure resource group name.
as_name (str): Name of the availability set.
Returns:
HTTP response.
]
variable[endpoint] assign[=] call[constant[].join, parameter[list[[<ast.Call object at 0x7da1b04d9270>, <ast.Constant object at 0x7da1b04db730>, <ast.Name object at 0x7da1b04dbd60>, <ast.Constant object at 0x7da1b04d91b0>, <ast.Name object at 0x7da1b04d81f0>, <ast.Constant object at 0x7da1b04db010>, <ast.Name object at 0x7da1b04dae00>, <ast.Constant object at 0x7da1b04db5e0>, <ast.Name object at 0x7da1b04d9ae0>]]]]
return[call[name[do_delete], parameter[name[endpoint], name[access_token]]]] | keyword[def] identifier[delete_as] ( identifier[access_token] , identifier[subscription_id] , identifier[resource_group] , identifier[as_name] ):
literal[string]
identifier[endpoint] = literal[string] . identifier[join] ([ identifier[get_rm_endpoint] (),
literal[string] , identifier[subscription_id] ,
literal[string] , identifier[resource_group] ,
literal[string] , identifier[as_name] ,
literal[string] , identifier[COMP_API] ])
keyword[return] identifier[do_delete] ( identifier[endpoint] , identifier[access_token] ) | def delete_as(access_token, subscription_id, resource_group, as_name):
"""Delete availability set.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
resource_group (str): Azure resource group name.
as_name (str): Name of the availability set.
Returns:
HTTP response.
"""
endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourceGroups/', resource_group, '/providers/Microsoft.Compute/availabilitySets/', as_name, '?api-version=', COMP_API])
return do_delete(endpoint, access_token) |
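A quick check of the URL the helper above assembles; the subscription/group/set ids are placeholders, get_rm_endpoint is stubbed to the public Azure Resource Manager endpoint, and the COMP_API value is an assumption.

COMP_API = '2017-12-01'  # illustrative api-version

def get_rm_endpoint():
    return 'https://management.azure.com'

endpoint = ''.join([get_rm_endpoint(),
                    '/subscriptions/', '1111-2222',
                    '/resourceGroups/', 'my-rg',
                    '/providers/Microsoft.Compute/availabilitySets/', 'my-as',
                    '?api-version=', COMP_API])
print(endpoint)
# https://management.azure.com/subscriptions/1111-2222/resourceGroups/my-rg/...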
def calculate_sunrise_sunset_from_datetime(self, datetime, depression=0.833,
is_solar_time=False):
"""Calculate sunrise, sunset and noon for a day of year."""
# TODO(mostapha): This should be more generic and based on a method
if datetime.year != 2016 and self.is_leap_year:
datetime = DateTime(datetime.month, datetime.day, datetime.hour,
datetime.minute, True)
sol_dec, eq_of_time = self._calculate_solar_geometry(datetime)
# calculate sunrise and sunset hour
if is_solar_time:
noon = .5
else:
noon = (720 -
4 * math.degrees(self._longitude) -
eq_of_time +
self.time_zone * 60
) / 1440.0
try:
sunrise_hour_angle = self._calculate_sunrise_hour_angle(
sol_dec, depression)
except ValueError:
# no sun rise and sunset at this hour
noon = 24 * noon
return {
"sunrise": None,
"noon": DateTime(datetime.month, datetime.day,
*self._calculate_hour_and_minute(noon),
leap_year=self.is_leap_year),
"sunset": None
}
else:
sunrise = noon - sunrise_hour_angle * 4 / 1440.0
sunset = noon + sunrise_hour_angle * 4 / 1440.0
noon = 24 * noon
sunrise = 24 * sunrise
sunset = 24 * sunset
return {
"sunrise": DateTime(datetime.month, datetime.day,
*self._calculate_hour_and_minute(sunrise),
leap_year=self.is_leap_year),
"noon": DateTime(datetime.month, datetime.day,
*self._calculate_hour_and_minute(noon),
leap_year=self.is_leap_year),
"sunset": DateTime(datetime.month, datetime.day,
*self._calculate_hour_and_minute(sunset),
leap_year=self.is_leap_year)
} | def function[calculate_sunrise_sunset_from_datetime, parameter[self, datetime, depression, is_solar_time]]:
constant[Calculate sunrise, sunset and noon for a day of year.]
if <ast.BoolOp object at 0x7da1b12b9f30> begin[:]
variable[datetime] assign[=] call[name[DateTime], parameter[name[datetime].month, name[datetime].day, name[datetime].hour, name[datetime].minute, constant[True]]]
<ast.Tuple object at 0x7da1b12b94b0> assign[=] call[name[self]._calculate_solar_geometry, parameter[name[datetime]]]
if name[is_solar_time] begin[:]
variable[noon] assign[=] constant[0.5]
<ast.Try object at 0x7da1b12b9600> | keyword[def] identifier[calculate_sunrise_sunset_from_datetime] ( identifier[self] , identifier[datetime] , identifier[depression] = literal[int] ,
identifier[is_solar_time] = keyword[False] ):
literal[string]
keyword[if] identifier[datetime] . identifier[year] != literal[int] keyword[and] identifier[self] . identifier[is_leap_year] :
identifier[datetime] = identifier[DateTime] ( identifier[datetime] . identifier[month] , identifier[datetime] . identifier[day] , identifier[datetime] . identifier[hour] ,
identifier[datetime] . identifier[minute] , keyword[True] )
identifier[sol_dec] , identifier[eq_of_time] = identifier[self] . identifier[_calculate_solar_geometry] ( identifier[datetime] )
keyword[if] identifier[is_solar_time] :
identifier[noon] = literal[int]
keyword[else] :
identifier[noon] =( literal[int] -
literal[int] * identifier[math] . identifier[degrees] ( identifier[self] . identifier[_longitude] )-
identifier[eq_of_time] +
identifier[self] . identifier[time_zone] * literal[int]
)/ literal[int]
keyword[try] :
identifier[sunrise_hour_angle] = identifier[self] . identifier[_calculate_sunrise_hour_angle] (
identifier[sol_dec] , identifier[depression] )
keyword[except] identifier[ValueError] :
identifier[noon] = literal[int] * identifier[noon]
keyword[return] {
literal[string] : keyword[None] ,
literal[string] : identifier[DateTime] ( identifier[datetime] . identifier[month] , identifier[datetime] . identifier[day] ,
* identifier[self] . identifier[_calculate_hour_and_minute] ( identifier[noon] ),
identifier[leap_year] = identifier[self] . identifier[is_leap_year] ),
literal[string] : keyword[None]
}
keyword[else] :
identifier[sunrise] = identifier[noon] - identifier[sunrise_hour_angle] * literal[int] / literal[int]
identifier[sunset] = identifier[noon] + identifier[sunrise_hour_angle] * literal[int] / literal[int]
identifier[noon] = literal[int] * identifier[noon]
identifier[sunrise] = literal[int] * identifier[sunrise]
identifier[sunset] = literal[int] * identifier[sunset]
keyword[return] {
literal[string] : identifier[DateTime] ( identifier[datetime] . identifier[month] , identifier[datetime] . identifier[day] ,
* identifier[self] . identifier[_calculate_hour_and_minute] ( identifier[sunrise] ),
identifier[leap_year] = identifier[self] . identifier[is_leap_year] ),
literal[string] : identifier[DateTime] ( identifier[datetime] . identifier[month] , identifier[datetime] . identifier[day] ,
* identifier[self] . identifier[_calculate_hour_and_minute] ( identifier[noon] ),
identifier[leap_year] = identifier[self] . identifier[is_leap_year] ),
literal[string] : identifier[DateTime] ( identifier[datetime] . identifier[month] , identifier[datetime] . identifier[day] ,
* identifier[self] . identifier[_calculate_hour_and_minute] ( identifier[sunset] ),
identifier[leap_year] = identifier[self] . identifier[is_leap_year] )
} | def calculate_sunrise_sunset_from_datetime(self, datetime, depression=0.833, is_solar_time=False):
"""Calculate sunrise, sunset and noon for a day of year."""
# TODO(mostapha): This should be more generic and based on a method
if datetime.year != 2016 and self.is_leap_year:
datetime = DateTime(datetime.month, datetime.day, datetime.hour, datetime.minute, True) # depends on [control=['if'], data=[]]
(sol_dec, eq_of_time) = self._calculate_solar_geometry(datetime)
# calculate sunrise and sunset hour
if is_solar_time:
noon = 0.5 # depends on [control=['if'], data=[]]
else:
noon = (720 - 4 * math.degrees(self._longitude) - eq_of_time + self.time_zone * 60) / 1440.0
try:
sunrise_hour_angle = self._calculate_sunrise_hour_angle(sol_dec, depression) # depends on [control=['try'], data=[]]
except ValueError:
# no sun rise and sunset at this hour
noon = 24 * noon
return {'sunrise': None, 'noon': DateTime(datetime.month, datetime.day, *self._calculate_hour_and_minute(noon), leap_year=self.is_leap_year), 'sunset': None} # depends on [control=['except'], data=[]]
else:
sunrise = noon - sunrise_hour_angle * 4 / 1440.0
sunset = noon + sunrise_hour_angle * 4 / 1440.0
noon = 24 * noon
sunrise = 24 * sunrise
sunset = 24 * sunset
return {'sunrise': DateTime(datetime.month, datetime.day, *self._calculate_hour_and_minute(sunrise), leap_year=self.is_leap_year), 'noon': DateTime(datetime.month, datetime.day, *self._calculate_hour_and_minute(noon), leap_year=self.is_leap_year), 'sunset': DateTime(datetime.month, datetime.day, *self._calculate_hour_and_minute(sunset), leap_year=self.is_leap_year)} |
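The noon expression above yields a fraction of a day that is later scaled by 24 and split into hour/minute; this worked example plugs in made-up numbers (longitude in radians, equation of time in minutes) to show the conversion.

import math

longitude = math.radians(-122.33)  # stored in radians, like self._longitude above
eq_of_time = -3.5                  # minutes; made up for the example
time_zone = -8                     # hours from UTC

noon = (720 - 4 * math.degrees(longitude) - eq_of_time + time_zone * 60) / 1440.0
hours = 24 * noon
h = int(hours)
m = int(round((hours - h) * 60))
print("solar noon ~ %02d:%02d local" % (h, m))  # ~ 12:13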
def init(i): # pragma: no cover
"""
Input: {}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
}
"""
global cfg, work, initialized, paths_repos, type_long, string_io
if initialized:
return {'return':0}
# Split version
cfg['version']=__version__.split('.')
# Default URL. FIXME: should be formed from wfe_host and wfe_port when they are known.
# cfg['wfe_url_prefix'] = 'http://%s:%s/web?' % (cfg['default_host'], cfg['default_port'])
# Check long/int types
try:
x=long
except Exception as e:
type_long=int
else:
type_long=long
# Import StringIO
if sys.version_info[0]>2:
import io
string_io=io.StringIO
else:
from StringIO import StringIO
string_io=StringIO
    # Check where the repos are (to keep compatibility with CK < V1.5)
p=''
import inspect
pxx=os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
px=os.path.dirname(pxx)
py=os.path.join(pxx, cfg['subdir_default_repo'])
if os.path.isdir(py):
p=py
if p=='':
from distutils.sysconfig import get_python_lib
px=get_python_lib()
py=os.path.join(px, cfg['kernel_dir'], cfg['subdir_default_repo'])
if os.path.isdir(py):
p=py
if p=='':
import site
for px in site.getsitepackages():
py=os.path.join(px, cfg['kernel_dir'],cfg['subdir_default_repo'])
if os.path.isdir(py):
p=py
break
# Check CK_ROOT environment variable
s=os.environ.get(cfg['env_key_root'],'').strip()
if s!='':
work['env_root']=os.path.realpath(s)
for px in cfg['kernel_dirs']:
py=os.path.join(work['env_root'], px, cfg['subdir_default_repo'])
if os.path.isdir(py):
p=py
break
elif px!='':
work['env_root']=px
if p=='':
return {'return':1, 'error':'Internal CK error (can\'t find default repo) - please report to authors'}
# Check default repo
work['dir_default_repo']=p
work['dir_default_repo_path']=os.path.join(work['dir_default_repo'], cfg['module_repo_name'], cfg['repo_name_default'])
work['dir_default_kernel']=os.path.join(work['dir_default_repo'], cfg['subdir_kernel'])
work['dir_default_cfg']=os.path.join(work['dir_default_kernel'], cfg['subdir_kernel_default'], cfg['subdir_ck_ext'], cfg['file_meta'])
work['dir_work_repo']=work['dir_default_repo']
work['dir_work_repo_path']=work['dir_default_repo_path']
work['dir_work_kernel']=work['dir_default_kernel']
work['dir_work_cfg']=work['dir_default_cfg']
work['repo_name_work']=cfg['repo_name_default']
work['repo_uid_work']=cfg['repo_uid_default']
# Check external repos
rps=os.environ.get(cfg['env_key_repos'],'').strip()
if rps=='':
       # Get the user's home directory
from os.path import expanduser
home = expanduser("~")
       # In the original version, if the path to repos was not defined, I was using the CK path,
       # however, when installed as root, it would fail:
       # rps=os.path.join(work['env_root'],cfg['subdir_default_repos'])
       # hence I changed it to <user home dir>/CK
rps=os.path.join(home, cfg['user_home_dir_ext'])
if not os.path.isdir(rps):
os.makedirs(rps)
work['dir_repos']=rps
    # Check CK_LOCAL_REPO environment variable - if it doesn't exist, create it in user space
s=os.environ.get(cfg['env_key_local_repo'],'').strip()
if s=='':
# Set up local default repository
s=os.path.join(rps, cfg['repo_name_local'])
if not os.path.isdir(s):
os.makedirs(s)
# Create description
rq=save_json_to_file({'json_file':os.path.join(s,cfg['repo_file']),
'dict':{'data_alias':cfg['repo_name_local'],
'data_uoa':cfg['repo_name_local'],
'data_name':cfg['repo_name_local'],
'data_uid':cfg['repo_uid_local']},
'sort_keys':'yes'})
if rq['return']>0: return rq
if s!='':
work['local_kernel_uoa']=cfg['subdir_kernel_default']
x=os.environ.get(cfg['env_key_local_kernel_uoa'],'').strip()
if x!='': work['local_kernel_uoa']=x
work['dir_local_repo']=os.path.realpath(s)
work['dir_local_repo_path']=os.path.join(work['dir_local_repo'], cfg['module_repo_name'], cfg['repo_name_local'])
work['dir_local_kernel']=os.path.join(work['dir_local_repo'], cfg['subdir_kernel'])
work['dir_local_cfg']=os.path.join(work['dir_local_kernel'], work['local_kernel_uoa'], cfg['subdir_ck_ext'], cfg['file_meta'])
# Update work repo!
work['dir_work_repo']=work['dir_local_repo']
work['dir_work_repo_path']=work['dir_local_repo_path']
work['dir_work_kernel']=work['dir_local_kernel']
work['dir_work_cfg']=work['dir_local_cfg']
work['repo_name_work']=cfg['repo_name_local']
work['repo_uid_work']=cfg['repo_uid_local']
paths_repos.append({'path':work['dir_local_repo'],
'repo_uoa':cfg['repo_name_local'],
'repo_uid':cfg['repo_uid_local'],
'repo_alias':cfg['repo_name_local']})
paths_repos.append({'path':work['dir_default_repo'],
'repo_uoa':cfg['repo_name_default'],
'repo_uid':cfg['repo_uid_default'],
'repo_alias':cfg['repo_name_default']})
# Prepare repo cache
work['dir_cache_repo_uoa']=os.path.join(work['dir_work_repo'],cfg['file_cache_repo_uoa'])
work['dir_cache_repo_info']=os.path.join(work['dir_work_repo'],cfg['file_cache_repo_info'])
    # On first run, copy the local cache files (with remote-ck)
if not os.path.isfile(work['dir_cache_repo_uoa']) and not os.path.isfile(work['dir_cache_repo_info']):
rx=load_text_file({'text_file':os.path.join(work['dir_default_repo'],cfg['file_cache_repo_uoa'])})
if rx['return']>0: return rx
x1=rx['string']
rx=load_text_file({'text_file':os.path.join(work['dir_default_repo'],cfg['file_cache_repo_info'])})
if rx['return']>0: return rx
x2=rx['string']
rx=save_text_file({'text_file':work['dir_cache_repo_info'], 'string':x2})
if rx['return']>0: return rx
rx=save_text_file({'text_file':work['dir_cache_repo_uoa'], 'string':x1})
if rx['return']>0: return rx
# Check if local configuration exists, and if not, create it
if not os.path.isfile(work['dir_local_cfg']):
# Create empty local configuration
rx=add({'repo_uoa':cfg['repo_name_local'],
'module_uoa':cfg['subdir_kernel'],
'data_uoa':work['local_kernel_uoa']})
if rx['return']>0:
return {'return':rx['return'],
'error':'can\'t create local configuration entry'}
# Read kernel configuration (if exists)
if os.path.isfile(work['dir_work_cfg']):
r=load_json_file({'json_file':work['dir_work_cfg']})
if r['return']>0: return r
cfg1=r['dict']
# Update cfg
r=merge_dicts({'dict1':cfg, 'dict2':cfg1})
if r['return']>0: return r
initialized=True
return {'return':0} | def function[init, parameter[i]]:
constant[
Input: {}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
}
]
<ast.Global object at 0x7da1b220f1c0>
if name[initialized] begin[:]
return[dictionary[[<ast.Constant object at 0x7da1b220e560>], [<ast.Constant object at 0x7da1b220d840>]]]
call[name[cfg]][constant[version]] assign[=] call[name[__version__].split, parameter[constant[.]]]
<ast.Try object at 0x7da1b220c730>
if compare[call[name[sys].version_info][constant[0]] greater[>] constant[2]] begin[:]
import module[io]
variable[string_io] assign[=] name[io].StringIO
variable[p] assign[=] constant[]
import module[inspect]
variable[pxx] assign[=] call[name[os].path.dirname, parameter[call[name[os].path.abspath, parameter[call[name[inspect].getfile, parameter[call[name[inspect].currentframe, parameter[]]]]]]]]
variable[px] assign[=] call[name[os].path.dirname, parameter[name[pxx]]]
variable[py] assign[=] call[name[os].path.join, parameter[name[pxx], call[name[cfg]][constant[subdir_default_repo]]]]
if call[name[os].path.isdir, parameter[name[py]]] begin[:]
variable[p] assign[=] name[py]
if compare[name[p] equal[==] constant[]] begin[:]
from relative_module[distutils.sysconfig] import module[get_python_lib]
variable[px] assign[=] call[name[get_python_lib], parameter[]]
variable[py] assign[=] call[name[os].path.join, parameter[name[px], call[name[cfg]][constant[kernel_dir]], call[name[cfg]][constant[subdir_default_repo]]]]
if call[name[os].path.isdir, parameter[name[py]]] begin[:]
variable[p] assign[=] name[py]
if compare[name[p] equal[==] constant[]] begin[:]
import module[site]
for taget[name[px]] in starred[call[name[site].getsitepackages, parameter[]]] begin[:]
variable[py] assign[=] call[name[os].path.join, parameter[name[px], call[name[cfg]][constant[kernel_dir]], call[name[cfg]][constant[subdir_default_repo]]]]
if call[name[os].path.isdir, parameter[name[py]]] begin[:]
variable[p] assign[=] name[py]
break
variable[s] assign[=] call[call[name[os].environ.get, parameter[call[name[cfg]][constant[env_key_root]], constant[]]].strip, parameter[]]
if compare[name[s] not_equal[!=] constant[]] begin[:]
call[name[work]][constant[env_root]] assign[=] call[name[os].path.realpath, parameter[name[s]]]
for taget[name[px]] in starred[call[name[cfg]][constant[kernel_dirs]]] begin[:]
variable[py] assign[=] call[name[os].path.join, parameter[call[name[work]][constant[env_root]], name[px], call[name[cfg]][constant[subdir_default_repo]]]]
if call[name[os].path.isdir, parameter[name[py]]] begin[:]
variable[p] assign[=] name[py]
break
if compare[name[p] equal[==] constant[]] begin[:]
return[dictionary[[<ast.Constant object at 0x7da1b22a3340>, <ast.Constant object at 0x7da1b22a3310>], [<ast.Constant object at 0x7da1b22a32e0>, <ast.Constant object at 0x7da1b22a32b0>]]]
call[name[work]][constant[dir_default_repo]] assign[=] name[p]
call[name[work]][constant[dir_default_repo_path]] assign[=] call[name[os].path.join, parameter[call[name[work]][constant[dir_default_repo]], call[name[cfg]][constant[module_repo_name]], call[name[cfg]][constant[repo_name_default]]]]
call[name[work]][constant[dir_default_kernel]] assign[=] call[name[os].path.join, parameter[call[name[work]][constant[dir_default_repo]], call[name[cfg]][constant[subdir_kernel]]]]
call[name[work]][constant[dir_default_cfg]] assign[=] call[name[os].path.join, parameter[call[name[work]][constant[dir_default_kernel]], call[name[cfg]][constant[subdir_kernel_default]], call[name[cfg]][constant[subdir_ck_ext]], call[name[cfg]][constant[file_meta]]]]
call[name[work]][constant[dir_work_repo]] assign[=] call[name[work]][constant[dir_default_repo]]
call[name[work]][constant[dir_work_repo_path]] assign[=] call[name[work]][constant[dir_default_repo_path]]
call[name[work]][constant[dir_work_kernel]] assign[=] call[name[work]][constant[dir_default_kernel]]
call[name[work]][constant[dir_work_cfg]] assign[=] call[name[work]][constant[dir_default_cfg]]
call[name[work]][constant[repo_name_work]] assign[=] call[name[cfg]][constant[repo_name_default]]
call[name[work]][constant[repo_uid_work]] assign[=] call[name[cfg]][constant[repo_uid_default]]
variable[rps] assign[=] call[call[name[os].environ.get, parameter[call[name[cfg]][constant[env_key_repos]], constant[]]].strip, parameter[]]
if compare[name[rps] equal[==] constant[]] begin[:]
from relative_module[os.path] import module[expanduser]
variable[home] assign[=] call[name[expanduser], parameter[constant[~]]]
variable[rps] assign[=] call[name[os].path.join, parameter[name[home], call[name[cfg]][constant[user_home_dir_ext]]]]
if <ast.UnaryOp object at 0x7da1b22a1990> begin[:]
call[name[os].makedirs, parameter[name[rps]]]
call[name[work]][constant[dir_repos]] assign[=] name[rps]
variable[s] assign[=] call[call[name[os].environ.get, parameter[call[name[cfg]][constant[env_key_local_repo]], constant[]]].strip, parameter[]]
if compare[name[s] equal[==] constant[]] begin[:]
variable[s] assign[=] call[name[os].path.join, parameter[name[rps], call[name[cfg]][constant[repo_name_local]]]]
if <ast.UnaryOp object at 0x7da1b22a1150> begin[:]
call[name[os].makedirs, parameter[name[s]]]
variable[rq] assign[=] call[name[save_json_to_file], parameter[dictionary[[<ast.Constant object at 0x7da1b22a0e20>, <ast.Constant object at 0x7da1b22a0df0>, <ast.Constant object at 0x7da1b22a0dc0>], [<ast.Call object at 0x7da1b22a0d90>, <ast.Dict object at 0x7da1b22a0c10>, <ast.Constant object at 0x7da1b22a08e0>]]]]
if compare[call[name[rq]][constant[return]] greater[>] constant[0]] begin[:]
return[name[rq]]
if compare[name[s] not_equal[!=] constant[]] begin[:]
call[name[work]][constant[local_kernel_uoa]] assign[=] call[name[cfg]][constant[subdir_kernel_default]]
variable[x] assign[=] call[call[name[os].environ.get, parameter[call[name[cfg]][constant[env_key_local_kernel_uoa]], constant[]]].strip, parameter[]]
if compare[name[x] not_equal[!=] constant[]] begin[:]
call[name[work]][constant[local_kernel_uoa]] assign[=] name[x]
call[name[work]][constant[dir_local_repo]] assign[=] call[name[os].path.realpath, parameter[name[s]]]
call[name[work]][constant[dir_local_repo_path]] assign[=] call[name[os].path.join, parameter[call[name[work]][constant[dir_local_repo]], call[name[cfg]][constant[module_repo_name]], call[name[cfg]][constant[repo_name_local]]]]
call[name[work]][constant[dir_local_kernel]] assign[=] call[name[os].path.join, parameter[call[name[work]][constant[dir_local_repo]], call[name[cfg]][constant[subdir_kernel]]]]
call[name[work]][constant[dir_local_cfg]] assign[=] call[name[os].path.join, parameter[call[name[work]][constant[dir_local_kernel]], call[name[work]][constant[local_kernel_uoa]], call[name[cfg]][constant[subdir_ck_ext]], call[name[cfg]][constant[file_meta]]]]
call[name[work]][constant[dir_work_repo]] assign[=] call[name[work]][constant[dir_local_repo]]
call[name[work]][constant[dir_work_repo_path]] assign[=] call[name[work]][constant[dir_local_repo_path]]
call[name[work]][constant[dir_work_kernel]] assign[=] call[name[work]][constant[dir_local_kernel]]
call[name[work]][constant[dir_work_cfg]] assign[=] call[name[work]][constant[dir_local_cfg]]
call[name[work]][constant[repo_name_work]] assign[=] call[name[cfg]][constant[repo_name_local]]
call[name[work]][constant[repo_uid_work]] assign[=] call[name[cfg]][constant[repo_uid_local]]
call[name[paths_repos].append, parameter[dictionary[[<ast.Constant object at 0x7da1b22aecb0>, <ast.Constant object at 0x7da1b22aec80>, <ast.Constant object at 0x7da1b22aec50>, <ast.Constant object at 0x7da1b22aec20>], [<ast.Subscript object at 0x7da1b22aebf0>, <ast.Subscript object at 0x7da1b22aeb60>, <ast.Subscript object at 0x7da1b22aead0>, <ast.Subscript object at 0x7da1b22aea40>]]]]
call[name[paths_repos].append, parameter[dictionary[[<ast.Constant object at 0x7da1b22ae890>, <ast.Constant object at 0x7da1b22ae860>, <ast.Constant object at 0x7da1b22ae830>, <ast.Constant object at 0x7da1b22ae800>], [<ast.Subscript object at 0x7da1b22ae7d0>, <ast.Subscript object at 0x7da1b22ae740>, <ast.Subscript object at 0x7da1b22ae6b0>, <ast.Subscript object at 0x7da1b22ae620>]]]]
call[name[work]][constant[dir_cache_repo_uoa]] assign[=] call[name[os].path.join, parameter[call[name[work]][constant[dir_work_repo]], call[name[cfg]][constant[file_cache_repo_uoa]]]]
call[name[work]][constant[dir_cache_repo_info]] assign[=] call[name[os].path.join, parameter[call[name[work]][constant[dir_work_repo]], call[name[cfg]][constant[file_cache_repo_info]]]]
if <ast.BoolOp object at 0x7da1b22adff0> begin[:]
variable[rx] assign[=] call[name[load_text_file], parameter[dictionary[[<ast.Constant object at 0x7da1b22adbd0>], [<ast.Call object at 0x7da1b22adba0>]]]]
if compare[call[name[rx]][constant[return]] greater[>] constant[0]] begin[:]
return[name[rx]]
variable[x1] assign[=] call[name[rx]][constant[string]]
variable[rx] assign[=] call[name[load_text_file], parameter[dictionary[[<ast.Constant object at 0x7da1b22ad630>], [<ast.Call object at 0x7da1b22ad600>]]]]
if compare[call[name[rx]][constant[return]] greater[>] constant[0]] begin[:]
return[name[rx]]
variable[x2] assign[=] call[name[rx]][constant[string]]
variable[rx] assign[=] call[name[save_text_file], parameter[dictionary[[<ast.Constant object at 0x7da1b22f5c90>, <ast.Constant object at 0x7da1b22f7bb0>], [<ast.Subscript object at 0x7da1b22f6740>, <ast.Name object at 0x7da1b22f6e30>]]]]
if compare[call[name[rx]][constant[return]] greater[>] constant[0]] begin[:]
return[name[rx]]
variable[rx] assign[=] call[name[save_text_file], parameter[dictionary[[<ast.Constant object at 0x7da1b22f6f80>, <ast.Constant object at 0x7da1b22f7160>], [<ast.Subscript object at 0x7da1b22f55a0>, <ast.Name object at 0x7da1b22f7d30>]]]]
if compare[call[name[rx]][constant[return]] greater[>] constant[0]] begin[:]
return[name[rx]]
if <ast.UnaryOp object at 0x7da1b22f6080> begin[:]
variable[rx] assign[=] call[name[add], parameter[dictionary[[<ast.Constant object at 0x7da1b22165c0>, <ast.Constant object at 0x7da1b2215360>, <ast.Constant object at 0x7da1b2215420>], [<ast.Subscript object at 0x7da1b2215390>, <ast.Subscript object at 0x7da1b2215db0>, <ast.Subscript object at 0x7da1b2215cf0>]]]]
if compare[call[name[rx]][constant[return]] greater[>] constant[0]] begin[:]
return[dictionary[[<ast.Constant object at 0x7da1b2214610>, <ast.Constant object at 0x7da1b2216ef0>], [<ast.Subscript object at 0x7da1b2217010>, <ast.Constant object at 0x7da1b2217160>]]]
if call[name[os].path.isfile, parameter[call[name[work]][constant[dir_work_cfg]]]] begin[:]
variable[r] assign[=] call[name[load_json_file], parameter[dictionary[[<ast.Constant object at 0x7da1b22150c0>], [<ast.Subscript object at 0x7da1b2215030>]]]]
if compare[call[name[r]][constant[return]] greater[>] constant[0]] begin[:]
return[name[r]]
variable[cfg1] assign[=] call[name[r]][constant[dict]]
variable[r] assign[=] call[name[merge_dicts], parameter[dictionary[[<ast.Constant object at 0x7da1b2215120>, <ast.Constant object at 0x7da1b22146d0>], [<ast.Name object at 0x7da1b2215480>, <ast.Name object at 0x7da1b22154e0>]]]]
if compare[call[name[r]][constant[return]] greater[>] constant[0]] begin[:]
return[name[r]]
variable[initialized] assign[=] constant[True]
return[dictionary[[<ast.Constant object at 0x7da1b2215210>], [<ast.Constant object at 0x7da1b22152a0>]]] | keyword[def] identifier[init] ( identifier[i] ):
literal[string]
keyword[global] identifier[cfg] , identifier[work] , identifier[initialized] , identifier[paths_repos] , identifier[type_long] , identifier[string_io]
keyword[if] identifier[initialized] :
keyword[return] { literal[string] : literal[int] }
identifier[cfg] [ literal[string] ]= identifier[__version__] . identifier[split] ( literal[string] )
keyword[try] :
identifier[x] = identifier[long]
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[type_long] = identifier[int]
keyword[else] :
identifier[type_long] = identifier[long]
keyword[if] identifier[sys] . identifier[version_info] [ literal[int] ]> literal[int] :
keyword[import] identifier[io]
identifier[string_io] = identifier[io] . identifier[StringIO]
keyword[else] :
keyword[from] identifier[StringIO] keyword[import] identifier[StringIO]
identifier[string_io] = identifier[StringIO]
identifier[p] = literal[string]
keyword[import] identifier[inspect]
identifier[pxx] = identifier[os] . identifier[path] . identifier[dirname] ( identifier[os] . identifier[path] . identifier[abspath] ( identifier[inspect] . identifier[getfile] ( identifier[inspect] . identifier[currentframe] ())))
identifier[px] = identifier[os] . identifier[path] . identifier[dirname] ( identifier[pxx] )
identifier[py] = identifier[os] . identifier[path] . identifier[join] ( identifier[pxx] , identifier[cfg] [ literal[string] ])
keyword[if] identifier[os] . identifier[path] . identifier[isdir] ( identifier[py] ):
identifier[p] = identifier[py]
keyword[if] identifier[p] == literal[string] :
keyword[from] identifier[distutils] . identifier[sysconfig] keyword[import] identifier[get_python_lib]
identifier[px] = identifier[get_python_lib] ()
identifier[py] = identifier[os] . identifier[path] . identifier[join] ( identifier[px] , identifier[cfg] [ literal[string] ], identifier[cfg] [ literal[string] ])
keyword[if] identifier[os] . identifier[path] . identifier[isdir] ( identifier[py] ):
identifier[p] = identifier[py]
keyword[if] identifier[p] == literal[string] :
keyword[import] identifier[site]
keyword[for] identifier[px] keyword[in] identifier[site] . identifier[getsitepackages] ():
identifier[py] = identifier[os] . identifier[path] . identifier[join] ( identifier[px] , identifier[cfg] [ literal[string] ], identifier[cfg] [ literal[string] ])
keyword[if] identifier[os] . identifier[path] . identifier[isdir] ( identifier[py] ):
identifier[p] = identifier[py]
keyword[break]
identifier[s] = identifier[os] . identifier[environ] . identifier[get] ( identifier[cfg] [ literal[string] ], literal[string] ). identifier[strip] ()
keyword[if] identifier[s] != literal[string] :
identifier[work] [ literal[string] ]= identifier[os] . identifier[path] . identifier[realpath] ( identifier[s] )
keyword[for] identifier[px] keyword[in] identifier[cfg] [ literal[string] ]:
identifier[py] = identifier[os] . identifier[path] . identifier[join] ( identifier[work] [ literal[string] ], identifier[px] , identifier[cfg] [ literal[string] ])
keyword[if] identifier[os] . identifier[path] . identifier[isdir] ( identifier[py] ):
identifier[p] = identifier[py]
keyword[break]
keyword[elif] identifier[px] != literal[string] :
identifier[work] [ literal[string] ]= identifier[px]
keyword[if] identifier[p] == literal[string] :
keyword[return] { literal[string] : literal[int] , literal[string] : literal[string] }
identifier[work] [ literal[string] ]= identifier[p]
identifier[work] [ literal[string] ]= identifier[os] . identifier[path] . identifier[join] ( identifier[work] [ literal[string] ], identifier[cfg] [ literal[string] ], identifier[cfg] [ literal[string] ])
identifier[work] [ literal[string] ]= identifier[os] . identifier[path] . identifier[join] ( identifier[work] [ literal[string] ], identifier[cfg] [ literal[string] ])
identifier[work] [ literal[string] ]= identifier[os] . identifier[path] . identifier[join] ( identifier[work] [ literal[string] ], identifier[cfg] [ literal[string] ], identifier[cfg] [ literal[string] ], identifier[cfg] [ literal[string] ])
identifier[work] [ literal[string] ]= identifier[work] [ literal[string] ]
identifier[work] [ literal[string] ]= identifier[work] [ literal[string] ]
identifier[work] [ literal[string] ]= identifier[work] [ literal[string] ]
identifier[work] [ literal[string] ]= identifier[work] [ literal[string] ]
identifier[work] [ literal[string] ]= identifier[cfg] [ literal[string] ]
identifier[work] [ literal[string] ]= identifier[cfg] [ literal[string] ]
identifier[rps] = identifier[os] . identifier[environ] . identifier[get] ( identifier[cfg] [ literal[string] ], literal[string] ). identifier[strip] ()
keyword[if] identifier[rps] == literal[string] :
keyword[from] identifier[os] . identifier[path] keyword[import] identifier[expanduser]
identifier[home] = identifier[expanduser] ( literal[string] )
identifier[rps] = identifier[os] . identifier[path] . identifier[join] ( identifier[home] , identifier[cfg] [ literal[string] ])
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[isdir] ( identifier[rps] ):
identifier[os] . identifier[makedirs] ( identifier[rps] )
identifier[work] [ literal[string] ]= identifier[rps]
identifier[s] = identifier[os] . identifier[environ] . identifier[get] ( identifier[cfg] [ literal[string] ], literal[string] ). identifier[strip] ()
keyword[if] identifier[s] == literal[string] :
identifier[s] = identifier[os] . identifier[path] . identifier[join] ( identifier[rps] , identifier[cfg] [ literal[string] ])
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[isdir] ( identifier[s] ):
identifier[os] . identifier[makedirs] ( identifier[s] )
identifier[rq] = identifier[save_json_to_file] ({ literal[string] : identifier[os] . identifier[path] . identifier[join] ( identifier[s] , identifier[cfg] [ literal[string] ]),
literal[string] :{ literal[string] : identifier[cfg] [ literal[string] ],
literal[string] : identifier[cfg] [ literal[string] ],
literal[string] : identifier[cfg] [ literal[string] ],
literal[string] : identifier[cfg] [ literal[string] ]},
literal[string] : literal[string] })
keyword[if] identifier[rq] [ literal[string] ]> literal[int] : keyword[return] identifier[rq]
keyword[if] identifier[s] != literal[string] :
identifier[work] [ literal[string] ]= identifier[cfg] [ literal[string] ]
identifier[x] = identifier[os] . identifier[environ] . identifier[get] ( identifier[cfg] [ literal[string] ], literal[string] ). identifier[strip] ()
keyword[if] identifier[x] != literal[string] : identifier[work] [ literal[string] ]= identifier[x]
identifier[work] [ literal[string] ]= identifier[os] . identifier[path] . identifier[realpath] ( identifier[s] )
identifier[work] [ literal[string] ]= identifier[os] . identifier[path] . identifier[join] ( identifier[work] [ literal[string] ], identifier[cfg] [ literal[string] ], identifier[cfg] [ literal[string] ])
identifier[work] [ literal[string] ]= identifier[os] . identifier[path] . identifier[join] ( identifier[work] [ literal[string] ], identifier[cfg] [ literal[string] ])
identifier[work] [ literal[string] ]= identifier[os] . identifier[path] . identifier[join] ( identifier[work] [ literal[string] ], identifier[work] [ literal[string] ], identifier[cfg] [ literal[string] ], identifier[cfg] [ literal[string] ])
identifier[work] [ literal[string] ]= identifier[work] [ literal[string] ]
identifier[work] [ literal[string] ]= identifier[work] [ literal[string] ]
identifier[work] [ literal[string] ]= identifier[work] [ literal[string] ]
identifier[work] [ literal[string] ]= identifier[work] [ literal[string] ]
identifier[work] [ literal[string] ]= identifier[cfg] [ literal[string] ]
identifier[work] [ literal[string] ]= identifier[cfg] [ literal[string] ]
identifier[paths_repos] . identifier[append] ({ literal[string] : identifier[work] [ literal[string] ],
literal[string] : identifier[cfg] [ literal[string] ],
literal[string] : identifier[cfg] [ literal[string] ],
literal[string] : identifier[cfg] [ literal[string] ]})
identifier[paths_repos] . identifier[append] ({ literal[string] : identifier[work] [ literal[string] ],
literal[string] : identifier[cfg] [ literal[string] ],
literal[string] : identifier[cfg] [ literal[string] ],
literal[string] : identifier[cfg] [ literal[string] ]})
identifier[work] [ literal[string] ]= identifier[os] . identifier[path] . identifier[join] ( identifier[work] [ literal[string] ], identifier[cfg] [ literal[string] ])
identifier[work] [ literal[string] ]= identifier[os] . identifier[path] . identifier[join] ( identifier[work] [ literal[string] ], identifier[cfg] [ literal[string] ])
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[isfile] ( identifier[work] [ literal[string] ]) keyword[and] keyword[not] identifier[os] . identifier[path] . identifier[isfile] ( identifier[work] [ literal[string] ]):
identifier[rx] = identifier[load_text_file] ({ literal[string] : identifier[os] . identifier[path] . identifier[join] ( identifier[work] [ literal[string] ], identifier[cfg] [ literal[string] ])})
keyword[if] identifier[rx] [ literal[string] ]> literal[int] : keyword[return] identifier[rx]
identifier[x1] = identifier[rx] [ literal[string] ]
identifier[rx] = identifier[load_text_file] ({ literal[string] : identifier[os] . identifier[path] . identifier[join] ( identifier[work] [ literal[string] ], identifier[cfg] [ literal[string] ])})
keyword[if] identifier[rx] [ literal[string] ]> literal[int] : keyword[return] identifier[rx]
identifier[x2] = identifier[rx] [ literal[string] ]
identifier[rx] = identifier[save_text_file] ({ literal[string] : identifier[work] [ literal[string] ], literal[string] : identifier[x2] })
keyword[if] identifier[rx] [ literal[string] ]> literal[int] : keyword[return] identifier[rx]
identifier[rx] = identifier[save_text_file] ({ literal[string] : identifier[work] [ literal[string] ], literal[string] : identifier[x1] })
keyword[if] identifier[rx] [ literal[string] ]> literal[int] : keyword[return] identifier[rx]
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[isfile] ( identifier[work] [ literal[string] ]):
identifier[rx] = identifier[add] ({ literal[string] : identifier[cfg] [ literal[string] ],
literal[string] : identifier[cfg] [ literal[string] ],
literal[string] : identifier[work] [ literal[string] ]})
keyword[if] identifier[rx] [ literal[string] ]> literal[int] :
keyword[return] { literal[string] : identifier[rx] [ literal[string] ],
literal[string] : literal[string] }
keyword[if] identifier[os] . identifier[path] . identifier[isfile] ( identifier[work] [ literal[string] ]):
identifier[r] = identifier[load_json_file] ({ literal[string] : identifier[work] [ literal[string] ]})
keyword[if] identifier[r] [ literal[string] ]> literal[int] : keyword[return] identifier[r]
identifier[cfg1] = identifier[r] [ literal[string] ]
identifier[r] = identifier[merge_dicts] ({ literal[string] : identifier[cfg] , literal[string] : identifier[cfg1] })
keyword[if] identifier[r] [ literal[string] ]> literal[int] : keyword[return] identifier[r]
identifier[initialized] = keyword[True]
keyword[return] { literal[string] : literal[int] } | def init(i): # pragma: no cover
'\n Input: {}\n\n Output: {\n return - return code = 0, if successful\n > 0, if error\n (error) - error text if return > 0\n }\n '
global cfg, work, initialized, paths_repos, type_long, string_io
if initialized:
return {'return': 0} # depends on [control=['if'], data=[]]
# Split version
cfg['version'] = __version__.split('.')
# Default URL. FIXME: should be formed from wfe_host and wfe_port when they are known.
# cfg['wfe_url_prefix'] = 'http://%s:%s/web?' % (cfg['default_host'], cfg['default_port'])
# Check long/int types
try:
x = long # depends on [control=['try'], data=[]]
except Exception as e:
type_long = int # depends on [control=['except'], data=[]]
else:
type_long = long
# Import StringIO
if sys.version_info[0] > 2:
import io
string_io = io.StringIO # depends on [control=['if'], data=[]]
else:
from StringIO import StringIO
string_io = StringIO
# Check where are repos (to keep compatibility with past CK < V1.5)
p = ''
import inspect
pxx = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
px = os.path.dirname(pxx)
py = os.path.join(pxx, cfg['subdir_default_repo'])
if os.path.isdir(py):
p = py # depends on [control=['if'], data=[]]
if p == '':
from distutils.sysconfig import get_python_lib
px = get_python_lib()
py = os.path.join(px, cfg['kernel_dir'], cfg['subdir_default_repo'])
if os.path.isdir(py):
p = py # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['p']]
if p == '':
import site
for px in site.getsitepackages():
py = os.path.join(px, cfg['kernel_dir'], cfg['subdir_default_repo'])
if os.path.isdir(py):
p = py
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['px']] # depends on [control=['if'], data=['p']]
# Check CK_ROOT environment variable
s = os.environ.get(cfg['env_key_root'], '').strip()
if s != '':
work['env_root'] = os.path.realpath(s)
for px in cfg['kernel_dirs']:
py = os.path.join(work['env_root'], px, cfg['subdir_default_repo'])
if os.path.isdir(py):
p = py
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['px']] # depends on [control=['if'], data=['s']]
elif px != '':
work['env_root'] = px # depends on [control=['if'], data=['px']]
if p == '':
return {'return': 1, 'error': "Internal CK error (can't find default repo) - please report to authors"} # depends on [control=['if'], data=[]]
# Check default repo
work['dir_default_repo'] = p
work['dir_default_repo_path'] = os.path.join(work['dir_default_repo'], cfg['module_repo_name'], cfg['repo_name_default'])
work['dir_default_kernel'] = os.path.join(work['dir_default_repo'], cfg['subdir_kernel'])
work['dir_default_cfg'] = os.path.join(work['dir_default_kernel'], cfg['subdir_kernel_default'], cfg['subdir_ck_ext'], cfg['file_meta'])
work['dir_work_repo'] = work['dir_default_repo']
work['dir_work_repo_path'] = work['dir_default_repo_path']
work['dir_work_kernel'] = work['dir_default_kernel']
work['dir_work_cfg'] = work['dir_default_cfg']
work['repo_name_work'] = cfg['repo_name_default']
work['repo_uid_work'] = cfg['repo_uid_default']
# Check external repos
rps = os.environ.get(cfg['env_key_repos'], '').strip()
if rps == '':
# Get home user directory
from os.path import expanduser
home = expanduser('~')
# In the original version, if the path to repos was not defined, I used the CK path;
# however, when CK is installed as root, it fails
# rps=os.path.join(work['env_root'],cfg['subdir_default_repos'])
# hence I changed to <user home dir>/CK
rps = os.path.join(home, cfg['user_home_dir_ext'])
if not os.path.isdir(rps):
os.makedirs(rps) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['rps']]
work['dir_repos'] = rps
# Check CK_LOCAL_REPO environment variable - if doesn't exist, create in user space
s = os.environ.get(cfg['env_key_local_repo'], '').strip()
if s == '':
# Set up local default repository
s = os.path.join(rps, cfg['repo_name_local'])
if not os.path.isdir(s):
os.makedirs(s)
# Create description
rq = save_json_to_file({'json_file': os.path.join(s, cfg['repo_file']), 'dict': {'data_alias': cfg['repo_name_local'], 'data_uoa': cfg['repo_name_local'], 'data_name': cfg['repo_name_local'], 'data_uid': cfg['repo_uid_local']}, 'sort_keys': 'yes'})
if rq['return'] > 0:
return rq # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['s']]
if s != '':
work['local_kernel_uoa'] = cfg['subdir_kernel_default']
x = os.environ.get(cfg['env_key_local_kernel_uoa'], '').strip()
if x != '':
work['local_kernel_uoa'] = x # depends on [control=['if'], data=['x']]
work['dir_local_repo'] = os.path.realpath(s)
work['dir_local_repo_path'] = os.path.join(work['dir_local_repo'], cfg['module_repo_name'], cfg['repo_name_local'])
work['dir_local_kernel'] = os.path.join(work['dir_local_repo'], cfg['subdir_kernel'])
work['dir_local_cfg'] = os.path.join(work['dir_local_kernel'], work['local_kernel_uoa'], cfg['subdir_ck_ext'], cfg['file_meta'])
# Update work repo!
work['dir_work_repo'] = work['dir_local_repo']
work['dir_work_repo_path'] = work['dir_local_repo_path']
work['dir_work_kernel'] = work['dir_local_kernel']
work['dir_work_cfg'] = work['dir_local_cfg']
work['repo_name_work'] = cfg['repo_name_local']
work['repo_uid_work'] = cfg['repo_uid_local']
paths_repos.append({'path': work['dir_local_repo'], 'repo_uoa': cfg['repo_name_local'], 'repo_uid': cfg['repo_uid_local'], 'repo_alias': cfg['repo_name_local']}) # depends on [control=['if'], data=['s']]
paths_repos.append({'path': work['dir_default_repo'], 'repo_uoa': cfg['repo_name_default'], 'repo_uid': cfg['repo_uid_default'], 'repo_alias': cfg['repo_name_default']})
# Prepare repo cache
work['dir_cache_repo_uoa'] = os.path.join(work['dir_work_repo'], cfg['file_cache_repo_uoa'])
work['dir_cache_repo_info'] = os.path.join(work['dir_work_repo'], cfg['file_cache_repo_info'])
# Check if first time and then copy local cache files (with remote-ck)
if not os.path.isfile(work['dir_cache_repo_uoa']) and (not os.path.isfile(work['dir_cache_repo_info'])):
rx = load_text_file({'text_file': os.path.join(work['dir_default_repo'], cfg['file_cache_repo_uoa'])})
if rx['return'] > 0:
return rx # depends on [control=['if'], data=[]]
x1 = rx['string']
rx = load_text_file({'text_file': os.path.join(work['dir_default_repo'], cfg['file_cache_repo_info'])})
if rx['return'] > 0:
return rx # depends on [control=['if'], data=[]]
x2 = rx['string']
rx = save_text_file({'text_file': work['dir_cache_repo_info'], 'string': x2})
if rx['return'] > 0:
return rx # depends on [control=['if'], data=[]]
rx = save_text_file({'text_file': work['dir_cache_repo_uoa'], 'string': x1})
if rx['return'] > 0:
return rx # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
# Check if local configuration exists, and if not, create it
if not os.path.isfile(work['dir_local_cfg']):
# Create empty local configuration
rx = add({'repo_uoa': cfg['repo_name_local'], 'module_uoa': cfg['subdir_kernel'], 'data_uoa': work['local_kernel_uoa']})
if rx['return'] > 0:
return {'return': rx['return'], 'error': "can't create local configuration entry"} # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
# Read kernel configuration (if exists)
if os.path.isfile(work['dir_work_cfg']):
r = load_json_file({'json_file': work['dir_work_cfg']})
if r['return'] > 0:
return r # depends on [control=['if'], data=[]]
cfg1 = r['dict']
# Update cfg
r = merge_dicts({'dict1': cfg, 'dict2': cfg1})
if r['return'] > 0:
return r # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
initialized = True
return {'return': 0} |
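The `init` kernel above resolves every location through a chain of fallbacks: an explicit environment variable first, then the installed package location, then site-packages, and finally a hard error. A minimal sketch of that lookup-with-fallback pattern, under stated assumptions: `find_default_repo`, the `CK_ROOT` key, and the subdirectory name are illustrative stand-ins, not part of the kernel.

import os
import inspect

def find_default_repo(env_key='CK_ROOT', subdir='default-repo'):
    # 1) Explicit override via an environment variable, as with cfg['env_key_root'].
    root = os.environ.get(env_key, '').strip()
    if root:
        candidate = os.path.join(os.path.realpath(root), subdir)
        if os.path.isdir(candidate):
            return candidate
    # 2) Fall back to the directory that holds the running module.
    here = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
    candidate = os.path.join(here, subdir)
    if os.path.isdir(candidate):
        return candidate
    # 3) Give up; init() signals this case with {'return': 1, 'error': ...}.
    return None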
def get_draft_version(self, expand=[]):
"""
Get the current draft version of this layer.
:raises NotFound: if there is no draft version.
"""
target_url = self._client.get_url('VERSION', 'GET', 'draft', {'layer_id': self.id})
return self._manager._get(target_url, expand=expand) | def function[get_draft_version, parameter[self, expand]]:
constant[
Get the current draft version of this layer.
:raises NotFound: if there is no draft version.
]
variable[target_url] assign[=] call[name[self]._client.get_url, parameter[constant[VERSION], constant[GET], constant[draft], dictionary[[<ast.Constant object at 0x7da1b1023910>], [<ast.Attribute object at 0x7da1b10219f0>]]]]
return[call[name[self]._manager._get, parameter[name[target_url]]]] | keyword[def] identifier[get_draft_version] ( identifier[self] , identifier[expand] =[]):
literal[string]
identifier[target_url] = identifier[self] . identifier[_client] . identifier[get_url] ( literal[string] , literal[string] , literal[string] ,{ literal[string] : identifier[self] . identifier[id] })
keyword[return] identifier[self] . identifier[_manager] . identifier[_get] ( identifier[target_url] , identifier[expand] = identifier[expand] ) | def get_draft_version(self, expand=[]):
"""
Get the current draft version of this layer.
:raises NotFound: if there is no draft version.
"""
target_url = self._client.get_url('VERSION', 'GET', 'draft', {'layer_id': self.id})
return self._manager._get(target_url, expand=expand) |
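A hedged usage sketch for `get_draft_version`: the `layer` object and the `NotFound` exception import are stand-ins for whatever the surrounding client library actually exposes; only the method call itself comes from the code above.

# Hypothetical setup; construction of `layer` happens elsewhere in the client.
try:
    draft = layer.get_draft_version(expand=['data'])
except NotFound:
    print('layer %s has no draft version' % layer.id)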
def _feature_most_population(self, results):
"""
Find the placename with the largest population and return its country.
More population is a rough measure of importance.
Parameters
----------
results: dict
output of `query_geonames`
Returns
-------
most_pop: str
ISO code of country of place with largest population,
or empty string if none
"""
try:
populations = [i['population'] for i in results['hits']['hits']]
most_pop = results['hits']['hits'][np.array(populations).astype("int").argmax()]
return most_pop['country_code3']
except Exception as e:
return "" | def function[_feature_most_population, parameter[self, results]]:
constant[
Find the placename with the largest population and return its country.
More population is a rough measure of importance.
Parameters
----------
results: dict
output of `query_geonames`
Returns
-------
most_pop: str
ISO code of country of place with largest population,
or empty string if none
]
<ast.Try object at 0x7da2044c15d0> | keyword[def] identifier[_feature_most_population] ( identifier[self] , identifier[results] ):
literal[string]
keyword[try] :
identifier[populations] =[ identifier[i] [ literal[string] ] keyword[for] identifier[i] keyword[in] identifier[results] [ literal[string] ][ literal[string] ]]
identifier[most_pop] = identifier[results] [ literal[string] ][ literal[string] ][ identifier[np] . identifier[array] ( identifier[populations] ). identifier[astype] ( literal[string] ). identifier[argmax] ()]
keyword[return] identifier[most_pop] [ literal[string] ]
keyword[except] identifier[Exception] keyword[as] identifier[e] :
keyword[return] literal[string] | def _feature_most_population(self, results):
"""
Find the placename with the largest population and return its country.
More population is a rough measure of importance.
Parameters
----------
results: dict
output of `query_geonames`
Returns
-------
most_pop: str
ISO code of country of place with largest population,
or empty string if none
"""
try:
populations = [i['population'] for i in results['hits']['hits']]
most_pop = results['hits']['hits'][np.array(populations).astype('int').argmax()]
return most_pop['country_code3'] # depends on [control=['try'], data=[]]
except Exception as e:
return '' # depends on [control=['except'], data=[]] |
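`_feature_most_population` assumes an Elasticsearch-style payload and takes an argmax over the string-valued `population` fields. A self-contained sketch of the same selection with a mocked `results` dict (the field names match the code above; the values are invented):

import numpy as np

results = {'hits': {'hits': [
    {'population': '2140526', 'country_code3': 'FRA'},
    {'population': '8398748', 'country_code3': 'USA'},
]}}

populations = [hit['population'] for hit in results['hits']['hits']]
best = results['hits']['hits'][np.array(populations).astype('int').argmax()]
print(best['country_code3'])  # -> 'USA'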
def build_index_sortmerna(ref_fp, working_dir):
"""Build a SortMeRNA index for all reference databases.
Parameters
----------
ref_fp: tuple
filepaths to FASTA reference databases
working_dir: string
working directory path where to store the indexed database
Returns
-------
all_db: list
filepaths to SortMeRNA indexed reference databases
"""
logger = logging.getLogger(__name__)
logger.info('build_index_sortmerna files %s to'
' dir %s' % (ref_fp, working_dir))
all_db = []
for db in ref_fp:
fasta_dir, fasta_filename = split(db)
index_basename = splitext(fasta_filename)[0]
db_output = join(working_dir, index_basename)
logger.debug('processing file %s into location %s' % (db, db_output))
params = ['indexdb_rna', '--ref', '%s,%s' %
(db, db_output), '--tmpdir', working_dir]
sout, serr, res = _system_call(params)
if not res == 0:
logger.error('Problem running indexdb_rna on file %s to dir %s. '
'database not indexed' % (db, db_output))
logger.debug('stdout: %s' % sout)
logger.debug('stderr: %s' % serr)
logger.critical('execution halted')
raise RuntimeError('Cannot index database file %s' % db)
logger.debug('file %s indexed' % db)
all_db.append(db_output)
return all_db | def function[build_index_sortmerna, parameter[ref_fp, working_dir]]:
constant[Build a SortMeRNA index for all reference databases.
Parameters
----------
ref_fp: tuple
filepaths to FASTA reference databases
working_dir: string
working directory path where to store the indexed database
Returns
-------
all_db: list
filepaths to SortMeRNA indexed reference databases
]
variable[logger] assign[=] call[name[logging].getLogger, parameter[name[__name__]]]
call[name[logger].info, parameter[binary_operation[constant[build_index_sortmerna files %s to dir %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da20e956b00>, <ast.Name object at 0x7da20e9571c0>]]]]]
variable[all_db] assign[=] list[[]]
for taget[name[db]] in starred[name[ref_fp]] begin[:]
<ast.Tuple object at 0x7da20e9565c0> assign[=] call[name[split], parameter[name[db]]]
variable[index_basename] assign[=] call[call[name[splitext], parameter[name[fasta_filename]]]][constant[0]]
variable[db_output] assign[=] call[name[join], parameter[name[working_dir], name[index_basename]]]
call[name[logger].debug, parameter[binary_operation[constant[processing file %s into location %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da20e9578e0>, <ast.Name object at 0x7da20e9569b0>]]]]]
variable[params] assign[=] list[[<ast.Constant object at 0x7da20e957d00>, <ast.Constant object at 0x7da20e956d70>, <ast.BinOp object at 0x7da20e957880>, <ast.Constant object at 0x7da20e955540>, <ast.Name object at 0x7da20e954610>]]
<ast.Tuple object at 0x7da20e957dc0> assign[=] call[name[_system_call], parameter[name[params]]]
if <ast.UnaryOp object at 0x7da20e9567a0> begin[:]
call[name[logger].error, parameter[binary_operation[constant[Problem running indexdb_rna on file %s to dir %s. database not indexed] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da18f58f190>, <ast.Name object at 0x7da18f58e770>]]]]]
call[name[logger].debug, parameter[binary_operation[constant[stdout: %s] <ast.Mod object at 0x7da2590d6920> name[sout]]]]
call[name[logger].debug, parameter[binary_operation[constant[stderr: %s] <ast.Mod object at 0x7da2590d6920> name[serr]]]]
call[name[logger].critical, parameter[constant[execution halted]]]
<ast.Raise object at 0x7da20c992c80>
call[name[logger].debug, parameter[binary_operation[constant[file %s indexed] <ast.Mod object at 0x7da2590d6920> name[db]]]]
call[name[all_db].append, parameter[name[db_output]]]
return[name[all_db]] | keyword[def] identifier[build_index_sortmerna] ( identifier[ref_fp] , identifier[working_dir] ):
literal[string]
identifier[logger] = identifier[logging] . identifier[getLogger] ( identifier[__name__] )
identifier[logger] . identifier[info] ( literal[string]
literal[string] %( identifier[ref_fp] , identifier[working_dir] ))
identifier[all_db] =[]
keyword[for] identifier[db] keyword[in] identifier[ref_fp] :
identifier[fasta_dir] , identifier[fasta_filename] = identifier[split] ( identifier[db] )
identifier[index_basename] = identifier[splitext] ( identifier[fasta_filename] )[ literal[int] ]
identifier[db_output] = identifier[join] ( identifier[working_dir] , identifier[index_basename] )
identifier[logger] . identifier[debug] ( literal[string] %( identifier[db] , identifier[db_output] ))
identifier[params] =[ literal[string] , literal[string] , literal[string] %
( identifier[db] , identifier[db_output] ), literal[string] , identifier[working_dir] ]
identifier[sout] , identifier[serr] , identifier[res] = identifier[_system_call] ( identifier[params] )
keyword[if] keyword[not] identifier[res] == literal[int] :
identifier[logger] . identifier[error] ( literal[string]
literal[string] %( identifier[db] , identifier[db_output] ))
identifier[logger] . identifier[debug] ( literal[string] % identifier[sout] )
identifier[logger] . identifier[debug] ( literal[string] % identifier[serr] )
identifier[logger] . identifier[critical] ( literal[string] )
keyword[raise] identifier[RuntimeError] ( literal[string] % identifier[db] )
identifier[logger] . identifier[debug] ( literal[string] % identifier[db] )
identifier[all_db] . identifier[append] ( identifier[db_output] )
keyword[return] identifier[all_db] | def build_index_sortmerna(ref_fp, working_dir):
"""Build a SortMeRNA index for all reference databases.
Parameters
----------
ref_fp: tuple
filepaths to FASTA reference databases
working_dir: string
working directory path where to store the indexed database
Returns
-------
all_db: list
filepaths to SortMeRNA indexed reference databases
"""
logger = logging.getLogger(__name__)
logger.info('build_index_sortmerna files %s to dir %s' % (ref_fp, working_dir))
all_db = []
for db in ref_fp:
(fasta_dir, fasta_filename) = split(db)
index_basename = splitext(fasta_filename)[0]
db_output = join(working_dir, index_basename)
logger.debug('processing file %s into location %s' % (db, db_output))
params = ['indexdb_rna', '--ref', '%s,%s' % (db, db_output), '--tmpdir', working_dir]
(sout, serr, res) = _system_call(params)
if not res == 0:
logger.error('Problem running indexdb_rna on file %s to dir %s. database not indexed' % (db, db_output))
logger.debug('stdout: %s' % sout)
logger.debug('stderr: %s' % serr)
logger.critical('execution halted')
raise RuntimeError('Cannot index database file %s' % db) # depends on [control=['if'], data=[]]
logger.debug('file %s indexed' % db)
all_db.append(db_output) # depends on [control=['for'], data=['db']]
return all_db |
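A hedged call sketch for `build_index_sortmerna`: it assumes SortMeRNA's `indexdb_rna` binary is on PATH and that the FASTA files exist; the paths below are invented.

refs = ('/refs/silva_16s.fasta', '/refs/gg_13_8.fasta')  # hypothetical inputs
indexed = build_index_sortmerna(refs, '/tmp/sortmerna_idx')
for prefix in indexed:
    print('indexed database prefix:', prefix)  # one index prefix per input FASTA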
def _proxy(self):
"""
Generate an instance context for the instance; the context is capable of
performing various actions. All instance actions are proxied to the context.
:returns: UsageContext for this UsageInstance
:rtype: twilio.rest.preview.wireless.sim.usage.UsageContext
"""
if self._context is None:
self._context = UsageContext(self._version, sim_sid=self._solution['sim_sid'], )
return self._context | def function[_proxy, parameter[self]]:
constant[
Generate an instance context for the instance; the context is capable of
performing various actions. All instance actions are proxied to the context.
:returns: UsageContext for this UsageInstance
:rtype: twilio.rest.preview.wireless.sim.usage.UsageContext
]
if compare[name[self]._context is constant[None]] begin[:]
name[self]._context assign[=] call[name[UsageContext], parameter[name[self]._version]]
return[name[self]._context] | keyword[def] identifier[_proxy] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[_context] keyword[is] keyword[None] :
identifier[self] . identifier[_context] = identifier[UsageContext] ( identifier[self] . identifier[_version] , identifier[sim_sid] = identifier[self] . identifier[_solution] [ literal[string] ],)
keyword[return] identifier[self] . identifier[_context] | def _proxy(self):
"""
Generate an instance context for the instance; the context is capable of
performing various actions. All instance actions are proxied to the context.
:returns: UsageContext for this UsageInstance
:rtype: twilio.rest.preview.wireless.sim.usage.UsageContext
"""
if self._context is None:
self._context = UsageContext(self._version, sim_sid=self._solution['sim_sid']) # depends on [control=['if'], data=[]]
return self._context |
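`_proxy` is the standard lazy-initialization (memoization) idiom: build the context once on first access, then reuse it. A generic sketch of the same shape, detached from the Twilio types:

class Lazy(object):
    def __init__(self, factory):
        self._factory = factory
        self._value = None

    @property
    def value(self):
        # Construct only on first access; subsequent reads reuse the instance.
        if self._value is None:
            self._value = self._factory()
        return self._value

expensive = Lazy(lambda: object())
assert expensive.value is expensive.value  # one construction, two reads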
def create_connector_resource(name, server=None, **kwargs):
'''
Create a connector resource
'''
defaults = {
'description': '',
'enabled': True,
'id': name,
'poolName': '',
'objectType': 'user',
'target': 'server'
}
# Data = defaults + merge kwargs + poolname
data = defaults
data.update(kwargs)
if not data['poolName']:
raise CommandExecutionError('No pool name!')
# Fix for lowercase vs camelCase naming differences
for key, value in list(data.items()):
del data[key]
data[key.lower()] = value
return _create_element(name, 'resources/connector-resource', data, server) | def function[create_connector_resource, parameter[name, server]]:
constant[
Create a connector resource
]
variable[defaults] assign[=] dictionary[[<ast.Constant object at 0x7da1b1fa7850>, <ast.Constant object at 0x7da1b1fa6c80>, <ast.Constant object at 0x7da1b1fa6b00>, <ast.Constant object at 0x7da1b1fa7340>, <ast.Constant object at 0x7da1b1fa6f20>, <ast.Constant object at 0x7da1b1fa72b0>], [<ast.Constant object at 0x7da1b1fa7af0>, <ast.Constant object at 0x7da1b1fa6d70>, <ast.Name object at 0x7da1b1fa7370>, <ast.Constant object at 0x7da1b1fa6950>, <ast.Constant object at 0x7da1b1fa6560>, <ast.Constant object at 0x7da1b1fa65c0>]]
variable[data] assign[=] name[defaults]
call[name[data].update, parameter[name[kwargs]]]
if <ast.UnaryOp object at 0x7da1b1fa7670> begin[:]
<ast.Raise object at 0x7da1b1fa73d0>
for taget[tuple[[<ast.Name object at 0x7da1b1fa7730>, <ast.Name object at 0x7da1b1fa77c0>]]] in starred[call[name[list], parameter[call[name[data].items, parameter[]]]]] begin[:]
<ast.Delete object at 0x7da1b1c36440>
call[name[data]][call[name[key].lower, parameter[]]] assign[=] name[value]
return[call[name[_create_element], parameter[name[name], constant[resources/connector-resource], name[data], name[server]]]] | keyword[def] identifier[create_connector_resource] ( identifier[name] , identifier[server] = keyword[None] ,** identifier[kwargs] ):
literal[string]
identifier[defaults] ={
literal[string] : literal[string] ,
literal[string] : keyword[True] ,
literal[string] : identifier[name] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string]
}
identifier[data] = identifier[defaults]
identifier[data] . identifier[update] ( identifier[kwargs] )
keyword[if] keyword[not] identifier[data] [ literal[string] ]:
keyword[raise] identifier[CommandExecutionError] ( literal[string] )
keyword[for] identifier[key] , identifier[value] keyword[in] identifier[list] ( identifier[data] . identifier[items] ()):
keyword[del] identifier[data] [ identifier[key] ]
identifier[data] [ identifier[key] . identifier[lower] ()]= identifier[value]
keyword[return] identifier[_create_element] ( identifier[name] , literal[string] , identifier[data] , identifier[server] ) | def create_connector_resource(name, server=None, **kwargs):
"""
Create a connector resource
"""
defaults = {'description': '', 'enabled': True, 'id': name, 'poolName': '', 'objectType': 'user', 'target': 'server'}
# Data = defaults + merge kwargs + poolname
data = defaults
data.update(kwargs)
if not data['poolName']:
raise CommandExecutionError('No pool name!') # depends on [control=['if'], data=[]]
# Fix for lowercase vs camelCase naming differences
for (key, value) in list(data.items()):
del data[key]
data[key.lower()] = value # depends on [control=['for'], data=[]]
return _create_element(name, 'resources/connector-resource', data, server) |
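A hedged call sketch: `poolName` is the one argument the function refuses to default, and all keys are lower-cased before submission. The server name and pool below are invented.

create_connector_resource('jms/MyConnector',
                          server='myserver',      # hypothetical target instance
                          poolName='jms/MyPool',  # required; empty raises CommandExecutionError
                          description='demo connector resource')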
def nlms(u, d, M, step, eps=0.001, leak=0, initCoeffs=None, N=None,
returnCoeffs=False):
"""
Perform normalized least-mean-squares (NLMS) adaptive filtering on u to
minimize error given by e=d-y, where y is the output of the adaptive
filter.
Parameters
----------
u : array-like
One-dimensional filter input.
d : array-like
One-dimensional desired signal, i.e., the output of the unknown FIR
system which the adaptive filter should identify. Must have length >=
len(u), or N+M-1 if number of iterations are limited (via the N
parameter).
M : int
Desired number of filter taps (desired filter order + 1), must be
non-negative.
step : float
Step size of the algorithm, must be non-negative.
Optional Parameters
-------------------
eps : float
Regularization factor to avoid numerical issues when power of input
is close to zero. Defaults to 0.001. Must be non-negative.
leak : float
Leakage factor, must be equal to or greater than zero and smaller than
one. When greater than zero a leaky LMS filter is used. Defaults to 0,
i.e., no leakage.
initCoeffs : array-like
Initial filter coefficients to use. Should match desired number of
filter taps, defaults to zeros.
N : int
Number of iterations to run. Must be less than or equal to len(u)-M+1.
Defaults to len(u)-M+1.
returnCoeffs : boolean
If true, will return all filter coefficients for every iteration in an
N x M matrix. Does not include the initial coefficients. If false, only
the latest coefficients in a vector of length M is returned. Defaults
to false.
Returns
-------
y : numpy.array
Output values of LMS filter, array of length N.
e : numpy.array
Error signal, i.e., d-y. Array of length N.
w : numpy.array
Final filter coefficients in array of length M if returnCoeffs is
False. NxM array containing all filter coefficients for all iterations
otherwise.
Raises
------
TypeError
If number of filter taps M is not type integer, number of iterations N
is not type integer, or leakage leak is not type float/int.
ValueError
If number of iterations N is greater than len(u)-M+1, number of filter
taps M is negative, or if step-size or leakage is outside specified
range.
Minimal Working Example
-----------------------
>>> import numpy as np
>>>
>>> np.random.seed(1337)
>>> ulen = 2000
>>> coeff = np.concatenate(([4], np.zeros(10), [-11], np.zeros(7), [0.7]))
>>> u = np.random.randn(ulen)
>>> d = np.convolve(u, coeff)
>>>
>>> M = 20 # No. of taps
>>> step = 1 # Step size
>>> y, e, w = nlms(u, d, M, step)
>>> print np.allclose(w, coeff)
True
Extended Example
----------------
>>> import numpy as np
>>>
>>> np.random.seed(1337)
>>> N = 1000
>>> coeffs = np.concatenate(([13], np.zeros(9), [-3], np.zeros(8), [-.2]))
>>> u = np.random.randn(20000) # Note len(u) >> N but we limit iterations
>>> d = np.convolve(u, coeffs)
>>>
>>> M = 20 # No. of taps
>>> step = 1 # Step size
>>> y, e, w = nlms(u, d, M, step, N=N, returnCoeffs=True)
>>> y.shape == (N,)
True
>>> e.shape == (N,)
True
>>> w.shape == (N, M)
True
>>> # Calculate mean square weight error
>>> mswe = np.mean((w - coeffs)**2, axis=1)
>>> # mswe should never increase, so every diff should be <= 0 (within tolerance)
>>> diff = np.diff(mswe)
>>> (diff <= 1e-10).all()
True
"""
# Check epsilon
_pchk.checkRegFactor(eps)
# Num taps check
_pchk.checkNumTaps(M)
# Max iteration check
if N is None:
N = len(u)-M+1
_pchk.checkIter(N, len(u)-M+1)
# Check len(d)
_pchk.checkDesiredSignal(d, N, M)
# Step check
_pchk.checkStep(step)
# Leakage check
_pchk.checkLeakage(leak)
# Init. coeffs check
if initCoeffs is None:
initCoeffs = np.zeros(M)
else:
_pchk.checkInitCoeffs(initCoeffs, M)
# Initialization
y = np.zeros(N) # Filter output
e = np.zeros(N) # Error signal
w = initCoeffs # Initial filter coeffs
leakstep = (1 - step*leak)
if returnCoeffs:
W = np.zeros((N, M)) # Matrix to hold coeffs for each iteration
# Perform filtering
for n in xrange(N):
x = np.flipud(u[n:n+M]) # Slice to get view of M latest datapoints
y[n] = np.dot(x, w)
e[n] = d[n+M-1] - y[n]
normFactor = 1./(np.dot(x, x) + eps)
w = leakstep * w + step * normFactor * x * e[n]
y[n] = np.dot(x, w)
if returnCoeffs:
W[n] = w
if returnCoeffs:
w = W
return y, e, w | def function[nlms, parameter[u, d, M, step, eps, leak, initCoeffs, N, returnCoeffs]]:
constant[
Perform normalized least-mean-squares (NLMS) adaptive filtering on u to
minimize error given by e=d-y, where y is the output of the adaptive
filter.
Parameters
----------
u : array-like
One-dimensional filter input.
d : array-like
One-dimensional desired signal, i.e., the output of the unknown FIR
system which the adaptive filter should identify. Must have length >=
len(u), or N+M-1 if number of iterations are limited (via the N
parameter).
M : int
Desired number of filter taps (desired filter order + 1), must be
non-negative.
step : float
Step size of the algorithm, must be non-negative.
Optional Parameters
-------------------
eps : float
Regularization factor to avoid numerical issues when power of input
is close to zero. Defaults to 0.001. Must be non-negative.
leak : float
Leakage factor, must be equal to or greater than zero and smaller than
one. When greater than zero a leaky LMS filter is used. Defaults to 0,
i.e., no leakage.
initCoeffs : array-like
Initial filter coefficients to use. Should match desired number of
filter taps, defaults to zeros.
N : int
Number of iterations to run. Must be less than or equal to len(u)-M+1.
Defaults to len(u)-M+1.
returnCoeffs : boolean
If true, will return all filter coefficients for every iteration in an
N x M matrix. Does not include the initial coefficients. If false, only
the latest coefficients in a vector of length M is returned. Defaults
to false.
Returns
-------
y : numpy.array
Output values of LMS filter, array of length N.
e : numpy.array
Error signal, i.e., d-y. Array of length N.
w : numpy.array
Final filter coefficients in array of length M if returnCoeffs is
False. NxM array containing all filter coefficients for all iterations
otherwise.
Raises
------
TypeError
If number of filter taps M is not type integer, number of iterations N
is not type integer, or leakage leak is not type float/int.
ValueError
If number of iterations N is greater than len(u)-M+1, number of filter
taps M is negative, or if step-size or leakage is outside specified
range.
Minimal Working Example
-----------------------
>>> import numpy as np
>>>
>>> np.random.seed(1337)
>>> ulen = 2000
>>> coeff = np.concatenate(([4], np.zeros(10), [-11], np.zeros(7), [0.7]))
>>> u = np.random.randn(ulen)
>>> d = np.convolve(u, coeff)
>>>
>>> M = 20 # No. of taps
>>> step = 1 # Step size
>>> y, e, w = nlms(u, d, M, step)
>>> print np.allclose(w, coeff)
True
Extended Example
----------------
>>> import numpy as np
>>>
>>> np.random.seed(1337)
>>> N = 1000
>>> coeffs = np.concatenate(([13], np.zeros(9), [-3], np.zeros(8), [-.2]))
>>> u = np.random.randn(20000) # Note len(u) >> N but we limit iterations
>>> d = np.convolve(u, coeffs)
>>>
>>> M = 20 # No. of taps
>>> step = 1 # Step size
>>> y, e, w = nlms(u, d, M, step, N=N, returnCoeffs=True)
>>> y.shape == (N,)
True
>>> e.shape == (N,)
True
>>> w.shape == (N, M)
True
>>> # Calculate mean square weight error
>>> mswe = np.mean((w - coeffs)**2, axis=1)
>>> # mswe should never increase, so every diff should be <= 0 (within tolerance)
>>> diff = np.diff(mswe)
>>> (diff <= 1e-10).all()
True
]
call[name[_pchk].checkRegFactor, parameter[name[eps]]]
call[name[_pchk].checkNumTaps, parameter[name[M]]]
if compare[name[N] is constant[None]] begin[:]
variable[N] assign[=] binary_operation[binary_operation[call[name[len], parameter[name[u]]] - name[M]] + constant[1]]
call[name[_pchk].checkIter, parameter[name[N], binary_operation[binary_operation[call[name[len], parameter[name[u]]] - name[M]] + constant[1]]]]
call[name[_pchk].checkDesiredSignal, parameter[name[d], name[N], name[M]]]
call[name[_pchk].checkStep, parameter[name[step]]]
call[name[_pchk].checkLeakage, parameter[name[leak]]]
if compare[name[initCoeffs] is constant[None]] begin[:]
variable[initCoeffs] assign[=] call[name[np].zeros, parameter[name[M]]]
variable[y] assign[=] call[name[np].zeros, parameter[name[N]]]
variable[e] assign[=] call[name[np].zeros, parameter[name[N]]]
variable[w] assign[=] name[initCoeffs]
variable[leakstep] assign[=] binary_operation[constant[1] - binary_operation[name[step] * name[leak]]]
if name[returnCoeffs] begin[:]
variable[W] assign[=] call[name[np].zeros, parameter[tuple[[<ast.Name object at 0x7da1b2347cd0>, <ast.Name object at 0x7da18fe93c10>]]]]
for taget[name[n]] in starred[call[name[xrange], parameter[name[N]]]] begin[:]
variable[x] assign[=] call[name[np].flipud, parameter[call[name[u]][<ast.Slice object at 0x7da18fe90c40>]]]
call[name[y]][name[n]] assign[=] call[name[np].dot, parameter[name[x], name[w]]]
call[name[e]][name[n]] assign[=] binary_operation[call[name[d]][binary_operation[binary_operation[name[n] + name[M]] - constant[1]]] - call[name[y]][name[n]]]
variable[normFactor] assign[=] binary_operation[constant[1.0] / binary_operation[call[name[np].dot, parameter[name[x], name[x]]] + name[eps]]]
variable[w] assign[=] binary_operation[binary_operation[name[leakstep] * name[w]] + binary_operation[binary_operation[binary_operation[name[step] * name[normFactor]] * name[x]] * call[name[e]][name[n]]]]
call[name[y]][name[n]] assign[=] call[name[np].dot, parameter[name[x], name[w]]]
if name[returnCoeffs] begin[:]
call[name[W]][name[n]] assign[=] name[w]
if name[returnCoeffs] begin[:]
variable[w] assign[=] name[W]
return[tuple[[<ast.Name object at 0x7da18fe92320>, <ast.Name object at 0x7da18fe936a0>, <ast.Name object at 0x7da18fe92800>]]] | keyword[def] identifier[nlms] ( identifier[u] , identifier[d] , identifier[M] , identifier[step] , identifier[eps] = literal[int] , identifier[leak] = literal[int] , identifier[initCoeffs] = keyword[None] , identifier[N] = keyword[None] ,
identifier[returnCoeffs] = keyword[False] ):
literal[string]
identifier[_pchk] . identifier[checkRegFactor] ( identifier[eps] )
identifier[_pchk] . identifier[checkNumTaps] ( identifier[M] )
keyword[if] identifier[N] keyword[is] keyword[None] :
identifier[N] = identifier[len] ( identifier[u] )- identifier[M] + literal[int]
identifier[_pchk] . identifier[checkIter] ( identifier[N] , identifier[len] ( identifier[u] )- identifier[M] + literal[int] )
identifier[_pchk] . identifier[checkDesiredSignal] ( identifier[d] , identifier[N] , identifier[M] )
identifier[_pchk] . identifier[checkStep] ( identifier[step] )
identifier[_pchk] . identifier[checkLeakage] ( identifier[leak] )
keyword[if] identifier[initCoeffs] keyword[is] keyword[None] :
identifier[initCoeffs] = identifier[np] . identifier[zeros] ( identifier[M] )
keyword[else] :
identifier[_pchk] . identifier[checkInitCoeffs] ( identifier[initCoeffs] , identifier[M] )
identifier[y] = identifier[np] . identifier[zeros] ( identifier[N] )
identifier[e] = identifier[np] . identifier[zeros] ( identifier[N] )
identifier[w] = identifier[initCoeffs]
identifier[leakstep] =( literal[int] - identifier[step] * identifier[leak] )
keyword[if] identifier[returnCoeffs] :
identifier[W] = identifier[np] . identifier[zeros] (( identifier[N] , identifier[M] ))
keyword[for] identifier[n] keyword[in] identifier[xrange] ( identifier[N] ):
identifier[x] = identifier[np] . identifier[flipud] ( identifier[u] [ identifier[n] : identifier[n] + identifier[M] ])
identifier[y] [ identifier[n] ]= identifier[np] . identifier[dot] ( identifier[x] , identifier[w] )
identifier[e] [ identifier[n] ]= identifier[d] [ identifier[n] + identifier[M] - literal[int] ]- identifier[y] [ identifier[n] ]
identifier[normFactor] = literal[int] /( identifier[np] . identifier[dot] ( identifier[x] , identifier[x] )+ identifier[eps] )
identifier[w] = identifier[leakstep] * identifier[w] + identifier[step] * identifier[normFactor] * identifier[x] * identifier[e] [ identifier[n] ]
identifier[y] [ identifier[n] ]= identifier[np] . identifier[dot] ( identifier[x] , identifier[w] )
keyword[if] identifier[returnCoeffs] :
identifier[W] [ identifier[n] ]= identifier[w]
keyword[if] identifier[returnCoeffs] :
identifier[w] = identifier[W]
keyword[return] identifier[y] , identifier[e] , identifier[w] | def nlms(u, d, M, step, eps=0.001, leak=0, initCoeffs=None, N=None, returnCoeffs=False):
"""
Perform normalized least-mean-squares (NLMS) adaptive filtering on u to
minimize error given by e=d-y, where y is the output of the adaptive
filter.
Parameters
----------
u : array-like
One-dimensional filter input.
d : array-like
One-dimensional desired signal, i.e., the output of the unknown FIR
system which the adaptive filter should identify. Must have length >=
len(u), or N+M-1 if number of iterations are limited (via the N
parameter).
M : int
Desired number of filter taps (desired filter order + 1), must be
non-negative.
step : float
Step size of the algorithm, must be non-negative.
Optional Parameters
-------------------
eps : float
Regularization factor to avoid numerical issues when power of input
is close to zero. Defaults to 0.001. Must be non-negative.
leak : float
Leakage factor, must be equal to or greater than zero and smaller than
one. When greater than zero a leaky LMS filter is used. Defaults to 0,
i.e., no leakage.
initCoeffs : array-like
Initial filter coefficients to use. Should match desired number of
filter taps, defaults to zeros.
N : int
Number of iterations to run. Must be less than or equal to len(u)-M+1.
Defaults to len(u)-M+1.
returnCoeffs : boolean
If true, will return all filter coefficients for every iteration in an
N x M matrix. Does not include the initial coefficients. If false, only
the latest coefficients in a vector of length M is returned. Defaults
to false.
Returns
-------
y : numpy.array
Output values of LMS filter, array of length N.
e : numpy.array
Error signal, i.e., d-y. Array of length N.
w : numpy.array
Final filter coefficients in array of length M if returnCoeffs is
False. NxM array containing all filter coefficients for all iterations
otherwise.
Raises
------
TypeError
If number of filter taps M is not type integer, number of iterations N
is not type integer, or leakage leak is not type float/int.
ValueError
If number of iterations N is greater than len(u)-M+1, number of filter
taps M is negative, or if step-size or leakage is outside specified
range.
Minimal Working Example
-----------------------
>>> import numpy as np
>>>
>>> np.random.seed(1337)
>>> ulen = 2000
>>> coeff = np.concatenate(([4], np.zeros(10), [-11], np.zeros(7), [0.7]))
>>> u = np.random.randn(ulen)
>>> d = np.convolve(u, coeff)
>>>
>>> M = 20 # No. of taps
>>> step = 1 # Step size
>>> y, e, w = nlms(u, d, M, step)
>>> print np.allclose(w, coeff)
True
Extended Example
----------------
>>> import numpy as np
>>>
>>> np.random.seed(1337)
>>> N = 1000
>>> coeffs = np.concatenate(([13], np.zeros(9), [-3], np.zeros(8), [-.2]))
>>> u = np.random.randn(20000) # Note len(u) >> N but we limit iterations
>>> d = np.convolve(u, coeffs)
>>>
>>> M = 20 # No. of taps
>>> step = 1 # Step size
>>> y, e, w = nlms(u, d, M, step, N=N, returnCoeffs=True)
>>> y.shape == (N,)
True
>>> e.shape == (N,)
True
>>> w.shape == (N, M)
True
>>> # Calculate mean square weight error
>>> mswe = np.mean((w - coeffs)**2, axis=1)
>>> # mswe should never increase, so every diff should be <= 0 (within tolerance)
>>> diff = np.diff(mswe)
>>> (diff <= 1e-10).all()
True
"""
# Check epsilon
_pchk.checkRegFactor(eps)
# Num taps check
_pchk.checkNumTaps(M)
# Max iteration check
if N is None:
N = len(u) - M + 1 # depends on [control=['if'], data=['N']]
_pchk.checkIter(N, len(u) - M + 1)
# Check len(d)
_pchk.checkDesiredSignal(d, N, M)
# Step check
_pchk.checkStep(step)
# Leakage check
_pchk.checkLeakage(leak)
# Init. coeffs check
if initCoeffs is None:
initCoeffs = np.zeros(M) # depends on [control=['if'], data=['initCoeffs']]
else:
_pchk.checkInitCoeffs(initCoeffs, M)
# Initialization
y = np.zeros(N) # Filter output
e = np.zeros(N) # Error signal
w = initCoeffs # Initial filter coeffs
leakstep = 1 - step * leak
if returnCoeffs:
W = np.zeros((N, M)) # Matrix to hold coeffs for each iteration # depends on [control=['if'], data=[]]
# Perform filtering
for n in xrange(N):
x = np.flipud(u[n:n + M]) # Slice to get view of M latest datapoints
y[n] = np.dot(x, w)
e[n] = d[n + M - 1] - y[n]
normFactor = 1.0 / (np.dot(x, x) + eps)
w = leakstep * w + step * normFactor * x * e[n]
y[n] = np.dot(x, w)
if returnCoeffs:
W[n] = w # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['n']]
if returnCoeffs:
w = W # depends on [control=['if'], data=[]]
return (y, e, w) |
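For reference, the update the nlms loop body implements, restated in LaTeX directly from the code above (with $\mu$ = step, $\lambda$ = leak, $\varepsilon$ = eps, and $\mathbf{x}(n)$ the flipped slice u[n:n+M]):

\begin{aligned}
y(n) &= \mathbf{x}(n)^{T}\,\mathbf{w}(n) \\
e(n) &= d(n+M-1) - y(n) \\
\mathbf{w}(n+1) &= (1 - \mu\lambda)\,\mathbf{w}(n)
                 + \frac{\mu\, e(n)}{\mathbf{x}(n)^{T}\mathbf{x}(n) + \varepsilon}\,\mathbf{x}(n)
\end{aligned}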
def revoke(self, cidr_ip=None, ec2_group=None):
"""
Revoke access to a CIDR range or EC2 SecurityGroup.
You need to pass in either a CIDR block or
an EC2 SecurityGroup from which to revoke access.
@type cidr_ip: string
@param cidr_ip: A valid CIDR IP range to revoke
@type ec2_group: :class:`boto.ec2.securitygroup.SecurityGroup`
@rtype: bool
@return: True if successful.
"""
if isinstance(ec2_group, SecurityGroup):
group_name = ec2_group.name
group_owner_id = ec2_group.owner_id
return self.connection.revoke_dbsecurity_group(
self.name,
ec2_security_group_name=group_name,
ec2_security_group_owner_id=group_owner_id)
# Revoking by CIDR IP range
return self.connection.revoke_dbsecurity_group(
self.name, cidr_ip=cidr_ip) | def function[revoke, parameter[self, cidr_ip, ec2_group]]:
constant[
Revoke access to a CIDR range or EC2 SecurityGroup.
You need to pass in either a CIDR block or
an EC2 SecurityGroup from which to revoke access.
@type cidr_ip: string
@param cidr_ip: A valid CIDR IP range to revoke
@type ec2_group: :class:`boto.ec2.securitygroup.SecurityGroup`
@rtype: bool
@return: True if successful.
]
if call[name[isinstance], parameter[name[ec2_group], name[SecurityGroup]]] begin[:]
variable[group_name] assign[=] name[ec2_group].name
variable[group_owner_id] assign[=] name[ec2_group].owner_id
return[call[name[self].connection.revoke_dbsecurity_group, parameter[name[self].name]]]
return[call[name[self].connection.revoke_dbsecurity_group, parameter[name[self].name]]] | keyword[def] identifier[revoke] ( identifier[self] , identifier[cidr_ip] = keyword[None] , identifier[ec2_group] = keyword[None] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[ec2_group] , identifier[SecurityGroup] ):
identifier[group_name] = identifier[ec2_group] . identifier[name]
identifier[group_owner_id] = identifier[ec2_group] . identifier[owner_id]
keyword[return] identifier[self] . identifier[connection] . identifier[revoke_dbsecurity_group] (
identifier[self] . identifier[name] ,
identifier[ec2_security_group_name] = identifier[group_name] ,
identifier[ec2_security_group_owner_id] = identifier[group_owner_id] )
keyword[return] identifier[self] . identifier[connection] . identifier[revoke_dbsecurity_group] (
identifier[self] . identifier[name] , identifier[cidr_ip] = identifier[cidr_ip] ) | def revoke(self, cidr_ip=None, ec2_group=None):
"""
Revoke access to a CIDR range or EC2 SecurityGroup.
You need to pass in either a CIDR block or
an EC2 SecurityGroup from which to revoke access.
@type cidr_ip: string
@param cidr_ip: A valid CIDR IP range to revoke
@type ec2_group: :class:`boto.ec2.securitygroup.SecurityGroup`
@rtype: bool
@return: True if successful.
"""
if isinstance(ec2_group, SecurityGroup):
group_name = ec2_group.name
group_owner_id = ec2_group.owner_id
return self.connection.revoke_dbsecurity_group(self.name, ec2_security_group_name=group_name, ec2_security_group_owner_id=group_owner_id) # depends on [control=['if'], data=[]]
# Revoking by CIDR IP range
return self.connection.revoke_dbsecurity_group(self.name, cidr_ip=cidr_ip) |
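Usage sketch for the two branches of `revoke`; the group object and the CIDR block below are illustrative stand-ins.

# EC2 SecurityGroup branch: name and owner id are pulled from the group object.
db_sec_group.revoke(ec2_group=my_ec2_group)

# CIDR branch: falls through to revoke_dbsecurity_group with cidr_ip set.
db_sec_group.revoke(cidr_ip='203.0.113.0/24')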
def restartCheckpoint(self, jobStore):
"""Restart a checkpoint after the total failure of jobs in its subtree.
Writes the changes to the jobStore immediately. All the
checkpoint's successors will be deleted, but its retry count
will *not* be decreased.
Returns a list with the IDs of any successors deleted.
"""
assert self.checkpoint is not None
successorsDeleted = []
if self.stack or self.services or self.command != None:
if self.command != None:
assert self.command == self.checkpoint
logger.debug("Checkpoint job already has command set to run")
else:
self.command = self.checkpoint
jobStore.update(self) # Update immediately to ensure that checkpoint
# is made before deleting any remaining successors
if self.stack or self.services:
# If the subtree of successors is not complete restart everything
logger.debug("Checkpoint job has unfinished successor jobs, deleting the jobs on the stack: %s, services: %s " %
(self.stack, self.services))
# Delete everything on the stack, as these represent successors to clean
# up as we restart the queue
def recursiveDelete(jobGraph2):
# Recursive walk the stack to delete all remaining jobs
for jobs in jobGraph2.stack + jobGraph2.services:
for jobNode in jobs:
if jobStore.exists(jobNode.jobStoreID):
recursiveDelete(jobStore.load(jobNode.jobStoreID))
else:
logger.debug("Job %s has already been deleted", jobNode)
if jobGraph2 != self:
logger.debug("Checkpoint is deleting old successor job: %s", jobGraph2.jobStoreID)
jobStore.delete(jobGraph2.jobStoreID)
successorsDeleted.append(jobGraph2.jobStoreID)
recursiveDelete(self)
self.stack = [ [], [] ] # Initialise the job to mimic the state of a job
# that has been previously serialised but which as yet has no successors
self.services = [] # Empty the services
# Update the jobStore to avoid doing this twice on failure and make this clean.
jobStore.update(self)
return successorsDeleted | def function[restartCheckpoint, parameter[self, jobStore]]:
constant[Restart a checkpoint after the total failure of jobs in its subtree.
Writes the changes to the jobStore immediately. All the
checkpoint's successors will be deleted, but its retry count
will *not* be decreased.
Returns a list with the IDs of any successors deleted.
]
assert[compare[name[self].checkpoint is_not constant[None]]]
variable[successorsDeleted] assign[=] list[[]]
if <ast.BoolOp object at 0x7da18dc05db0> begin[:]
if compare[name[self].command not_equal[!=] constant[None]] begin[:]
assert[compare[name[self].command equal[==] name[self].checkpoint]]
call[name[logger].debug, parameter[constant[Checkpoint job already has command set to run]]]
call[name[jobStore].update, parameter[name[self]]]
if <ast.BoolOp object at 0x7da18dc058a0> begin[:]
call[name[logger].debug, parameter[binary_operation[constant[Checkpoint job has unfinished successor jobs, deleting the jobs on the stack: %s, services: %s ] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da18dc04c10>, <ast.Attribute object at 0x7da18dc04610>]]]]]
def function[recursiveDelete, parameter[jobGraph2]]:
for taget[name[jobs]] in starred[binary_operation[name[jobGraph2].stack + name[jobGraph2].services]] begin[:]
for taget[name[jobNode]] in starred[name[jobs]] begin[:]
if call[name[jobStore].exists, parameter[name[jobNode].jobStoreID]] begin[:]
call[name[recursiveDelete], parameter[call[name[jobStore].load, parameter[name[jobNode].jobStoreID]]]]
if compare[name[jobGraph2] not_equal[!=] name[self]] begin[:]
call[name[logger].debug, parameter[constant[Checkpoint is deleting old successor job: %s], name[jobGraph2].jobStoreID]]
call[name[jobStore].delete, parameter[name[jobGraph2].jobStoreID]]
call[name[successorsDeleted].append, parameter[name[jobGraph2].jobStoreID]]
call[name[recursiveDelete], parameter[name[self]]]
name[self].stack assign[=] list[[<ast.List object at 0x7da18dc076d0>, <ast.List object at 0x7da18dc058d0>]]
name[self].services assign[=] list[[]]
call[name[jobStore].update, parameter[name[self]]]
return[name[successorsDeleted]] | keyword[def] identifier[restartCheckpoint] ( identifier[self] , identifier[jobStore] ):
literal[string]
keyword[assert] identifier[self] . identifier[checkpoint] keyword[is] keyword[not] keyword[None]
identifier[successorsDeleted] =[]
keyword[if] identifier[self] . identifier[stack] keyword[or] identifier[self] . identifier[services] keyword[or] identifier[self] . identifier[command] != keyword[None] :
keyword[if] identifier[self] . identifier[command] != keyword[None] :
keyword[assert] identifier[self] . identifier[command] == identifier[self] . identifier[checkpoint]
identifier[logger] . identifier[debug] ( literal[string] )
keyword[else] :
identifier[self] . identifier[command] = identifier[self] . identifier[checkpoint]
identifier[jobStore] . identifier[update] ( identifier[self] )
keyword[if] identifier[self] . identifier[stack] keyword[or] identifier[self] . identifier[services] :
identifier[logger] . identifier[debug] ( literal[string] %
( identifier[self] . identifier[stack] , identifier[self] . identifier[services] ))
keyword[def] identifier[recursiveDelete] ( identifier[jobGraph2] ):
keyword[for] identifier[jobs] keyword[in] identifier[jobGraph2] . identifier[stack] + identifier[jobGraph2] . identifier[services] :
keyword[for] identifier[jobNode] keyword[in] identifier[jobs] :
keyword[if] identifier[jobStore] . identifier[exists] ( identifier[jobNode] . identifier[jobStoreID] ):
identifier[recursiveDelete] ( identifier[jobStore] . identifier[load] ( identifier[jobNode] . identifier[jobStoreID] ))
keyword[else] :
identifier[logger] . identifier[debug] ( literal[string] , identifier[jobNode] )
keyword[if] identifier[jobGraph2] != identifier[self] :
identifier[logger] . identifier[debug] ( literal[string] , identifier[jobGraph2] . identifier[jobStoreID] )
identifier[jobStore] . identifier[delete] ( identifier[jobGraph2] . identifier[jobStoreID] )
identifier[successorsDeleted] . identifier[append] ( identifier[jobGraph2] . identifier[jobStoreID] )
identifier[recursiveDelete] ( identifier[self] )
identifier[self] . identifier[stack] =[[],[]]
identifier[self] . identifier[services] =[]
identifier[jobStore] . identifier[update] ( identifier[self] )
keyword[return] identifier[successorsDeleted] | def restartCheckpoint(self, jobStore):
"""Restart a checkpoint after the total failure of jobs in its subtree.
Writes the changes to the jobStore immediately. All the
checkpoint's successors will be deleted, but its retry count
will *not* be decreased.
Returns a list with the IDs of any successors deleted.
"""
assert self.checkpoint is not None
successorsDeleted = []
if self.stack or self.services or self.command != None:
if self.command != None:
assert self.command == self.checkpoint
logger.debug('Checkpoint job already has command set to run') # depends on [control=['if'], data=[]]
else:
self.command = self.checkpoint
jobStore.update(self) # Update immediately to ensure that checkpoint
# is made before deleting any remaining successors
if self.stack or self.services:
# If the subtree of successors is not complete restart everything
logger.debug('Checkpoint job has unfinished successor jobs, deleting the jobs on the stack: %s, services: %s ' % (self.stack, self.services))
# Delete everything on the stack, as these represent successors to clean
# up as we restart the queue
def recursiveDelete(jobGraph2):
# Recursive walk the stack to delete all remaining jobs
for jobs in jobGraph2.stack + jobGraph2.services:
for jobNode in jobs:
if jobStore.exists(jobNode.jobStoreID):
recursiveDelete(jobStore.load(jobNode.jobStoreID)) # depends on [control=['if'], data=[]]
else:
logger.debug('Job %s has already been deleted', jobNode) # depends on [control=['for'], data=['jobNode']] # depends on [control=['for'], data=['jobs']]
if jobGraph2 != self:
logger.debug('Checkpoint is deleting old successor job: %s', jobGraph2.jobStoreID)
jobStore.delete(jobGraph2.jobStoreID)
successorsDeleted.append(jobGraph2.jobStoreID) # depends on [control=['if'], data=['jobGraph2']]
recursiveDelete(self)
self.stack = [[], []] # Initialise the job to mimic the state of a job
# that has been previously serialised but which as yet has no successors
self.services = [] # Empty the services
# Update the jobStore to avoid doing this twice on failure and make this clean.
jobStore.update(self) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
return successorsDeleted |
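# Hedged usage sketch (not in the original source): restartCheckpoint is a
# jobGraph method, so this assumes a Toil-style checkpoint job loaded from a
# job store after a total failure of its subtree:
#
#     deletedIDs = jobGraph.restartCheckpoint(jobStore)
#     # the checkpoint command is restored, and every stale successor whose
#     # ID appears in deletedIDs has been removed from the job store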
def store(self, loc, df):
"""Store dataframe in the given location.
Store some arbitrary dataframe:
>>> data.store('my_data', df)
Now recover it from the global store.
>>> data.my_data
...
"""
path = "%s.%s" % (self._root / "processed" / loc, FILE_EXTENSION)
WRITE_DF(df, path, **WRITE_DF_OPTS)
self._cache[loc] = df | def function[store, parameter[self, loc, df]]:
constant[Store dataframe in the given location.
Store some arbitrary dataframe:
>>> data.store('my_data', df)
Now recover it from the global store.
>>> data.my_data
...
]
variable[path] assign[=] binary_operation[constant[%s.%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.BinOp object at 0x7da1b05f16c0>, <ast.Name object at 0x7da1b05f32e0>]]]
call[name[WRITE_DF], parameter[name[df], name[path]]]
call[name[self]._cache][name[loc]] assign[=] name[df] | keyword[def] identifier[store] ( identifier[self] , identifier[loc] , identifier[df] ):
literal[string]
identifier[path] = literal[string] %( identifier[self] . identifier[_root] / literal[string] / identifier[loc] , identifier[FILE_EXTENSION] )
identifier[WRITE_DF] ( identifier[df] , identifier[path] ,** identifier[WRITE_DF_OPTS] )
identifier[self] . identifier[_cache] [ identifier[loc] ]= identifier[df] | def store(self, loc, df):
"""Store dataframe in the given location.
Store some arbitrary dataframe:
>>> data.store('my_data', df)
Now recover it from the global store.
>>> data.my_data
...
"""
path = '%s.%s' % (self._root / 'processed' / loc, FILE_EXTENSION)
WRITE_DF(df, path, **WRITE_DF_OPTS)
self._cache[loc] = df |
def toints(self):
"""\
Returns an iterable of integers interpreting the content of `seq`
        as a sequence of binary numbers of length 8.
"""
def grouper(iterable, n, fillvalue=None):
"Collect data into fixed-length chunks or blocks"
# grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx
return zip_longest(*[iter(iterable)] * n, fillvalue=fillvalue)
return [int(''.join(map(str, group)), 2) for group in grouper(self._data, 8, 0)] | def function[toints, parameter[self]]:
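# Minimal runnable sketch of the same packing step (assumed names: the class
# above keeps its bit sequence in self._data; zip_longest comes from
# itertools, as in the nested grouper):
from itertools import zip_longest

def _bits_to_ints(bits):
    # pad the final group with 0 so e.g. 12 bits still yield two integers
    groups = zip_longest(*[iter(bits)] * 8, fillvalue=0)
    return [int(''.join(map(str, g)), 2) for g in groups]

assert _bits_to_ints([0, 1, 0, 0, 0, 0, 0, 1]) == [65]  # 0b01000001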
constant[ Returns an iterable of integers interpreting the content of `seq`
as sequence of binary numbers of length 8.
]
def function[grouper, parameter[iterable, n, fillvalue]]:
constant[Collect data into fixed-length chunks or blocks]
return[call[name[zip_longest], parameter[<ast.Starred object at 0x7da18fe918a0>]]]
return[<ast.ListComp object at 0x7da18fe92320>] | keyword[def] identifier[toints] ( identifier[self] ):
literal[string]
keyword[def] identifier[grouper] ( identifier[iterable] , identifier[n] , identifier[fillvalue] = keyword[None] ):
literal[string]
keyword[return] identifier[zip_longest] (*[ identifier[iter] ( identifier[iterable] )]* identifier[n] , identifier[fillvalue] = identifier[fillvalue] )
keyword[return] [ identifier[int] ( literal[string] . identifier[join] ( identifier[map] ( identifier[str] , identifier[group] )), literal[int] ) keyword[for] identifier[group] keyword[in] identifier[grouper] ( identifier[self] . identifier[_data] , literal[int] , literal[int] )] | def toints(self):
""" Returns an iterable of integers interpreting the content of `seq`
as sequence of binary numbers of length 8.
"""
def grouper(iterable, n, fillvalue=None):
"""Collect data into fixed-length chunks or blocks"""
# grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx
return zip_longest(*[iter(iterable)] * n, fillvalue=fillvalue)
return [int(''.join(map(str, group)), 2) for group in grouper(self._data, 8, 0)] |
def get_method_bridges(self, name, arg_types=()):
"""
generator of bridge methods found that adapt the return types of a
named method and having argument type descriptors matching
those in arg_types.
"""
for m in self.get_methods_by_name(name):
if ((m.is_bridge() and
m.get_arg_type_descriptors() == arg_types)):
yield m | def function[get_method_bridges, parameter[self, name, arg_types]]:
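# Hedged usage sketch: on a javatools-style class info object `ci`, this
# yields the compiler-generated bridge methods for a covariant override:
#
#     for bridge in ci.get_method_bridges("clone"):
#         print(bridge)   # each adapts the return type of clone()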
constant[
generator of bridge methods found that adapt the return types of a
named method and having argument type descriptors matching
those in arg_types.
]
for taget[name[m]] in starred[call[name[self].get_methods_by_name, parameter[name[name]]]] begin[:]
if <ast.BoolOp object at 0x7da1b0b588b0> begin[:]
<ast.Yield object at 0x7da1b0b586d0> | keyword[def] identifier[get_method_bridges] ( identifier[self] , identifier[name] , identifier[arg_types] =()):
literal[string]
keyword[for] identifier[m] keyword[in] identifier[self] . identifier[get_methods_by_name] ( identifier[name] ):
keyword[if] (( identifier[m] . identifier[is_bridge] () keyword[and]
identifier[m] . identifier[get_arg_type_descriptors] ()== identifier[arg_types] )):
keyword[yield] identifier[m] | def get_method_bridges(self, name, arg_types=()):
"""
generator of bridge methods found that adapt the return types of a
named method and having argument type descriptors matching
those in arg_types.
"""
for m in self.get_methods_by_name(name):
if m.is_bridge() and m.get_arg_type_descriptors() == arg_types:
yield m # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['m']] |
def size(self):
"""The size of the schema. If the underlying data source changes, it may be outdated.
"""
if self._size is None:
self._size = 0
for csv_file in self.files:
self._size += sum(1 if line else 0 for line in _util.open_local_or_gcs(csv_file, 'r'))
return self._size | def function[size, parameter[self]]:
constant[The size of the schema. If the underlying data source changes, it may be outdated.
]
if compare[name[self]._size is constant[None]] begin[:]
name[self]._size assign[=] constant[0]
for taget[name[csv_file]] in starred[name[self].files] begin[:]
<ast.AugAssign object at 0x7da1b113efe0>
return[name[self]._size] | keyword[def] identifier[size] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[_size] keyword[is] keyword[None] :
identifier[self] . identifier[_size] = literal[int]
keyword[for] identifier[csv_file] keyword[in] identifier[self] . identifier[files] :
identifier[self] . identifier[_size] += identifier[sum] ( literal[int] keyword[if] identifier[line] keyword[else] literal[int] keyword[for] identifier[line] keyword[in] identifier[_util] . identifier[open_local_or_gcs] ( identifier[csv_file] , literal[string] ))
keyword[return] identifier[self] . identifier[_size] | def size(self):
"""The size of the schema. If the underlying data source changes, it may be outdated.
"""
if self._size is None:
self._size = 0
for csv_file in self.files:
self._size += sum((1 if line else 0 for line in _util.open_local_or_gcs(csv_file, 'r'))) # depends on [control=['for'], data=['csv_file']] # depends on [control=['if'], data=[]]
return self._size |
def get_as_nullable_map(self, key):
"""
Converts map element into an AnyValueMap or returns None if conversion is not possible.
:param key: a key of element to get.
:return: AnyValueMap value of the element or None if conversion is not supported.
"""
value = self.get_as_object(key)
return AnyValueMap.from_value(value) | def function[get_as_nullable_map, parameter[self, key]]:
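# Hedged usage sketch (pip-services-style AnyValueMap assumed):
#
#     params = AnyValueMap.from_tuples("credentials", {"user": "abc"})
#     creds = params.get_as_nullable_map("credentials")  # nested AnyValueMap
#     # a key whose value cannot be converted to a map comes back as None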
constant[
Converts map element into an AnyValueMap or returns None if conversion is not possible.
:param key: a key of element to get.
:return: AnyValueMap value of the element or None if conversion is not supported.
]
variable[value] assign[=] call[name[self].get_as_object, parameter[name[key]]]
return[call[name[AnyValueMap].from_value, parameter[name[value]]]] | keyword[def] identifier[get_as_nullable_map] ( identifier[self] , identifier[key] ):
literal[string]
identifier[value] = identifier[self] . identifier[get_as_object] ( identifier[key] )
keyword[return] identifier[AnyValueMap] . identifier[from_value] ( identifier[value] ) | def get_as_nullable_map(self, key):
"""
Converts map element into an AnyValueMap or returns None if conversion is not possible.
:param key: a key of element to get.
:return: AnyValueMap value of the element or None if conversion is not supported.
"""
value = self.get_as_object(key)
return AnyValueMap.from_value(value) |
def hide(cls):
"""
Hide the log interface.
"""
cls.el.style.display = "none"
cls.overlay.hide()
cls.bind() | def function[hide, parameter[cls]]:
constant[
Hide the log interface.
]
name[cls].el.style.display assign[=] constant[none]
call[name[cls].overlay.hide, parameter[]]
call[name[cls].bind, parameter[]] | keyword[def] identifier[hide] ( identifier[cls] ):
literal[string]
identifier[cls] . identifier[el] . identifier[style] . identifier[display] = literal[string]
identifier[cls] . identifier[overlay] . identifier[hide] ()
identifier[cls] . identifier[bind] () | def hide(cls):
"""
Hide the log interface.
"""
cls.el.style.display = 'none'
cls.overlay.hide()
cls.bind() |
def get_overlapping_ranges(self, collection_link, sorted_ranges):
'''
Given the sorted ranges and a collection,
        returns the list of overlapping partition key ranges.
:param str collection_link:
The collection link.
:param (list of routing_range._Range) sorted_ranges: The sorted list of non-overlapping ranges.
:return:
List of partition key ranges.
:rtype: list of dict
:raises ValueError: If two ranges in sorted_ranges overlap or if the list is not sorted
'''
# validate if the list is non-overlapping and sorted
if not self._is_sorted_and_non_overlapping(sorted_ranges):
raise ValueError("the list of ranges is not a non-overlapping sorted ranges")
target_partition_key_ranges = []
it = iter(sorted_ranges)
try:
currentProvidedRange = next(it)
while True:
if (currentProvidedRange.isEmpty()):
                # skip and go to the next item
currentProvidedRange = next(it)
continue
if len(target_partition_key_ranges):
queryRange = self._subtract_range(currentProvidedRange, target_partition_key_ranges[-1])
else:
queryRange = currentProvidedRange
overlappingRanges = _PartitionKeyRangeCache.get_overlapping_ranges(self, collection_link, queryRange)
assert len(overlappingRanges), ("code bug: returned overlapping ranges for queryRange {} is empty".format(queryRange))
target_partition_key_ranges.extend(overlappingRanges)
lastKnownTargetRange = routing_range._Range.PartitionKeyRangeToRange(target_partition_key_ranges[-1])
# the overlapping ranges must contain the requested range
assert currentProvidedRange.max <= lastKnownTargetRange.max, "code bug: returned overlapping ranges {} does not contain the requested range {}".format(overlappingRanges, queryRange)
# the current range is contained in target_partition_key_ranges just move forward
currentProvidedRange = next(it)
while currentProvidedRange.max <= lastKnownTargetRange.max:
# the current range is covered too. just move forward
currentProvidedRange = next(it)
except StopIteration:
# when the iteration is exhausted we get here. There is nothing else to be done
pass
return target_partition_key_ranges | def function[get_overlapping_ranges, parameter[self, collection_link, sorted_ranges]]:
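# Hedged sketch of the contract: the input must be sorted and non-overlapping,
# and the output covers every partition key range the query ranges touch, each
# range listed once. Hypothetical call shape (azure-cosmos routing assumed):
#
#     ranges = [routing_range._Range("", "05", True, False),
#               routing_range._Range("AA", "FF", True, False)]
#     pkranges = cache.get_overlapping_ranges(collection_link, ranges)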
constant[
Given the sorted ranges and a collection,
Returns the list of overlapping partition key ranges
:param str collection_link:
The collection link.
:param (list of routing_range._Range) sorted_ranges: The sorted list of non-overlapping ranges.
:return:
List of partition key ranges.
:rtype: list of dict
:raises ValueError: If two ranges in sorted_ranges overlap or if the list is not sorted
]
if <ast.UnaryOp object at 0x7da20c6ab010> begin[:]
<ast.Raise object at 0x7da20c6a8df0>
variable[target_partition_key_ranges] assign[=] list[[]]
variable[it] assign[=] call[name[iter], parameter[name[sorted_ranges]]]
<ast.Try object at 0x7da20c6aa3e0>
return[name[target_partition_key_ranges]] | keyword[def] identifier[get_overlapping_ranges] ( identifier[self] , identifier[collection_link] , identifier[sorted_ranges] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[_is_sorted_and_non_overlapping] ( identifier[sorted_ranges] ):
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[target_partition_key_ranges] =[]
identifier[it] = identifier[iter] ( identifier[sorted_ranges] )
keyword[try] :
identifier[currentProvidedRange] = identifier[next] ( identifier[it] )
keyword[while] keyword[True] :
keyword[if] ( identifier[currentProvidedRange] . identifier[isEmpty] ()):
identifier[currentProvidedRange] = identifier[next] ( identifier[it] )
keyword[continue]
keyword[if] identifier[len] ( identifier[target_partition_key_ranges] ):
identifier[queryRange] = identifier[self] . identifier[_subtract_range] ( identifier[currentProvidedRange] , identifier[target_partition_key_ranges] [- literal[int] ])
keyword[else] :
identifier[queryRange] = identifier[currentProvidedRange]
identifier[overlappingRanges] = identifier[_PartitionKeyRangeCache] . identifier[get_overlapping_ranges] ( identifier[self] , identifier[collection_link] , identifier[queryRange] )
keyword[assert] identifier[len] ( identifier[overlappingRanges] ),( literal[string] . identifier[format] ( identifier[queryRange] ))
identifier[target_partition_key_ranges] . identifier[extend] ( identifier[overlappingRanges] )
identifier[lastKnownTargetRange] = identifier[routing_range] . identifier[_Range] . identifier[PartitionKeyRangeToRange] ( identifier[target_partition_key_ranges] [- literal[int] ])
keyword[assert] identifier[currentProvidedRange] . identifier[max] <= identifier[lastKnownTargetRange] . identifier[max] , literal[string] . identifier[format] ( identifier[overlappingRanges] , identifier[queryRange] )
identifier[currentProvidedRange] = identifier[next] ( identifier[it] )
keyword[while] identifier[currentProvidedRange] . identifier[max] <= identifier[lastKnownTargetRange] . identifier[max] :
identifier[currentProvidedRange] = identifier[next] ( identifier[it] )
keyword[except] identifier[StopIteration] :
keyword[pass]
keyword[return] identifier[target_partition_key_ranges] | def get_overlapping_ranges(self, collection_link, sorted_ranges):
"""
Given the sorted ranges and a collection,
Returns the list of overlapping partition key ranges
:param str collection_link:
The collection link.
:param (list of routing_range._Range) sorted_ranges: The sorted list of non-overlapping ranges.
:return:
List of partition key ranges.
:rtype: list of dict
:raises ValueError: If two ranges in sorted_ranges overlap or if the list is not sorted
"""
# validate if the list is non-overlapping and sorted
if not self._is_sorted_and_non_overlapping(sorted_ranges):
raise ValueError('the list of ranges is not a non-overlapping sorted ranges') # depends on [control=['if'], data=[]]
target_partition_key_ranges = []
it = iter(sorted_ranges)
try:
currentProvidedRange = next(it)
while True:
if currentProvidedRange.isEmpty():
# skip and go to the next item\
currentProvidedRange = next(it)
continue # depends on [control=['if'], data=[]]
if len(target_partition_key_ranges):
queryRange = self._subtract_range(currentProvidedRange, target_partition_key_ranges[-1]) # depends on [control=['if'], data=[]]
else:
queryRange = currentProvidedRange
overlappingRanges = _PartitionKeyRangeCache.get_overlapping_ranges(self, collection_link, queryRange)
assert len(overlappingRanges), 'code bug: returned overlapping ranges for queryRange {} is empty'.format(queryRange)
target_partition_key_ranges.extend(overlappingRanges)
lastKnownTargetRange = routing_range._Range.PartitionKeyRangeToRange(target_partition_key_ranges[-1])
# the overlapping ranges must contain the requested range
assert currentProvidedRange.max <= lastKnownTargetRange.max, 'code bug: returned overlapping ranges {} does not contain the requested range {}'.format(overlappingRanges, queryRange)
# the current range is contained in target_partition_key_ranges just move forward
currentProvidedRange = next(it)
while currentProvidedRange.max <= lastKnownTargetRange.max:
# the current range is covered too. just move forward
currentProvidedRange = next(it) # depends on [control=['while'], data=[]] # depends on [control=['while'], data=[]] # depends on [control=['try'], data=[]]
except StopIteration:
# when the iteration is exhausted we get here. There is nothing else to be done
pass # depends on [control=['except'], data=[]]
return target_partition_key_ranges |
def from_dict(data, ctx):
"""
Instantiate a new DynamicOrderState from a dict (generally from loading
a JSON response). The data used to instantiate the DynamicOrderState is
a shallow copy of the dict passed in, with any complex child types
instantiated appropriately.
"""
data = data.copy()
if data.get('trailingStopValue') is not None:
data['trailingStopValue'] = ctx.convert_decimal_number(
data.get('trailingStopValue')
)
if data.get('triggerDistance') is not None:
data['triggerDistance'] = ctx.convert_decimal_number(
data.get('triggerDistance')
)
return DynamicOrderState(**data) | def function[from_dict, parameter[data, ctx]]:
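# Hedged usage sketch (v20-style context assumed): `ctx` supplies the decimal
# converter used above, so string prices in a JSON payload come back typed:
#
#     state = DynamicOrderState.from_dict(
#         {"trailingStopValue": "1.10250", "triggerDistance": "0.00150"}, ctx)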
constant[
Instantiate a new DynamicOrderState from a dict (generally from loading
a JSON response). The data used to instantiate the DynamicOrderState is
a shallow copy of the dict passed in, with any complex child types
instantiated appropriately.
]
variable[data] assign[=] call[name[data].copy, parameter[]]
if compare[call[name[data].get, parameter[constant[trailingStopValue]]] is_not constant[None]] begin[:]
call[name[data]][constant[trailingStopValue]] assign[=] call[name[ctx].convert_decimal_number, parameter[call[name[data].get, parameter[constant[trailingStopValue]]]]]
if compare[call[name[data].get, parameter[constant[triggerDistance]]] is_not constant[None]] begin[:]
call[name[data]][constant[triggerDistance]] assign[=] call[name[ctx].convert_decimal_number, parameter[call[name[data].get, parameter[constant[triggerDistance]]]]]
return[call[name[DynamicOrderState], parameter[]]] | keyword[def] identifier[from_dict] ( identifier[data] , identifier[ctx] ):
literal[string]
identifier[data] = identifier[data] . identifier[copy] ()
keyword[if] identifier[data] . identifier[get] ( literal[string] ) keyword[is] keyword[not] keyword[None] :
identifier[data] [ literal[string] ]= identifier[ctx] . identifier[convert_decimal_number] (
identifier[data] . identifier[get] ( literal[string] )
)
keyword[if] identifier[data] . identifier[get] ( literal[string] ) keyword[is] keyword[not] keyword[None] :
identifier[data] [ literal[string] ]= identifier[ctx] . identifier[convert_decimal_number] (
identifier[data] . identifier[get] ( literal[string] )
)
keyword[return] identifier[DynamicOrderState] (** identifier[data] ) | def from_dict(data, ctx):
"""
Instantiate a new DynamicOrderState from a dict (generally from loading
a JSON response). The data used to instantiate the DynamicOrderState is
a shallow copy of the dict passed in, with any complex child types
instantiated appropriately.
"""
data = data.copy()
if data.get('trailingStopValue') is not None:
data['trailingStopValue'] = ctx.convert_decimal_number(data.get('trailingStopValue')) # depends on [control=['if'], data=[]]
if data.get('triggerDistance') is not None:
data['triggerDistance'] = ctx.convert_decimal_number(data.get('triggerDistance')) # depends on [control=['if'], data=[]]
return DynamicOrderState(**data) |
def _init_zeo_root(self, attempts=3):
"""
Get and initialize the ZEO root object.
Args:
attempts (int, default 3): How many times to try, if the connection
was lost.
"""
try:
db_root = self._connection.root()
except ConnectionStateError:
if attempts <= 0:
raise
self._open_connection()
return self._init_zeo_root(attempts=attempts-1)
# init the root, if it wasn't already declared
if self.project_key and self.project_key not in db_root:
with transaction.manager:
db_root[self.project_key] = self.default_type()
self._root = db_root[self.project_key] if self.project_key else db_root | def function[_init_zeo_root, parameter[self, attempts]]:
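# Hedged sketch of the retry pattern used above, pulled out for clarity
# (ConnectionStateError is imported from ZODB elsewhere in this module):
def _retry_on_stale_connection(fn, reopen, attempts=3):
    # bounded retry: reopen the ZEO connection and try again, re-raising
    # once the attempt budget is exhausted
    try:
        return fn()
    except ConnectionStateError:
        if attempts <= 0:
            raise
        reopen()
        return _retry_on_stale_connection(fn, reopen, attempts - 1)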
constant[
Get and initialize the ZEO root object.
Args:
attempts (int, default 3): How many times to try, if the connection
was lost.
]
<ast.Try object at 0x7da1b15ac820>
if <ast.BoolOp object at 0x7da1b15ac6a0> begin[:]
with name[transaction].manager begin[:]
call[name[db_root]][name[self].project_key] assign[=] call[name[self].default_type, parameter[]]
name[self]._root assign[=] <ast.IfExp object at 0x7da1b15b3010> | keyword[def] identifier[_init_zeo_root] ( identifier[self] , identifier[attempts] = literal[int] ):
literal[string]
keyword[try] :
identifier[db_root] = identifier[self] . identifier[_connection] . identifier[root] ()
keyword[except] identifier[ConnectionStateError] :
keyword[if] identifier[attempts] <= literal[int] :
keyword[raise]
identifier[self] . identifier[_open_connection] ()
keyword[return] identifier[self] . identifier[_init_zeo_root] ( identifier[attempts] = identifier[attempts] - literal[int] )
keyword[if] identifier[self] . identifier[project_key] keyword[and] identifier[self] . identifier[project_key] keyword[not] keyword[in] identifier[db_root] :
keyword[with] identifier[transaction] . identifier[manager] :
identifier[db_root] [ identifier[self] . identifier[project_key] ]= identifier[self] . identifier[default_type] ()
identifier[self] . identifier[_root] = identifier[db_root] [ identifier[self] . identifier[project_key] ] keyword[if] identifier[self] . identifier[project_key] keyword[else] identifier[db_root] | def _init_zeo_root(self, attempts=3):
"""
Get and initialize the ZEO root object.
Args:
attempts (int, default 3): How many times to try, if the connection
was lost.
"""
try:
db_root = self._connection.root() # depends on [control=['try'], data=[]]
except ConnectionStateError:
if attempts <= 0:
raise # depends on [control=['if'], data=[]]
self._open_connection()
return self._init_zeo_root(attempts=attempts - 1) # depends on [control=['except'], data=[]]
# init the root, if it wasn't already declared
if self.project_key and self.project_key not in db_root:
with transaction.manager:
db_root[self.project_key] = self.default_type() # depends on [control=['with'], data=[]] # depends on [control=['if'], data=[]]
self._root = db_root[self.project_key] if self.project_key else db_root |
def create_tokenizer(self, name, config=dict()):
"""Create a pipeline component from a factory.
name (unicode): Factory name to look up in `Language.factories`.
config (dict): Configuration parameters to initialise component.
RETURNS (callable): Pipeline component.
"""
if name not in self.factories:
raise KeyError(Errors.E002.format(name=name))
factory = self.factories[name]
return factory(self, **config) | def function[create_tokenizer, parameter[self, name, config]]:
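# Hedged usage sketch (spaCy-style Language assumed): a factory registered
# under a name can then be instantiated by that name with extra config:
#
#     nlp.factories["my_component"] = lambda nlp, **cfg: MyComponent(**cfg)
#     component = nlp.create_tokenizer("my_component")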
constant[Create a pipeline component from a factory.
name (unicode): Factory name to look up in `Language.factories`.
config (dict): Configuration parameters to initialise component.
RETURNS (callable): Pipeline component.
]
if compare[name[name] <ast.NotIn object at 0x7da2590d7190> name[self].factories] begin[:]
<ast.Raise object at 0x7da1b1933cd0>
variable[factory] assign[=] call[name[self].factories][name[name]]
return[call[name[factory], parameter[name[self]]]] | keyword[def] identifier[create_tokenizer] ( identifier[self] , identifier[name] , identifier[config] = identifier[dict] ()):
literal[string]
keyword[if] identifier[name] keyword[not] keyword[in] identifier[self] . identifier[factories] :
keyword[raise] identifier[KeyError] ( identifier[Errors] . identifier[E002] . identifier[format] ( identifier[name] = identifier[name] ))
identifier[factory] = identifier[self] . identifier[factories] [ identifier[name] ]
keyword[return] identifier[factory] ( identifier[self] ,** identifier[config] ) | def create_tokenizer(self, name, config=dict()):
"""Create a pipeline component from a factory.
name (unicode): Factory name to look up in `Language.factories`.
config (dict): Configuration parameters to initialise component.
RETURNS (callable): Pipeline component.
"""
if name not in self.factories:
raise KeyError(Errors.E002.format(name=name)) # depends on [control=['if'], data=['name']]
factory = self.factories[name]
return factory(self, **config) |
def get_stats(self):
"""Get general stats for the cache."""
expired = sum([x['expired'] for _, x in
self._CACHE_STATS['access_stats'].items()])
miss = sum([x['miss'] for _, x in
self._CACHE_STATS['access_stats'].items()])
hit = sum([x['hit'] for _, x in
self._CACHE_STATS['access_stats'].items()])
return {
'totals': {
'keys': len(self._CACHE_STATS['access_stats']),
'expired': expired,
'miss': miss,
'hit': hit,
}
} | def function[get_stats, parameter[self]]:
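# Shape of the aggregate returned above (values illustrative only):
#
#     {"totals": {"keys": 12, "expired": 3, "miss": 40, "hit": 158}}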
constant[Get general stats for the cache.]
variable[expired] assign[=] call[name[sum], parameter[<ast.ListComp object at 0x7da1b016f940>]]
variable[miss] assign[=] call[name[sum], parameter[<ast.ListComp object at 0x7da1b01906a0>]]
variable[hit] assign[=] call[name[sum], parameter[<ast.ListComp object at 0x7da1b01920e0>]]
return[dictionary[[<ast.Constant object at 0x7da1b0192500>], [<ast.Dict object at 0x7da1b0190e50>]]] | keyword[def] identifier[get_stats] ( identifier[self] ):
literal[string]
identifier[expired] = identifier[sum] ([ identifier[x] [ literal[string] ] keyword[for] identifier[_] , identifier[x] keyword[in]
identifier[self] . identifier[_CACHE_STATS] [ literal[string] ]. identifier[items] ()])
identifier[miss] = identifier[sum] ([ identifier[x] [ literal[string] ] keyword[for] identifier[_] , identifier[x] keyword[in]
identifier[self] . identifier[_CACHE_STATS] [ literal[string] ]. identifier[items] ()])
identifier[hit] = identifier[sum] ([ identifier[x] [ literal[string] ] keyword[for] identifier[_] , identifier[x] keyword[in]
identifier[self] . identifier[_CACHE_STATS] [ literal[string] ]. identifier[items] ()])
keyword[return] {
literal[string] :{
literal[string] : identifier[len] ( identifier[self] . identifier[_CACHE_STATS] [ literal[string] ]),
literal[string] : identifier[expired] ,
literal[string] : identifier[miss] ,
literal[string] : identifier[hit] ,
}
} | def get_stats(self):
"""Get general stats for the cache."""
expired = sum([x['expired'] for (_, x) in self._CACHE_STATS['access_stats'].items()])
miss = sum([x['miss'] for (_, x) in self._CACHE_STATS['access_stats'].items()])
hit = sum([x['hit'] for (_, x) in self._CACHE_STATS['access_stats'].items()])
return {'totals': {'keys': len(self._CACHE_STATS['access_stats']), 'expired': expired, 'miss': miss, 'hit': hit}} |
def sync(self, vault_client, opt):
"""Synchronizes the context to the Vault server. This
has the effect of updating every resource which is
in the context and has changes pending."""
active_mounts = []
for audit_log in self.logs():
audit_log.sync(vault_client)
# Handle policies only on the first pass. This allows us
# to ensure that ACL's are in place prior to actually
# making any changes.
not_policies = self.sync_policies(vault_client)
# Handle auth wrapper resources on the next path. The resources
# may update a path on their own. They may also provide mount
# tuning information.
not_auth = self.sync_auth(vault_client, not_policies)
# Handle mounts only on the next pass. This allows us to
# ensure that everything is in order prior to actually
# provisioning secrets. Note we handle removals before
# anything else, allowing us to address mount conflicts.
active_mounts, not_mounts = self.sync_mounts(active_mounts,
not_auth,
vault_client)
# Now handle everything else. If "best practices" are being
# adhered to then every generic mountpoint should exist by now.
# We handle "child" resources after the first batch
sorted_resources = sorted(not_mounts, key=childless_first)
for resource in sorted_resources:
resource.sync(vault_client)
for mount in self.mounts():
if not find_backend(mount.path, active_mounts):
mount.unmount(vault_client)
if opt.remove_unknown:
self.prune(vault_client) | def function[sync, parameter[self, vault_client, opt]]:
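# Hedged usage sketch (aomi-style context assumed): one call pushes policies,
# auth backends, mounts, and remaining resources in that order, then unmounts
# anything no longer referenced:
#
#     ctx.sync(vault_client, opt)   # opt.remove_unknown additionally prunes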
constant[Synchronizes the context to the Vault server. This
has the effect of updating every resource which is
in the context and has changes pending.]
variable[active_mounts] assign[=] list[[]]
for taget[name[audit_log]] in starred[call[name[self].logs, parameter[]]] begin[:]
call[name[audit_log].sync, parameter[name[vault_client]]]
variable[not_policies] assign[=] call[name[self].sync_policies, parameter[name[vault_client]]]
variable[not_auth] assign[=] call[name[self].sync_auth, parameter[name[vault_client], name[not_policies]]]
<ast.Tuple object at 0x7da1b183a7a0> assign[=] call[name[self].sync_mounts, parameter[name[active_mounts], name[not_auth], name[vault_client]]]
variable[sorted_resources] assign[=] call[name[sorted], parameter[name[not_mounts]]]
for taget[name[resource]] in starred[name[sorted_resources]] begin[:]
call[name[resource].sync, parameter[name[vault_client]]]
for taget[name[mount]] in starred[call[name[self].mounts, parameter[]]] begin[:]
if <ast.UnaryOp object at 0x7da1b1838d00> begin[:]
call[name[mount].unmount, parameter[name[vault_client]]]
if name[opt].remove_unknown begin[:]
call[name[self].prune, parameter[name[vault_client]]] | keyword[def] identifier[sync] ( identifier[self] , identifier[vault_client] , identifier[opt] ):
literal[string]
identifier[active_mounts] =[]
keyword[for] identifier[audit_log] keyword[in] identifier[self] . identifier[logs] ():
identifier[audit_log] . identifier[sync] ( identifier[vault_client] )
identifier[not_policies] = identifier[self] . identifier[sync_policies] ( identifier[vault_client] )
identifier[not_auth] = identifier[self] . identifier[sync_auth] ( identifier[vault_client] , identifier[not_policies] )
identifier[active_mounts] , identifier[not_mounts] = identifier[self] . identifier[sync_mounts] ( identifier[active_mounts] ,
identifier[not_auth] ,
identifier[vault_client] )
identifier[sorted_resources] = identifier[sorted] ( identifier[not_mounts] , identifier[key] = identifier[childless_first] )
keyword[for] identifier[resource] keyword[in] identifier[sorted_resources] :
identifier[resource] . identifier[sync] ( identifier[vault_client] )
keyword[for] identifier[mount] keyword[in] identifier[self] . identifier[mounts] ():
keyword[if] keyword[not] identifier[find_backend] ( identifier[mount] . identifier[path] , identifier[active_mounts] ):
identifier[mount] . identifier[unmount] ( identifier[vault_client] )
keyword[if] identifier[opt] . identifier[remove_unknown] :
identifier[self] . identifier[prune] ( identifier[vault_client] ) | def sync(self, vault_client, opt):
"""Synchronizes the context to the Vault server. This
has the effect of updating every resource which is
in the context and has changes pending."""
active_mounts = []
for audit_log in self.logs():
audit_log.sync(vault_client) # depends on [control=['for'], data=['audit_log']]
# Handle policies only on the first pass. This allows us
# to ensure that ACL's are in place prior to actually
# making any changes.
not_policies = self.sync_policies(vault_client)
# Handle auth wrapper resources on the next path. The resources
# may update a path on their own. They may also provide mount
# tuning information.
not_auth = self.sync_auth(vault_client, not_policies)
# Handle mounts only on the next pass. This allows us to
# ensure that everything is in order prior to actually
# provisioning secrets. Note we handle removals before
# anything else, allowing us to address mount conflicts.
(active_mounts, not_mounts) = self.sync_mounts(active_mounts, not_auth, vault_client)
# Now handle everything else. If "best practices" are being
# adhered to then every generic mountpoint should exist by now.
# We handle "child" resources after the first batch
sorted_resources = sorted(not_mounts, key=childless_first)
for resource in sorted_resources:
resource.sync(vault_client) # depends on [control=['for'], data=['resource']]
for mount in self.mounts():
if not find_backend(mount.path, active_mounts):
mount.unmount(vault_client) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['mount']]
if opt.remove_unknown:
self.prune(vault_client) # depends on [control=['if'], data=[]] |
def _extract_device_name_from_event(event):
"""Extract device name from a tf.Event proto carrying tensor value."""
plugin_data_content = json.loads(
tf.compat.as_str(event.summary.value[0].metadata.plugin_data.content))
return plugin_data_content['device'] | def function[_extract_device_name_from_event, parameter[event]]:
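# Hedged sketch of the expected payload: the first summary value carries
# plugin metadata whose JSON content names the device, e.g.
#
#     {"device": "/job:localhost/replica:0/task:0/device:GPU:0"}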
constant[Extract device name from a tf.Event proto carrying tensor value.]
variable[plugin_data_content] assign[=] call[name[json].loads, parameter[call[name[tf].compat.as_str, parameter[call[name[event].summary.value][constant[0]].metadata.plugin_data.content]]]]
return[call[name[plugin_data_content]][constant[device]]] | keyword[def] identifier[_extract_device_name_from_event] ( identifier[event] ):
literal[string]
identifier[plugin_data_content] = identifier[json] . identifier[loads] (
identifier[tf] . identifier[compat] . identifier[as_str] ( identifier[event] . identifier[summary] . identifier[value] [ literal[int] ]. identifier[metadata] . identifier[plugin_data] . identifier[content] ))
keyword[return] identifier[plugin_data_content] [ literal[string] ] | def _extract_device_name_from_event(event):
"""Extract device name from a tf.Event proto carrying tensor value."""
plugin_data_content = json.loads(tf.compat.as_str(event.summary.value[0].metadata.plugin_data.content))
return plugin_data_content['device'] |
def save(self, specfiles=None, rm=False, ci=False, smi=False, sai=False,
si=False, compress=True, path=None):
"""Writes the specified datatypes to ``mrc`` files on the hard disk.
.. note::
If ``.save()`` is called and no ``mrc`` files are present in the
specified path new files are generated, otherwise old files are
replaced.
:param specfiles: the name of an ms-run file or a list of names. If None
all specfiles are selected.
:type specfiles: None, str, [str, str]
:param rm: bool, True to select ``self.rmc`` (run metadata)
:param ci: bool, True to select ``self.cic`` (chromatogram items)
:param smi: bool, True to select ``self.smic`` (spectrum metadata items)
:param sai: bool, True to select ``self.saic`` (spectrum array items)
:param si: bool, True to select ``self.sic`` (spectrum items)
:param compress: bool, True to use zip file compression
:param path: filedirectory to which the ``mrc`` files are written. By
default the parameter is set to ``None`` and the filedirectory is
read from ``self.info[specfile]['path']``
"""
if specfiles is None:
specfiles = [_ for _ in viewkeys(self.info)]
else:
specfiles = aux.toList(specfiles)
datatypes = self._processDatatypes(rm, ci, smi, sai, si)
if len(datatypes) == 0:
datatypes = ['rm', 'ci', 'smi', 'sai', 'si']
for specfile in specfiles:
if specfile not in self.info:
warntext = 'Error while calling "MsrunContainer.save()": "%s" '\
'is not present in "MsrunContainer.info"!'\
% (specfile, )
warnings.warn(warntext)
continue
else:
msrunInfo = self.info[specfile]
specfilePath = msrunInfo['path'] if path is None else path
with aux.PartiallySafeReplace() as msr:
for datatype in datatypes:
filename = specfile + '.mrc_' + datatype
filepath = aux.joinpath(specfilePath, filename)
with msr.open(filepath, 'w+b') as openfile:
if datatype == 'rm':
self._writeRmc(openfile, specfile)
elif datatype == 'ci':
self._writeCic(openfile, specfile, compress)
elif datatype == 'si':
self._writeSic(openfile, specfile, compress)
elif datatype == 'smi':
self._writeSmic(openfile, specfile, compress)
elif datatype == 'sai':
self._writeSaic(openfile, specfile, compress) | def function[save, parameter[self, specfiles, rm, ci, smi, sai, si, compress, path]]:
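# Hedged usage sketch: write only the spectrum items of one run as a
# compressed ``specfile.mrc_si`` next to the location recorded in ``info``:
#
#     msrunContainer.save('run01', si=True)
#     # with no datatype flags set, all five mrc files are (re)written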
constant[Writes the specified datatypes to ``mrc`` files on the hard disk.
.. note::
If ``.save()`` is called and no ``mrc`` files are present in the
specified path new files are generated, otherwise old files are
replaced.
:param specfiles: the name of an ms-run file or a list of names. If None
all specfiles are selected.
:type specfiles: None, str, [str, str]
:param rm: bool, True to select ``self.rmc`` (run metadata)
:param ci: bool, True to select ``self.cic`` (chromatogram items)
:param smi: bool, True to select ``self.smic`` (spectrum metadata items)
:param sai: bool, True to select ``self.saic`` (spectrum array items)
:param si: bool, True to select ``self.sic`` (spectrum items)
:param compress: bool, True to use zip file compression
:param path: filedirectory to which the ``mrc`` files are written. By
default the parameter is set to ``None`` and the filedirectory is
read from ``self.info[specfile]['path']``
]
if compare[name[specfiles] is constant[None]] begin[:]
variable[specfiles] assign[=] <ast.ListComp object at 0x7da20e9b0670>
variable[datatypes] assign[=] call[name[self]._processDatatypes, parameter[name[rm], name[ci], name[smi], name[sai], name[si]]]
if compare[call[name[len], parameter[name[datatypes]]] equal[==] constant[0]] begin[:]
variable[datatypes] assign[=] list[[<ast.Constant object at 0x7da20e9b1900>, <ast.Constant object at 0x7da20e9b1d20>, <ast.Constant object at 0x7da20e9b19c0>, <ast.Constant object at 0x7da20e9b0820>, <ast.Constant object at 0x7da20e9b0790>]]
for taget[name[specfile]] in starred[name[specfiles]] begin[:]
if compare[name[specfile] <ast.NotIn object at 0x7da2590d7190> name[self].info] begin[:]
variable[warntext] assign[=] binary_operation[constant[Error while calling "MsrunContainer.save()": "%s" is not present in "MsrunContainer.info"!] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da20e9b1ea0>]]]
call[name[warnings].warn, parameter[name[warntext]]]
continue
with call[name[aux].PartiallySafeReplace, parameter[]] begin[:]
for taget[name[datatype]] in starred[name[datatypes]] begin[:]
variable[filename] assign[=] binary_operation[binary_operation[name[specfile] + constant[.mrc_]] + name[datatype]]
variable[filepath] assign[=] call[name[aux].joinpath, parameter[name[specfilePath], name[filename]]]
with call[name[msr].open, parameter[name[filepath], constant[w+b]]] begin[:]
if compare[name[datatype] equal[==] constant[rm]] begin[:]
call[name[self]._writeRmc, parameter[name[openfile], name[specfile]]] | keyword[def] identifier[save] ( identifier[self] , identifier[specfiles] = keyword[None] , identifier[rm] = keyword[False] , identifier[ci] = keyword[False] , identifier[smi] = keyword[False] , identifier[sai] = keyword[False] ,
identifier[si] = keyword[False] , identifier[compress] = keyword[True] , identifier[path] = keyword[None] ):
literal[string]
keyword[if] identifier[specfiles] keyword[is] keyword[None] :
identifier[specfiles] =[ identifier[_] keyword[for] identifier[_] keyword[in] identifier[viewkeys] ( identifier[self] . identifier[info] )]
keyword[else] :
identifier[specfiles] = identifier[aux] . identifier[toList] ( identifier[specfiles] )
identifier[datatypes] = identifier[self] . identifier[_processDatatypes] ( identifier[rm] , identifier[ci] , identifier[smi] , identifier[sai] , identifier[si] )
keyword[if] identifier[len] ( identifier[datatypes] )== literal[int] :
identifier[datatypes] =[ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ]
keyword[for] identifier[specfile] keyword[in] identifier[specfiles] :
keyword[if] identifier[specfile] keyword[not] keyword[in] identifier[self] . identifier[info] :
identifier[warntext] = literal[string] literal[string] %( identifier[specfile] ,)
identifier[warnings] . identifier[warn] ( identifier[warntext] )
keyword[continue]
keyword[else] :
identifier[msrunInfo] = identifier[self] . identifier[info] [ identifier[specfile] ]
identifier[specfilePath] = identifier[msrunInfo] [ literal[string] ] keyword[if] identifier[path] keyword[is] keyword[None] keyword[else] identifier[path]
keyword[with] identifier[aux] . identifier[PartiallySafeReplace] () keyword[as] identifier[msr] :
keyword[for] identifier[datatype] keyword[in] identifier[datatypes] :
identifier[filename] = identifier[specfile] + literal[string] + identifier[datatype]
identifier[filepath] = identifier[aux] . identifier[joinpath] ( identifier[specfilePath] , identifier[filename] )
keyword[with] identifier[msr] . identifier[open] ( identifier[filepath] , literal[string] ) keyword[as] identifier[openfile] :
keyword[if] identifier[datatype] == literal[string] :
identifier[self] . identifier[_writeRmc] ( identifier[openfile] , identifier[specfile] )
keyword[elif] identifier[datatype] == literal[string] :
identifier[self] . identifier[_writeCic] ( identifier[openfile] , identifier[specfile] , identifier[compress] )
keyword[elif] identifier[datatype] == literal[string] :
identifier[self] . identifier[_writeSic] ( identifier[openfile] , identifier[specfile] , identifier[compress] )
keyword[elif] identifier[datatype] == literal[string] :
identifier[self] . identifier[_writeSmic] ( identifier[openfile] , identifier[specfile] , identifier[compress] )
keyword[elif] identifier[datatype] == literal[string] :
identifier[self] . identifier[_writeSaic] ( identifier[openfile] , identifier[specfile] , identifier[compress] ) | def save(self, specfiles=None, rm=False, ci=False, smi=False, sai=False, si=False, compress=True, path=None):
"""Writes the specified datatypes to ``mrc`` files on the hard disk.
.. note::
If ``.save()`` is called and no ``mrc`` files are present in the
specified path new files are generated, otherwise old files are
replaced.
:param specfiles: the name of an ms-run file or a list of names. If None
all specfiles are selected.
:type specfiles: None, str, [str, str]
:param rm: bool, True to select ``self.rmc`` (run metadata)
:param ci: bool, True to select ``self.cic`` (chromatogram items)
:param smi: bool, True to select ``self.smic`` (spectrum metadata items)
:param sai: bool, True to select ``self.saic`` (spectrum array items)
:param si: bool, True to select ``self.sic`` (spectrum items)
:param compress: bool, True to use zip file compression
:param path: filedirectory to which the ``mrc`` files are written. By
default the parameter is set to ``None`` and the filedirectory is
read from ``self.info[specfile]['path']``
"""
if specfiles is None:
specfiles = [_ for _ in viewkeys(self.info)] # depends on [control=['if'], data=['specfiles']]
else:
specfiles = aux.toList(specfiles)
datatypes = self._processDatatypes(rm, ci, smi, sai, si)
if len(datatypes) == 0:
datatypes = ['rm', 'ci', 'smi', 'sai', 'si'] # depends on [control=['if'], data=[]]
for specfile in specfiles:
if specfile not in self.info:
warntext = 'Error while calling "MsrunContainer.save()": "%s" is not present in "MsrunContainer.info"!' % (specfile,)
warnings.warn(warntext)
continue # depends on [control=['if'], data=['specfile']]
else:
msrunInfo = self.info[specfile]
specfilePath = msrunInfo['path'] if path is None else path
with aux.PartiallySafeReplace() as msr:
for datatype in datatypes:
filename = specfile + '.mrc_' + datatype
filepath = aux.joinpath(specfilePath, filename)
with msr.open(filepath, 'w+b') as openfile:
if datatype == 'rm':
self._writeRmc(openfile, specfile) # depends on [control=['if'], data=[]]
elif datatype == 'ci':
self._writeCic(openfile, specfile, compress) # depends on [control=['if'], data=[]]
elif datatype == 'si':
self._writeSic(openfile, specfile, compress) # depends on [control=['if'], data=[]]
elif datatype == 'smi':
self._writeSmic(openfile, specfile, compress) # depends on [control=['if'], data=[]]
elif datatype == 'sai':
self._writeSaic(openfile, specfile, compress) # depends on [control=['if'], data=[]] # depends on [control=['with'], data=['openfile']] # depends on [control=['for'], data=['datatype']] # depends on [control=['with'], data=['msr']] # depends on [control=['for'], data=['specfile']] |
def _get_balance(self, account_number):
"""Get current balance from Fido."""
# Prepare data
data = {"ctn": self.username,
"language": "en-US",
"accountNumber": account_number}
# Http request
try:
raw_res = yield from self._session.post(BALANCE_URL,
data=data,
headers=self._headers,
timeout=self._timeout)
except OSError:
raise PyFidoError("Can not get balance")
# Get balance
try:
json_content = yield from raw_res.json()
balance_str = json_content\
.get("getAccountInfo", {})\
.get("balance")
except (OSError, ValueError):
raise PyFidoError("Can not get balance as json")
if balance_str is None:
raise PyFidoError("Can not get balance")
# Casting to float
try:
balance = float(balance_str)
except ValueError:
raise PyFidoError("Can not get balance as float")
return balance | def function[_get_balance, parameter[self, account_number]]:
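# Hedged usage sketch: this is a pre-async/await coroutine, so it is driven
# with ``yield from`` inside another coroutine on the same event loop:
#
#     @asyncio.coroutine
#     def show_balance(client, account_number):
#         balance = yield from client._get_balance(account_number)
#         print(balance)   # float, or PyFidoError was raised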
constant[Get current balance from Fido.]
variable[data] assign[=] dictionary[[<ast.Constant object at 0x7da20e748580>, <ast.Constant object at 0x7da20e7485e0>, <ast.Constant object at 0x7da20e74b850>], [<ast.Attribute object at 0x7da20e748640>, <ast.Constant object at 0x7da20e74b670>, <ast.Name object at 0x7da20e749ab0>]]
<ast.Try object at 0x7da20e74b370>
<ast.Try object at 0x7da20e748760>
if compare[name[balance_str] is constant[None]] begin[:]
<ast.Raise object at 0x7da18bc73d90>
<ast.Try object at 0x7da18bc71000>
return[name[balance]] | keyword[def] identifier[_get_balance] ( identifier[self] , identifier[account_number] ):
literal[string]
identifier[data] ={ literal[string] : identifier[self] . identifier[username] ,
literal[string] : literal[string] ,
literal[string] : identifier[account_number] }
keyword[try] :
identifier[raw_res] = keyword[yield] keyword[from] identifier[self] . identifier[_session] . identifier[post] ( identifier[BALANCE_URL] ,
identifier[data] = identifier[data] ,
identifier[headers] = identifier[self] . identifier[_headers] ,
identifier[timeout] = identifier[self] . identifier[_timeout] )
keyword[except] identifier[OSError] :
keyword[raise] identifier[PyFidoError] ( literal[string] )
keyword[try] :
identifier[json_content] = keyword[yield] keyword[from] identifier[raw_res] . identifier[json] ()
identifier[balance_str] = identifier[json_content] . identifier[get] ( literal[string] ,{}). identifier[get] ( literal[string] )
keyword[except] ( identifier[OSError] , identifier[ValueError] ):
keyword[raise] identifier[PyFidoError] ( literal[string] )
keyword[if] identifier[balance_str] keyword[is] keyword[None] :
keyword[raise] identifier[PyFidoError] ( literal[string] )
keyword[try] :
identifier[balance] = identifier[float] ( identifier[balance_str] )
keyword[except] identifier[ValueError] :
keyword[raise] identifier[PyFidoError] ( literal[string] )
keyword[return] identifier[balance] | def _get_balance(self, account_number):
"""Get current balance from Fido."""
# Prepare data
data = {'ctn': self.username, 'language': 'en-US', 'accountNumber': account_number}
# Http request
try:
raw_res = (yield from self._session.post(BALANCE_URL, data=data, headers=self._headers, timeout=self._timeout)) # depends on [control=['try'], data=[]]
except OSError:
raise PyFidoError('Can not get balance') # depends on [control=['except'], data=[]]
# Get balance
try:
json_content = (yield from raw_res.json())
balance_str = json_content.get('getAccountInfo', {}).get('balance') # depends on [control=['try'], data=[]]
except (OSError, ValueError):
raise PyFidoError('Can not get balance as json') # depends on [control=['except'], data=[]]
if balance_str is None:
raise PyFidoError('Can not get balance') # depends on [control=['if'], data=[]]
# Casting to float
try:
balance = float(balance_str) # depends on [control=['try'], data=[]]
except ValueError:
raise PyFidoError('Can not get balance as float') # depends on [control=['except'], data=[]]
return balance |
def get_object_example(self, def_name):
'''
Create example for response, from object structure
        :param def_name: definition name of the structure
:type def_name: str, unicode
:return: example of object
:rtype: dict
'''
def_model = self.definitions[def_name]
example = dict()
for opt_name, opt_value in def_model.get('properties', dict()).items():
var_type = opt_value.get('format', None) or opt_value.get('type', None)
example[opt_name] = self.get_response_example(opt_name, var_type, opt_value)
if var_type == 'string':
example[opt_name] = example[opt_name].format(opt_name)
return example | def function[get_object_example, parameter[self, def_name]]:
constant[
Create example for response, from object structure
:param def_name: --deffinition name of structure
:type def_name: str, unicode
:return: example of object
:rtype: dict
]
variable[def_model] assign[=] call[name[self].definitions][name[def_name]]
variable[example] assign[=] call[name[dict], parameter[]]
for taget[tuple[[<ast.Name object at 0x7da1b0406140>, <ast.Name object at 0x7da1b04061d0>]]] in starred[call[call[name[def_model].get, parameter[constant[properties], call[name[dict], parameter[]]]].items, parameter[]]] begin[:]
variable[var_type] assign[=] <ast.BoolOp object at 0x7da1b0405690>
call[name[example]][name[opt_name]] assign[=] call[name[self].get_response_example, parameter[name[opt_name], name[var_type], name[opt_value]]]
if compare[name[var_type] equal[==] constant[string]] begin[:]
call[name[example]][name[opt_name]] assign[=] call[call[name[example]][name[opt_name]].format, parameter[name[opt_name]]]
return[name[example]] | keyword[def] identifier[get_object_example] ( identifier[self] , identifier[def_name] ):
literal[string]
identifier[def_model] = identifier[self] . identifier[definitions] [ identifier[def_name] ]
identifier[example] = identifier[dict] ()
keyword[for] identifier[opt_name] , identifier[opt_value] keyword[in] identifier[def_model] . identifier[get] ( literal[string] , identifier[dict] ()). identifier[items] ():
identifier[var_type] = identifier[opt_value] . identifier[get] ( literal[string] , keyword[None] ) keyword[or] identifier[opt_value] . identifier[get] ( literal[string] , keyword[None] )
identifier[example] [ identifier[opt_name] ]= identifier[self] . identifier[get_response_example] ( identifier[opt_name] , identifier[var_type] , identifier[opt_value] )
keyword[if] identifier[var_type] == literal[string] :
identifier[example] [ identifier[opt_name] ]= identifier[example] [ identifier[opt_name] ]. identifier[format] ( identifier[opt_name] )
keyword[return] identifier[example] | def get_object_example(self, def_name):
"""
Create example for response, from object structure
:param def_name: --deffinition name of structure
:type def_name: str, unicode
:return: example of object
:rtype: dict
"""
def_model = self.definitions[def_name]
example = dict()
for (opt_name, opt_value) in def_model.get('properties', dict()).items():
var_type = opt_value.get('format', None) or opt_value.get('type', None)
example[opt_name] = self.get_response_example(opt_name, var_type, opt_value)
if var_type == 'string':
example[opt_name] = example[opt_name].format(opt_name) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
return example |
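# Hedged walkthrough: for a definition such as
#     {"properties": {"id": {"type": "integer"}, "name": {"type": "string"}}}
# each property is filled in via get_response_example(), and string templates
# get the property name substituted, yielding something like
#     {"id": 1, "name": "name"}   # illustrative values only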
def rwh_primes1(n):
    # http://stackoverflow.com/questions/2068372/fastest-way-to-list-all-primes-below-n-in-python/3035188#3035188
    ''' Returns a list of primes < n '''
    # floor division keeps the sieve length and indices integral on Python 3
    sieve = [True] * (n//2)
    for i in _range(3, int(n**0.5)+1, 2):
        if sieve[i//2]:
            sieve[i*i//2::i] = [False] * ((n-i*i-1)//(2*i)+1)
    return [2] + [2*i+1 for i in _range(1, n//2) if sieve[i]]
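# Quick self-check (hedged: assumes the module alias _range resolves to the
# builtin range, as it does on Python 3):
if __name__ == '__main__':
    assert rwh_primes1(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]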
constant[ Returns a list of primes < n ]
variable[sieve] assign[=] binary_operation[list[[<ast.Constant object at 0x7da18f7218a0>]] * binary_operation[name[n] / constant[2]]]
for taget[name[i]] in starred[call[name[_range], parameter[constant[3], binary_operation[call[name[int], parameter[binary_operation[name[n] ** constant[0.5]]]] + constant[1]], constant[2]]]] begin[:]
if call[name[sieve]][binary_operation[name[i] / constant[2]]] begin[:]
call[name[sieve]][<ast.Slice object at 0x7da18f7207c0>] assign[=] binary_operation[list[[<ast.Constant object at 0x7da18f7235e0>]] * binary_operation[binary_operation[binary_operation[binary_operation[name[n] - binary_operation[name[i] * name[i]]] - constant[1]] / binary_operation[constant[2] * name[i]]] + constant[1]]]
return[binary_operation[list[[<ast.Constant object at 0x7da18f7232b0>]] + <ast.ListComp object at 0x7da18f720610>]] | keyword[def] identifier[rwh_primes1] ( identifier[n] ):
literal[string]
identifier[sieve] =[ keyword[True] ]*( identifier[n] / literal[int] )
keyword[for] identifier[i] keyword[in] identifier[_range] ( literal[int] , identifier[int] ( identifier[n] ** literal[int] )+ literal[int] , literal[int] ):
keyword[if] identifier[sieve] [ identifier[i] / literal[int] ]:
identifier[sieve] [ identifier[i] * identifier[i] / literal[int] :: identifier[i] ]=[ keyword[False] ]*(( identifier[n] - identifier[i] * identifier[i] - literal[int] )/( literal[int] * identifier[i] )+ literal[int] )
keyword[return] [ literal[int] ]+[ literal[int] * identifier[i] + literal[int] keyword[for] identifier[i] keyword[in] identifier[_range] ( literal[int] , identifier[n] / literal[int] ) keyword[if] identifier[sieve] [ identifier[i] ]] | def rwh_primes1(n):
# http://stackoverflow.com/questions/2068372/fastest-way-to-list-all-primes-below-n-in-python/3035188#3035188
' Returns a list of primes < n '
sieve = [True] * (n / 2)
for i in _range(3, int(n ** 0.5) + 1, 2):
if sieve[i / 2]:
sieve[i * i / 2::i] = [False] * ((n - i * i - 1) / (2 * i) + 1) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']]
return [2] + [2 * i + 1 for i in _range(1, n / 2) if sieve[i]] |
def stripQuotes(value):
"""Strip single or double quotes off string; remove embedded quote pairs"""
if value[:1] == '"':
value = value[1:]
if value[-1:] == '"':
value = value[:-1]
# replace "" with "
value = re.sub(_re_doubleq2, '"', value)
elif value[:1] == "'":
value = value[1:]
if value[-1:] == "'":
value = value[:-1]
# replace '' with '
value = re.sub(_re_singleq2, "'", value)
return value | def function[stripQuotes, parameter[value]]:
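# Hedged demo -- the module-level patterns are not shown in this excerpt, so
# plausible definitions are assumed here for illustration only (the module's
# existing ``re`` import is relied on):
if __name__ == '__main__':
    _re_doubleq2 = re.compile('""')
    _re_singleq2 = re.compile("''")
    assert stripQuotes('"say ""hi"""') == 'say "hi"'
    assert stripQuotes("'it''s'") == "it's"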
constant[Strip single or double quotes off string; remove embedded quote pairs]
if compare[call[name[value]][<ast.Slice object at 0x7da1b0f057e0>] equal[==] constant["]] begin[:]
variable[value] assign[=] call[name[value]][<ast.Slice object at 0x7da1b0f05510>]
if compare[call[name[value]][<ast.Slice object at 0x7da1b0f05e40>] equal[==] constant["]] begin[:]
variable[value] assign[=] call[name[value]][<ast.Slice object at 0x7da1b0f06320>]
variable[value] assign[=] call[name[re].sub, parameter[name[_re_doubleq2], constant["], name[value]]]
return[name[value]] | keyword[def] identifier[stripQuotes] ( identifier[value] ):
literal[string]
keyword[if] identifier[value] [: literal[int] ]== literal[string] :
identifier[value] = identifier[value] [ literal[int] :]
keyword[if] identifier[value] [- literal[int] :]== literal[string] :
identifier[value] = identifier[value] [:- literal[int] ]
identifier[value] = identifier[re] . identifier[sub] ( identifier[_re_doubleq2] , literal[string] , identifier[value] )
keyword[elif] identifier[value] [: literal[int] ]== literal[string] :
identifier[value] = identifier[value] [ literal[int] :]
keyword[if] identifier[value] [- literal[int] :]== literal[string] :
identifier[value] = identifier[value] [:- literal[int] ]
identifier[value] = identifier[re] . identifier[sub] ( identifier[_re_singleq2] , literal[string] , identifier[value] )
keyword[return] identifier[value] | def stripQuotes(value):
"""Strip single or double quotes off string; remove embedded quote pairs"""
if value[:1] == '"':
value = value[1:]
if value[-1:] == '"':
value = value[:-1] # depends on [control=['if'], data=[]]
# replace "" with "
value = re.sub(_re_doubleq2, '"', value) # depends on [control=['if'], data=[]]
elif value[:1] == "'":
value = value[1:]
if value[-1:] == "'":
value = value[:-1] # depends on [control=['if'], data=[]]
# replace '' with '
value = re.sub(_re_singleq2, "'", value) # depends on [control=['if'], data=[]]
return value |
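A self-contained sketch of stripQuotes in action, assuming the definition above is in scope. The module-level patterns `_re_doubleq2` and `_re_singleq2` are not shown in this row; plausible doubled-quote matchers are assumed below.

import re

_re_doubleq2 = re.compile('""')  # assumption: matches an embedded doubled double quote
_re_singleq2 = re.compile("''")  # assumption: matches an embedded doubled single quote

print(stripQuotes('"say ""hi"" twice"'))  # say "hi" twice
print(stripQuotes("'it''s quoted'"))      # it's quoted
print(stripQuotes('unquoted'))            # unquoted (no surrounding quote, returned as is)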
def get_bars(self, assets, data_frequency, bar_count=500):
'''
Interface method.
Return: pd.DataFrame() with columns MultiIndex [asset -> OHLCV]
'''
assets_is_scalar = not isinstance(assets, (list, set, tuple))
is_daily = 'd' in data_frequency # 'daily' or '1d'
if assets_is_scalar:
symbols = [assets.symbol]
else:
symbols = [asset.symbol for asset in assets]
symbol_bars = self._symbol_bars(
symbols, 'day' if is_daily else 'minute', limit=bar_count)
if is_daily:
intra_bars = {}
symbol_bars_minute = self._symbol_bars(
symbols, 'minute', limit=1000)
for symbol, df in symbol_bars_minute.items():
agged = df.resample('1D').agg(dict(
open='first',
high='max',
low='min',
close='last',
volume='sum',
)).dropna()
intra_bars[symbol] = agged
dfs = []
for asset in assets if not assets_is_scalar else [assets]:
symbol = asset.symbol
df = symbol_bars.get(symbol)
if df is None:
dfs.append(pd.DataFrame(
[], columns=[
'open', 'high', 'low', 'close', 'volume']
))
continue
if is_daily:
agged = intra_bars.get(symbol)
if agged is not None and len(
agged.index) > 0 and agged.index[-1] not in df.index:
if not (agged.index[-1] > df.index[-1]):
log.warn(
('agged.index[-1] = {}, df.index[-1] = {} '
'for {}').format(
agged.index[-1], df.index[-1], symbol))
df = df.append(agged.iloc[-1])
df.columns = pd.MultiIndex.from_product([[asset, ], df.columns])
dfs.append(df)
return pd.concat(dfs, axis=1) | def function[get_bars, parameter[self, assets, data_frequency, bar_count]]:
constant[
Interface method.
Return: pd.DataFrame() with columns MultiIndex [asset -> OHLCV]
]
variable[assets_is_scalar] assign[=] <ast.UnaryOp object at 0x7da2054a6230>
variable[is_daily] assign[=] compare[constant[d] in name[data_frequency]]
if name[assets_is_scalar] begin[:]
variable[symbols] assign[=] list[[<ast.Attribute object at 0x7da2054a5840>]]
variable[symbol_bars] assign[=] call[name[self]._symbol_bars, parameter[name[symbols], <ast.IfExp object at 0x7da2054a7ee0>]]
if name[is_daily] begin[:]
variable[intra_bars] assign[=] dictionary[[], []]
variable[symbol_bars_minute] assign[=] call[name[self]._symbol_bars, parameter[name[symbols], constant[minute]]]
for taget[tuple[[<ast.Name object at 0x7da204621db0>, <ast.Name object at 0x7da204623700>]]] in starred[call[name[symbol_bars_minute].items, parameter[]]] begin[:]
variable[agged] assign[=] call[call[call[name[df].resample, parameter[constant[1D]]].agg, parameter[call[name[dict], parameter[]]]].dropna, parameter[]]
call[name[intra_bars]][name[symbol]] assign[=] name[agged]
variable[dfs] assign[=] list[[]]
for taget[name[asset]] in starred[<ast.IfExp object at 0x7da204622ef0>] begin[:]
variable[symbol] assign[=] name[asset].symbol
variable[df] assign[=] call[name[symbol_bars].get, parameter[name[symbol]]]
if compare[name[df] is constant[None]] begin[:]
call[name[dfs].append, parameter[call[name[pd].DataFrame, parameter[list[[]]]]]]
continue
if name[is_daily] begin[:]
variable[agged] assign[=] call[name[intra_bars].get, parameter[name[symbol]]]
if <ast.BoolOp object at 0x7da1b21d7dc0> begin[:]
if <ast.UnaryOp object at 0x7da2054a7d90> begin[:]
call[name[log].warn, parameter[call[constant[agged.index[-1] = {}, df.index[-1] = {} for {}].format, parameter[call[name[agged].index][<ast.UnaryOp object at 0x7da2054a7fd0>], call[name[df].index][<ast.UnaryOp object at 0x7da2054a67a0>], name[symbol]]]]]
variable[df] assign[=] call[name[df].append, parameter[call[name[agged].iloc][<ast.UnaryOp object at 0x7da2054a47c0>]]]
name[df].columns assign[=] call[name[pd].MultiIndex.from_product, parameter[list[[<ast.List object at 0x7da2054a7190>, <ast.Attribute object at 0x7da2054a57e0>]]]]
call[name[dfs].append, parameter[name[df]]]
return[call[name[pd].concat, parameter[name[dfs]]]] | keyword[def] identifier[get_bars] ( identifier[self] , identifier[assets] , identifier[data_frequency] , identifier[bar_count] = literal[int] ):
literal[string]
identifier[assets_is_scalar] = keyword[not] identifier[isinstance] ( identifier[assets] ,( identifier[list] , identifier[set] , identifier[tuple] ))
identifier[is_daily] = literal[string] keyword[in] identifier[data_frequency]
keyword[if] identifier[assets_is_scalar] :
identifier[symbols] =[ identifier[assets] . identifier[symbol] ]
keyword[else] :
identifier[symbols] =[ identifier[asset] . identifier[symbol] keyword[for] identifier[asset] keyword[in] identifier[assets] ]
identifier[symbol_bars] = identifier[self] . identifier[_symbol_bars] (
identifier[symbols] , literal[string] keyword[if] identifier[is_daily] keyword[else] literal[string] , identifier[limit] = identifier[bar_count] )
keyword[if] identifier[is_daily] :
identifier[intra_bars] ={}
identifier[symbol_bars_minute] = identifier[self] . identifier[_symbol_bars] (
identifier[symbols] , literal[string] , identifier[limit] = literal[int] )
keyword[for] identifier[symbol] , identifier[df] keyword[in] identifier[symbol_bars_minute] . identifier[items] ():
identifier[agged] = identifier[df] . identifier[resample] ( literal[string] ). identifier[agg] ( identifier[dict] (
identifier[open] = literal[string] ,
identifier[high] = literal[string] ,
identifier[low] = literal[string] ,
identifier[close] = literal[string] ,
identifier[volume] = literal[string] ,
)). identifier[dropna] ()
identifier[intra_bars] [ identifier[symbol] ]= identifier[agged]
identifier[dfs] =[]
keyword[for] identifier[asset] keyword[in] identifier[assets] keyword[if] keyword[not] identifier[assets_is_scalar] keyword[else] [ identifier[assets] ]:
identifier[symbol] = identifier[asset] . identifier[symbol]
identifier[df] = identifier[symbol_bars] . identifier[get] ( identifier[symbol] )
keyword[if] identifier[df] keyword[is] keyword[None] :
identifier[dfs] . identifier[append] ( identifier[pd] . identifier[DataFrame] (
[], identifier[columns] =[
literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ]
))
keyword[continue]
keyword[if] identifier[is_daily] :
identifier[agged] = identifier[intra_bars] . identifier[get] ( identifier[symbol] )
keyword[if] identifier[agged] keyword[is] keyword[not] keyword[None] keyword[and] identifier[len] (
identifier[agged] . identifier[index] )> literal[int] keyword[and] identifier[agged] . identifier[index] [- literal[int] ] keyword[not] keyword[in] identifier[df] . identifier[index] :
keyword[if] keyword[not] ( identifier[agged] . identifier[index] [- literal[int] ]> identifier[df] . identifier[index] [- literal[int] ]):
identifier[log] . identifier[warn] (
( literal[string]
literal[string] ). identifier[format] (
identifier[agged] . identifier[index] [- literal[int] ], identifier[df] . identifier[index] [- literal[int] ], identifier[symbol] ))
identifier[df] = identifier[df] . identifier[append] ( identifier[agged] . identifier[iloc] [- literal[int] ])
identifier[df] . identifier[columns] = identifier[pd] . identifier[MultiIndex] . identifier[from_product] ([[ identifier[asset] ,], identifier[df] . identifier[columns] ])
identifier[dfs] . identifier[append] ( identifier[df] )
keyword[return] identifier[pd] . identifier[concat] ( identifier[dfs] , identifier[axis] = literal[int] ) | def get_bars(self, assets, data_frequency, bar_count=500):
"""
Interface method.
Return: pd.DataFrame() with columns MultiIndex [asset -> OHLCV]
"""
assets_is_scalar = not isinstance(assets, (list, set, tuple))
is_daily = 'd' in data_frequency # 'daily' or '1d'
if assets_is_scalar:
symbols = [assets.symbol] # depends on [control=['if'], data=[]]
else:
symbols = [asset.symbol for asset in assets]
symbol_bars = self._symbol_bars(symbols, 'day' if is_daily else 'minute', limit=bar_count)
if is_daily:
intra_bars = {}
symbol_bars_minute = self._symbol_bars(symbols, 'minute', limit=1000)
for (symbol, df) in symbol_bars_minute.items():
agged = df.resample('1D').agg(dict(open='first', high='max', low='min', close='last', volume='sum')).dropna()
intra_bars[symbol] = agged # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
dfs = []
for asset in assets if not assets_is_scalar else [assets]:
symbol = asset.symbol
df = symbol_bars.get(symbol)
if df is None:
dfs.append(pd.DataFrame([], columns=['open', 'high', 'low', 'close', 'volume']))
continue # depends on [control=['if'], data=[]]
if is_daily:
agged = intra_bars.get(symbol)
if agged is not None and len(agged.index) > 0 and (agged.index[-1] not in df.index):
if not agged.index[-1] > df.index[-1]:
log.warn('agged.index[-1] = {}, df.index[-1] = {} for {}'.format(agged.index[-1], df.index[-1], symbol)) # depends on [control=['if'], data=[]]
df = df.append(agged.iloc[-1]) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
df.columns = pd.MultiIndex.from_product([[asset], df.columns])
dfs.append(df) # depends on [control=['for'], data=['asset']]
return pd.concat(dfs, axis=1) |
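The daily branch above boils down to a pandas resample/agg over minute bars; a standalone sketch of just that step, with made-up prices (the full method also needs a broker-backed `_symbol_bars` source):

import pandas as pd

idx = pd.date_range('2021-01-04 09:30', periods=4, freq='1min')
minute_bars = pd.DataFrame({
    'open':   [100.0, 100.5, 100.4, 100.8],
    'high':   [100.6, 100.9, 100.8, 101.2],
    'low':    [99.8, 100.3, 100.1, 100.7],
    'close':  [100.5, 100.4, 100.8, 101.0],
    'volume': [1200, 800, 950, 1100],
}, index=idx)

daily = minute_bars.resample('1D').agg(
    dict(open='first', high='max', low='min', close='last', volume='sum')).dropna()
print(daily)  # one row: open 100.0, high 101.2, low 99.8, close 101.0, volume 4050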
def unmount_medium(self, name, controller_port, device, force):
"""Unmounts any currently mounted medium (:py:class:`IMedium` ,
identified by the given UUID @a id) from the given storage controller
(:py:class:`IStorageController` , identified by @a name),
at the indicated port and device. The device must already exist.
This method is intended only for managing removable media, where the
device is fixed but media is changeable at runtime (such as DVDs
and floppies). It cannot be used for fixed media such as hard disks.
The @a controllerPort and @a device parameters specify the device slot
and have the same meaning as with
:py:func:`IMachine.attach_device` .
The specified device slot must have a medium mounted, which will be
unmounted. If there is no mounted medium it will do nothing.
See :py:class:`IMedium` for more detailed information about
attaching/unmounting media.
in name of type str
Name of the storage controller to unmount the medium from.
in controller_port of type int
Port to unmount the medium from.
in device of type int
Device slot in the given port to unmount the medium from.
in force of type bool
Allows forcing the unmount of a medium which is locked by
the device slot in the given port it is attached to.
raises :class:`OleErrorInvalidarg`
SATA device, SATA port, IDE port or IDE slot out of range.
raises :class:`VBoxErrorInvalidObjectState`
Attempt to unmount medium that is not removable - not DVD or floppy.
raises :class:`VBoxErrorInvalidVmState`
Invalid machine state.
raises :class:`VBoxErrorObjectInUse`
Medium already attached to this or another virtual machine.
raises :class:`VBoxErrorObjectNotFound`
Medium not attached to specified port, device, controller.
"""
if not isinstance(name, basestring):
raise TypeError("name can only be an instance of type basestring")
if not isinstance(controller_port, baseinteger):
raise TypeError("controller_port can only be an instance of type baseinteger")
if not isinstance(device, baseinteger):
raise TypeError("device can only be an instance of type baseinteger")
if not isinstance(force, bool):
raise TypeError("force can only be an instance of type bool")
self._call("unmountMedium",
in_p=[name, controller_port, device, force]) | def function[unmount_medium, parameter[self, name, controller_port, device, force]]:
constant[Unmounts any currently mounted medium (:py:class:`IMedium` ,
identified by the given UUID @a id) from the given storage controller
(:py:class:`IStorageController` , identified by @a name),
at the indicated port and device. The device must already exist.
This method is intended only for managing removable media, where the
device is fixed but media is changeable at runtime (such as DVDs
and floppies). It cannot be used for fixed media such as hard disks.
The @a controllerPort and @a device parameters specify the device slot
and have the same meaning as with
:py:func:`IMachine.attach_device` .
The specified device slot must have a medium mounted, which will be
unmounted. If there is no mounted medium it will do nothing.
See :py:class:`IMedium` for more detailed information about
attaching/unmounting media.
in name of type str
Name of the storage controller to unmount the medium from.
in controller_port of type int
Port to unmount the medium from.
in device of type int
Device slot in the given port to unmount the medium from.
in force of type bool
Allows forcing the unmount of a medium which is locked by
the device slot in the given port it is attached to.
raises :class:`OleErrorInvalidarg`
SATA device, SATA port, IDE port or IDE slot out of range.
raises :class:`VBoxErrorInvalidObjectState`
Attempt to unmount medium that is not removable - not DVD or floppy.
raises :class:`VBoxErrorInvalidVmState`
Invalid machine state.
raises :class:`VBoxErrorObjectInUse`
Medium already attached to this or another virtual machine.
raises :class:`VBoxErrorObjectNotFound`
Medium not attached to specified port, device, controller.
]
if <ast.UnaryOp object at 0x7da2047ead10> begin[:]
<ast.Raise object at 0x7da2047eabf0>
if <ast.UnaryOp object at 0x7da2047e80a0> begin[:]
<ast.Raise object at 0x7da2047eae60>
if <ast.UnaryOp object at 0x7da18eb57130> begin[:]
<ast.Raise object at 0x7da18eb569b0>
if <ast.UnaryOp object at 0x7da18eb57760> begin[:]
<ast.Raise object at 0x7da18eb56590>
call[name[self]._call, parameter[constant[unmountMedium]]] | keyword[def] identifier[unmount_medium] ( identifier[self] , identifier[name] , identifier[controller_port] , identifier[device] , identifier[force] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[name] , identifier[basestring] ):
keyword[raise] identifier[TypeError] ( literal[string] )
keyword[if] keyword[not] identifier[isinstance] ( identifier[controller_port] , identifier[baseinteger] ):
keyword[raise] identifier[TypeError] ( literal[string] )
keyword[if] keyword[not] identifier[isinstance] ( identifier[device] , identifier[baseinteger] ):
keyword[raise] identifier[TypeError] ( literal[string] )
keyword[if] keyword[not] identifier[isinstance] ( identifier[force] , identifier[bool] ):
keyword[raise] identifier[TypeError] ( literal[string] )
identifier[self] . identifier[_call] ( literal[string] ,
identifier[in_p] =[ identifier[name] , identifier[controller_port] , identifier[device] , identifier[force] ]) | def unmount_medium(self, name, controller_port, device, force):
"""Unmounts any currently mounted medium (:py:class:`IMedium` ,
identified by the given UUID @a id) from the given storage controller
(:py:class:`IStorageController` , identified by @a name),
at the indicated port and device. The device must already exist.
This method is intended only for managing removable media, where the
device is fixed but media is changeable at runtime (such as DVDs
and floppies). It cannot be used for fixed media such as hard disks.
The @a controllerPort and @a device parameters specify the device slot
and have the same meaning as with
:py:func:`IMachine.attach_device` .
The specified device slot must have a medium mounted, which will be
unmounted. If there is no mounted medium it will do nothing.
See :py:class:`IMedium` for more detailed information about
attaching/unmounting media.
in name of type str
Name of the storage controller to unmount the medium from.
in controller_port of type int
Port to unmount the medium from.
in device of type int
Device slot in the given port to unmount the medium from.
in force of type bool
Allows forcing the unmount of a medium which is locked by
the device slot in the given port it is attached to.
raises :class:`OleErrorInvalidarg`
SATA device, SATA port, IDE port or IDE slot out of range.
raises :class:`VBoxErrorInvalidObjectState`
Attempt to unmount medium that is not removable - not DVD or floppy.
raises :class:`VBoxErrorInvalidVmState`
Invalid machine state.
raises :class:`VBoxErrorObjectInUse`
Medium already attached to this or another virtual machine.
raises :class:`VBoxErrorObjectNotFound`
Medium not attached to specified port, device, controller.
"""
if not isinstance(name, basestring):
raise TypeError('name can only be an instance of type basestring') # depends on [control=['if'], data=[]]
if not isinstance(controller_port, baseinteger):
raise TypeError('controller_port can only be an instance of type baseinteger') # depends on [control=['if'], data=[]]
if not isinstance(device, baseinteger):
raise TypeError('device can only be an instance of type baseinteger') # depends on [control=['if'], data=[]]
if not isinstance(force, bool):
raise TypeError('force can only be an instance of type bool') # depends on [control=['if'], data=[]]
self._call('unmountMedium', in_p=[name, controller_port, device, force]) |
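An untested sketch of calling this binding through the pyvbox-style wrapper it appears to belong to; the VM name ('test-vm') and controller name ('IDE') are assumptions.

import virtualbox

vbox = virtualbox.VirtualBox()
machine = vbox.find_machine('test-vm')  # assumption: a registered VM by this name
session = machine.create_session()      # lock the machine so settings can change
try:
    session.machine.unmount_medium('IDE', 1, 0, force=True)  # eject slot 1:0
    session.machine.save_settings()
finally:
    session.unlock_machine()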
def cli(env, identifier):
"""Cancel a subnet."""
mgr = SoftLayer.NetworkManager(env.client)
subnet_id = helpers.resolve_id(mgr.resolve_subnet_ids, identifier,
name='subnet')
if not (env.skip_confirmations or formatting.no_going_back(subnet_id)):
raise exceptions.CLIAbort('Aborted')
mgr.cancel_subnet(subnet_id) | def function[cli, parameter[env, identifier]]:
constant[Cancel a subnet.]
variable[mgr] assign[=] call[name[SoftLayer].NetworkManager, parameter[name[env].client]]
variable[subnet_id] assign[=] call[name[helpers].resolve_id, parameter[name[mgr].resolve_subnet_ids, name[identifier]]]
if <ast.UnaryOp object at 0x7da18c4cf8b0> begin[:]
<ast.Raise object at 0x7da18c4cdde0>
call[name[mgr].cancel_subnet, parameter[name[subnet_id]]] | keyword[def] identifier[cli] ( identifier[env] , identifier[identifier] ):
literal[string]
identifier[mgr] = identifier[SoftLayer] . identifier[NetworkManager] ( identifier[env] . identifier[client] )
identifier[subnet_id] = identifier[helpers] . identifier[resolve_id] ( identifier[mgr] . identifier[resolve_subnet_ids] , identifier[identifier] ,
identifier[name] = literal[string] )
keyword[if] keyword[not] ( identifier[env] . identifier[skip_confirmations] keyword[or] identifier[formatting] . identifier[no_going_back] ( identifier[subnet_id] )):
keyword[raise] identifier[exceptions] . identifier[CLIAbort] ( literal[string] )
identifier[mgr] . identifier[cancel_subnet] ( identifier[subnet_id] ) | def cli(env, identifier):
"""Cancel a subnet."""
mgr = SoftLayer.NetworkManager(env.client)
subnet_id = helpers.resolve_id(mgr.resolve_subnet_ids, identifier, name='subnet')
if not (env.skip_confirmations or formatting.no_going_back(subnet_id)):
raise exceptions.CLIAbort('Aborted') # depends on [control=['if'], data=[]]
mgr.cancel_subnet(subnet_id) |
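A note on invocation: this is a click-style handler wired into the SoftLayer CLI, so from a shell it corresponds to something along the lines of

$ slcli subnet cancel 1234567

where the subnet identifier is made up; formatting.no_going_back() asks for the interactive confirmation first unless confirmations are skipped, after which the cancellation is irreversible.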
def canonicalize(self, mol):
"""Return a canonical tautomer by enumerating and scoring all possible tautomers.
:param mol: The input molecule.
:type mol: rdkit.Chem.rdchem.Mol
:return: The canonical tautomer.
:rtype: rdkit.Chem.rdchem.Mol
"""
# TODO: Overload the mol parameter to pass a list of pre-enumerated tautomers
tautomers = self._enumerate_tautomers(mol)
if len(tautomers) == 1:
return tautomers[0]
# Calculate score for each tautomer
highest = None
for t in tautomers:
smiles = Chem.MolToSmiles(t, isomericSmiles=True)
log.debug('Tautomer: %s', smiles)
score = 0
# Add aromatic ring scores
ssr = Chem.GetSymmSSSR(t)
for ring in ssr:
btypes = {t.GetBondBetweenAtoms(*pair).GetBondType() for pair in pairwise(ring)}
elements = {t.GetAtomWithIdx(idx).GetAtomicNum() for idx in ring}
if btypes == {BondType.AROMATIC}:
log.debug('Score +100 (aromatic ring)')
score += 100
if elements == {6}:
log.debug('Score +150 (carbocyclic aromatic ring)')
score += 150
# Add SMARTS scores
for tscore in self.scores:
for match in t.GetSubstructMatches(tscore.smarts):
log.debug('Score %+d (%s)', tscore.score, tscore.name)
score += tscore.score
# Add (P,S,Se,Te)-H scores
for atom in t.GetAtoms():
if atom.GetAtomicNum() in {15, 16, 34, 52}:
hs = atom.GetTotalNumHs()
if hs:
log.debug('Score %+d (%s-H bonds)', -hs, atom.GetSymbol())
score -= hs
# Set as highest if score higher or if score equal and smiles comes first alphabetically
if not highest or highest['score'] < score or (highest['score'] == score and smiles < highest['smiles']):
log.debug('New highest tautomer: %s (%s)', smiles, score)
highest = {'smiles': smiles, 'tautomer': t, 'score': score}
return highest['tautomer'] | def function[canonicalize, parameter[self, mol]]:
constant[Return a canonical tautomer by enumerating and scoring all possible tautomers.
:param mol: The input molecule.
:type mol: rdkit.Chem.rdchem.Mol
:return: The canonical tautomer.
:rtype: rdkit.Chem.rdchem.Mol
]
variable[tautomers] assign[=] call[name[self]._enumerate_tautomers, parameter[name[mol]]]
if compare[call[name[len], parameter[name[tautomers]]] equal[==] constant[1]] begin[:]
return[call[name[tautomers]][constant[0]]]
variable[highest] assign[=] constant[None]
for taget[name[t]] in starred[name[tautomers]] begin[:]
variable[smiles] assign[=] call[name[Chem].MolToSmiles, parameter[name[t]]]
call[name[log].debug, parameter[constant[Tautomer: %s], name[smiles]]]
variable[score] assign[=] constant[0]
variable[ssr] assign[=] call[name[Chem].GetSymmSSSR, parameter[name[t]]]
for taget[name[ring]] in starred[name[ssr]] begin[:]
variable[btypes] assign[=] <ast.SetComp object at 0x7da1b0389c00>
variable[elements] assign[=] <ast.SetComp object at 0x7da1b03ba140>
if compare[name[btypes] equal[==] <ast.Set object at 0x7da1b03ba800>] begin[:]
call[name[log].debug, parameter[constant[Score +100 (aromatic ring)]]]
<ast.AugAssign object at 0x7da1b03bb9d0>
if compare[name[elements] equal[==] <ast.Set object at 0x7da1b03ba080>] begin[:]
call[name[log].debug, parameter[constant[Score +150 (carbocyclic aromatic ring)]]]
<ast.AugAssign object at 0x7da1b03ba7a0>
for taget[name[tscore]] in starred[name[self].scores] begin[:]
for taget[name[match]] in starred[call[name[t].GetSubstructMatches, parameter[name[tscore].smarts]]] begin[:]
call[name[log].debug, parameter[constant[Score %+d (%s)], name[tscore].score, name[tscore].name]]
<ast.AugAssign object at 0x7da1b03b81c0>
for taget[name[atom]] in starred[call[name[t].GetAtoms, parameter[]]] begin[:]
if compare[call[name[atom].GetAtomicNum, parameter[]] in <ast.Set object at 0x7da1b03b8430>] begin[:]
variable[hs] assign[=] call[name[atom].GetTotalNumHs, parameter[]]
if name[hs] begin[:]
call[name[log].debug, parameter[constant[Score %+d (%s-H bonds)], <ast.UnaryOp object at 0x7da1b03885b0>, call[name[atom].GetSymbol, parameter[]]]]
<ast.AugAssign object at 0x7da1b038b5b0>
if <ast.BoolOp object at 0x7da1b03883a0> begin[:]
call[name[log].debug, parameter[constant[New highest tautomer: %s (%s)], name[smiles], name[score]]]
variable[highest] assign[=] dictionary[[<ast.Constant object at 0x7da1b038b940>, <ast.Constant object at 0x7da1b038b970>, <ast.Constant object at 0x7da1b038ab60>], [<ast.Name object at 0x7da1b038b100>, <ast.Name object at 0x7da1b0389ae0>, <ast.Name object at 0x7da1b0388a30>]]
return[call[name[highest]][constant[tautomer]]] | keyword[def] identifier[canonicalize] ( identifier[self] , identifier[mol] ):
literal[string]
identifier[tautomers] = identifier[self] . identifier[_enumerate_tautomers] ( identifier[mol] )
keyword[if] identifier[len] ( identifier[tautomers] )== literal[int] :
keyword[return] identifier[tautomers] [ literal[int] ]
identifier[highest] = keyword[None]
keyword[for] identifier[t] keyword[in] identifier[tautomers] :
identifier[smiles] = identifier[Chem] . identifier[MolToSmiles] ( identifier[t] , identifier[isomericSmiles] = keyword[True] )
identifier[log] . identifier[debug] ( literal[string] , identifier[smiles] )
identifier[score] = literal[int]
identifier[ssr] = identifier[Chem] . identifier[GetSymmSSSR] ( identifier[t] )
keyword[for] identifier[ring] keyword[in] identifier[ssr] :
identifier[btypes] ={ identifier[t] . identifier[GetBondBetweenAtoms] (* identifier[pair] ). identifier[GetBondType] () keyword[for] identifier[pair] keyword[in] identifier[pairwise] ( identifier[ring] )}
identifier[elements] ={ identifier[t] . identifier[GetAtomWithIdx] ( identifier[idx] ). identifier[GetAtomicNum] () keyword[for] identifier[idx] keyword[in] identifier[ring] }
keyword[if] identifier[btypes] =={ identifier[BondType] . identifier[AROMATIC] }:
identifier[log] . identifier[debug] ( literal[string] )
identifier[score] += literal[int]
keyword[if] identifier[elements] =={ literal[int] }:
identifier[log] . identifier[debug] ( literal[string] )
identifier[score] += literal[int]
keyword[for] identifier[tscore] keyword[in] identifier[self] . identifier[scores] :
keyword[for] identifier[match] keyword[in] identifier[t] . identifier[GetSubstructMatches] ( identifier[tscore] . identifier[smarts] ):
identifier[log] . identifier[debug] ( literal[string] , identifier[tscore] . identifier[score] , identifier[tscore] . identifier[name] )
identifier[score] += identifier[tscore] . identifier[score]
keyword[for] identifier[atom] keyword[in] identifier[t] . identifier[GetAtoms] ():
keyword[if] identifier[atom] . identifier[GetAtomicNum] () keyword[in] { literal[int] , literal[int] , literal[int] , literal[int] }:
identifier[hs] = identifier[atom] . identifier[GetTotalNumHs] ()
keyword[if] identifier[hs] :
identifier[log] . identifier[debug] ( literal[string] ,- identifier[hs] , identifier[atom] . identifier[GetSymbol] ())
identifier[score] -= identifier[hs]
keyword[if] keyword[not] identifier[highest] keyword[or] identifier[highest] [ literal[string] ]< identifier[score] keyword[or] ( identifier[highest] [ literal[string] ]== identifier[score] keyword[and] identifier[smiles] < identifier[highest] [ literal[string] ]):
identifier[log] . identifier[debug] ( literal[string] , identifier[smiles] , identifier[score] )
identifier[highest] ={ literal[string] : identifier[smiles] , literal[string] : identifier[t] , literal[string] : identifier[score] }
keyword[return] identifier[highest] [ literal[string] ] | def canonicalize(self, mol):
"""Return a canonical tautomer by enumerating and scoring all possible tautomers.
:param mol: The input molecule.
:type mol: rdkit.Chem.rdchem.Mol
:return: The canonical tautomer.
:rtype: rdkit.Chem.rdchem.Mol
"""
# TODO: Overload the mol parameter to pass a list of pre-enumerated tautomers
tautomers = self._enumerate_tautomers(mol)
if len(tautomers) == 1:
return tautomers[0] # depends on [control=['if'], data=[]]
# Calculate score for each tautomer
highest = None
for t in tautomers:
smiles = Chem.MolToSmiles(t, isomericSmiles=True)
log.debug('Tautomer: %s', smiles)
score = 0
# Add aromatic ring scores
ssr = Chem.GetSymmSSSR(t)
for ring in ssr:
btypes = {t.GetBondBetweenAtoms(*pair).GetBondType() for pair in pairwise(ring)}
elements = {t.GetAtomWithIdx(idx).GetAtomicNum() for idx in ring}
if btypes == {BondType.AROMATIC}:
log.debug('Score +100 (aromatic ring)')
score += 100
if elements == {6}:
log.debug('Score +150 (carbocyclic aromatic ring)')
score += 150 # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['ring']]
# Add SMARTS scores
for tscore in self.scores:
for match in t.GetSubstructMatches(tscore.smarts):
log.debug('Score %+d (%s)', tscore.score, tscore.name)
score += tscore.score # depends on [control=['for'], data=[]] # depends on [control=['for'], data=['tscore']]
# Add (P,S,Se,Te)-H scores
for atom in t.GetAtoms():
if atom.GetAtomicNum() in {15, 16, 34, 52}:
hs = atom.GetTotalNumHs()
if hs:
log.debug('Score %+d (%s-H bonds)', -hs, atom.GetSymbol())
score -= hs # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['atom']]
# Set as highest if score higher or if score equal and smiles comes first alphabetically
if not highest or highest['score'] < score or (highest['score'] == score and smiles < highest['smiles']):
log.debug('New highest tautomer: %s (%s)', smiles, score)
highest = {'smiles': smiles, 'tautomer': t, 'score': score} # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['t']]
return highest['tautomer'] |
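An untested usage sketch, assuming this is the MolVS-style TautomerCanonicalizer with its default `scores` table; pentane-2,4-dione is a convenient molecule with enol tautomers.

from rdkit import Chem
from molvs.tautomer import TautomerCanonicalizer  # assumption: MolVS is installed

mol = Chem.MolFromSmiles('CC(=O)CC(=O)C')  # pentane-2,4-dione (keto form)
canonical = TautomerCanonicalizer().canonicalize(mol)
print(Chem.MolToSmiles(canonical))  # expected to stay in the higher-scoring keto form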
def get_tunnel(self, tunnel_id):
"""Get information for a tunnel given its ID."""
method = 'GET'
endpoint = '/rest/v1/{}/tunnels/{}'.format(
self.client.sauce_username, tunnel_id)
return self.client.request(method, endpoint) | def function[get_tunnel, parameter[self, tunnel_id]]:
constant[Get information for a tunnel given its ID.]
variable[method] assign[=] constant[GET]
variable[endpoint] assign[=] call[constant[/rest/v1/{}/tunnels/{}].format, parameter[name[self].client.sauce_username, name[tunnel_id]]]
return[call[name[self].client.request, parameter[name[method], name[endpoint]]]] | keyword[def] identifier[get_tunnel] ( identifier[self] , identifier[tunnel_id] ):
literal[string]
identifier[method] = literal[string]
identifier[endpoint] = literal[string] . identifier[format] (
identifier[self] . identifier[client] . identifier[sauce_username] , identifier[tunnel_id] )
keyword[return] identifier[self] . identifier[client] . identifier[request] ( identifier[method] , identifier[endpoint] ) | def get_tunnel(self, tunnel_id):
"""Get information for a tunnel given its ID."""
method = 'GET'
endpoint = '/rest/v1/{}/tunnels/{}'.format(self.client.sauce_username, tunnel_id)
return self.client.request(method, endpoint) |
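A short, untested sketch assuming the sauceclient layout, where this method hangs off a `tunnels` resource on the client; the credentials and tunnel ID are placeholders.

from sauceclient import SauceClient

sauce = SauceClient('my-user', 'my-access-key')
info = sauce.tunnels.get_tunnel('a1b2c3d4e5f6')  # JSON dict for one tunnel
print(info.get('status'))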
def enrollment_upload(
self,
enrollment_id,
audio_file,
):
"""
Upload Enrollment Data. Uses PUT to /enrollments/<enrollment_id> interface.
:Args:
* *enrollment_id*: (str) Enrollment's ID
* *audio_file*: (str) Path to the audio file of the recorded words. Not required for phone enrollments.
"""
files = {
"file": os.path.basename(audio_file),
os.path.basename(audio_file): open(audio_file, 'rb')
}
response = self._put(url.enrollments_id.format(id=enrollment_id), files=files)
self._check_response(response, 202) | def function[enrollment_upload, parameter[self, enrollment_id, audio_file]]:
constant[
Upload Enrollment Data. Uses PUT to /enrollments/<enrollment_id> interface.
:Args:
* *enrollment_id*: (str) Enrollment's ID
* *audio_file*: (str) Path to the audio file of the recorded words. Not required for phone enrollments.
]
variable[files] assign[=] dictionary[[<ast.Constant object at 0x7da207f983a0>, <ast.Call object at 0x7da204620250>], [<ast.Call object at 0x7da2046221a0>, <ast.Call object at 0x7da2046238b0>]]
variable[response] assign[=] call[name[self]._put, parameter[call[name[url].enrollments_id.format, parameter[]]]]
call[name[self]._check_response, parameter[name[response], constant[202]]] | keyword[def] identifier[enrollment_upload] (
identifier[self] ,
identifier[enrollment_id] ,
identifier[audio_file] ,
):
literal[string]
identifier[files] ={
literal[string] : identifier[os] . identifier[path] . identifier[basename] ( identifier[audio_file] ),
identifier[os] . identifier[path] . identifier[basename] ( identifier[audio_file] ): identifier[open] ( identifier[audio_file] , literal[string] )
}
identifier[response] = identifier[self] . identifier[_put] ( identifier[url] . identifier[enrollments_id] . identifier[format] ( identifier[id] = identifier[enrollment_id] ), identifier[files] = identifier[files] )
identifier[self] . identifier[_check_response] ( identifier[response] , literal[int] ) | def enrollment_upload(self, enrollment_id, audio_file):
"""
Upload Enrollment Data. Uses PUT to /enrollments/<enrollment_id> interface.
:Args:
* *enrollment_id*: (str) Enrollment's ID
* *audio_file*: (str) Path to the audio file of the recorded words. Not required for phone enrollments.
"""
files = {'file': os.path.basename(audio_file), os.path.basename(audio_file): open(audio_file, 'rb')}
response = self._put(url.enrollments_id.format(id=enrollment_id), files=files)
self._check_response(response, 202) |
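A usage sketch with placeholder values; `client` stands in for an authenticated instance of the class this method belongs to. Note the multipart layout above: the 'file' field carries the basename, while the audio bytes are posted under the basename itself.

client.enrollment_upload(
    enrollment_id='9f1c0a7e',     # placeholder enrollment ID
    audio_file='/tmp/words.wav',  # placeholder recording of the enrollment words
)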
def create_file_vdev(size, *vdevs):
'''
Creates file based virtual devices for a zpool
CLI Example:
.. code-block:: bash
salt '*' zpool.create_file_vdev 7G /path/to/vdev1 [/path/to/vdev2] [...]
.. note::
Depending on file size, the above command may take a while to return.
'''
ret = OrderedDict()
err = OrderedDict()
_mkfile_cmd = salt.utils.path.which('mkfile')
for vdev in vdevs:
if os.path.isfile(vdev):
ret[vdev] = 'existed'
else:
res = __salt__['cmd.run_all'](
'{mkfile} {size} {vdev}'.format(
mkfile=_mkfile_cmd,
size=size,
vdev=vdev,
),
python_shell=False,
)
if res['retcode'] != 0:
if 'stderr' in res and ':' in res['stderr']:
ret[vdev] = 'failed'
err[vdev] = ":".join(res['stderr'].strip().split(':')[1:])
else:
ret[vdev] = 'created'
if err:
ret['error'] = err
return ret | def function[create_file_vdev, parameter[size]]:
constant[
Creates file based virtual devices for a zpool
CLI Example:
.. code-block:: bash
salt '*' zpool.create_file_vdev 7G /path/to/vdev1 [/path/to/vdev2] [...]
.. note::
Depending on file size, the above command may take a while to return.
]
variable[ret] assign[=] call[name[OrderedDict], parameter[]]
variable[err] assign[=] call[name[OrderedDict], parameter[]]
variable[_mkfile_cmd] assign[=] call[name[salt].utils.path.which, parameter[constant[mkfile]]]
for taget[name[vdev]] in starred[name[vdevs]] begin[:]
if call[name[os].path.isfile, parameter[name[vdev]]] begin[:]
call[name[ret]][name[vdev]] assign[=] constant[existed]
if name[err] begin[:]
call[name[ret]][constant[error]] assign[=] name[err]
return[name[ret]] | keyword[def] identifier[create_file_vdev] ( identifier[size] ,* identifier[vdevs] ):
literal[string]
identifier[ret] = identifier[OrderedDict] ()
identifier[err] = identifier[OrderedDict] ()
identifier[_mkfile_cmd] = identifier[salt] . identifier[utils] . identifier[path] . identifier[which] ( literal[string] )
keyword[for] identifier[vdev] keyword[in] identifier[vdevs] :
keyword[if] identifier[os] . identifier[path] . identifier[isfile] ( identifier[vdev] ):
identifier[ret] [ identifier[vdev] ]= literal[string]
keyword[else] :
identifier[res] = identifier[__salt__] [ literal[string] ](
literal[string] . identifier[format] (
identifier[mkfile] = identifier[_mkfile_cmd] ,
identifier[size] = identifier[size] ,
identifier[vdev] = identifier[vdev] ,
),
identifier[python_shell] = keyword[False] ,
)
keyword[if] identifier[res] [ literal[string] ]!= literal[int] :
keyword[if] literal[string] keyword[in] identifier[res] keyword[and] literal[string] keyword[in] identifier[res] [ literal[string] ]:
identifier[ret] [ identifier[vdev] ]= literal[string]
identifier[err] [ identifier[vdev] ]= literal[string] . identifier[join] ( identifier[res] [ literal[string] ]. identifier[strip] (). identifier[split] ( literal[string] )[ literal[int] :])
keyword[else] :
identifier[ret] [ identifier[vdev] ]= literal[string]
keyword[if] identifier[err] :
identifier[ret] [ literal[string] ]= identifier[err]
keyword[return] identifier[ret] | def create_file_vdev(size, *vdevs):
"""
Creates file based virtual devices for a zpool
CLI Example:
.. code-block:: bash
salt '*' zpool.create_file_vdev 7G /path/to/vdev1 [/path/to/vdev2] [...]
.. note::
Depending on file size, the above command may take a while to return.
"""
ret = OrderedDict()
err = OrderedDict()
_mkfile_cmd = salt.utils.path.which('mkfile')
for vdev in vdevs:
if os.path.isfile(vdev):
ret[vdev] = 'existed' # depends on [control=['if'], data=[]]
else:
res = __salt__['cmd.run_all']('{mkfile} {size} {vdev}'.format(mkfile=_mkfile_cmd, size=size, vdev=vdev), python_shell=False)
if res['retcode'] != 0:
if 'stderr' in res and ':' in res['stderr']:
ret[vdev] = 'failed'
err[vdev] = ':'.join(res['stderr'].strip().split(':')[1:]) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
ret[vdev] = 'created' # depends on [control=['for'], data=['vdev']]
if err:
ret['error'] = err # depends on [control=['if'], data=[]]
return ret |
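Besides the documented `salt '*' ...` form, the same module function can be driven from Python; a sketch using Salt's LocalClient, where the size and paths are placeholders.

import salt.client

local = salt.client.LocalClient()
result = local.cmd('*', 'zpool.create_file_vdev',
                   ['64m', '/var/tmp/vdev1', '/var/tmp/vdev2'])
print(result)  # per-minion dict such as {'/var/tmp/vdev1': 'created', ...}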
def validate_term(self, term, latest_only=False):
""" Check that a term is present in the metadata dictionary.
raises :exc:`~billy.scrape.NoDataForPeriod` if term is invalid
:param term: string representing term to check
:param latest_only: if True, will raise exception if term is not
the current term (default: False)
"""
if latest_only:
if term == self.metadata['terms'][-1]['name']:
return True
else:
raise NoDataForPeriod(term)
for t in self.metadata['terms']:
if term == t['name']:
return True
raise NoDataForPeriod(term) | def function[validate_term, parameter[self, term, latest_only]]:
constant[ Check that a term is present in the metadata dictionary.
raises :exc:`~billy.scrape.NoDataForPeriod` if term is invalid
:param term: string representing term to check
:param latest_only: if True, will raise exception if term is not
the current term (default: False)
]
if name[latest_only] begin[:]
if compare[name[term] equal[==] call[call[call[name[self].metadata][constant[terms]]][<ast.UnaryOp object at 0x7da2041da440>]][constant[name]]] begin[:]
return[constant[True]]
for taget[name[t]] in starred[call[name[self].metadata][constant[terms]]] begin[:]
if compare[name[term] equal[==] call[name[t]][constant[name]]] begin[:]
return[constant[True]]
<ast.Raise object at 0x7da2041dad70> | keyword[def] identifier[validate_term] ( identifier[self] , identifier[term] , identifier[latest_only] = keyword[False] ):
literal[string]
keyword[if] identifier[latest_only] :
keyword[if] identifier[term] == identifier[self] . identifier[metadata] [ literal[string] ][- literal[int] ][ literal[string] ]:
keyword[return] keyword[True]
keyword[else] :
keyword[raise] identifier[NoDataForPeriod] ( identifier[term] )
keyword[for] identifier[t] keyword[in] identifier[self] . identifier[metadata] [ literal[string] ]:
keyword[if] identifier[term] == identifier[t] [ literal[string] ]:
keyword[return] keyword[True]
keyword[raise] identifier[NoDataForPeriod] ( identifier[term] ) | def validate_term(self, term, latest_only=False):
""" Check that a term is present in the metadata dictionary.
raises :exc:`~billy.scrape.NoDataForPeriod` if term is invalid
:param term: string representing term to check
:param latest_only: if True, will raise exception if term is not
the current term (default: False)
"""
if latest_only:
if term == self.metadata['terms'][-1]['name']:
return True # depends on [control=['if'], data=[]]
else:
raise NoDataForPeriod(term) # depends on [control=['if'], data=[]]
for t in self.metadata['terms']:
if term == t['name']:
return True # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['t']]
raise NoDataForPeriod(term) |
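A sketch of the metadata shape this check expects; `scraper` stands in for an instance whose metadata dict is populated, and the term names are illustrative.

scraper.metadata = {'terms': [{'name': '2009-2010'}, {'name': '2011-2012'}]}
scraper.validate_term('2011-2012')                     # True
scraper.validate_term('2009-2010', latest_only=True)   # raises NoDataForPeriod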
def set_item_metadata(self, token, item_id, element, value,
qualifier=None):
"""
Set the metadata associated with an item.
:param token: A valid token for the user in question.
:type token: string
:param item_id: The id of the item for which metadata will be set.
:type item_id: int | long
:param element: The metadata element name.
:type element: string
:param value: The metadata value for the field.
:type value: string
:param qualifier: (optional) The metadata qualifier. Defaults to empty
string.
:type qualifier: None | string
:returns: None.
:rtype: None
"""
parameters = dict()
parameters['token'] = token
parameters['itemId'] = item_id
parameters['element'] = element
parameters['value'] = value
if qualifier:
parameters['qualifier'] = qualifier
response = self.request('midas.item.setmetadata', parameters)
return response | def function[set_item_metadata, parameter[self, token, item_id, element, value, qualifier]]:
constant[
Set the metadata associated with an item.
:param token: A valid token for the user in question.
:type token: string
:param item_id: The id of the item for which metadata will be set.
:type item_id: int | long
:param element: The metadata element name.
:type element: string
:param value: The metadata value for the field.
:type value: string
:param qualifier: (optional) The metadata qualifier. Defaults to empty
string.
:type qualifier: None | string
:returns: None.
:rtype: None
]
variable[parameters] assign[=] call[name[dict], parameter[]]
call[name[parameters]][constant[token]] assign[=] name[token]
call[name[parameters]][constant[itemId]] assign[=] name[item_id]
call[name[parameters]][constant[element]] assign[=] name[element]
call[name[parameters]][constant[value]] assign[=] name[value]
if name[qualifier] begin[:]
call[name[parameters]][constant[qualifier]] assign[=] name[qualifier]
variable[response] assign[=] call[name[self].request, parameter[constant[midas.item.setmetadata], name[parameters]]]
return[name[response]] | keyword[def] identifier[set_item_metadata] ( identifier[self] , identifier[token] , identifier[item_id] , identifier[element] , identifier[value] ,
identifier[qualifier] = keyword[None] ):
literal[string]
identifier[parameters] = identifier[dict] ()
identifier[parameters] [ literal[string] ]= identifier[token]
identifier[parameters] [ literal[string] ]= identifier[item_id]
identifier[parameters] [ literal[string] ]= identifier[element]
identifier[parameters] [ literal[string] ]= identifier[value]
keyword[if] identifier[qualifier] :
identifier[parameters] [ literal[string] ]= identifier[qualifier]
identifier[response] = identifier[self] . identifier[request] ( literal[string] , identifier[parameters] )
keyword[return] identifier[response] | def set_item_metadata(self, token, item_id, element, value, qualifier=None):
"""
Set the metadata associated with an item.
:param token: A valid token for the user in question.
:type token: string
:param item_id: The id of the item for which metadata will be set.
:type item_id: int | long
:param element: The metadata element name.
:type element: string
:param value: The metadata value for the field.
:type value: string
:param qualifier: (optional) The metadata qualifier. Defaults to empty
string.
:type qualifier: None | string
:returns: None.
:rtype: None
"""
parameters = dict()
parameters['token'] = token
parameters['itemId'] = item_id
parameters['element'] = element
parameters['value'] = value
if qualifier:
parameters['qualifier'] = qualifier # depends on [control=['if'], data=[]]
response = self.request('midas.item.setmetadata', parameters)
return response |
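A usage sketch with placeholder values; `communicator` stands in for an instance of the Midas client class this method belongs to.

communicator.set_item_metadata(token='3f9ab2c4', item_id=42,
                               element='contributor', value='A. Author',
                               qualifier='primary')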
def get_t(self):
"""Returns the top border of the cell"""
cell_above = CellBorders(self.cell_attributes,
*self.cell.get_above_key_rect())
return cell_above.get_b() | def function[get_t, parameter[self]]:
constant[Returns the top border of the cell]
variable[cell_above] assign[=] call[name[CellBorders], parameter[name[self].cell_attributes, <ast.Starred object at 0x7da1b1600970>]]
return[call[name[cell_above].get_b, parameter[]]] | keyword[def] identifier[get_t] ( identifier[self] ):
literal[string]
identifier[cell_above] = identifier[CellBorders] ( identifier[self] . identifier[cell_attributes] ,
* identifier[self] . identifier[cell] . identifier[get_above_key_rect] ())
keyword[return] identifier[cell_above] . identifier[get_b] () | def get_t(self):
"""Returns the top border of the cell"""
cell_above = CellBorders(self.cell_attributes, *self.cell.get_above_key_rect())
return cell_above.get_b() |
def add_aggregate(self, name, data_fac):
"""
Add an aggregate target to this nest.
Since nests added after the aggregate can access the construct returned
by the factory function, it can be mutated to provide additional
values for use when the decorated function is called.
To do something with the aggregates, you must :meth:`SConsWrap.pop`
nest levels created between addition of the aggregate and then can add
any normal targets you would like which take advantage of the targets
added to the data structure.
:param name: Name for the target in the nest
:param data_fac: a nullary factory function which will be called
immediately for each of the current control dictionaries and stored
in each dictionary with the given name as in
:meth:`SConsWrap.add_target`.
"""
@self.add_target(name)
def wrap(outdir, c):
return data_fac() | def function[add_aggregate, parameter[self, name, data_fac]]:
constant[
Add an aggregate target to this nest.
Since nests added after the aggregate can access the construct returned
by the factory function, it can be mutated to provide additional
values for use when the decorated function is called.
To do something with the aggregates, you must :meth:`SConsWrap.pop`
nest levels created between addition of the aggregate and then can add
any normal targets you would like which take advantage of the targets
added to the data structure.
:param name: Name for the target in the nest
:param data_fac: a nullary factory function which will be called
immediately for each of the current control dictionaries and stored
in each dictionary with the given name as in
:meth:`SConsWrap.add_target`.
]
def function[wrap, parameter[outdir, c]]:
return[call[name[data_fac], parameter[]]] | keyword[def] identifier[add_aggregate] ( identifier[self] , identifier[name] , identifier[data_fac] ):
literal[string]
@ identifier[self] . identifier[add_target] ( identifier[name] )
keyword[def] identifier[wrap] ( identifier[outdir] , identifier[c] ):
keyword[return] identifier[data_fac] () | def add_aggregate(self, name, data_fac):
"""
Add an aggregate target to this nest.
Since nests added after the aggregate can access the construct returned
by the factory function, it can be mutated to provide additional
values for use when the decorated function is called.
To do something with the aggregates, you must :meth:`SConsWrap.pop`
nest levels created between addition of the aggregate and then can add
any normal targets you would like which take advantage of the targets
added to the data structure.
:param name: Name for the target in the nest
:param data_fac: a nullary factory function which will be called
immediately for each of the current control dictionaries and stored
in each dictionary with the given name as in
:meth:`SConsWrap.add_target`.
"""
@self.add_target(name)
def wrap(outdir, c):
return data_fac() |
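A sketch of the aggregate/pop pattern the docstring describes, in nestly's SConsWrap style; `run_simulation` and `summarize` are stand-ins for real builders.

nest.add('seed', [1, 2, 3])
nest.add_aggregate('all_results', list)  # one shared list per outer control dict

@nest.add_target()
def result(outdir, c):
    r = run_simulation(outdir, c['seed'])  # stand-in per-seed target
    c['all_results'].append(r)             # mutate the aggregate
    return r

nest.pop('seed')  # climb back above the 'seed' level

@nest.add_target()
def summary(outdir, c):
    return summarize(c['all_results'])  # now sees every seed's result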
def sel(self, indexers=None, method=None, tolerance=None, drop=False,
**indexers_kwargs):
"""Return a new DataArray whose dataset is given by selecting
index labels along the specified dimension(s).
.. warning::
Do not try to assign values when using any of the indexing methods
``isel`` or ``sel``::
da = xr.DataArray([0, 1, 2, 3], dims=['x'])
# DO NOT do this
da.isel(x=[0, 1, 2])[1] = -1
Assigning values with the chained indexing using ``.sel`` or
``.isel`` fails silently.
See Also
--------
Dataset.sel
DataArray.isel
"""
ds = self._to_temp_dataset().sel(
indexers=indexers, drop=drop, method=method, tolerance=tolerance,
**indexers_kwargs)
return self._from_temp_dataset(ds) | def function[sel, parameter[self, indexers, method, tolerance, drop]]:
constant[Return a new DataArray whose dataset is given by selecting
index labels along the specified dimension(s).
.. warning::
Do not try to assign values when using any of the indexing methods
``isel`` or ``sel``::
da = xr.DataArray([0, 1, 2, 3], dims=['x'])
# DO NOT do this
da.isel(x=[0, 1, 2])[1] = -1
Assigning values with the chained indexing using ``.sel`` or
``.isel`` fails silently.
See Also
--------
Dataset.sel
DataArray.isel
]
variable[ds] assign[=] call[call[name[self]._to_temp_dataset, parameter[]].sel, parameter[]]
return[call[name[self]._from_temp_dataset, parameter[name[ds]]]] | keyword[def] identifier[sel] ( identifier[self] , identifier[indexers] = keyword[None] , identifier[method] = keyword[None] , identifier[tolerance] = keyword[None] , identifier[drop] = keyword[False] ,
** identifier[indexers_kwargs] ):
literal[string]
identifier[ds] = identifier[self] . identifier[_to_temp_dataset] (). identifier[sel] (
identifier[indexers] = identifier[indexers] , identifier[drop] = identifier[drop] , identifier[method] = identifier[method] , identifier[tolerance] = identifier[tolerance] ,
** identifier[indexers_kwargs] )
keyword[return] identifier[self] . identifier[_from_temp_dataset] ( identifier[ds] ) | def sel(self, indexers=None, method=None, tolerance=None, drop=False, **indexers_kwargs):
"""Return a new DataArray whose dataset is given by selecting
index labels along the specified dimension(s).
.. warning::
Do not try to assign values when using any of the indexing methods
``isel`` or ``sel``::
da = xr.DataArray([0, 1, 2, 3], dims=['x'])
# DO NOT do this
da.isel(x=[0, 1, 2])[1] = -1
Assigning values with the chained indexing using ``.sel`` or
``.isel`` fails silently.
See Also
--------
Dataset.sel
DataArray.isel
"""
ds = self._to_temp_dataset().sel(indexers=indexers, drop=drop, method=method, tolerance=tolerance, **indexers_kwargs)
return self._from_temp_dataset(ds) |
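A small runnable sketch of label-based selection, including the nearest-neighbour lookup that `method`/`tolerance` enable, and the `.loc` assignment the warning recommends over chained indexing.

import numpy as np
import xarray as xr

da = xr.DataArray(np.arange(4.0), dims='x', coords={'x': [0.0, 0.1, 0.2, 0.3]})
print(da.sel(x=0.1).item())                     # exact label -> 1.0
print(da.sel(x=0.12, method='nearest').item())  # snaps to x=0.1 -> 1.0
da.loc[{'x': 0.1}] = -1.0                       # assign via .loc, not chained sel/isel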
def gibbs_ask(X, e, bn, N):
"""[Fig. 14.16]
>>> seed(1017)
>>> gibbs_ask('Burglary', dict(JohnCalls=T, MaryCalls=T), burglary, 1000
... ).show_approx()
'False: 0.738, True: 0.262'
"""
assert X not in e, "Query variable must be distinct from evidence"
counts = dict((x, 0) for x in bn.variable_values(X)) # bold N in Fig. 14.16
Z = [var for var in bn.vars if var not in e]
state = dict(e) # boldface x in Fig. 14.16
for Zi in Z:
state[Zi] = choice(bn.variable_values(Zi))
for j in xrange(N):
for Zi in Z:
state[Zi] = markov_blanket_sample(Zi, state, bn)
counts[state[X]] += 1
return ProbDist(X, counts) | def function[gibbs_ask, parameter[X, e, bn, N]]:
constant[[Fig. 14.16]
>>> seed(1017)
>>> gibbs_ask('Burglary', dict(JohnCalls=T, MaryCalls=T), burglary, 1000
... ).show_approx()
'False: 0.738, True: 0.262'
]
assert[compare[name[X] <ast.NotIn object at 0x7da2590d7190> name[e]]]
variable[counts] assign[=] call[name[dict], parameter[<ast.GeneratorExp object at 0x7da2054a5480>]]
variable[Z] assign[=] <ast.ListComp object at 0x7da2054a4100>
variable[state] assign[=] call[name[dict], parameter[name[e]]]
for taget[name[Zi]] in starred[name[Z]] begin[:]
call[name[state]][name[Zi]] assign[=] call[name[choice], parameter[call[name[bn].variable_values, parameter[name[Zi]]]]]
for taget[name[j]] in starred[call[name[xrange], parameter[name[N]]]] begin[:]
for taget[name[Zi]] in starred[name[Z]] begin[:]
call[name[state]][name[Zi]] assign[=] call[name[markov_blanket_sample], parameter[name[Zi], name[state], name[bn]]]
<ast.AugAssign object at 0x7da2054a7c10>
return[call[name[ProbDist], parameter[name[X], name[counts]]]] | keyword[def] identifier[gibbs_ask] ( identifier[X] , identifier[e] , identifier[bn] , identifier[N] ):
literal[string]
keyword[assert] identifier[X] keyword[not] keyword[in] identifier[e] , literal[string]
identifier[counts] = identifier[dict] (( identifier[x] , literal[int] ) keyword[for] identifier[x] keyword[in] identifier[bn] . identifier[variable_values] ( identifier[X] ))
identifier[Z] =[ identifier[var] keyword[for] identifier[var] keyword[in] identifier[bn] . identifier[vars] keyword[if] identifier[var] keyword[not] keyword[in] identifier[e] ]
identifier[state] = identifier[dict] ( identifier[e] )
keyword[for] identifier[Zi] keyword[in] identifier[Z] :
identifier[state] [ identifier[Zi] ]= identifier[choice] ( identifier[bn] . identifier[variable_values] ( identifier[Zi] ))
keyword[for] identifier[j] keyword[in] identifier[xrange] ( identifier[N] ):
keyword[for] identifier[Zi] keyword[in] identifier[Z] :
identifier[state] [ identifier[Zi] ]= identifier[markov_blanket_sample] ( identifier[Zi] , identifier[state] , identifier[bn] )
identifier[counts] [ identifier[state] [ identifier[X] ]]+= literal[int]
keyword[return] identifier[ProbDist] ( identifier[X] , identifier[counts] ) | def gibbs_ask(X, e, bn, N):
"""[Fig. 14.16]
>>> seed(1017)
>>> gibbs_ask('Burglary', dict(JohnCalls=T, MaryCalls=T), burglary, 1000
... ).show_approx()
'False: 0.738, True: 0.262'
"""
assert X not in e, 'Query variable must be distinct from evidence'
counts = dict(((x, 0) for x in bn.variable_values(X))) # bold N in Fig. 14.16
Z = [var for var in bn.vars if var not in e]
state = dict(e) # boldface x in Fig. 14.16
for Zi in Z:
state[Zi] = choice(bn.variable_values(Zi)) # depends on [control=['for'], data=['Zi']]
for j in xrange(N):
for Zi in Z:
state[Zi] = markov_blanket_sample(Zi, state, bn)
counts[state[X]] += 1 # depends on [control=['for'], data=['Zi']] # depends on [control=['for'], data=[]]
return ProbDist(X, counts) |
def fetch(self, category=CATEGORY_MESSAGE, from_date=DEFAULT_DATETIME):
"""Fetch the messages from the channel.
This method fetches the messages stored on the channel that were
sent since the given date.
:param category: the category of items to fetch
:param from_date: obtain messages sent since this date
:returns: a generator of messages
"""
if not from_date:
from_date = DEFAULT_DATETIME
from_date = datetime_to_utc(from_date)
latest = datetime_utcnow().timestamp()
kwargs = {'from_date': from_date, 'latest': latest}
items = super().fetch(category, **kwargs)
return items | def function[fetch, parameter[self, category, from_date]]:
constant[Fetch the messages from the channel.
This method fetches the messages stored on the channel that were
sent since the given date.
:param category: the category of items to fetch
:param from_date: obtain messages sent since this date
:returns: a generator of messages
]
if <ast.UnaryOp object at 0x7da1b03a5060> begin[:]
variable[from_date] assign[=] name[DEFAULT_DATETIME]
variable[from_date] assign[=] call[name[datetime_to_utc], parameter[name[from_date]]]
variable[latest] assign[=] call[call[name[datetime_utcnow], parameter[]].timestamp, parameter[]]
variable[kwargs] assign[=] dictionary[[<ast.Constant object at 0x7da1b03a6530>, <ast.Constant object at 0x7da1b03a4550>], [<ast.Name object at 0x7da1b03a50c0>, <ast.Name object at 0x7da1b03a5000>]]
variable[items] assign[=] call[call[name[super], parameter[]].fetch, parameter[name[category]]]
return[name[items]] | keyword[def] identifier[fetch] ( identifier[self] , identifier[category] = identifier[CATEGORY_MESSAGE] , identifier[from_date] = identifier[DEFAULT_DATETIME] ):
literal[string]
keyword[if] keyword[not] identifier[from_date] :
identifier[from_date] = identifier[DEFAULT_DATETIME]
identifier[from_date] = identifier[datetime_to_utc] ( identifier[from_date] )
identifier[latest] = identifier[datetime_utcnow] (). identifier[timestamp] ()
identifier[kwargs] ={ literal[string] : identifier[from_date] , literal[string] : identifier[latest] }
identifier[items] = identifier[super] (). identifier[fetch] ( identifier[category] ,** identifier[kwargs] )
keyword[return] identifier[items] | def fetch(self, category=CATEGORY_MESSAGE, from_date=DEFAULT_DATETIME):
"""Fetch the messages from the channel.
This method fetches the messages stored on the channel that were
sent since the given date.
:param category: the category of items to fetch
:param from_date: obtain messages sent since this date
:returns: a generator of messages
"""
if not from_date:
from_date = DEFAULT_DATETIME # depends on [control=['if'], data=[]]
from_date = datetime_to_utc(from_date)
latest = datetime_utcnow().timestamp()
kwargs = {'from_date': from_date, 'latest': latest}
items = super().fetch(category, **kwargs)
return items |
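An untested sketch of driving this backend perceval-style; the channel ID and token are placeholders.

import datetime
from perceval.backends.core.slack import Slack

backend = Slack(channel='C0123456789', api_token='xoxb-placeholder')
for message in backend.fetch(from_date=datetime.datetime(2020, 1, 1)):
    print(message['data'].get('text'))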
def split_recursive(
self,
depth: int,
min_width: int,
min_height: int,
max_horizontal_ratio: float,
max_vertical_ratio: float,
seed: Optional[tcod.random.Random] = None,
) -> None:
"""Divide this partition recursively.
Args:
depth (int): The maximum depth to divide this object recursively.
min_width (int): The minimum width of any individual partition.
min_height (int): The minimum height of any individual partition.
max_horizontal_ratio (float):
Prevent creating a horizontal ratio more extreme than this.
max_vertical_ratio (float):
Prevent creating a vertical ratio more extreme than this.
seed (Optional[tcod.random.Random]):
The random number generator to use.
"""
cdata = self._as_cdata()
lib.TCOD_bsp_split_recursive(
cdata,
seed or ffi.NULL,
depth,
min_width,
min_height,
max_horizontal_ratio,
max_vertical_ratio,
)
self._unpack_bsp_tree(cdata) | def function[split_recursive, parameter[self, depth, min_width, min_height, max_horizontal_ratio, max_vertical_ratio, seed]]:
constant[Divide this partition recursively.
Args:
depth (int): The maximum depth to divide this object recursively.
min_width (int): The minimum width of any individual partition.
min_height (int): The minimum height of any individual partition.
max_horizontal_ratio (float):
Prevent creating a horizontal ratio more extreme than this.
max_vertical_ratio (float):
Prevent creating a vertical ratio more extreme than this.
seed (Optional[tcod.random.Random]):
The random number generator to use.
]
variable[cdata] assign[=] call[name[self]._as_cdata, parameter[]]
call[name[lib].TCOD_bsp_split_recursive, parameter[name[cdata], <ast.BoolOp object at 0x7da18eb56e30>, name[depth], name[min_width], name[min_height], name[max_horizontal_ratio], name[max_vertical_ratio]]]
call[name[self]._unpack_bsp_tree, parameter[name[cdata]]] | keyword[def] identifier[split_recursive] (
identifier[self] ,
identifier[depth] : identifier[int] ,
identifier[min_width] : identifier[int] ,
identifier[min_height] : identifier[int] ,
identifier[max_horizontal_ratio] : identifier[float] ,
identifier[max_vertical_ratio] : identifier[float] ,
identifier[seed] : identifier[Optional] [ identifier[tcod] . identifier[random] . identifier[Random] ]= keyword[None] ,
)-> keyword[None] :
literal[string]
identifier[cdata] = identifier[self] . identifier[_as_cdata] ()
identifier[lib] . identifier[TCOD_bsp_split_recursive] (
identifier[cdata] ,
identifier[seed] keyword[or] identifier[ffi] . identifier[NULL] ,
identifier[depth] ,
identifier[min_width] ,
identifier[min_height] ,
identifier[max_horizontal_ratio] ,
identifier[max_vertical_ratio] ,
)
identifier[self] . identifier[_unpack_bsp_tree] ( identifier[cdata] ) | def split_recursive(self, depth: int, min_width: int, min_height: int, max_horizontal_ratio: float, max_vertical_ratio: float, seed: Optional[tcod.random.Random]=None) -> None:
"""Divide this partition recursively.
Args:
depth (int): The maximum depth to divide this object recursively.
min_width (int): The minimum width of any individual partition.
min_height (int): The minimum height of any individual partition.
max_horizontal_ratio (float):
Prevent creating a horizontal ratio more extreme than this.
max_vertical_ratio (float):
Prevent creating a vertical ratio more extreme than this.
seed (Optional[tcod.random.Random]):
The random number generator to use.
"""
cdata = self._as_cdata()
lib.TCOD_bsp_split_recursive(cdata, seed or ffi.NULL, depth, min_width, min_height, max_horizontal_ratio, max_vertical_ratio)
self._unpack_bsp_tree(cdata) |
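For context, a small usage sketch of the python-tcod BSP API that split_recursive belongs to; the parameter values are arbitrary, and iterating pre_order() to find childless nodes is the usual way to collect the final partitions.

import tcod.bsp

bsp = tcod.bsp.BSP(x=0, y=0, width=80, height=50)
bsp.split_recursive(
    depth=4,
    min_width=6,
    min_height=6,
    max_horizontal_ratio=1.5,
    max_vertical_ratio=1.5,
)
# Nodes without children are the final partitions.
for node in bsp.pre_order():
    if not node.children:
        print(node.x, node.y, node.width, node.height)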
def send_identity(self):
"""
Send the identity of the service.
"""
service_name = {'service_name': self.messaging._service_name}
service_name = _json.dumps(service_name).encode('utf8')
identify_frame = (b'',
b'IDENT',
_json.dumps([]).encode('utf8'),
service_name)
# NOTE: Have to do this manually since we built the frame
if self.messaging._run_control_loop:
# pep8 alias
send = self.messaging.command_socket.send_multipart
self.messaging.add_callback(send, identify_frame)
else:
self.messaging.command_socket.send_multipart(identify_frame)
self.logger.info(' Service Identity sent: %s',
self.messaging._service_name)
if self.identity_callback:
self.identity_callback() | def function[send_identity, parameter[self]]:
constant[
Send the identity of the service.
]
variable[service_name] assign[=] dictionary[[<ast.Constant object at 0x7da1b0e3af20>], [<ast.Attribute object at 0x7da1b0e3bca0>]]
variable[service_name] assign[=] call[call[name[_json].dumps, parameter[name[service_name]]].encode, parameter[constant[utf8]]]
variable[identify_frame] assign[=] tuple[[<ast.Constant object at 0x7da1b0e3bd90>, <ast.Constant object at 0x7da1b0e39870>, <ast.Call object at 0x7da1b0e3a440>, <ast.Name object at 0x7da1b0e3b2b0>]]
if name[self].messaging._run_control_loop begin[:]
variable[send] assign[=] name[self].messaging.command_socket.send_multipart
call[name[self].messaging.add_callback, parameter[name[send], name[identify_frame]]]
call[name[self].logger.info, parameter[constant[ Service Identity sent: %s], name[self].messaging._service_name]]
if name[self].identity_callback begin[:]
call[name[self].identity_callback, parameter[]] | keyword[def] identifier[send_identity] ( identifier[self] ):
literal[string]
identifier[service_name] ={ literal[string] : identifier[self] . identifier[messaging] . identifier[_service_name] }
identifier[service_name] = identifier[_json] . identifier[dumps] ( identifier[service_name] ). identifier[encode] ( literal[string] )
identifier[identify_frame] =( literal[string] ,
literal[string] ,
identifier[_json] . identifier[dumps] ([]). identifier[encode] ( literal[string] ),
identifier[service_name] )
keyword[if] identifier[self] . identifier[messaging] . identifier[_run_control_loop] :
identifier[send] = identifier[self] . identifier[messaging] . identifier[command_socket] . identifier[send_multipart]
identifier[self] . identifier[messaging] . identifier[add_callback] ( identifier[send] , identifier[identify_frame] )
keyword[else] :
identifier[self] . identifier[messaging] . identifier[command_socket] . identifier[send_multipart] ( identifier[identify_frame] )
identifier[self] . identifier[logger] . identifier[info] ( literal[string] ,
identifier[self] . identifier[messaging] . identifier[_service_name] )
keyword[if] identifier[self] . identifier[identity_callback] :
identifier[self] . identifier[identity_callback] () | def send_identity(self):
"""
Send the identity of the service.
"""
service_name = {'service_name': self.messaging._service_name}
service_name = _json.dumps(service_name).encode('utf8')
identify_frame = (b'', b'IDENT', _json.dumps([]).encode('utf8'), service_name)
# NOTE: Have to do this manually since we built the frame
if self.messaging._run_control_loop:
# pep8 alias
send = self.messaging.command_socket.send_multipart
self.messaging.add_callback(send, identify_frame) # depends on [control=['if'], data=[]]
else:
self.messaging.command_socket.send_multipart(identify_frame)
self.logger.info(' Service Identity sent: %s', self.messaging._service_name)
if self.identity_callback:
self.identity_callback() # depends on [control=['if'], data=[]] |
def _connect(self):
"""Establish connection to MySQL Database."""
if self._connParams:
self._conn = MySQLdb.connect(**self._connParams)
else:
self._conn = MySQLdb.connect('') | def function[_connect, parameter[self]]:
constant[Establish connection to MySQL Database.]
if name[self]._connParams begin[:]
name[self]._conn assign[=] call[name[MySQLdb].connect, parameter[]] | keyword[def] identifier[_connect] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[_connParams] :
identifier[self] . identifier[_conn] = identifier[MySQLdb] . identifier[connect] (** identifier[self] . identifier[_connParams] )
keyword[else] :
identifier[self] . identifier[_conn] = identifier[MySQLdb] . identifier[connect] ( literal[string] ) | def _connect(self):
"""Establish connection to MySQL Database."""
if self._connParams:
self._conn = MySQLdb.connect(**self._connParams) # depends on [control=['if'], data=[]]
else:
self._conn = MySQLdb.connect('') |
def match_file(filename, exclude):
"""Return True if file is okay for modifying/recursing."""
base_name = os.path.basename(filename)
if base_name.startswith('.'):
return False
for pattern in exclude:
if fnmatch.fnmatch(base_name, pattern):
return False
if fnmatch.fnmatch(filename, pattern):
return False
if not os.path.isdir(filename) and not is_python_file(filename):
return False
return True | def function[match_file, parameter[filename, exclude]]:
constant[Return True if file is okay for modifying/recursing.]
variable[base_name] assign[=] call[name[os].path.basename, parameter[name[filename]]]
if call[name[base_name].startswith, parameter[constant[.]]] begin[:]
return[constant[False]]
for taget[name[pattern]] in starred[name[exclude]] begin[:]
if call[name[fnmatch].fnmatch, parameter[name[base_name], name[pattern]]] begin[:]
return[constant[False]]
if call[name[fnmatch].fnmatch, parameter[name[filename], name[pattern]]] begin[:]
return[constant[False]]
if <ast.BoolOp object at 0x7da18dc986d0> begin[:]
return[constant[False]]
return[constant[True]] | keyword[def] identifier[match_file] ( identifier[filename] , identifier[exclude] ):
literal[string]
identifier[base_name] = identifier[os] . identifier[path] . identifier[basename] ( identifier[filename] )
keyword[if] identifier[base_name] . identifier[startswith] ( literal[string] ):
keyword[return] keyword[False]
keyword[for] identifier[pattern] keyword[in] identifier[exclude] :
keyword[if] identifier[fnmatch] . identifier[fnmatch] ( identifier[base_name] , identifier[pattern] ):
keyword[return] keyword[False]
keyword[if] identifier[fnmatch] . identifier[fnmatch] ( identifier[filename] , identifier[pattern] ):
keyword[return] keyword[False]
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[isdir] ( identifier[filename] ) keyword[and] keyword[not] identifier[is_python_file] ( identifier[filename] ):
keyword[return] keyword[False]
keyword[return] keyword[True] | def match_file(filename, exclude):
"""Return True if file is okay for modifying/recursing."""
base_name = os.path.basename(filename)
if base_name.startswith('.'):
return False # depends on [control=['if'], data=[]]
for pattern in exclude:
if fnmatch.fnmatch(base_name, pattern):
return False # depends on [control=['if'], data=[]]
if fnmatch.fnmatch(filename, pattern):
return False # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['pattern']]
if not os.path.isdir(filename) and (not is_python_file(filename)):
return False # depends on [control=['if'], data=[]]
return True |
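The same filtering rules, reduced to a self-contained demonstration on made-up paths; the directory and Python-file checks from the original are omitted since they need a real filesystem.

import fnmatch
import os

exclude = ['build*', '*.min.py']  # illustrative patterns
for name in ['pkg/module.py', 'pkg/.hidden.py', 'build/gen.py']:
    base = os.path.basename(name)
    skipped = base.startswith('.') or any(
        fnmatch.fnmatch(base, p) or fnmatch.fnmatch(name, p)
        for p in exclude
    )
    print(name, '->', 'skip' if skipped else 'consider')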
def set_poll_func(self, func, func_err_handler=None):
'''Can be used to integrate a pulse client into an existing event loop.
The function will be passed a list of pollfd structs and a timeout value
(seconds, float); it is responsible for polling these fds, setting their
poll flags accordingly, and returning an int >= 0 giving the number of
fds that had new events within the timeout.
func_err_handler defaults to traceback.print_exception(), and is called
on any exception from the callback (e.g. to log it), after which the
poll error code (-1) is returned to libpulse.'''
if not func_err_handler: func_err_handler = traceback.print_exception
self._pa_poll_cb = c.PA_POLL_FUNC_T(ft.partial(self._pulse_poll_cb, func, func_err_handler))
c.pa.mainloop_set_poll_func(self._loop, self._pa_poll_cb, None) | def function[set_poll_func, parameter[self, func, func_err_handler]]:
constant[Can be used to integrate a pulse client into an existing event loop.
The function will be passed a list of pollfd structs and a timeout value
(seconds, float); it is responsible for polling these fds, setting their
poll flags accordingly, and returning an int >= 0 giving the number of
fds that had new events within the timeout.
func_err_handler defaults to traceback.print_exception(), and is called
on any exception from the callback (e.g. to log it), after which the
poll error code (-1) is returned to libpulse.]
if <ast.UnaryOp object at 0x7da1b040d1e0> begin[:]
variable[func_err_handler] assign[=] name[traceback].print_exception
name[self]._pa_poll_cb assign[=] call[name[c].PA_POLL_FUNC_T, parameter[call[name[ft].partial, parameter[name[self]._pulse_poll_cb, name[func], name[func_err_handler]]]]]
call[name[c].pa.mainloop_set_poll_func, parameter[name[self]._loop, name[self]._pa_poll_cb, constant[None]]] | keyword[def] identifier[set_poll_func] ( identifier[self] , identifier[func] , identifier[func_err_handler] = keyword[None] ):
literal[string]
keyword[if] keyword[not] identifier[func_err_handler] : identifier[func_err_handler] = identifier[traceback] . identifier[print_exception]
identifier[self] . identifier[_pa_poll_cb] = identifier[c] . identifier[PA_POLL_FUNC_T] ( identifier[ft] . identifier[partial] ( identifier[self] . identifier[_pulse_poll_cb] , identifier[func] , identifier[func_err_handler] ))
identifier[c] . identifier[pa] . identifier[mainloop_set_poll_func] ( identifier[self] . identifier[_loop] , identifier[self] . identifier[_pa_poll_cb] , keyword[None] ) | def set_poll_func(self, func, func_err_handler=None):
"""Can be used to integrate pulse client into existing eventloop.
Function will be passed a list of pollfd structs and timeout value (seconds, float),
which it is responsible to use and modify (set poll flags) accordingly,
returning int value >= 0 with number of fds that had any new events within timeout.
func_err_handler defaults to traceback.print_exception(),
and will be called on any exceptions from callback (to e.g. log these),
returning poll error code (-1) to libpulse after that."""
if not func_err_handler:
func_err_handler = traceback.print_exception # depends on [control=['if'], data=[]]
self._pa_poll_cb = c.PA_POLL_FUNC_T(ft.partial(self._pulse_poll_cb, func, func_err_handler))
c.pa.mainloop_set_poll_func(self._loop, self._pa_poll_cb, None) |
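A sketch of a poll function matching the contract in the docstring, built on select.poll. The fd/events/revents field names follow the C pollfd layout that pulsectl exposes, and the negative-timeout-means-block convention is an assumption, not confirmed by the source above.

import select

def my_poll(fds, timeout):
    poller = select.poll()
    for pfd in fds:
        poller.register(pfd.fd, pfd.events)
    # poll() takes milliseconds; block when the timeout is negative.
    ready = dict(poller.poll(None if timeout < 0 else timeout * 1000))
    for pfd in fds:
        pfd.revents = ready.get(pfd.fd, 0)
    return len(ready)

# pulse.set_poll_func(my_poll)  # attach to an existing Pulse() instance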
def projection_pp(site, normal, dist_to_plane, reference):
'''
This method finds the projection of the site onto the plane containing
the slipped area, defined as Pp (i.e. the 'perpendicular projection of
the site location onto the fault plane', Spudich et al. (2013), page 88),
given a site.
:param site:
Location of the site, [lon, lat, dep]
:param normal:
Normal to the plane including the fault patch,
described by a normal vector [a, b, c]
:param dist_to_plane:
d in the plane equation, ax + by + cz = d
:param reference:
:class:`~openquake.hazardlib.geo.point.Point` object
representing the location of the projection reference point
:returns:
pp, the projection point [ppx, ppy, ppz] in the xyz domain,
as a numpy array.
'''
# Transform to xyz coordinate
[site_x, site_y, site_z] = get_xyz_from_ll(site, reference)
a = np.array([(1, 0, 0, -normal[0]),
(0, 1, 0, -normal[1]),
(0, 0, 1, -normal[2]),
(normal[0], normal[1], normal[2], 0)])
b = np.array([site_x, site_y, site_z, dist_to_plane])
x = np.linalg.solve(a, b)
pp = np.array([x[0], x[1], x[2]])
return pp | def function[projection_pp, parameter[site, normal, dist_to_plane, reference]]:
constant[
This method finds the projection of the site onto the plane containing
the slipped area, defined as Pp (i.e. the 'perpendicular projection of
the site location onto the fault plane', Spudich et al. (2013), page 88),
given a site.
:param site:
Location of the site, [lon, lat, dep]
:param normal:
Normal to the plane including the fault patch,
described by a normal vector [a, b, c]
:param dist_to_plane:
d in the plane equation, ax + by + cz = d
:param reference:
:class:`~openquake.hazardlib.geo.point.Point` object
representing the location of the projection reference point
:returns:
pp, the projection point [ppx, ppy, ppz] in the xyz domain,
as a numpy array.
]
<ast.List object at 0x7da18f00cdf0> assign[=] call[name[get_xyz_from_ll], parameter[name[site], name[reference]]]
variable[a] assign[=] call[name[np].array, parameter[list[[<ast.Tuple object at 0x7da18f00fe20>, <ast.Tuple object at 0x7da18f00e200>, <ast.Tuple object at 0x7da18f00e470>, <ast.Tuple object at 0x7da18f00e1d0>]]]]
variable[b] assign[=] call[name[np].array, parameter[list[[<ast.Name object at 0x7da18f00ea40>, <ast.Name object at 0x7da18f00d570>, <ast.Name object at 0x7da18f00da50>, <ast.Name object at 0x7da18f00e710>]]]]
variable[x] assign[=] call[name[np].linalg.solve, parameter[name[a], name[b]]]
variable[pp] assign[=] call[name[np].array, parameter[list[[<ast.Subscript object at 0x7da1b0651d80>, <ast.Subscript object at 0x7da1b0651db0>, <ast.Subscript object at 0x7da18f00e770>]]]]
return[name[pp]] | keyword[def] identifier[projection_pp] ( identifier[site] , identifier[normal] , identifier[dist_to_plane] , identifier[reference] ):
literal[string]
[ identifier[site_x] , identifier[site_y] , identifier[site_z] ]= identifier[get_xyz_from_ll] ( identifier[site] , identifier[reference] )
identifier[a] = identifier[np] . identifier[array] ([( literal[int] , literal[int] , literal[int] ,- identifier[normal] [ literal[int] ]),
( literal[int] , literal[int] , literal[int] ,- identifier[normal] [ literal[int] ]),
( literal[int] , literal[int] , literal[int] ,- identifier[normal] [ literal[int] ]),
( identifier[normal] [ literal[int] ], identifier[normal] [ literal[int] ], identifier[normal] [ literal[int] ], literal[int] )])
identifier[b] = identifier[np] . identifier[array] ([ identifier[site_x] , identifier[site_y] , identifier[site_z] , identifier[dist_to_plane] ])
identifier[x] = identifier[np] . identifier[linalg] . identifier[solve] ( identifier[a] , identifier[b] )
identifier[pp] = identifier[np] . identifier[array] ([ identifier[x] [ literal[int] ], identifier[x] [ literal[int] ], identifier[x] [ literal[int] ]])
keyword[return] identifier[pp] | def projection_pp(site, normal, dist_to_plane, reference):
"""
This method finds the projection of the site onto the plane containing
the slipped area, defined as Pp (i.e. the 'perpendicular projection of
the site location onto the fault plane', Spudich et al. (2013), page 88),
given a site.
:param site:
Location of the site, [lon, lat, dep]
:param normal:
Normal to the plane including the fault patch,
described by a normal vector [a, b, c]
:param dist_to_plane:
d in the plane equation, ax + by + cz = d
:param reference:
:class:`~openquake.hazardlib.geo.point.Point` object
representing the location of the projection reference point
:returns:
pp, the projection point [ppx, ppy, ppz] in the xyz domain,
as a numpy array.
"""
# Transform to xyz coordinate
[site_x, site_y, site_z] = get_xyz_from_ll(site, reference)
a = np.array([(1, 0, 0, -normal[0]), (0, 1, 0, -normal[1]), (0, 0, 1, -normal[2]), (normal[0], normal[1], normal[2], 0)])
b = np.array([site_x, site_y, site_z, dist_to_plane])
x = np.linalg.solve(a, b)
pp = np.array([x[0], x[1], x[2]])
return pp |
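A standalone numpy check of the same linear system: projecting (1, 2, 3) onto the plane x + y + z = 3 should land on (0, 1, 2), which also satisfies the plane equation.

import numpy as np

normal, d = np.array([1.0, 1.0, 1.0]), 3.0
site = np.array([1.0, 2.0, 3.0])
a = np.array([(1, 0, 0, -normal[0]),
              (0, 1, 0, -normal[1]),
              (0, 0, 1, -normal[2]),
              (normal[0], normal[1], normal[2], 0)])
b = np.array([site[0], site[1], site[2], d])
pp = np.linalg.solve(a, b)[:3]
print(pp)                          # [0. 1. 2.]
assert np.isclose(normal @ pp, d)  # pp lies on the plane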
def elem_add(self, idx=None, name=None, **kwargs):
"""overloading elem_add function of a JIT class"""
self.jit_load()
if self.loaded:
return self.system.__dict__[self.name].elem_add(
idx, name, **kwargs) | def function[elem_add, parameter[self, idx, name]]:
constant[overloading elem_add function of a JIT class]
call[name[self].jit_load, parameter[]]
if name[self].loaded begin[:]
return[call[call[name[self].system.__dict__][name[self].name].elem_add, parameter[name[idx], name[name]]]] | keyword[def] identifier[elem_add] ( identifier[self] , identifier[idx] = keyword[None] , identifier[name] = keyword[None] ,** identifier[kwargs] ):
literal[string]
identifier[self] . identifier[jit_load] ()
keyword[if] identifier[self] . identifier[loaded] :
keyword[return] identifier[self] . identifier[system] . identifier[__dict__] [ identifier[self] . identifier[name] ]. identifier[elem_add] (
identifier[idx] , identifier[name] ,** identifier[kwargs] ) | def elem_add(self, idx=None, name=None, **kwargs):
"""overloading elem_add function of a JIT class"""
self.jit_load()
if self.loaded:
return self.system.__dict__[self.name].elem_add(idx, name, **kwargs) # depends on [control=['if'], data=[]] |
def fetch_fieldnames(self, sql: str, *args) -> List[str]:
"""Executes SQL; returns just the output fieldnames."""
self.ensure_db_open()
cursor = self.db.cursor()
self.db_exec_with_cursor(cursor, sql, *args)
try:
return [i[0] for i in cursor.description]
except: # nopep8
log.exception("fetch_fieldnames: SQL was: " + sql)
raise | def function[fetch_fieldnames, parameter[self, sql]]:
constant[Executes SQL; returns just the output fieldnames.]
call[name[self].ensure_db_open, parameter[]]
variable[cursor] assign[=] call[name[self].db.cursor, parameter[]]
call[name[self].db_exec_with_cursor, parameter[name[cursor], name[sql], <ast.Starred object at 0x7da1b1849120>]]
<ast.Try object at 0x7da1b184ba90> | keyword[def] identifier[fetch_fieldnames] ( identifier[self] , identifier[sql] : identifier[str] ,* identifier[args] )-> identifier[List] [ identifier[str] ]:
literal[string]
identifier[self] . identifier[ensure_db_open] ()
identifier[cursor] = identifier[self] . identifier[db] . identifier[cursor] ()
identifier[self] . identifier[db_exec_with_cursor] ( identifier[cursor] , identifier[sql] ,* identifier[args] )
keyword[try] :
keyword[return] [ identifier[i] [ literal[int] ] keyword[for] identifier[i] keyword[in] identifier[cursor] . identifier[description] ]
keyword[except] :
identifier[log] . identifier[exception] ( literal[string] + identifier[sql] )
keyword[raise] | def fetch_fieldnames(self, sql: str, *args) -> List[str]:
"""Executes SQL; returns just the output fieldnames."""
self.ensure_db_open()
cursor = self.db.cursor()
self.db_exec_with_cursor(cursor, sql, *args)
try:
return [i[0] for i in cursor.description] # depends on [control=['try'], data=[]]
except: # nopep8
log.exception('fetch_fieldnames: SQL was: ' + sql)
raise # depends on [control=['except'], data=[]] |
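The cursor.description contract this relies on, demonstrated with sqlite3 as a stand-in connection: each entry is a sequence whose first item is the column name.

import sqlite3

conn = sqlite3.connect(':memory:')
cur = conn.cursor()
cur.execute("SELECT 1 AS id, 'x' AS label")
print([col[0] for col in cur.description])  # ['id', 'label']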
def _domain_event_block_threshold_cb(conn, domain, dev, path, threshold, excess, opaque):
'''
Domain block threshold events handler
'''
_salt_send_domain_event(opaque, conn, domain, opaque['event'], {
'dev': dev,
'path': path,
'threshold': threshold,
'excess': excess
}) | def function[_domain_event_block_threshold_cb, parameter[conn, domain, dev, path, threshold, excess, opaque]]:
constant[
Domain block threshold events handler
]
call[name[_salt_send_domain_event], parameter[name[opaque], name[conn], name[domain], call[name[opaque]][constant[event]], dictionary[[<ast.Constant object at 0x7da20e9b2020>, <ast.Constant object at 0x7da20e9b1480>, <ast.Constant object at 0x7da20e9b0700>, <ast.Constant object at 0x7da20e9b35e0>], [<ast.Name object at 0x7da20e9b31c0>, <ast.Name object at 0x7da20e9b1cf0>, <ast.Name object at 0x7da20e9b1870>, <ast.Name object at 0x7da20e9b1b70>]]]] | keyword[def] identifier[_domain_event_block_threshold_cb] ( identifier[conn] , identifier[domain] , identifier[dev] , identifier[path] , identifier[threshold] , identifier[excess] , identifier[opaque] ):
literal[string]
identifier[_salt_send_domain_event] ( identifier[opaque] , identifier[conn] , identifier[domain] , identifier[opaque] [ literal[string] ],{
literal[string] : identifier[dev] ,
literal[string] : identifier[path] ,
literal[string] : identifier[threshold] ,
literal[string] : identifier[excess]
}) | def _domain_event_block_threshold_cb(conn, domain, dev, path, threshold, excess, opaque):
"""
Domain block threshold events handler
"""
_salt_send_domain_event(opaque, conn, domain, opaque['event'], {'dev': dev, 'path': path, 'threshold': threshold, 'excess': excess}) |
def mod_run_check_cmd(cmd, filename, **check_cmd_opts):
'''
Execute the check_cmd logic.
Return True if ``check_cmd`` succeeds (retcode == 0); otherwise
return a result dict describing the failure.
'''
log.debug('running our check_cmd')
_cmd = '{0} {1}'.format(cmd, filename)
cret = __salt__['cmd.run_all'](_cmd, **check_cmd_opts)
if cret['retcode'] != 0:
ret = {'comment': 'check_cmd execution failed',
'skip_watch': True,
'result': False}
if cret.get('stdout'):
ret['comment'] += '\n' + cret['stdout']
if cret.get('stderr'):
ret['comment'] += '\n' + cret['stderr']
return ret
# No reason to stop, return True
return True | def function[mod_run_check_cmd, parameter[cmd, filename]]:
constant[
Execute the check_cmd logic.
Return True if ``check_cmd`` succeeds (retcode == 0); otherwise
return a result dict describing the failure.
]
call[name[log].debug, parameter[constant[running our check_cmd]]]
variable[_cmd] assign[=] call[constant[{0} {1}].format, parameter[name[cmd], name[filename]]]
variable[cret] assign[=] call[call[name[__salt__]][constant[cmd.run_all]], parameter[name[_cmd]]]
if compare[call[name[cret]][constant[retcode]] not_equal[!=] constant[0]] begin[:]
variable[ret] assign[=] dictionary[[<ast.Constant object at 0x7da1b1c14ee0>, <ast.Constant object at 0x7da1b1c146d0>, <ast.Constant object at 0x7da1b1c16b00>], [<ast.Constant object at 0x7da1b1c14d90>, <ast.Constant object at 0x7da1b1c14d60>, <ast.Constant object at 0x7da1b1c16170>]]
if call[name[cret].get, parameter[constant[stdout]]] begin[:]
<ast.AugAssign object at 0x7da1b1c17010>
if call[name[cret].get, parameter[constant[stderr]]] begin[:]
<ast.AugAssign object at 0x7da1b1c14580>
return[name[ret]]
return[constant[True]] | keyword[def] identifier[mod_run_check_cmd] ( identifier[cmd] , identifier[filename] ,** identifier[check_cmd_opts] ):
literal[string]
identifier[log] . identifier[debug] ( literal[string] )
identifier[_cmd] = literal[string] . identifier[format] ( identifier[cmd] , identifier[filename] )
identifier[cret] = identifier[__salt__] [ literal[string] ]( identifier[_cmd] ,** identifier[check_cmd_opts] )
keyword[if] identifier[cret] [ literal[string] ]!= literal[int] :
identifier[ret] ={ literal[string] : literal[string] ,
literal[string] : keyword[True] ,
literal[string] : keyword[False] }
keyword[if] identifier[cret] . identifier[get] ( literal[string] ):
identifier[ret] [ literal[string] ]+= literal[string] + identifier[cret] [ literal[string] ]
keyword[if] identifier[cret] . identifier[get] ( literal[string] ):
identifier[ret] [ literal[string] ]+= literal[string] + identifier[cret] [ literal[string] ]
keyword[return] identifier[ret]
keyword[return] keyword[True] | def mod_run_check_cmd(cmd, filename, **check_cmd_opts):
"""
Execute the check_cmd logic.
Return True if ``check_cmd`` succeeds (retcode == 0); otherwise
return a result dict describing the failure.
"""
log.debug('running our check_cmd')
_cmd = '{0} {1}'.format(cmd, filename)
cret = __salt__['cmd.run_all'](_cmd, **check_cmd_opts)
if cret['retcode'] != 0:
ret = {'comment': 'check_cmd execution failed', 'skip_watch': True, 'result': False}
if cret.get('stdout'):
ret['comment'] += '\n' + cret['stdout'] # depends on [control=['if'], data=[]]
if cret.get('stderr'):
ret['comment'] += '\n' + cret['stderr'] # depends on [control=['if'], data=[]]
return ret # depends on [control=['if'], data=[]]
# No reason to stop, return True
return True |
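The same check_cmd pattern outside Salt, sketched with subprocess in place of __salt__['cmd.run_all']; the command in the usage comment is illustrative.

import subprocess

def run_check_cmd(cmd, filename):
    proc = subprocess.run('{0} {1}'.format(cmd, filename),
                          shell=True, capture_output=True, text=True)
    if proc.returncode != 0:
        comment = 'check_cmd execution failed'
        if proc.stdout:
            comment += '\n' + proc.stdout
        if proc.stderr:
            comment += '\n' + proc.stderr
        return {'comment': comment, 'skip_watch': True, 'result': False}
    return True

# e.g. run_check_cmd('python -m py_compile', '/tmp/candidate.py')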
def read_plink(file_prefix, verbose=True):
r"""Read PLINK files into Pandas data frames.
Parameters
----------
file_prefix : str
Path prefix to the set of PLINK files. It supports loading many BED
files at once using globstrings wildcard.
verbose : bool
``True`` for progress information; ``False`` otherwise.
Returns
-------
:class:`pandas.DataFrame`
Alleles.
:class:`pandas.DataFrame`
Samples.
:class:`numpy.ndarray`
Genotype.
Examples
--------
We have shipped this package with an example, so you can load and
inspect it by doing
.. doctest::
>>> from pandas_plink import read_plink
>>> from pandas_plink import example_file_prefix
>>> (bim, fam, bed) = read_plink(example_file_prefix(), verbose=False)
>>> print(bim.head()) #doctest: +NORMALIZE_WHITESPACE
chrom snp cm pos a0 a1 i
0 1 rs10399749 0.0 45162 G C 0
1 1 rs2949420 0.0 45257 C T 1
2 1 rs2949421 0.0 45413 0 0 2
3 1 rs2691310 0.0 46844 A T 3
4 1 rs4030303 0.0 72434 0 G 4
>>> print(fam.head()) #doctest: +NORMALIZE_WHITESPACE
fid iid father mother gender trait i
0 Sample_1 Sample_1 0 0 1 -9 0
1 Sample_2 Sample_2 0 0 2 -9 1
2 Sample_3 Sample_3 Sample_1 Sample_2 2 -9 2
>>> print(bed.compute()) #doctest: +NORMALIZE_WHITESPACE
[[ 2. 2. 1.]
[ 2. 1. 2.]
[nan nan nan]
[nan nan 1.]
[ 2. 2. 2.]
[ 2. 2. 2.]
[ 2. 1. 0.]
[ 2. 2. 2.]
[ 1. 2. 2.]
[ 2. 1. 2.]]
The values of the ``bed`` matrix denote how many alleles ``a1`` (see
output of data frame ``bim``) are in the corresponding position and
individual. Notice the column ``i`` in ``bim`` and ``fam`` data frames.
It maps to the corresponding position of the bed matrix:
.. doctest::
>>> chrom1 = bim.query("chrom=='1'")
>>> X = bed[chrom1.i.values, :].compute()
>>> print(X) #doctest: +NORMALIZE_WHITESPACE
[[ 2. 2. 1.]
[ 2. 1. 2.]
[nan nan nan]
[nan nan 1.]
[ 2. 2. 2.]
[ 2. 2. 2.]
[ 2. 1. 0.]
[ 2. 2. 2.]
[ 1. 2. 2.]
[ 2. 1. 2.]]
It also allows the use of the wildcard character ``*`` for mapping
multiple BED files at
once: ``(bim, fam, bed) = read_plink("chrom*")``.
In this case, only one of the FAM files will be used to define
sample information. Data from BIM and BED files are concatenated to
provide a single view of the files.
"""
from dask.array import concatenate
file_prefixes = sorted(glob(file_prefix))
if len(file_prefixes) == 0:
file_prefixes = [file_prefix.replace("*", "")]
file_prefixes = sorted(_clean_prefixes(file_prefixes))
fn = []
for fp in file_prefixes:
fn.append({s: "%s.%s" % (fp, s) for s in ["bed", "bim", "fam"]})
pbar = tqdm(desc="Mapping files", total=3 * len(fn), disable=not verbose)
msg = "Reading bim file(s)..."
bim = _read_file(fn, msg, lambda fn: _read_bim(fn["bim"]), pbar)
if len(file_prefixes) > 1:
if verbose:
msg = "Multiple files read in this order: {}"
print(msg.format([basename(f) for f in file_prefixes]))
nmarkers = dict()
index_offset = 0
for i, bi in enumerate(bim):
nmarkers[fn[i]["bed"]] = bi.shape[0]
bi["i"] += index_offset
index_offset += bi.shape[0]
bim = pd.concat(bim, axis=0, ignore_index=True)
msg = "Reading fam file(s)..."
fam = _read_file([fn[0]], msg, lambda fn: _read_fam(fn["fam"]), pbar)[0]
nsamples = fam.shape[0]
bed = _read_file(
fn,
"Reading bed file(s)...",
lambda fn: _read_bed(fn["bed"], nsamples, nmarkers[fn["bed"]]),
pbar,
)
bed = concatenate(bed, axis=0)
pbar.close()
return (bim, fam, bed) | def function[read_plink, parameter[file_prefix, verbose]]:
constant[Read PLINK files into Pandas data frames.
Parameters
----------
file_prefix : str
Path prefix to the set of PLINK files. It supports loading many BED
files at once using globstrings wildcard.
verbose : bool
``True`` for progress information; ``False`` otherwise.
Returns
-------
:class:`pandas.DataFrame`
Alleles.
:class:`pandas.DataFrame`
Samples.
:class:`numpy.ndarray`
Genotype.
Examples
--------
We have shipped this package with an example, so you can load and
inspect it by doing
.. doctest::
>>> from pandas_plink import read_plink
>>> from pandas_plink import example_file_prefix
>>> (bim, fam, bed) = read_plink(example_file_prefix(), verbose=False)
>>> print(bim.head()) #doctest: +NORMALIZE_WHITESPACE
chrom snp cm pos a0 a1 i
0 1 rs10399749 0.0 45162 G C 0
1 1 rs2949420 0.0 45257 C T 1
2 1 rs2949421 0.0 45413 0 0 2
3 1 rs2691310 0.0 46844 A T 3
4 1 rs4030303 0.0 72434 0 G 4
>>> print(fam.head()) #doctest: +NORMALIZE_WHITESPACE
fid iid father mother gender trait i
0 Sample_1 Sample_1 0 0 1 -9 0
1 Sample_2 Sample_2 0 0 2 -9 1
2 Sample_3 Sample_3 Sample_1 Sample_2 2 -9 2
>>> print(bed.compute()) #doctest: +NORMALIZE_WHITESPACE
[[ 2. 2. 1.]
[ 2. 1. 2.]
[nan nan nan]
[nan nan 1.]
[ 2. 2. 2.]
[ 2. 2. 2.]
[ 2. 1. 0.]
[ 2. 2. 2.]
[ 1. 2. 2.]
[ 2. 1. 2.]]
The values of the ``bed`` matrix denote how many alleles ``a1`` (see
output of data frame ``bim``) are in the corresponding position and
individual. Notice the column ``i`` in ``bim`` and ``fam`` data frames.
It maps to the corresponding position of the bed matrix:
.. doctest::
>>> chrom1 = bim.query("chrom=='1'")
>>> X = bed[chrom1.i.values, :].compute()
>>> print(X) #doctest: +NORMALIZE_WHITESPACE
[[ 2. 2. 1.]
[ 2. 1. 2.]
[nan nan nan]
[nan nan 1.]
[ 2. 2. 2.]
[ 2. 2. 2.]
[ 2. 1. 0.]
[ 2. 2. 2.]
[ 1. 2. 2.]
[ 2. 1. 2.]]
It also allows the use of the wildcard character ``*`` for mapping
multiple BED files at
once: ``(bim, fam, bed) = read_plink("chrom*")``.
In this case, only one of the FAM files will be used to define
sample information. Data from BIM and BED files are concatenated to
provide a single view of the files.
]
from relative_module[dask.array] import module[concatenate]
variable[file_prefixes] assign[=] call[name[sorted], parameter[call[name[glob], parameter[name[file_prefix]]]]]
if compare[call[name[len], parameter[name[file_prefixes]]] equal[==] constant[0]] begin[:]
variable[file_prefixes] assign[=] list[[<ast.Call object at 0x7da1b0c51d50>]]
variable[file_prefixes] assign[=] call[name[sorted], parameter[call[name[_clean_prefixes], parameter[name[file_prefixes]]]]]
variable[fn] assign[=] list[[]]
for taget[name[fp]] in starred[name[file_prefixes]] begin[:]
call[name[fn].append, parameter[<ast.DictComp object at 0x7da1b0c52230>]]
variable[pbar] assign[=] call[name[tqdm], parameter[]]
variable[msg] assign[=] constant[Reading bim file(s)...]
variable[bim] assign[=] call[name[_read_file], parameter[name[fn], name[msg], <ast.Lambda object at 0x7da1b0c52080>, name[pbar]]]
if compare[call[name[len], parameter[name[file_prefixes]]] greater[>] constant[1]] begin[:]
if name[verbose] begin[:]
variable[msg] assign[=] constant[Multiple files read in this order: {}]
call[name[print], parameter[call[name[msg].format, parameter[<ast.ListComp object at 0x7da1b0c51ea0>]]]]
variable[nmarkers] assign[=] call[name[dict], parameter[]]
variable[index_offset] assign[=] constant[0]
for taget[tuple[[<ast.Name object at 0x7da1b0ed1030>, <ast.Name object at 0x7da1b0ed1600>]]] in starred[call[name[enumerate], parameter[name[bim]]]] begin[:]
call[name[nmarkers]][call[call[name[fn]][name[i]]][constant[bed]]] assign[=] call[name[bi].shape][constant[0]]
<ast.AugAssign object at 0x7da1b0ed2a10>
<ast.AugAssign object at 0x7da1b0ed0550>
variable[bim] assign[=] call[name[pd].concat, parameter[name[bim]]]
variable[msg] assign[=] constant[Reading fam file(s)...]
variable[fam] assign[=] call[call[name[_read_file], parameter[list[[<ast.Subscript object at 0x7da1b0ed1cc0>]], name[msg], <ast.Lambda object at 0x7da1b0ed0820>, name[pbar]]]][constant[0]]
variable[nsamples] assign[=] call[name[fam].shape][constant[0]]
variable[bed] assign[=] call[name[_read_file], parameter[name[fn], constant[Reading bed file(s)...], <ast.Lambda object at 0x7da1b0ed0250>, name[pbar]]]
variable[bed] assign[=] call[name[concatenate], parameter[name[bed]]]
call[name[pbar].close, parameter[]]
return[tuple[[<ast.Name object at 0x7da1b0ed18a0>, <ast.Name object at 0x7da1b0ed3130>, <ast.Name object at 0x7da1b0ed31c0>]]] | keyword[def] identifier[read_plink] ( identifier[file_prefix] , identifier[verbose] = keyword[True] ):
literal[string]
keyword[from] identifier[dask] . identifier[array] keyword[import] identifier[concatenate]
identifier[file_prefixes] = identifier[sorted] ( identifier[glob] ( identifier[file_prefix] ))
keyword[if] identifier[len] ( identifier[file_prefixes] )== literal[int] :
identifier[file_prefixes] =[ identifier[file_prefix] . identifier[replace] ( literal[string] , literal[string] )]
identifier[file_prefixes] = identifier[sorted] ( identifier[_clean_prefixes] ( identifier[file_prefixes] ))
identifier[fn] =[]
keyword[for] identifier[fp] keyword[in] identifier[file_prefixes] :
identifier[fn] . identifier[append] ({ identifier[s] : literal[string] %( identifier[fp] , identifier[s] ) keyword[for] identifier[s] keyword[in] [ literal[string] , literal[string] , literal[string] ]})
identifier[pbar] = identifier[tqdm] ( identifier[desc] = literal[string] , identifier[total] = literal[int] * identifier[len] ( identifier[fn] ), identifier[disable] = keyword[not] identifier[verbose] )
identifier[msg] = literal[string]
identifier[bim] = identifier[_read_file] ( identifier[fn] , identifier[msg] , keyword[lambda] identifier[fn] : identifier[_read_bim] ( identifier[fn] [ literal[string] ]), identifier[pbar] )
keyword[if] identifier[len] ( identifier[file_prefixes] )> literal[int] :
keyword[if] identifier[verbose] :
identifier[msg] = literal[string]
identifier[print] ( identifier[msg] . identifier[format] ([ identifier[basename] ( identifier[f] ) keyword[for] identifier[f] keyword[in] identifier[file_prefixes] ]))
identifier[nmarkers] = identifier[dict] ()
identifier[index_offset] = literal[int]
keyword[for] identifier[i] , identifier[bi] keyword[in] identifier[enumerate] ( identifier[bim] ):
identifier[nmarkers] [ identifier[fn] [ identifier[i] ][ literal[string] ]]= identifier[bi] . identifier[shape] [ literal[int] ]
identifier[bi] [ literal[string] ]+= identifier[index_offset]
identifier[index_offset] += identifier[bi] . identifier[shape] [ literal[int] ]
identifier[bim] = identifier[pd] . identifier[concat] ( identifier[bim] , identifier[axis] = literal[int] , identifier[ignore_index] = keyword[True] )
identifier[msg] = literal[string]
identifier[fam] = identifier[_read_file] ([ identifier[fn] [ literal[int] ]], identifier[msg] , keyword[lambda] identifier[fn] : identifier[_read_fam] ( identifier[fn] [ literal[string] ]), identifier[pbar] )[ literal[int] ]
identifier[nsamples] = identifier[fam] . identifier[shape] [ literal[int] ]
identifier[bed] = identifier[_read_file] (
identifier[fn] ,
literal[string] ,
keyword[lambda] identifier[fn] : identifier[_read_bed] ( identifier[fn] [ literal[string] ], identifier[nsamples] , identifier[nmarkers] [ identifier[fn] [ literal[string] ]]),
identifier[pbar] ,
)
identifier[bed] = identifier[concatenate] ( identifier[bed] , identifier[axis] = literal[int] )
identifier[pbar] . identifier[close] ()
keyword[return] ( identifier[bim] , identifier[fam] , identifier[bed] ) | def read_plink(file_prefix, verbose=True):
"""Read PLINK files into Pandas data frames.
Parameters
----------
file_prefix : str
Path prefix to the set of PLINK files. It supports loading many BED
files at once using globstrings wildcard.
verbose : bool
``True`` for progress information; ``False`` otherwise.
Returns
-------
:class:`pandas.DataFrame`
Alleles.
:class:`pandas.DataFrame`
Samples.
:class:`numpy.ndarray`
Genotype.
Examples
--------
We have shipped this package with an example, so you can load and
inspect it by doing
.. doctest::
>>> from pandas_plink import read_plink
>>> from pandas_plink import example_file_prefix
>>> (bim, fam, bed) = read_plink(example_file_prefix(), verbose=False)
>>> print(bim.head()) #doctest: +NORMALIZE_WHITESPACE
chrom snp cm pos a0 a1 i
0 1 rs10399749 0.0 45162 G C 0
1 1 rs2949420 0.0 45257 C T 1
2 1 rs2949421 0.0 45413 0 0 2
3 1 rs2691310 0.0 46844 A T 3
4 1 rs4030303 0.0 72434 0 G 4
>>> print(fam.head()) #doctest: +NORMALIZE_WHITESPACE
fid iid father mother gender trait i
0 Sample_1 Sample_1 0 0 1 -9 0
1 Sample_2 Sample_2 0 0 2 -9 1
2 Sample_3 Sample_3 Sample_1 Sample_2 2 -9 2
>>> print(bed.compute()) #doctest: +NORMALIZE_WHITESPACE
[[ 2. 2. 1.]
[ 2. 1. 2.]
[nan nan nan]
[nan nan 1.]
[ 2. 2. 2.]
[ 2. 2. 2.]
[ 2. 1. 0.]
[ 2. 2. 2.]
[ 1. 2. 2.]
[ 2. 1. 2.]]
The values of the ``bed`` matrix denote how many alleles ``a1`` (see
output of data frame ``bim``) are in the corresponding position and
individual. Notice the column ``i`` in ``bim`` and ``fam`` data frames.
It maps to the corresponding position of the bed matrix:
.. doctest::
>>> chrom1 = bim.query("chrom=='1'")
>>> X = bed[chrom1.i.values, :].compute()
>>> print(X) #doctest: +NORMALIZE_WHITESPACE
[[ 2. 2. 1.]
[ 2. 1. 2.]
[nan nan nan]
[nan nan 1.]
[ 2. 2. 2.]
[ 2. 2. 2.]
[ 2. 1. 0.]
[ 2. 2. 2.]
[ 1. 2. 2.]
[ 2. 1. 2.]]
It also allows the use of the wildcard character ``*`` for mapping
multiple BED files at
once: ``(bim, fam, bed) = read_plink("chrom*")``.
In this case, only one of the FAM files will be used to define
sample information. Data from BIM and BED files are concatenated to
provide a single view of the files.
"""
from dask.array import concatenate
file_prefixes = sorted(glob(file_prefix))
if len(file_prefixes) == 0:
file_prefixes = [file_prefix.replace('*', '')] # depends on [control=['if'], data=[]]
file_prefixes = sorted(_clean_prefixes(file_prefixes))
fn = []
for fp in file_prefixes:
fn.append({s: '%s.%s' % (fp, s) for s in ['bed', 'bim', 'fam']}) # depends on [control=['for'], data=['fp']]
pbar = tqdm(desc='Mapping files', total=3 * len(fn), disable=not verbose)
msg = 'Reading bim file(s)...'
bim = _read_file(fn, msg, lambda fn: _read_bim(fn['bim']), pbar)
if len(file_prefixes) > 1:
if verbose:
msg = 'Multiple files read in this order: {}'
print(msg.format([basename(f) for f in file_prefixes])) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
nmarkers = dict()
index_offset = 0
for (i, bi) in enumerate(bim):
nmarkers[fn[i]['bed']] = bi.shape[0]
bi['i'] += index_offset
index_offset += bi.shape[0] # depends on [control=['for'], data=[]]
bim = pd.concat(bim, axis=0, ignore_index=True)
msg = 'Reading fam file(s)...'
fam = _read_file([fn[0]], msg, lambda fn: _read_fam(fn['fam']), pbar)[0]
nsamples = fam.shape[0]
bed = _read_file(fn, 'Reading bed file(s)...', lambda fn: _read_bed(fn['bed'], nsamples, nmarkers[fn['bed']]), pbar)
bed = concatenate(bed, axis=0)
pbar.close()
return (bim, fam, bed) |
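Because each bed entry counts a1 alleles (0, 1, 2, or NaN), per-variant allele frequencies fall out of a nan-aware mean over samples; the a1_freq column name below is our label for illustration, not part of the library.

import dask.array as da
from pandas_plink import read_plink, example_file_prefix

(bim, fam, bed) = read_plink(example_file_prefix(), verbose=False)
a1_freq = da.nanmean(bed, axis=1).compute() / 2
print(bim.assign(a1_freq=a1_freq).head())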
def scale(p, factor, o=(0, 0)):
""" scale vector
Args:
p: point (x, y)
factor: scaling factor
o: origin (x, y)
"""
v = vector(o, p)
sv = v[0] * factor, v[1] * factor
return translate(sv, o) | def function[scale, parameter[p, factor, o]]:
constant[ scale vector
Args:
p: point (x, y)
factor: scaling factor
o: origin (x, y)
]
variable[v] assign[=] call[name[vector], parameter[name[o], name[p]]]
variable[sv] assign[=] tuple[[<ast.BinOp object at 0x7da1b24efc40>, <ast.BinOp object at 0x7da1b24eed10>]]
return[call[name[translate], parameter[name[sv], name[o]]]] | keyword[def] identifier[scale] ( identifier[p] , identifier[factor] , identifier[o] =( literal[int] , literal[int] )):
literal[string]
identifier[v] = identifier[vector] ( identifier[o] , identifier[p] )
identifier[sv] = identifier[v] [ literal[int] ]* identifier[factor] , identifier[v] [ literal[int] ]* identifier[factor]
keyword[return] identifier[translate] ( identifier[sv] , identifier[o] ) | def scale(p, factor, o=(0, 0)):
""" scale vector
Args:
p: point (x, y)
factor: scaling factor
o: origin (x, y)
"""
v = vector(o, p)
sv = (v[0] * factor, v[1] * factor)
return translate(sv, o) |
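A self-contained check of scale(), assuming the usual meanings for the vector() and translate() helpers (componentwise difference and addition); those helpers are not shown in the original, so their bodies here are assumptions.

def vector(o, p):
    return (p[0] - o[0], p[1] - o[1])  # assumed: p - o

def translate(v, o):
    return (v[0] + o[0], v[1] + o[1])  # assumed: v + o

def scale(p, factor, o=(0, 0)):
    v = vector(o, p)
    sv = (v[0] * factor, v[1] * factor)
    return translate(sv, o)

print(scale((3, 4), 2))            # (6, 8)
print(scale((3, 4), 2, o=(1, 1)))  # (5, 7): scaled about (1, 1)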
def opening_hours(location=None, concise=False):
"""
Creates a rendered listing of hours.
"""
template_name = 'openinghours/opening_hours_list.html'
days = [] # [{'hours': '9:00am to 5:00pm', 'name': u'Monday'}, {'hours...
# Without `location`, choose the first company.
if location:
ohrs = OpeningHours.objects.filter(company=location)
else:
try:
Location = utils.get_premises_model()
ohrs = Location.objects.first().openinghours_set.all()
except AttributeError:
raise Exception("You must define some opening hours"
" to use the opening hours tags.")
ohrs = ohrs.order_by('weekday', 'from_hour')
for o in ohrs:
days.append({
'day_number': o.weekday,
'name': o.get_weekday_display(),
'from_hour': o.from_hour,
'to_hour': o.to_hour,
'hours': '%s%s to %s%s' % (
o.from_hour.strftime('%I:%M').lstrip('0'),
o.from_hour.strftime('%p').lower(),
o.to_hour.strftime('%I:%M').lstrip('0'),
o.to_hour.strftime('%p').lower()
)
})
open_days = [o.weekday for o in ohrs]
for day_number, day_name in WEEKDAYS:
if day_number not in open_days:
days.append({
'day_number': day_number,
'name': day_name,
'hours': 'Closed'
})
days = sorted(days, key=lambda k: k['day_number'])
if concise:
# [{'hours': '9:00am to 5:00pm', 'day_names': u'Monday to Friday'},
# {'hours':...
template_name = 'openinghours/opening_hours_list_concise.html'
concise_days = []
current_set = {}
for day in days:
if 'hours' not in current_set.keys():
current_set = {'day_names': [day['name']],
'hours': day['hours']}
elif day['hours'] != current_set['hours']:
concise_days.append(current_set)
current_set = {'day_names': [day['name']],
'hours': day['hours']}
else:
current_set['day_names'].append(day['name'])
concise_days.append(current_set)
for day_set in concise_days:
if len(day_set['day_names']) > 2:
day_set['day_names'] = '%s to %s' % (day_set['day_names'][0],
day_set['day_names'][-1])
elif len(day_set['day_names']) > 1:
day_set['day_names'] = '%s and %s' % (day_set['day_names'][0],
day_set['day_names'][-1])
else:
day_set['day_names'] = '%s' % day_set['day_names'][0]
days = concise_days
template = get_template(template_name)
return template.render({'days': days}) | def function[opening_hours, parameter[location, concise]]:
constant[
Creates a rendered listing of hours.
]
variable[template_name] assign[=] constant[openinghours/opening_hours_list.html]
variable[days] assign[=] list[[]]
if name[location] begin[:]
variable[ohrs] assign[=] call[name[OpeningHours].objects.filter, parameter[]]
variable[ohrs] assign[=] call[name[ohrs].order_by, parameter[constant[weekday], constant[from_hour]]]
for taget[name[o]] in starred[name[ohrs]] begin[:]
call[name[days].append, parameter[dictionary[[<ast.Constant object at 0x7da204344e20>, <ast.Constant object at 0x7da204344700>, <ast.Constant object at 0x7da2043444c0>, <ast.Constant object at 0x7da2043470d0>, <ast.Constant object at 0x7da1b0fe83d0>], [<ast.Attribute object at 0x7da1b0fe9de0>, <ast.Call object at 0x7da1b0febfd0>, <ast.Attribute object at 0x7da1b0fe8ca0>, <ast.Attribute object at 0x7da1b0fea560>, <ast.BinOp object at 0x7da1b0febd00>]]]]
variable[open_days] assign[=] <ast.ListComp object at 0x7da1b0feb760>
for taget[tuple[[<ast.Name object at 0x7da1b0fe8b50>, <ast.Name object at 0x7da1b0feb880>]]] in starred[name[WEEKDAYS]] begin[:]
if compare[name[day_number] <ast.NotIn object at 0x7da2590d7190> name[open_days]] begin[:]
call[name[days].append, parameter[dictionary[[<ast.Constant object at 0x7da1b0fe8e50>, <ast.Constant object at 0x7da1b0feb340>, <ast.Constant object at 0x7da1b0fea2f0>], [<ast.Name object at 0x7da1b0fe9db0>, <ast.Name object at 0x7da1b0fe80d0>, <ast.Constant object at 0x7da1b0fe8c10>]]]]
variable[days] assign[=] call[name[sorted], parameter[name[days]]]
if name[concise] begin[:]
variable[template_name] assign[=] constant[openinghours/opening_hours_list_concise.html]
variable[concise_days] assign[=] list[[]]
variable[current_set] assign[=] dictionary[[], []]
for taget[name[day]] in starred[name[days]] begin[:]
if compare[constant[hours] <ast.NotIn object at 0x7da2590d7190> call[name[current_set].keys, parameter[]]] begin[:]
variable[current_set] assign[=] dictionary[[<ast.Constant object at 0x7da18bccbb50>, <ast.Constant object at 0x7da18bcc94e0>], [<ast.List object at 0x7da18bcc96c0>, <ast.Subscript object at 0x7da18bcc9e40>]]
call[name[concise_days].append, parameter[name[current_set]]]
for taget[name[day_set]] in starred[name[concise_days]] begin[:]
if compare[call[name[len], parameter[call[name[day_set]][constant[day_names]]]] greater[>] constant[2]] begin[:]
call[name[day_set]][constant[day_names]] assign[=] binary_operation[constant[%s to %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Subscript object at 0x7da1b0feb2b0>, <ast.Subscript object at 0x7da1b0fe99c0>]]]
variable[days] assign[=] name[concise_days]
variable[template] assign[=] call[name[get_template], parameter[name[template_name]]]
return[call[name[template].render, parameter[dictionary[[<ast.Constant object at 0x7da1b0fe9360>], [<ast.Name object at 0x7da1b0febc40>]]]]] | keyword[def] identifier[opening_hours] ( identifier[location] = keyword[None] , identifier[concise] = keyword[False] ):
literal[string]
identifier[template_name] = literal[string]
identifier[days] =[]
keyword[if] identifier[location] :
identifier[ohrs] = identifier[OpeningHours] . identifier[objects] . identifier[filter] ( identifier[company] = identifier[location] )
keyword[else] :
keyword[try] :
identifier[Location] = identifier[utils] . identifier[get_premises_model] ()
identifier[ohrs] = identifier[Location] . identifier[objects] . identifier[first] (). identifier[openinghours_set] . identifier[all] ()
keyword[except] identifier[AttributeError] :
keyword[raise] identifier[Exception] ( literal[string]
literal[string] )
identifier[ohrs] = identifier[ohrs] . identifier[order_by] ( literal[string] , literal[string] )
keyword[for] identifier[o] keyword[in] identifier[ohrs] :
identifier[days] . identifier[append] ({
literal[string] : identifier[o] . identifier[weekday] ,
literal[string] : identifier[o] . identifier[get_weekday_display] (),
literal[string] : identifier[o] . identifier[from_hour] ,
literal[string] : identifier[o] . identifier[to_hour] ,
literal[string] : literal[string] %(
identifier[o] . identifier[from_hour] . identifier[strftime] ( literal[string] ). identifier[lstrip] ( literal[string] ),
identifier[o] . identifier[from_hour] . identifier[strftime] ( literal[string] ). identifier[lower] (),
identifier[o] . identifier[to_hour] . identifier[strftime] ( literal[string] ). identifier[lstrip] ( literal[string] ),
identifier[o] . identifier[to_hour] . identifier[strftime] ( literal[string] ). identifier[lower] ()
)
})
identifier[open_days] =[ identifier[o] . identifier[weekday] keyword[for] identifier[o] keyword[in] identifier[ohrs] ]
keyword[for] identifier[day_number] , identifier[day_name] keyword[in] identifier[WEEKDAYS] :
keyword[if] identifier[day_number] keyword[not] keyword[in] identifier[open_days] :
identifier[days] . identifier[append] ({
literal[string] : identifier[day_number] ,
literal[string] : identifier[day_name] ,
literal[string] : literal[string]
})
identifier[days] = identifier[sorted] ( identifier[days] , identifier[key] = keyword[lambda] identifier[k] : identifier[k] [ literal[string] ])
keyword[if] identifier[concise] :
identifier[template_name] = literal[string]
identifier[concise_days] =[]
identifier[current_set] ={}
keyword[for] identifier[day] keyword[in] identifier[days] :
keyword[if] literal[string] keyword[not] keyword[in] identifier[current_set] . identifier[keys] ():
identifier[current_set] ={ literal[string] :[ identifier[day] [ literal[string] ]],
literal[string] : identifier[day] [ literal[string] ]}
keyword[elif] identifier[day] [ literal[string] ]!= identifier[current_set] [ literal[string] ]:
identifier[concise_days] . identifier[append] ( identifier[current_set] )
identifier[current_set] ={ literal[string] :[ identifier[day] [ literal[string] ]],
literal[string] : identifier[day] [ literal[string] ]}
keyword[else] :
identifier[current_set] [ literal[string] ]. identifier[append] ( identifier[day] [ literal[string] ])
identifier[concise_days] . identifier[append] ( identifier[current_set] )
keyword[for] identifier[day_set] keyword[in] identifier[concise_days] :
keyword[if] identifier[len] ( identifier[day_set] [ literal[string] ])> literal[int] :
identifier[day_set] [ literal[string] ]= literal[string] %( identifier[day_set] [ literal[string] ][ literal[int] ],
identifier[day_set] [ literal[string] ][- literal[int] ])
keyword[elif] identifier[len] ( identifier[day_set] [ literal[string] ])> literal[int] :
identifier[day_set] [ literal[string] ]= literal[string] %( identifier[day_set] [ literal[string] ][ literal[int] ],
identifier[day_set] [ literal[string] ][- literal[int] ])
keyword[else] :
identifier[day_set] [ literal[string] ]= literal[string] % identifier[day_set] [ literal[string] ][ literal[int] ]
identifier[days] = identifier[concise_days]
identifier[template] = identifier[get_template] ( identifier[template_name] )
keyword[return] identifier[template] . identifier[render] ({ literal[string] : identifier[days] }) | def opening_hours(location=None, concise=False):
"""
Creates a rendered listing of hours.
"""
template_name = 'openinghours/opening_hours_list.html'
days = [] # [{'hours': '9:00am to 5:00pm', 'name': u'Monday'}, {'hours...
# Without `location`, choose the first company.
if location:
ohrs = OpeningHours.objects.filter(company=location) # depends on [control=['if'], data=[]]
else:
try:
Location = utils.get_premises_model()
ohrs = Location.objects.first().openinghours_set.all() # depends on [control=['try'], data=[]]
except AttributeError:
raise Exception('You must define some opening hours to use the opening hours tags.') # depends on [control=['except'], data=[]]
ohrs = ohrs.order_by('weekday', 'from_hour')
for o in ohrs:
days.append({'day_number': o.weekday, 'name': o.get_weekday_display(), 'from_hour': o.from_hour, 'to_hour': o.to_hour, 'hours': '%s%s to %s%s' % (o.from_hour.strftime('%I:%M').lstrip('0'), o.from_hour.strftime('%p').lower(), o.to_hour.strftime('%I:%M').lstrip('0'), o.to_hour.strftime('%p').lower())}) # depends on [control=['for'], data=['o']]
open_days = [o.weekday for o in ohrs]
for (day_number, day_name) in WEEKDAYS:
if day_number not in open_days:
days.append({'day_number': day_number, 'name': day_name, 'hours': 'Closed'}) # depends on [control=['if'], data=['day_number']] # depends on [control=['for'], data=[]]
days = sorted(days, key=lambda k: k['day_number'])
if concise:
# [{'hours': '9:00am to 5:00pm', 'day_names': u'Monday to Friday'},
# {'hours':...
template_name = 'openinghours/opening_hours_list_concise.html'
concise_days = []
current_set = {}
for day in days:
if 'hours' not in current_set.keys():
current_set = {'day_names': [day['name']], 'hours': day['hours']} # depends on [control=['if'], data=[]]
elif day['hours'] != current_set['hours']:
concise_days.append(current_set)
current_set = {'day_names': [day['name']], 'hours': day['hours']} # depends on [control=['if'], data=[]]
else:
current_set['day_names'].append(day['name']) # depends on [control=['for'], data=['day']]
concise_days.append(current_set)
for day_set in concise_days:
if len(day_set['day_names']) > 2:
day_set['day_names'] = '%s to %s' % (day_set['day_names'][0], day_set['day_names'][-1]) # depends on [control=['if'], data=[]]
elif len(day_set['day_names']) > 1:
day_set['day_names'] = '%s and %s' % (day_set['day_names'][0], day_set['day_names'][-1]) # depends on [control=['if'], data=[]]
else:
day_set['day_names'] = '%s' % day_set['day_names'][0] # depends on [control=['for'], data=['day_set']]
days = concise_days # depends on [control=['if'], data=[]]
template = get_template(template_name)
return template.render({'days': days}) |
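The concise-mode grouping above, reduced to a standalone sketch: consecutive days with identical hours collapse into one row, joined with "to" for runs of three or more and "and" for pairs.

days = [{'name': n, 'hours': '9:00am to 5:00pm'}
        for n in ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday']]
days += [{'name': n, 'hours': 'Closed'} for n in ['Saturday', 'Sunday']]

groups = []
for day in days:
    if groups and groups[-1]['hours'] == day['hours']:
        groups[-1]['day_names'].append(day['name'])
    else:
        groups.append({'day_names': [day['name']], 'hours': day['hours']})

for g in groups:
    names = g['day_names']
    if len(names) > 2:
        label = '%s to %s' % (names[0], names[-1])
    elif len(names) == 2:
        label = '%s and %s' % (names[0], names[-1])
    else:
        label = names[0]
    print(label, g['hours'])
# Monday to Friday 9:00am to 5:00pm
# Saturday and Sunday Closed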
def get_config_tuple_from_egrc(egrc_path):
"""
Create a Config named tuple from the values specified in the .egrc. Expands
any paths as necessary.
egrc_path must exist and point to a file.
If not present in the .egrc, properties of the Config are returned as None.
"""
with open(egrc_path, 'r') as egrc:
try:
config = ConfigParser.RawConfigParser()
except AttributeError:
config = ConfigParser()
config.readfp(egrc)
# default to None
examples_dir = None
custom_dir = None
use_color = None
pager_cmd = None
squeeze = None
subs = None
editor_cmd = None
if config.has_option(DEFAULT_SECTION, EG_EXAMPLES_DIR):
examples_dir = config.get(DEFAULT_SECTION, EG_EXAMPLES_DIR)
examples_dir = get_expanded_path(examples_dir)
if config.has_option(DEFAULT_SECTION, CUSTOM_EXAMPLES_DIR):
custom_dir = config.get(DEFAULT_SECTION, CUSTOM_EXAMPLES_DIR)
custom_dir = get_expanded_path(custom_dir)
if config.has_option(DEFAULT_SECTION, USE_COLOR):
use_color_raw = config.get(DEFAULT_SECTION, USE_COLOR)
use_color = _parse_bool_from_raw_egrc_value(use_color_raw)
if config.has_option(DEFAULT_SECTION, PAGER_CMD):
pager_cmd_raw = config.get(DEFAULT_SECTION, PAGER_CMD)
pager_cmd = ast.literal_eval(pager_cmd_raw)
if config.has_option(DEFAULT_SECTION, EDITOR_CMD):
editor_cmd_raw = config.get(DEFAULT_SECTION, EDITOR_CMD)
editor_cmd = ast.literal_eval(editor_cmd_raw)
color_config = get_custom_color_config_from_egrc(config)
if config.has_option(DEFAULT_SECTION, SQUEEZE):
squeeze_raw = config.get(DEFAULT_SECTION, SQUEEZE)
squeeze = _parse_bool_from_raw_egrc_value(squeeze_raw)
if config.has_section(SUBSTITUTION_SECTION):
subs = get_substitutions_from_config(config)
return Config(
examples_dir=examples_dir,
custom_dir=custom_dir,
color_config=color_config,
use_color=use_color,
pager_cmd=pager_cmd,
editor_cmd=editor_cmd,
squeeze=squeeze,
subs=subs,
) | def function[get_config_tuple_from_egrc, parameter[egrc_path]]:
constant[
Create a Config named tuple from the values specified in the .egrc. Expands
any paths as necessary.
egrc_path must exist and point to a file.
If not present in the .egrc, properties of the Config are returned as None.
]
with call[name[open], parameter[name[egrc_path], constant[r]]] begin[:]
<ast.Try object at 0x7da20c6c5e40>
call[name[config].readfp, parameter[name[egrc]]]
variable[examples_dir] assign[=] constant[None]
variable[custom_dir] assign[=] constant[None]
variable[use_color] assign[=] constant[None]
variable[pager_cmd] assign[=] constant[None]
variable[squeeze] assign[=] constant[None]
variable[subs] assign[=] constant[None]
variable[editor_cmd] assign[=] constant[None]
if call[name[config].has_option, parameter[name[DEFAULT_SECTION], name[EG_EXAMPLES_DIR]]] begin[:]
variable[examples_dir] assign[=] call[name[config].get, parameter[name[DEFAULT_SECTION], name[EG_EXAMPLES_DIR]]]
variable[examples_dir] assign[=] call[name[get_expanded_path], parameter[name[examples_dir]]]
if call[name[config].has_option, parameter[name[DEFAULT_SECTION], name[CUSTOM_EXAMPLES_DIR]]] begin[:]
variable[custom_dir] assign[=] call[name[config].get, parameter[name[DEFAULT_SECTION], name[CUSTOM_EXAMPLES_DIR]]]
variable[custom_dir] assign[=] call[name[get_expanded_path], parameter[name[custom_dir]]]
if call[name[config].has_option, parameter[name[DEFAULT_SECTION], name[USE_COLOR]]] begin[:]
variable[use_color_raw] assign[=] call[name[config].get, parameter[name[DEFAULT_SECTION], name[USE_COLOR]]]
variable[use_color] assign[=] call[name[_parse_bool_from_raw_egrc_value], parameter[name[use_color_raw]]]
if call[name[config].has_option, parameter[name[DEFAULT_SECTION], name[PAGER_CMD]]] begin[:]
variable[pager_cmd_raw] assign[=] call[name[config].get, parameter[name[DEFAULT_SECTION], name[PAGER_CMD]]]
variable[pager_cmd] assign[=] call[name[ast].literal_eval, parameter[name[pager_cmd_raw]]]
if call[name[config].has_option, parameter[name[DEFAULT_SECTION], name[EDITOR_CMD]]] begin[:]
variable[editor_cmd_raw] assign[=] call[name[config].get, parameter[name[DEFAULT_SECTION], name[EDITOR_CMD]]]
variable[editor_cmd] assign[=] call[name[ast].literal_eval, parameter[name[editor_cmd_raw]]]
variable[color_config] assign[=] call[name[get_custom_color_config_from_egrc], parameter[name[config]]]
if call[name[config].has_option, parameter[name[DEFAULT_SECTION], name[SQUEEZE]]] begin[:]
variable[squeeze_raw] assign[=] call[name[config].get, parameter[name[DEFAULT_SECTION], name[SQUEEZE]]]
variable[squeeze] assign[=] call[name[_parse_bool_from_raw_egrc_value], parameter[name[squeeze_raw]]]
if call[name[config].has_section, parameter[name[SUBSTITUTION_SECTION]]] begin[:]
variable[subs] assign[=] call[name[get_substitutions_from_config], parameter[name[config]]]
return[call[name[Config], parameter[]]] | keyword[def] identifier[get_config_tuple_from_egrc] ( identifier[egrc_path] ):
literal[string]
keyword[with] identifier[open] ( identifier[egrc_path] , literal[string] ) keyword[as] identifier[egrc] :
keyword[try] :
identifier[config] = identifier[ConfigParser] . identifier[RawConfigParser] ()
keyword[except] identifier[AttributeError] :
identifier[config] = identifier[ConfigParser] ()
identifier[config] . identifier[readfp] ( identifier[egrc] )
identifier[examples_dir] = keyword[None]
identifier[custom_dir] = keyword[None]
identifier[use_color] = keyword[None]
identifier[pager_cmd] = keyword[None]
identifier[squeeze] = keyword[None]
identifier[subs] = keyword[None]
identifier[editor_cmd] = keyword[None]
keyword[if] identifier[config] . identifier[has_option] ( identifier[DEFAULT_SECTION] , identifier[EG_EXAMPLES_DIR] ):
identifier[examples_dir] = identifier[config] . identifier[get] ( identifier[DEFAULT_SECTION] , identifier[EG_EXAMPLES_DIR] )
identifier[examples_dir] = identifier[get_expanded_path] ( identifier[examples_dir] )
keyword[if] identifier[config] . identifier[has_option] ( identifier[DEFAULT_SECTION] , identifier[CUSTOM_EXAMPLES_DIR] ):
identifier[custom_dir] = identifier[config] . identifier[get] ( identifier[DEFAULT_SECTION] , identifier[CUSTOM_EXAMPLES_DIR] )
identifier[custom_dir] = identifier[get_expanded_path] ( identifier[custom_dir] )
keyword[if] identifier[config] . identifier[has_option] ( identifier[DEFAULT_SECTION] , identifier[USE_COLOR] ):
identifier[use_color_raw] = identifier[config] . identifier[get] ( identifier[DEFAULT_SECTION] , identifier[USE_COLOR] )
identifier[use_color] = identifier[_parse_bool_from_raw_egrc_value] ( identifier[use_color_raw] )
keyword[if] identifier[config] . identifier[has_option] ( identifier[DEFAULT_SECTION] , identifier[PAGER_CMD] ):
identifier[pager_cmd_raw] = identifier[config] . identifier[get] ( identifier[DEFAULT_SECTION] , identifier[PAGER_CMD] )
identifier[pager_cmd] = identifier[ast] . identifier[literal_eval] ( identifier[pager_cmd_raw] )
keyword[if] identifier[config] . identifier[has_option] ( identifier[DEFAULT_SECTION] , identifier[EDITOR_CMD] ):
identifier[editor_cmd_raw] = identifier[config] . identifier[get] ( identifier[DEFAULT_SECTION] , identifier[EDITOR_CMD] )
identifier[editor_cmd] = identifier[ast] . identifier[literal_eval] ( identifier[editor_cmd_raw] )
identifier[color_config] = identifier[get_custom_color_config_from_egrc] ( identifier[config] )
keyword[if] identifier[config] . identifier[has_option] ( identifier[DEFAULT_SECTION] , identifier[SQUEEZE] ):
identifier[squeeze_raw] = identifier[config] . identifier[get] ( identifier[DEFAULT_SECTION] , identifier[SQUEEZE] )
identifier[squeeze] = identifier[_parse_bool_from_raw_egrc_value] ( identifier[squeeze_raw] )
keyword[if] identifier[config] . identifier[has_section] ( identifier[SUBSTITUTION_SECTION] ):
identifier[subs] = identifier[get_substitutions_from_config] ( identifier[config] )
keyword[return] identifier[Config] (
identifier[examples_dir] = identifier[examples_dir] ,
identifier[custom_dir] = identifier[custom_dir] ,
identifier[color_config] = identifier[color_config] ,
identifier[use_color] = identifier[use_color] ,
identifier[pager_cmd] = identifier[pager_cmd] ,
identifier[editor_cmd] = identifier[editor_cmd] ,
identifier[squeeze] = identifier[squeeze] ,
identifier[subs] = identifier[subs] ,
) | def get_config_tuple_from_egrc(egrc_path):
"""
Create a Config named tuple from the values specified in the .egrc. Expands
any paths as necessary.
    egrc_path must exist and point to a file.
If not present in the .egrc, properties of the Config are returned as None.
"""
with open(egrc_path, 'r') as egrc:
try:
config = ConfigParser.RawConfigParser() # depends on [control=['try'], data=[]]
except AttributeError:
config = ConfigParser() # depends on [control=['except'], data=[]]
config.readfp(egrc)
# default to None
examples_dir = None
custom_dir = None
use_color = None
pager_cmd = None
squeeze = None
subs = None
editor_cmd = None
if config.has_option(DEFAULT_SECTION, EG_EXAMPLES_DIR):
examples_dir = config.get(DEFAULT_SECTION, EG_EXAMPLES_DIR)
examples_dir = get_expanded_path(examples_dir) # depends on [control=['if'], data=[]]
if config.has_option(DEFAULT_SECTION, CUSTOM_EXAMPLES_DIR):
custom_dir = config.get(DEFAULT_SECTION, CUSTOM_EXAMPLES_DIR)
custom_dir = get_expanded_path(custom_dir) # depends on [control=['if'], data=[]]
if config.has_option(DEFAULT_SECTION, USE_COLOR):
use_color_raw = config.get(DEFAULT_SECTION, USE_COLOR)
use_color = _parse_bool_from_raw_egrc_value(use_color_raw) # depends on [control=['if'], data=[]]
if config.has_option(DEFAULT_SECTION, PAGER_CMD):
pager_cmd_raw = config.get(DEFAULT_SECTION, PAGER_CMD)
pager_cmd = ast.literal_eval(pager_cmd_raw) # depends on [control=['if'], data=[]]
if config.has_option(DEFAULT_SECTION, EDITOR_CMD):
editor_cmd_raw = config.get(DEFAULT_SECTION, EDITOR_CMD)
editor_cmd = ast.literal_eval(editor_cmd_raw) # depends on [control=['if'], data=[]]
color_config = get_custom_color_config_from_egrc(config)
if config.has_option(DEFAULT_SECTION, SQUEEZE):
squeeze_raw = config.get(DEFAULT_SECTION, SQUEEZE)
squeeze = _parse_bool_from_raw_egrc_value(squeeze_raw) # depends on [control=['if'], data=[]]
if config.has_section(SUBSTITUTION_SECTION):
subs = get_substitutions_from_config(config) # depends on [control=['if'], data=[]]
return Config(examples_dir=examples_dir, custom_dir=custom_dir, color_config=color_config, use_color=use_color, pager_cmd=pager_cmd, editor_cmd=editor_cmd, squeeze=squeeze, subs=subs) # depends on [control=['with'], data=['egrc']] |
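A minimal standalone sketch of the same read-with-defaults pattern, using only the standard library; the section and option names here are hypothetical stand-ins for the module constants referenced above.

import configparser

DEFAULT_SECTION = 'eg-config'      # assumed section name, not the real constant
EG_EXAMPLES_DIR = 'examples-dir'   # assumed option name

def read_examples_dir(egrc_path):
    config = configparser.RawConfigParser()
    with open(egrc_path, 'r') as egrc:
        config.read_file(egrc)
    # Absent options stay None, mirroring the behavior above.
    examples_dir = None
    if config.has_option(DEFAULT_SECTION, EG_EXAMPLES_DIR):
        examples_dir = config.get(DEFAULT_SECTION, EG_EXAMPLES_DIR)
    return examples_dir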
def check(self, var):
"""Return True if the variable matches this type, and False otherwise."""
if self._class is None: self._init()
return self._class and self._checker(var, self._class) | def function[check, parameter[self, var]]:
constant[Return True if the variable matches this type, and False otherwise.]
if compare[name[self]._class is constant[None]] begin[:]
call[name[self]._init, parameter[]]
return[<ast.BoolOp object at 0x7da1b05be320>] | keyword[def] identifier[check] ( identifier[self] , identifier[var] ):
literal[string]
keyword[if] identifier[self] . identifier[_class] keyword[is] keyword[None] : identifier[self] . identifier[_init] ()
keyword[return] identifier[self] . identifier[_class] keyword[and] identifier[self] . identifier[_checker] ( identifier[var] , identifier[self] . identifier[_class] ) | def check(self, var):
"""Return True if the variable matches this type, and False otherwise."""
if self._class is None:
self._init() # depends on [control=['if'], data=[]]
return self._class and self._checker(var, self._class) |
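A self-contained sketch of the lazy-initialization idiom above, assuming the deferred work is resolving a class by import path; the isinstance call stands in for the original's _checker.

class LazyTypeCheck:
    def __init__(self, import_path):
        self._import_path = import_path   # e.g. 'decimal.Decimal'
        self._class = None

    def _init(self):
        # Resolve the class lazily; leave it None if resolution fails.
        module_name, _, attr = self._import_path.rpartition('.')
        try:
            module = __import__(module_name, fromlist=[attr])
            self._class = getattr(module, attr)
        except (ImportError, AttributeError):
            self._class = None

    def check(self, var):
        if self._class is None:
            self._init()
        return self._class is not None and isinstance(var, self._class)

print(LazyTypeCheck('decimal.Decimal').check(1))  # False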
def diff(self, mail_a, mail_b):
""" Return difference in bytes between two mails' normalized body.
TODO: rewrite the diff algorithm to not rely on naive unified diff
result parsing.
"""
return len(''.join(unified_diff(
mail_a.body_lines, mail_b.body_lines,
        # Ignore difference in filename lengths and timestamps.
fromfile='a', tofile='b',
fromfiledate='', tofiledate='',
n=0, lineterm='\n'))) | def function[diff, parameter[self, mail_a, mail_b]]:
    constant[ Return difference in bytes between two mails' normalized bodies.
TODO: rewrite the diff algorithm to not rely on naive unified diff
result parsing.
]
return[call[name[len], parameter[call[constant[].join, parameter[call[name[unified_diff], parameter[name[mail_a].body_lines, name[mail_b].body_lines]]]]]]] | keyword[def] identifier[diff] ( identifier[self] , identifier[mail_a] , identifier[mail_b] ):
literal[string]
keyword[return] identifier[len] ( literal[string] . identifier[join] ( identifier[unified_diff] (
identifier[mail_a] . identifier[body_lines] , identifier[mail_b] . identifier[body_lines] ,
identifier[fromfile] = literal[string] , identifier[tofile] = literal[string] ,
identifier[fromfiledate] = literal[string] , identifier[tofiledate] = literal[string] ,
identifier[n] = literal[int] , identifier[lineterm] = literal[string] ))) | def diff(self, mail_a, mail_b):
""" Return difference in bytes between two mails' normalized body.
TODO: rewrite the diff algorithm to not rely on naive unified diff
result parsing.
"""
    # Ignore difference in filename lengths and timestamps.
return len(''.join(unified_diff(mail_a.body_lines, mail_b.body_lines, fromfile='a', tofile='b', fromfiledate='', tofiledate='', n=0, lineterm='\n'))) |
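The same measurement in isolation: n=0 drops context lines and the empty file dates keep the header length stable, so the result reflects only changed lines (sample inputs are illustrative).

from difflib import unified_diff

def body_diff_size(lines_a, lines_b):
    return len(''.join(unified_diff(
        lines_a, lines_b,
        fromfile='a', tofile='b',
        fromfiledate='', tofiledate='',
        n=0, lineterm='\n')))

print(body_diff_size(['hello\n', 'world\n'], ['hello\n', 'there\n']))  # non-zero
print(body_diff_size(['same\n'], ['same\n']))                          # 0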
def _get_asset_load(self, asset_type):
"""
Helper function to dynamically create *_load_time properties. Return
value is in ms.
"""
if asset_type == 'initial':
return self.actual_page['time']
elif asset_type == 'content':
return self.pageTimings['onContentLoad']
elif asset_type == 'page':
if self.page_id == 'unknown':
return None
return self.pageTimings['onLoad']
# TODO - should we return a slightly fake total load time to
        # accommodate HAR data that cannot understand things like JS
# rendering or just throw a warning?
#return self.get_load_time(request_type='.*',content_type='.*', status_code='.*', asynchronous=False)
else:
return self.get_load_time(
content_type=self.asset_types[asset_type]
) | def function[_get_asset_load, parameter[self, asset_type]]:
constant[
Helper function to dynamically create *_load_time properties. Return
value is in ms.
]
if compare[name[asset_type] equal[==] constant[initial]] begin[:]
return[call[name[self].actual_page][constant[time]]] | keyword[def] identifier[_get_asset_load] ( identifier[self] , identifier[asset_type] ):
literal[string]
keyword[if] identifier[asset_type] == literal[string] :
keyword[return] identifier[self] . identifier[actual_page] [ literal[string] ]
keyword[elif] identifier[asset_type] == literal[string] :
keyword[return] identifier[self] . identifier[pageTimings] [ literal[string] ]
keyword[elif] identifier[asset_type] == literal[string] :
keyword[if] identifier[self] . identifier[page_id] == literal[string] :
keyword[return] keyword[None]
keyword[return] identifier[self] . identifier[pageTimings] [ literal[string] ]
keyword[else] :
keyword[return] identifier[self] . identifier[get_load_time] (
identifier[content_type] = identifier[self] . identifier[asset_types] [ identifier[asset_type] ]
) | def _get_asset_load(self, asset_type):
"""
Helper function to dynamically create *_load_time properties. Return
value is in ms.
"""
if asset_type == 'initial':
return self.actual_page['time'] # depends on [control=['if'], data=[]]
elif asset_type == 'content':
return self.pageTimings['onContentLoad'] # depends on [control=['if'], data=[]]
elif asset_type == 'page':
if self.page_id == 'unknown':
return None # depends on [control=['if'], data=[]]
return self.pageTimings['onLoad'] # depends on [control=['if'], data=[]]
else:
# TODO - should we return a slightly fake total load time to
        # accommodate HAR data that cannot understand things like JS
# rendering or just throw a warning?
#return self.get_load_time(request_type='.*',content_type='.*', status_code='.*', asynchronous=False)
return self.get_load_time(content_type=self.asset_types[asset_type]) |
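The docstring above mentions dynamically created *_load_time properties; a hedged sketch of one way such properties can be attached, with a stand-in class and timings (not the library's actual wiring).

class PageStats:
    def _get_asset_load(self, asset_type):
        # Stand-in for the dispatch above; values are fake milliseconds.
        return {'image': 120, 'css': 45}.get(asset_type)

for _asset in ('image', 'css'):
    # Default argument pins the current asset name to each property.
    setattr(PageStats, _asset + '_load_time',
            property(lambda self, a=_asset: self._get_asset_load(a)))

print(PageStats().image_load_time)  # 120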
def _quantile_function(self, alpha=0.5, smallest_count=None):
"""Return a function that returns the quantile values for this
histogram.
"""
total = float(self.total())
smallest_observed_count = min(itervalues(self))
if smallest_count is None:
smallest_count = smallest_observed_count
else:
smallest_count = min(smallest_count, smallest_observed_count)
beta = alpha * smallest_count
debug_plot = []
cumulative_sum = 0.0
inverse = sortedcontainers.SortedDict()
for value, count in iteritems(self):
debug_plot.append((cumulative_sum / total, value))
inverse[(cumulative_sum + beta) / total] = value
cumulative_sum += count
inverse[(cumulative_sum - beta) / total] = value
debug_plot.append((cumulative_sum / total, value))
    # get maximum and minimum q values
q_min = inverse.iloc[0]
q_max = inverse.iloc[-1]
    # this stuff is helpful for debugging -- keep it in here
# for i, j in debug_plot:
# print i, j
# print ''
# for i, j in inverse.iteritems():
# print i, j
# print ''
def function(q):
if q < 0.0 or q > 1.0:
msg = 'invalid quantile %s, need `0 <= q <= 1`' % q
raise ValueError(msg)
elif q < q_min:
q = q_min
elif q > q_max:
q = q_max
        # if beta is positive, interpolate linearly; otherwise treat the
        # inverse CDF as a step function
if beta > 0:
if q in inverse:
result = inverse[q]
else:
previous_index = inverse.bisect_left(q) - 1
x1 = inverse.iloc[previous_index]
x2 = inverse.iloc[previous_index + 1]
y1 = inverse[x1]
y2 = inverse[x2]
result = (y2 - y1) * (q - x1) / float(x2 - x1) + y1
else:
if q in inverse:
previous_index = inverse.bisect_left(q) - 1
x1 = inverse.iloc[previous_index]
x2 = inverse.iloc[previous_index + 1]
y1 = inverse[x1]
y2 = inverse[x2]
result = 0.5 * (y1 + y2)
else:
previous_index = inverse.bisect_left(q) - 1
x1 = inverse.iloc[previous_index]
result = inverse[x1]
return float(result)
return function | def function[_quantile_function, parameter[self, alpha, smallest_count]]:
constant[Return a function that returns the quantile values for this
histogram.
]
variable[total] assign[=] call[name[float], parameter[call[name[self].total, parameter[]]]]
variable[smallest_observed_count] assign[=] call[name[min], parameter[call[name[itervalues], parameter[name[self]]]]]
if compare[name[smallest_count] is constant[None]] begin[:]
variable[smallest_count] assign[=] name[smallest_observed_count]
variable[beta] assign[=] binary_operation[name[alpha] * name[smallest_count]]
variable[debug_plot] assign[=] list[[]]
variable[cumulative_sum] assign[=] constant[0.0]
variable[inverse] assign[=] call[name[sortedcontainers].SortedDict, parameter[]]
for taget[tuple[[<ast.Name object at 0x7da1b08f8790>, <ast.Name object at 0x7da1b08fadd0>]]] in starred[call[name[iteritems], parameter[name[self]]]] begin[:]
call[name[debug_plot].append, parameter[tuple[[<ast.BinOp object at 0x7da1b08f92d0>, <ast.Name object at 0x7da1b08f9360>]]]]
call[name[inverse]][binary_operation[binary_operation[name[cumulative_sum] + name[beta]] / name[total]]] assign[=] name[value]
<ast.AugAssign object at 0x7da1b08faef0>
call[name[inverse]][binary_operation[binary_operation[name[cumulative_sum] - name[beta]] / name[total]]] assign[=] name[value]
call[name[debug_plot].append, parameter[tuple[[<ast.BinOp object at 0x7da1b08f8ca0>, <ast.Name object at 0x7da1b08fad10>]]]]
variable[q_min] assign[=] call[name[inverse].iloc][constant[0]]
variable[q_max] assign[=] call[name[inverse].iloc][<ast.UnaryOp object at 0x7da1b08f8640>]
def function[function, parameter[q]]:
if <ast.BoolOp object at 0x7da1b08f8460> begin[:]
variable[msg] assign[=] binary_operation[constant[invalid quantile %s, need `0 <= q <= 1`] <ast.Mod object at 0x7da2590d6920> name[q]]
<ast.Raise object at 0x7da1b08f8160>
if compare[name[beta] greater[>] constant[0]] begin[:]
if compare[name[q] in name[inverse]] begin[:]
variable[result] assign[=] call[name[inverse]][name[q]]
return[call[name[float], parameter[name[result]]]]
return[name[function]] | keyword[def] identifier[_quantile_function] ( identifier[self] , identifier[alpha] = literal[int] , identifier[smallest_count] = keyword[None] ):
literal[string]
identifier[total] = identifier[float] ( identifier[self] . identifier[total] ())
identifier[smallest_observed_count] = identifier[min] ( identifier[itervalues] ( identifier[self] ))
keyword[if] identifier[smallest_count] keyword[is] keyword[None] :
identifier[smallest_count] = identifier[smallest_observed_count]
keyword[else] :
identifier[smallest_count] = identifier[min] ( identifier[smallest_count] , identifier[smallest_observed_count] )
identifier[beta] = identifier[alpha] * identifier[smallest_count]
identifier[debug_plot] =[]
identifier[cumulative_sum] = literal[int]
identifier[inverse] = identifier[sortedcontainers] . identifier[SortedDict] ()
keyword[for] identifier[value] , identifier[count] keyword[in] identifier[iteritems] ( identifier[self] ):
identifier[debug_plot] . identifier[append] (( identifier[cumulative_sum] / identifier[total] , identifier[value] ))
identifier[inverse] [( identifier[cumulative_sum] + identifier[beta] )/ identifier[total] ]= identifier[value]
identifier[cumulative_sum] += identifier[count]
identifier[inverse] [( identifier[cumulative_sum] - identifier[beta] )/ identifier[total] ]= identifier[value]
identifier[debug_plot] . identifier[append] (( identifier[cumulative_sum] / identifier[total] , identifier[value] ))
identifier[q_min] = identifier[inverse] . identifier[iloc] [ literal[int] ]
identifier[q_max] = identifier[inverse] . identifier[iloc] [- literal[int] ]
keyword[def] identifier[function] ( identifier[q] ):
keyword[if] identifier[q] < literal[int] keyword[or] identifier[q] > literal[int] :
identifier[msg] = literal[string] % identifier[q]
keyword[raise] identifier[ValueError] ( identifier[msg] )
keyword[elif] identifier[q] < identifier[q_min] :
identifier[q] = identifier[q_min]
keyword[elif] identifier[q] > identifier[q_max] :
identifier[q] = identifier[q_max]
keyword[if] identifier[beta] > literal[int] :
keyword[if] identifier[q] keyword[in] identifier[inverse] :
identifier[result] = identifier[inverse] [ identifier[q] ]
keyword[else] :
identifier[previous_index] = identifier[inverse] . identifier[bisect_left] ( identifier[q] )- literal[int]
identifier[x1] = identifier[inverse] . identifier[iloc] [ identifier[previous_index] ]
identifier[x2] = identifier[inverse] . identifier[iloc] [ identifier[previous_index] + literal[int] ]
identifier[y1] = identifier[inverse] [ identifier[x1] ]
identifier[y2] = identifier[inverse] [ identifier[x2] ]
identifier[result] =( identifier[y2] - identifier[y1] )*( identifier[q] - identifier[x1] )/ identifier[float] ( identifier[x2] - identifier[x1] )+ identifier[y1]
keyword[else] :
keyword[if] identifier[q] keyword[in] identifier[inverse] :
identifier[previous_index] = identifier[inverse] . identifier[bisect_left] ( identifier[q] )- literal[int]
identifier[x1] = identifier[inverse] . identifier[iloc] [ identifier[previous_index] ]
identifier[x2] = identifier[inverse] . identifier[iloc] [ identifier[previous_index] + literal[int] ]
identifier[y1] = identifier[inverse] [ identifier[x1] ]
identifier[y2] = identifier[inverse] [ identifier[x2] ]
identifier[result] = literal[int] *( identifier[y1] + identifier[y2] )
keyword[else] :
identifier[previous_index] = identifier[inverse] . identifier[bisect_left] ( identifier[q] )- literal[int]
identifier[x1] = identifier[inverse] . identifier[iloc] [ identifier[previous_index] ]
identifier[result] = identifier[inverse] [ identifier[x1] ]
keyword[return] identifier[float] ( identifier[result] )
keyword[return] identifier[function] | def _quantile_function(self, alpha=0.5, smallest_count=None):
"""Return a function that returns the quantile values for this
histogram.
"""
total = float(self.total())
smallest_observed_count = min(itervalues(self))
if smallest_count is None:
smallest_count = smallest_observed_count # depends on [control=['if'], data=['smallest_count']]
else:
smallest_count = min(smallest_count, smallest_observed_count)
beta = alpha * smallest_count
debug_plot = []
cumulative_sum = 0.0
inverse = sortedcontainers.SortedDict()
for (value, count) in iteritems(self):
debug_plot.append((cumulative_sum / total, value))
inverse[(cumulative_sum + beta) / total] = value
cumulative_sum += count
inverse[(cumulative_sum - beta) / total] = value
debug_plot.append((cumulative_sum / total, value)) # depends on [control=['for'], data=[]]
    # get maximum and minimum q values
q_min = inverse.iloc[0]
q_max = inverse.iloc[-1]
    # this stuff is helpful for debugging -- keep it in here
# for i, j in debug_plot:
# print i, j
# print ''
# for i, j in inverse.iteritems():
# print i, j
# print ''
def function(q):
if q < 0.0 or q > 1.0:
msg = 'invalid quantile %s, need `0 <= q <= 1`' % q
raise ValueError(msg) # depends on [control=['if'], data=[]]
elif q < q_min:
q = q_min # depends on [control=['if'], data=['q', 'q_min']]
elif q > q_max:
q = q_max # depends on [control=['if'], data=['q', 'q_max']]
        # if beta is positive, interpolate linearly; otherwise treat the
        # inverse CDF as a step function
if beta > 0:
if q in inverse:
result = inverse[q] # depends on [control=['if'], data=['q', 'inverse']]
else:
previous_index = inverse.bisect_left(q) - 1
x1 = inverse.iloc[previous_index]
x2 = inverse.iloc[previous_index + 1]
y1 = inverse[x1]
y2 = inverse[x2]
result = (y2 - y1) * (q - x1) / float(x2 - x1) + y1 # depends on [control=['if'], data=[]]
elif q in inverse:
previous_index = inverse.bisect_left(q) - 1
x1 = inverse.iloc[previous_index]
x2 = inverse.iloc[previous_index + 1]
y1 = inverse[x1]
y2 = inverse[x2]
result = 0.5 * (y1 + y2) # depends on [control=['if'], data=['q', 'inverse']]
else:
previous_index = inverse.bisect_left(q) - 1
x1 = inverse.iloc[previous_index]
result = inverse[x1]
return float(result)
return function |
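At its core the function above evaluates an inverse CDF by linear interpolation between (cumulative probability, value) knots; a standard-library sketch of that step with illustrative knots.

from bisect import bisect_left

knots = [(0.0, 1.0), (0.4, 2.0), (0.9, 5.0), (1.0, 8.0)]  # sorted by probability

def quantile(q):
    if q < 0.0 or q > 1.0:
        raise ValueError('invalid quantile %s, need `0 <= q <= 1`' % q)
    qs = [x for x, _ in knots]
    i = bisect_left(qs, q)
    if i < len(qs) and qs[i] == q:
        return knots[i][1]
    # Linear interpolation between the two surrounding knots.
    (x1, y1), (x2, y2) = knots[i - 1], knots[i]
    return (y2 - y1) * (q - x1) / (x2 - x1) + y1

print(quantile(0.5))  # 2.6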
def init():
"""Initialize the communities file storage."""
try:
initialize_communities_bucket()
click.secho('Community init successful.', fg='green')
except FilesException as e:
click.secho(e.message, fg='red') | def function[init, parameter[]]:
constant[Initialize the communities file storage.]
<ast.Try object at 0x7da18ede5690> | keyword[def] identifier[init] ():
literal[string]
keyword[try] :
identifier[initialize_communities_bucket] ()
identifier[click] . identifier[secho] ( literal[string] , identifier[fg] = literal[string] )
keyword[except] identifier[FilesException] keyword[as] identifier[e] :
identifier[click] . identifier[secho] ( identifier[e] . identifier[message] , identifier[fg] = literal[string] ) | def init():
"""Initialize the communities file storage."""
try:
initialize_communities_bucket()
click.secho('Community init successful.', fg='green') # depends on [control=['try'], data=[]]
except FilesException as e:
click.secho(e.message, fg='red') # depends on [control=['except'], data=['e']] |
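The same report-and-continue pattern in isolation; do_init is a hypothetical stand-in for the storage call, and str(e) replaces the Python 2-era e.message attribute.

import click

def do_init():
    raise RuntimeError('bucket already exists')   # placeholder failure

try:
    do_init()
    click.secho('Community init successful.', fg='green')
except RuntimeError as e:
    click.secho(str(e), fg='red')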
def count_unique_sequences(
allele_reads,
max_prefix_size=None,
max_suffix_size=None):
"""
Given a list of AlleleRead objects, extracts all unique
    (prefix, allele, suffix) sequences and associates each with the number
of reads that contain that sequence.
"""
groups = group_unique_sequences(
allele_reads,
max_prefix_size=max_prefix_size,
max_suffix_size=max_suffix_size)
return {
seq_tuple: len(read_names)
for (seq_tuple, read_names) in groups.items()
} | def function[count_unique_sequences, parameter[allele_reads, max_prefix_size, max_suffix_size]]:
constant[
Given a list of AlleleRead objects, extracts all unique
    (prefix, allele, suffix) sequences and associates each with the number
of reads that contain that sequence.
]
variable[groups] assign[=] call[name[group_unique_sequences], parameter[name[allele_reads]]]
return[<ast.DictComp object at 0x7da18dc996f0>] | keyword[def] identifier[count_unique_sequences] (
identifier[allele_reads] ,
identifier[max_prefix_size] = keyword[None] ,
identifier[max_suffix_size] = keyword[None] ):
literal[string]
identifier[groups] = identifier[group_unique_sequences] (
identifier[allele_reads] ,
identifier[max_prefix_size] = identifier[max_prefix_size] ,
identifier[max_suffix_size] = identifier[max_suffix_size] )
keyword[return] {
identifier[seq_tuple] : identifier[len] ( identifier[read_names] )
keyword[for] ( identifier[seq_tuple] , identifier[read_names] ) keyword[in] identifier[groups] . identifier[items] ()
} | def count_unique_sequences(allele_reads, max_prefix_size=None, max_suffix_size=None):
"""
Given a list of AlleleRead objects, extracts all unique
    (prefix, allele, suffix) sequences and associates each with the number
of reads that contain that sequence.
"""
groups = group_unique_sequences(allele_reads, max_prefix_size=max_prefix_size, max_suffix_size=max_suffix_size)
return {seq_tuple: len(read_names) for (seq_tuple, read_names) in groups.items()} |
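A sketch of the group-then-count step with plain dictionaries; the reads and the grouping helper are mocked here, since AlleleRead and group_unique_sequences live elsewhere.

from collections import defaultdict

def group_unique_sequences(reads):
    # Mock of the real helper: map each (prefix, allele, suffix) to read names.
    groups = defaultdict(set)
    for name, seq_tuple in reads:
        groups[seq_tuple].add(name)
    return groups

def count_unique_sequences(reads):
    groups = group_unique_sequences(reads)
    return {seq_tuple: len(read_names) for seq_tuple, read_names in groups.items()}

reads = [('r1', ('AC', 'G', 'TT')), ('r2', ('AC', 'G', 'TT')), ('r3', ('AC', 'T', 'TT'))]
print(count_unique_sequences(reads))  # {('AC', 'G', 'TT'): 2, ('AC', 'T', 'TT'): 1}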
def coin_toss(self):
"""Gets information relating to the opening coin toss.
Keys are:
* wonToss - contains the ID of the team that won the toss
* deferred - bool whether the team that won the toss deferred it
:returns: Dictionary of coin toss-related info.
"""
doc = self.get_doc()
table = doc('table#game_info')
giTable = sportsref.utils.parse_info_table(table)
if 'Won Toss' in giTable:
# TODO: finish coinToss function
pass
else:
return None | def function[coin_toss, parameter[self]]:
constant[Gets information relating to the opening coin toss.
Keys are:
* wonToss - contains the ID of the team that won the toss
* deferred - bool whether the team that won the toss deferred it
:returns: Dictionary of coin toss-related info.
]
variable[doc] assign[=] call[name[self].get_doc, parameter[]]
variable[table] assign[=] call[name[doc], parameter[constant[table#game_info]]]
variable[giTable] assign[=] call[name[sportsref].utils.parse_info_table, parameter[name[table]]]
if compare[constant[Won Toss] in name[giTable]] begin[:]
pass | keyword[def] identifier[coin_toss] ( identifier[self] ):
literal[string]
identifier[doc] = identifier[self] . identifier[get_doc] ()
identifier[table] = identifier[doc] ( literal[string] )
identifier[giTable] = identifier[sportsref] . identifier[utils] . identifier[parse_info_table] ( identifier[table] )
keyword[if] literal[string] keyword[in] identifier[giTable] :
keyword[pass]
keyword[else] :
keyword[return] keyword[None] | def coin_toss(self):
"""Gets information relating to the opening coin toss.
Keys are:
* wonToss - contains the ID of the team that won the toss
* deferred - bool whether the team that won the toss deferred it
:returns: Dictionary of coin toss-related info.
"""
doc = self.get_doc()
table = doc('table#game_info')
giTable = sportsref.utils.parse_info_table(table)
if 'Won Toss' in giTable:
# TODO: finish coinToss function
pass # depends on [control=['if'], data=[]]
else:
return None |
def update_anomalous_score(self):
"""Update anomalous score.
New anomalous score is a weighted average of differences
between current summary and reviews. The weights come from credibilities.
    Therefore, the new anomalous score of reviewer :math:`r` is given by
.. math::
{\\rm anomalous}(r) = \\frac{
\\sum_{p \\in P} {\\rm credibility}(p)|
{\\rm review}(r, p)-{\\rm summary}(p)|
}{
\\sum_{p \\in P} {\\rm credibility}(p)
}
    where :math:`P` is the set of products reviewed by reviewer :math:`r`,
review(:math:`r`, :math:`p`) is the rating reviewer :math:`r` posted
to product :math:`p`, summary(:math:`p`) and credibility(:math:`p`) are
summary and credibility of product :math:`p`, respectively.
Returns:
absolute difference between old anomalous score and updated one.
"""
products = self._graph.retrieve_products(self)
diffs = [
p.summary.difference(self._graph.retrieve_review(self, p))
for p in products
]
old = self.anomalous_score
try:
self.anomalous_score = np.average(
diffs, weights=list(map(self._credibility, products)))
except ZeroDivisionError:
self.anomalous_score = np.average(diffs)
return abs(self.anomalous_score - old) | def function[update_anomalous_score, parameter[self]]:
constant[Update anomalous score.
New anomalous score is a weighted average of differences
between current summary and reviews. The weights come from credibilities.
    Therefore, the new anomalous score of reviewer :math:`r` is given by
.. math::
{\rm anomalous}(r) = \frac{
\sum_{p \in P} {\rm credibility}(p)|
{\rm review}(r, p)-{\rm summary}(p)|
}{
\sum_{p \in P} {\rm credibility}(p)
}
    where :math:`P` is the set of products reviewed by reviewer :math:`r`,
review(:math:`r`, :math:`p`) is the rating reviewer :math:`r` posted
to product :math:`p`, summary(:math:`p`) and credibility(:math:`p`) are
summary and credibility of product :math:`p`, respectively.
Returns:
absolute difference between old anomalous score and updated one.
]
variable[products] assign[=] call[name[self]._graph.retrieve_products, parameter[name[self]]]
variable[diffs] assign[=] <ast.ListComp object at 0x7da1b1520160>
variable[old] assign[=] name[self].anomalous_score
<ast.Try object at 0x7da1b1521480>
return[call[name[abs], parameter[binary_operation[name[self].anomalous_score - name[old]]]]] | keyword[def] identifier[update_anomalous_score] ( identifier[self] ):
literal[string]
identifier[products] = identifier[self] . identifier[_graph] . identifier[retrieve_products] ( identifier[self] )
identifier[diffs] =[
identifier[p] . identifier[summary] . identifier[difference] ( identifier[self] . identifier[_graph] . identifier[retrieve_review] ( identifier[self] , identifier[p] ))
keyword[for] identifier[p] keyword[in] identifier[products]
]
identifier[old] = identifier[self] . identifier[anomalous_score]
keyword[try] :
identifier[self] . identifier[anomalous_score] = identifier[np] . identifier[average] (
identifier[diffs] , identifier[weights] = identifier[list] ( identifier[map] ( identifier[self] . identifier[_credibility] , identifier[products] )))
keyword[except] identifier[ZeroDivisionError] :
identifier[self] . identifier[anomalous_score] = identifier[np] . identifier[average] ( identifier[diffs] )
keyword[return] identifier[abs] ( identifier[self] . identifier[anomalous_score] - identifier[old] ) | def update_anomalous_score(self):
"""Update anomalous score.
New anomalous score is a weighted average of differences
between current summary and reviews. The weights come from credibilities.
    Therefore, the new anomalous score of reviewer :math:`r` is given by
.. math::
{\\rm anomalous}(r) = \\frac{
\\sum_{p \\in P} {\\rm credibility}(p)|
{\\rm review}(r, p)-{\\rm summary}(p)|
}{
\\sum_{p \\in P} {\\rm credibility}(p)
}
    where :math:`P` is the set of products reviewed by reviewer :math:`r`,
review(:math:`r`, :math:`p`) is the rating reviewer :math:`r` posted
to product :math:`p`, summary(:math:`p`) and credibility(:math:`p`) are
summary and credibility of product :math:`p`, respectively.
Returns:
absolute difference between old anomalous score and updated one.
"""
products = self._graph.retrieve_products(self)
diffs = [p.summary.difference(self._graph.retrieve_review(self, p)) for p in products]
old = self.anomalous_score
try:
self.anomalous_score = np.average(diffs, weights=list(map(self._credibility, products))) # depends on [control=['try'], data=[]]
except ZeroDivisionError:
self.anomalous_score = np.average(diffs) # depends on [control=['except'], data=[]]
return abs(self.anomalous_score - old) |
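The try/except above leans on numpy raising ZeroDivisionError when the weights sum to zero; a minimal demonstration of that fallback.

import numpy as np

diffs = [0.2, 0.4, 0.6]
weights = [0.0, 0.0, 0.0]   # e.g. every reviewed product has zero credibility

try:
    score = np.average(diffs, weights=weights)
except ZeroDivisionError:
    score = np.average(diffs)   # fall back to the unweighted mean

print(score)  # ~0.4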
def read_namespaced_pod_preset(self, name, namespace, **kwargs): # noqa: E501
"""read_namespaced_pod_preset # noqa: E501
read the specified PodPreset # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_pod_preset(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the PodPreset (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'.
:param bool export: Should this value be exported. Export strips fields that a user can not specify.
:return: V1alpha1PodPreset
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.read_namespaced_pod_preset_with_http_info(name, namespace, **kwargs) # noqa: E501
else:
(data) = self.read_namespaced_pod_preset_with_http_info(name, namespace, **kwargs) # noqa: E501
return data | def function[read_namespaced_pod_preset, parameter[self, name, namespace]]:
constant[read_namespaced_pod_preset # noqa: E501
read the specified PodPreset # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_pod_preset(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the PodPreset (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'.
:param bool export: Should this value be exported. Export strips fields that a user can not specify.
:return: V1alpha1PodPreset
If the method is called asynchronously,
returns the request thread.
]
call[name[kwargs]][constant[_return_http_data_only]] assign[=] constant[True]
if call[name[kwargs].get, parameter[constant[async_req]]] begin[:]
return[call[name[self].read_namespaced_pod_preset_with_http_info, parameter[name[name], name[namespace]]]] | keyword[def] identifier[read_namespaced_pod_preset] ( identifier[self] , identifier[name] , identifier[namespace] ,** identifier[kwargs] ):
literal[string]
identifier[kwargs] [ literal[string] ]= keyword[True]
keyword[if] identifier[kwargs] . identifier[get] ( literal[string] ):
keyword[return] identifier[self] . identifier[read_namespaced_pod_preset_with_http_info] ( identifier[name] , identifier[namespace] ,** identifier[kwargs] )
keyword[else] :
( identifier[data] )= identifier[self] . identifier[read_namespaced_pod_preset_with_http_info] ( identifier[name] , identifier[namespace] ,** identifier[kwargs] )
keyword[return] identifier[data] | def read_namespaced_pod_preset(self, name, namespace, **kwargs): # noqa: E501
"read_namespaced_pod_preset # noqa: E501\n\n read the specified PodPreset # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.read_namespaced_pod_preset(name, namespace, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param str name: name of the PodPreset (required)\n :param str namespace: object name and auth scope, such as for teams and projects (required)\n :param str pretty: If 'true', then the output is pretty printed.\n :param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'.\n :param bool export: Should this value be exported. Export strips fields that a user can not specify.\n :return: V1alpha1PodPreset\n If the method is called asynchronously,\n returns the request thread.\n "
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.read_namespaced_pod_preset_with_http_info(name, namespace, **kwargs) # noqa: E501 # depends on [control=['if'], data=[]]
else:
data = self.read_namespaced_pod_preset_with_http_info(name, namespace, **kwargs) # noqa: E501
return data |
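Generated methods like the one above dispatch on async_req; a self-contained imitation of that contract (the real client wires this up internally, so the stub below is only a behavioral sketch).

from multiprocessing.pool import ThreadPool

class ApiStub:
    _pool = ThreadPool(1)

    def _with_http_info(self, name, namespace):
        return {'name': name, 'namespace': namespace}   # fake response body

    def read_preset(self, name, namespace, **kwargs):
        if kwargs.get('async_req'):
            # Returns an AsyncResult whose .get() blocks, as in the docstring.
            return self._pool.apply_async(self._with_http_info, (name, namespace))
        return self._with_http_info(name, namespace)

api = ApiStub()
thread = api.read_preset('my-preset', 'default', async_req=True)
print(thread.get())  # {'name': 'my-preset', 'namespace': 'default'}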
def patch_namespaced_role_binding(self, name, namespace, body, **kwargs): # noqa: E501
"""patch_namespaced_role_binding # noqa: E501
partially update the specified RoleBinding # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_role_binding(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the RoleBinding (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param UNKNOWN_BASE_TYPE body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1RoleBinding
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.patch_namespaced_role_binding_with_http_info(name, namespace, body, **kwargs) # noqa: E501
else:
(data) = self.patch_namespaced_role_binding_with_http_info(name, namespace, body, **kwargs) # noqa: E501
return data | def function[patch_namespaced_role_binding, parameter[self, name, namespace, body]]:
constant[patch_namespaced_role_binding # noqa: E501
partially update the specified RoleBinding # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_role_binding(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the RoleBinding (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param UNKNOWN_BASE_TYPE body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1RoleBinding
If the method is called asynchronously,
returns the request thread.
]
call[name[kwargs]][constant[_return_http_data_only]] assign[=] constant[True]
if call[name[kwargs].get, parameter[constant[async_req]]] begin[:]
return[call[name[self].patch_namespaced_role_binding_with_http_info, parameter[name[name], name[namespace], name[body]]]] | keyword[def] identifier[patch_namespaced_role_binding] ( identifier[self] , identifier[name] , identifier[namespace] , identifier[body] ,** identifier[kwargs] ):
literal[string]
identifier[kwargs] [ literal[string] ]= keyword[True]
keyword[if] identifier[kwargs] . identifier[get] ( literal[string] ):
keyword[return] identifier[self] . identifier[patch_namespaced_role_binding_with_http_info] ( identifier[name] , identifier[namespace] , identifier[body] ,** identifier[kwargs] )
keyword[else] :
( identifier[data] )= identifier[self] . identifier[patch_namespaced_role_binding_with_http_info] ( identifier[name] , identifier[namespace] , identifier[body] ,** identifier[kwargs] )
keyword[return] identifier[data] | def patch_namespaced_role_binding(self, name, namespace, body, **kwargs): # noqa: E501
"patch_namespaced_role_binding # noqa: E501\n\n partially update the specified RoleBinding # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.patch_namespaced_role_binding(name, namespace, body, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param str name: name of the RoleBinding (required)\n :param str namespace: object name and auth scope, such as for teams and projects (required)\n :param UNKNOWN_BASE_TYPE body: (required)\n :param str pretty: If 'true', then the output is pretty printed.\n :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed\n :return: V1RoleBinding\n If the method is called asynchronously,\n returns the request thread.\n "
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.patch_namespaced_role_binding_with_http_info(name, namespace, body, **kwargs) # noqa: E501 # depends on [control=['if'], data=[]]
else:
data = self.patch_namespaced_role_binding_with_http_info(name, namespace, body, **kwargs) # noqa: E501
return data |
def migrate(pool, from_connection, to_connection):
"""
Migrate tool for pyspider
"""
f = connect_database(from_connection)
t = connect_database(to_connection)
if isinstance(f, ProjectDB):
for each in f.get_all():
each = unicode_obj(each)
logging.info("projectdb: %s", each['name'])
t.drop(each['name'])
t.insert(each['name'], each)
elif isinstance(f, TaskDB):
pool = Pool(pool)
pool.map(
lambda x, f=from_connection, t=to_connection: taskdb_migrating(x, f, t),
f.projects)
elif isinstance(f, ResultDB):
pool = Pool(pool)
pool.map(
lambda x, f=from_connection, t=to_connection: resultdb_migrating(x, f, t),
f.projects) | def function[migrate, parameter[pool, from_connection, to_connection]]:
constant[
Migrate tool for pyspider
]
variable[f] assign[=] call[name[connect_database], parameter[name[from_connection]]]
variable[t] assign[=] call[name[connect_database], parameter[name[to_connection]]]
if call[name[isinstance], parameter[name[f], name[ProjectDB]]] begin[:]
for taget[name[each]] in starred[call[name[f].get_all, parameter[]]] begin[:]
variable[each] assign[=] call[name[unicode_obj], parameter[name[each]]]
call[name[logging].info, parameter[constant[projectdb: %s], call[name[each]][constant[name]]]]
call[name[t].drop, parameter[call[name[each]][constant[name]]]]
call[name[t].insert, parameter[call[name[each]][constant[name]], name[each]]] | keyword[def] identifier[migrate] ( identifier[pool] , identifier[from_connection] , identifier[to_connection] ):
literal[string]
identifier[f] = identifier[connect_database] ( identifier[from_connection] )
identifier[t] = identifier[connect_database] ( identifier[to_connection] )
keyword[if] identifier[isinstance] ( identifier[f] , identifier[ProjectDB] ):
keyword[for] identifier[each] keyword[in] identifier[f] . identifier[get_all] ():
identifier[each] = identifier[unicode_obj] ( identifier[each] )
identifier[logging] . identifier[info] ( literal[string] , identifier[each] [ literal[string] ])
identifier[t] . identifier[drop] ( identifier[each] [ literal[string] ])
identifier[t] . identifier[insert] ( identifier[each] [ literal[string] ], identifier[each] )
keyword[elif] identifier[isinstance] ( identifier[f] , identifier[TaskDB] ):
identifier[pool] = identifier[Pool] ( identifier[pool] )
identifier[pool] . identifier[map] (
keyword[lambda] identifier[x] , identifier[f] = identifier[from_connection] , identifier[t] = identifier[to_connection] : identifier[taskdb_migrating] ( identifier[x] , identifier[f] , identifier[t] ),
identifier[f] . identifier[projects] )
keyword[elif] identifier[isinstance] ( identifier[f] , identifier[ResultDB] ):
identifier[pool] = identifier[Pool] ( identifier[pool] )
identifier[pool] . identifier[map] (
keyword[lambda] identifier[x] , identifier[f] = identifier[from_connection] , identifier[t] = identifier[to_connection] : identifier[resultdb_migrating] ( identifier[x] , identifier[f] , identifier[t] ),
identifier[f] . identifier[projects] ) | def migrate(pool, from_connection, to_connection):
"""
Migrate tool for pyspider
"""
f = connect_database(from_connection)
t = connect_database(to_connection)
if isinstance(f, ProjectDB):
for each in f.get_all():
each = unicode_obj(each)
logging.info('projectdb: %s', each['name'])
t.drop(each['name'])
t.insert(each['name'], each) # depends on [control=['for'], data=['each']] # depends on [control=['if'], data=[]]
elif isinstance(f, TaskDB):
pool = Pool(pool)
pool.map(lambda x, f=from_connection, t=to_connection: taskdb_migrating(x, f, t), f.projects) # depends on [control=['if'], data=[]]
elif isinstance(f, ResultDB):
pool = Pool(pool)
pool.map(lambda x, f=from_connection, t=to_connection: resultdb_migrating(x, f, t), f.projects) # depends on [control=['if'], data=[]] |
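The lambdas above capture the connection strings through default arguments, which binds values at definition time; a thread-pool sketch of the idiom (a process Pool could not pickle a lambda, so the thread-backed variant is used here, and the worker is a stand-in).

from multiprocessing.dummy import Pool   # thread-backed Pool

def migrate_project(project, src, dst):
    return '%s: %s -> %s' % (project, src, dst)

src = 'sqlite+taskdb:///old.db'   # illustrative connection strings
dst = 'sqlite+taskdb:///new.db'
pool = Pool(4)
results = pool.map(lambda p, f=src, t=dst: migrate_project(p, f, t), ['blog', 'news'])
print(results)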
def load(path=None, **kwargs):
'''
Loads the configuration from the file provided onto the device.
path (required)
Path where the configuration/template file is present. If the file has
a ``.conf`` extension, the content is treated as text format. If the
file has a ``.xml`` extension, the content is treated as XML format. If
the file has a ``.set`` extension, the content is treated as Junos OS
``set`` commands.
overwrite : False
    Set to ``True`` if you want this file to completely replace the
configuration file.
replace : False
Specify whether the configuration file uses ``replace:`` statements. If
``True``, only those statements under the ``replace`` tag will be
changed.
format
Determines the format of the contents
update : False
Compare a complete loaded configuration against the candidate
configuration. For each hierarchy level or configuration object that is
different in the two configurations, the version in the loaded
configuration replaces the version in the candidate configuration. When
the configuration is later committed, only system processes that are
affected by the changed configuration elements parse the new
configuration. This action is supported from PyEZ 2.1.
template_vars
Variables to be passed into the template processing engine in addition to
those present in pillar, the minion configuration, grains, etc. You may
reference these variables in your template like so:
.. code-block:: jinja
{{ template_vars["var_name"] }}
CLI Examples:
.. code-block:: bash
salt 'device_name' junos.load 'salt://production/network/routers/config.set'
salt 'device_name' junos.load 'salt://templates/replace_config.conf' replace=True
salt 'device_name' junos.load 'salt://my_new_configuration.conf' overwrite=True
salt 'device_name' junos.load 'salt://syslog_template.conf' template_vars='{"syslog_host": "10.180.222.7"}'
'''
conn = __proxy__['junos.conn']()
ret = {}
ret['out'] = True
if path is None:
ret['message'] = \
'Please provide the salt path where the configuration is present'
ret['out'] = False
return ret
op = {}
if '__pub_arg' in kwargs:
if kwargs['__pub_arg']:
if isinstance(kwargs['__pub_arg'][-1], dict):
op.update(kwargs['__pub_arg'][-1])
else:
op.update(kwargs)
template_vars = {}
if "template_vars" in op:
template_vars = op["template_vars"]
template_cached_path = salt.utils.files.mkstemp()
__salt__['cp.get_template'](
path,
template_cached_path,
template_vars=template_vars)
if not os.path.isfile(template_cached_path):
ret['message'] = 'Invalid file path.'
ret['out'] = False
return ret
if os.path.getsize(template_cached_path) == 0:
ret['message'] = 'Template failed to render'
ret['out'] = False
return ret
op['path'] = template_cached_path
if 'format' not in op:
if path.endswith('set'):
template_format = 'set'
elif path.endswith('xml'):
template_format = 'xml'
else:
template_format = 'text'
op['format'] = template_format
if 'replace' in op and op['replace']:
op['merge'] = False
del op['replace']
elif 'overwrite' in op and op['overwrite']:
op['overwrite'] = True
elif 'overwrite' in op and not op['overwrite']:
op['merge'] = True
del op['overwrite']
try:
conn.cu.load(**op)
ret['message'] = "Successfully loaded the configuration."
except Exception as exception:
        ret['message'] = 'Could not load configuration due to: "{0}"'.format(
exception)
ret['format'] = op['format']
ret['out'] = False
return ret
finally:
salt.utils.files.safe_rm(template_cached_path)
return ret | def function[load, parameter[path]]:
constant[
Loads the configuration from the file provided onto the device.
path (required)
Path where the configuration/template file is present. If the file has
a ``.conf`` extension, the content is treated as text format. If the
file has a ``.xml`` extension, the content is treated as XML format. If
the file has a ``.set`` extension, the content is treated as Junos OS
``set`` commands.
overwrite : False
    Set to ``True`` if you want this file to completely replace the
configuration file.
replace : False
Specify whether the configuration file uses ``replace:`` statements. If
``True``, only those statements under the ``replace`` tag will be
changed.
format
Determines the format of the contents
update : False
Compare a complete loaded configuration against the candidate
configuration. For each hierarchy level or configuration object that is
different in the two configurations, the version in the loaded
configuration replaces the version in the candidate configuration. When
the configuration is later committed, only system processes that are
affected by the changed configuration elements parse the new
configuration. This action is supported from PyEZ 2.1.
template_vars
Variables to be passed into the template processing engine in addition to
those present in pillar, the minion configuration, grains, etc. You may
reference these variables in your template like so:
.. code-block:: jinja
{{ template_vars["var_name"] }}
CLI Examples:
.. code-block:: bash
salt 'device_name' junos.load 'salt://production/network/routers/config.set'
salt 'device_name' junos.load 'salt://templates/replace_config.conf' replace=True
salt 'device_name' junos.load 'salt://my_new_configuration.conf' overwrite=True
salt 'device_name' junos.load 'salt://syslog_template.conf' template_vars='{"syslog_host": "10.180.222.7"}'
]
variable[conn] assign[=] call[call[name[__proxy__]][constant[junos.conn]], parameter[]]
variable[ret] assign[=] dictionary[[], []]
call[name[ret]][constant[out]] assign[=] constant[True]
if compare[name[path] is constant[None]] begin[:]
call[name[ret]][constant[message]] assign[=] constant[Please provide the salt path where the configuration is present]
call[name[ret]][constant[out]] assign[=] constant[False]
return[name[ret]]
variable[op] assign[=] dictionary[[], []]
if compare[constant[__pub_arg] in name[kwargs]] begin[:]
if call[name[kwargs]][constant[__pub_arg]] begin[:]
if call[name[isinstance], parameter[call[call[name[kwargs]][constant[__pub_arg]]][<ast.UnaryOp object at 0x7da207f03a00>], name[dict]]] begin[:]
call[name[op].update, parameter[call[call[name[kwargs]][constant[__pub_arg]]][<ast.UnaryOp object at 0x7da207f02260>]]]
variable[template_vars] assign[=] dictionary[[], []]
if compare[constant[template_vars] in name[op]] begin[:]
variable[template_vars] assign[=] call[name[op]][constant[template_vars]]
variable[template_cached_path] assign[=] call[name[salt].utils.files.mkstemp, parameter[]]
call[call[name[__salt__]][constant[cp.get_template]], parameter[name[path], name[template_cached_path]]]
if <ast.UnaryOp object at 0x7da1b2186920> begin[:]
call[name[ret]][constant[message]] assign[=] constant[Invalid file path.]
call[name[ret]][constant[out]] assign[=] constant[False]
return[name[ret]]
if compare[call[name[os].path.getsize, parameter[name[template_cached_path]]] equal[==] constant[0]] begin[:]
call[name[ret]][constant[message]] assign[=] constant[Template failed to render]
call[name[ret]][constant[out]] assign[=] constant[False]
return[name[ret]]
call[name[op]][constant[path]] assign[=] name[template_cached_path]
if compare[constant[format] <ast.NotIn object at 0x7da2590d7190> name[op]] begin[:]
if call[name[path].endswith, parameter[constant[set]]] begin[:]
variable[template_format] assign[=] constant[set]
call[name[op]][constant[format]] assign[=] name[template_format]
if <ast.BoolOp object at 0x7da1b21840a0> begin[:]
call[name[op]][constant[merge]] assign[=] constant[False]
<ast.Delete object at 0x7da1b2184c10>
<ast.Try object at 0x7da2041d85b0>
return[name[ret]] | keyword[def] identifier[load] ( identifier[path] = keyword[None] ,** identifier[kwargs] ):
literal[string]
identifier[conn] = identifier[__proxy__] [ literal[string] ]()
identifier[ret] ={}
identifier[ret] [ literal[string] ]= keyword[True]
keyword[if] identifier[path] keyword[is] keyword[None] :
identifier[ret] [ literal[string] ]= literal[string]
identifier[ret] [ literal[string] ]= keyword[False]
keyword[return] identifier[ret]
identifier[op] ={}
keyword[if] literal[string] keyword[in] identifier[kwargs] :
keyword[if] identifier[kwargs] [ literal[string] ]:
keyword[if] identifier[isinstance] ( identifier[kwargs] [ literal[string] ][- literal[int] ], identifier[dict] ):
identifier[op] . identifier[update] ( identifier[kwargs] [ literal[string] ][- literal[int] ])
keyword[else] :
identifier[op] . identifier[update] ( identifier[kwargs] )
identifier[template_vars] ={}
keyword[if] literal[string] keyword[in] identifier[op] :
identifier[template_vars] = identifier[op] [ literal[string] ]
identifier[template_cached_path] = identifier[salt] . identifier[utils] . identifier[files] . identifier[mkstemp] ()
identifier[__salt__] [ literal[string] ](
identifier[path] ,
identifier[template_cached_path] ,
identifier[template_vars] = identifier[template_vars] )
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[isfile] ( identifier[template_cached_path] ):
identifier[ret] [ literal[string] ]= literal[string]
identifier[ret] [ literal[string] ]= keyword[False]
keyword[return] identifier[ret]
keyword[if] identifier[os] . identifier[path] . identifier[getsize] ( identifier[template_cached_path] )== literal[int] :
identifier[ret] [ literal[string] ]= literal[string]
identifier[ret] [ literal[string] ]= keyword[False]
keyword[return] identifier[ret]
identifier[op] [ literal[string] ]= identifier[template_cached_path]
keyword[if] literal[string] keyword[not] keyword[in] identifier[op] :
keyword[if] identifier[path] . identifier[endswith] ( literal[string] ):
identifier[template_format] = literal[string]
keyword[elif] identifier[path] . identifier[endswith] ( literal[string] ):
identifier[template_format] = literal[string]
keyword[else] :
identifier[template_format] = literal[string]
identifier[op] [ literal[string] ]= identifier[template_format]
keyword[if] literal[string] keyword[in] identifier[op] keyword[and] identifier[op] [ literal[string] ]:
identifier[op] [ literal[string] ]= keyword[False]
keyword[del] identifier[op] [ literal[string] ]
keyword[elif] literal[string] keyword[in] identifier[op] keyword[and] identifier[op] [ literal[string] ]:
identifier[op] [ literal[string] ]= keyword[True]
keyword[elif] literal[string] keyword[in] identifier[op] keyword[and] keyword[not] identifier[op] [ literal[string] ]:
identifier[op] [ literal[string] ]= keyword[True]
keyword[del] identifier[op] [ literal[string] ]
keyword[try] :
identifier[conn] . identifier[cu] . identifier[load] (** identifier[op] )
identifier[ret] [ literal[string] ]= literal[string]
keyword[except] identifier[Exception] keyword[as] identifier[exception] :
identifier[ret] [ literal[string] ]= literal[string] . identifier[format] (
identifier[exception] )
identifier[ret] [ literal[string] ]= identifier[op] [ literal[string] ]
identifier[ret] [ literal[string] ]= keyword[False]
keyword[return] identifier[ret]
keyword[finally] :
identifier[salt] . identifier[utils] . identifier[files] . identifier[safe_rm] ( identifier[template_cached_path] )
keyword[return] identifier[ret] | def load(path=None, **kwargs):
"""
Loads the configuration from the file provided onto the device.
path (required)
Path where the configuration/template file is present. If the file has
a ``.conf`` extension, the content is treated as text format. If the
file has a ``.xml`` extension, the content is treated as XML format. If
the file has a ``.set`` extension, the content is treated as Junos OS
``set`` commands.
overwrite : False
    Set to ``True`` if you want this file to completely replace the
configuration file.
replace : False
Specify whether the configuration file uses ``replace:`` statements. If
``True``, only those statements under the ``replace`` tag will be
changed.
format
Determines the format of the contents
update : False
Compare a complete loaded configuration against the candidate
configuration. For each hierarchy level or configuration object that is
different in the two configurations, the version in the loaded
configuration replaces the version in the candidate configuration. When
the configuration is later committed, only system processes that are
affected by the changed configuration elements parse the new
configuration. This action is supported from PyEZ 2.1.
template_vars
Variables to be passed into the template processing engine in addition to
those present in pillar, the minion configuration, grains, etc. You may
reference these variables in your template like so:
.. code-block:: jinja
{{ template_vars["var_name"] }}
CLI Examples:
.. code-block:: bash
salt 'device_name' junos.load 'salt://production/network/routers/config.set'
salt 'device_name' junos.load 'salt://templates/replace_config.conf' replace=True
salt 'device_name' junos.load 'salt://my_new_configuration.conf' overwrite=True
salt 'device_name' junos.load 'salt://syslog_template.conf' template_vars='{"syslog_host": "10.180.222.7"}'
"""
conn = __proxy__['junos.conn']()
ret = {}
ret['out'] = True
if path is None:
ret['message'] = 'Please provide the salt path where the configuration is present'
ret['out'] = False
return ret # depends on [control=['if'], data=[]]
op = {}
if '__pub_arg' in kwargs:
if kwargs['__pub_arg']:
if isinstance(kwargs['__pub_arg'][-1], dict):
op.update(kwargs['__pub_arg'][-1]) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['kwargs']]
else:
op.update(kwargs)
template_vars = {}
if 'template_vars' in op:
template_vars = op['template_vars'] # depends on [control=['if'], data=['op']]
template_cached_path = salt.utils.files.mkstemp()
__salt__['cp.get_template'](path, template_cached_path, template_vars=template_vars)
if not os.path.isfile(template_cached_path):
ret['message'] = 'Invalid file path.'
ret['out'] = False
return ret # depends on [control=['if'], data=[]]
if os.path.getsize(template_cached_path) == 0:
ret['message'] = 'Template failed to render'
ret['out'] = False
return ret # depends on [control=['if'], data=[]]
op['path'] = template_cached_path
if 'format' not in op:
if path.endswith('set'):
template_format = 'set' # depends on [control=['if'], data=[]]
elif path.endswith('xml'):
template_format = 'xml' # depends on [control=['if'], data=[]]
else:
template_format = 'text'
op['format'] = template_format # depends on [control=['if'], data=['op']]
if 'replace' in op and op['replace']:
op['merge'] = False
del op['replace'] # depends on [control=['if'], data=[]]
elif 'overwrite' in op and op['overwrite']:
op['overwrite'] = True # depends on [control=['if'], data=[]]
elif 'overwrite' in op and (not op['overwrite']):
op['merge'] = True
del op['overwrite'] # depends on [control=['if'], data=[]]
try:
conn.cu.load(**op)
ret['message'] = 'Successfully loaded the configuration.' # depends on [control=['try'], data=[]]
except Exception as exception:
ret['message'] = 'Could not load configuration due to : "{0}"'.format(exception)
ret['format'] = op['format']
ret['out'] = False
return ret # depends on [control=['except'], data=['exception']]
finally:
salt.utils.files.safe_rm(template_cached_path)
return ret |
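For reference, a minimal standalone sketch of the extension-based format inference used in the load() row above; the helper name and sample paths are invented for illustration.

def infer_format(path):
    # Mirrors the branch in load(): bare endswith() checks, so any name
    # ending in 'set'/'xml' (not just the dotted extension) matches.
    if path.endswith('set'):
        return 'set'    # Junos OS 'set' commands
    elif path.endswith('xml'):
        return 'xml'    # XML configuration
    return 'text'       # default: curly-brace text format

assert infer_format('salt://production/network/routers/config.set') == 'set'
assert infer_format('salt://templates/replace_config.conf') == 'text'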
def get_member_groups(
self, object_id, security_enabled_only, additional_properties=None, custom_headers=None, raw=False, **operation_config):
"""Gets a collection that contains the object IDs of the groups of which
the user is a member.
:param object_id: The object ID of the user for which to get group
membership.
:type object_id: str
:param security_enabled_only: If true, only membership in
security-enabled groups should be checked. Otherwise, membership in
all groups should be checked.
:type security_enabled_only: bool
:param additional_properties: Unmatched properties from the message
are deserialized into this collection
:type additional_properties: dict[str, object]
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of str
:rtype: ~azure.graphrbac.models.StrPaged[str]
:raises:
:class:`GraphErrorException<azure.graphrbac.models.GraphErrorException>`
"""
parameters = models.UserGetMemberGroupsParameters(additional_properties=additional_properties, security_enabled_only=security_enabled_only)
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = self.get_member_groups.metadata['url']
path_format_arguments = {
'objectId': self._serialize.url("object_id", object_id, 'str'),
'tenantID': self._serialize.url("self.config.tenant_id", self.config.tenant_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'UserGetMemberGroupsParameters')
# Construct and send request
request = self._client.post(url, query_parameters, header_parameters, body_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.GraphErrorException(self._deserialize, response)
return response
# Deserialize response
deserialized = models.StrPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.StrPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized | def function[get_member_groups, parameter[self, object_id, security_enabled_only, additional_properties, custom_headers, raw]]:
constant[Gets a collection that contains the object IDs of the groups of which
the user is a member.
:param object_id: The object ID of the user for which to get group
membership.
:type object_id: str
:param security_enabled_only: If true, only membership in
security-enabled groups should be checked. Otherwise, membership in
all groups should be checked.
:type security_enabled_only: bool
:param additional_properties: Unmatched properties from the message
are deserialized into this collection
:type additional_properties: dict[str, object]
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of str
:rtype: ~azure.graphrbac.models.StrPaged[str]
:raises:
:class:`GraphErrorException<azure.graphrbac.models.GraphErrorException>`
]
variable[parameters] assign[=] call[name[models].UserGetMemberGroupsParameters, parameter[]]
def function[internal_paging, parameter[next_link, raw]]:
if <ast.UnaryOp object at 0x7da204621f30> begin[:]
variable[url] assign[=] call[name[self].get_member_groups.metadata][constant[url]]
variable[path_format_arguments] assign[=] dictionary[[<ast.Constant object at 0x7da204623820>, <ast.Constant object at 0x7da204621ab0>], [<ast.Call object at 0x7da204623160>, <ast.Call object at 0x7da204623430>]]
variable[url] assign[=] call[name[self]._client.format_url, parameter[name[url]]]
variable[query_parameters] assign[=] dictionary[[], []]
call[name[query_parameters]][constant[api-version]] assign[=] call[name[self]._serialize.query, parameter[constant[self.api_version], name[self].api_version, constant[str]]]
variable[header_parameters] assign[=] dictionary[[], []]
call[name[header_parameters]][constant[Accept]] assign[=] constant[application/json]
call[name[header_parameters]][constant[Content-Type]] assign[=] constant[application/json; charset=utf-8]
if name[self].config.generate_client_request_id begin[:]
call[name[header_parameters]][constant[x-ms-client-request-id]] assign[=] call[name[str], parameter[call[name[uuid].uuid1, parameter[]]]]
if name[custom_headers] begin[:]
call[name[header_parameters].update, parameter[name[custom_headers]]]
if compare[name[self].config.accept_language is_not constant[None]] begin[:]
call[name[header_parameters]][constant[accept-language]] assign[=] call[name[self]._serialize.header, parameter[constant[self.config.accept_language], name[self].config.accept_language, constant[str]]]
variable[body_content] assign[=] call[name[self]._serialize.body, parameter[name[parameters], constant[UserGetMemberGroupsParameters]]]
variable[request] assign[=] call[name[self]._client.post, parameter[name[url], name[query_parameters], name[header_parameters], name[body_content]]]
variable[response] assign[=] call[name[self]._client.send, parameter[name[request]]]
if compare[name[response].status_code <ast.NotIn object at 0x7da2590d7190> list[[<ast.Constant object at 0x7da2054a7850>]]] begin[:]
<ast.Raise object at 0x7da2054a4610>
return[name[response]]
variable[deserialized] assign[=] call[name[models].StrPaged, parameter[name[internal_paging], name[self]._deserialize.dependencies]]
if name[raw] begin[:]
variable[header_dict] assign[=] dictionary[[], []]
variable[client_raw_response] assign[=] call[name[models].StrPaged, parameter[name[internal_paging], name[self]._deserialize.dependencies, name[header_dict]]]
return[name[client_raw_response]]
return[name[deserialized]] | keyword[def] identifier[get_member_groups] (
identifier[self] , identifier[object_id] , identifier[security_enabled_only] , identifier[additional_properties] = keyword[None] , identifier[custom_headers] = keyword[None] , identifier[raw] = keyword[False] ,** identifier[operation_config] ):
literal[string]
identifier[parameters] = identifier[models] . identifier[UserGetMemberGroupsParameters] ( identifier[additional_properties] = identifier[additional_properties] , identifier[security_enabled_only] = identifier[security_enabled_only] )
keyword[def] identifier[internal_paging] ( identifier[next_link] = keyword[None] , identifier[raw] = keyword[False] ):
keyword[if] keyword[not] identifier[next_link] :
identifier[url] = identifier[self] . identifier[get_member_groups] . identifier[metadata] [ literal[string] ]
identifier[path_format_arguments] ={
literal[string] : identifier[self] . identifier[_serialize] . identifier[url] ( literal[string] , identifier[object_id] , literal[string] ),
literal[string] : identifier[self] . identifier[_serialize] . identifier[url] ( literal[string] , identifier[self] . identifier[config] . identifier[tenant_id] , literal[string] )
}
identifier[url] = identifier[self] . identifier[_client] . identifier[format_url] ( identifier[url] ,** identifier[path_format_arguments] )
identifier[query_parameters] ={}
identifier[query_parameters] [ literal[string] ]= identifier[self] . identifier[_serialize] . identifier[query] ( literal[string] , identifier[self] . identifier[api_version] , literal[string] )
keyword[else] :
identifier[url] = identifier[next_link]
identifier[query_parameters] ={}
identifier[header_parameters] ={}
identifier[header_parameters] [ literal[string] ]= literal[string]
identifier[header_parameters] [ literal[string] ]= literal[string]
keyword[if] identifier[self] . identifier[config] . identifier[generate_client_request_id] :
identifier[header_parameters] [ literal[string] ]= identifier[str] ( identifier[uuid] . identifier[uuid1] ())
keyword[if] identifier[custom_headers] :
identifier[header_parameters] . identifier[update] ( identifier[custom_headers] )
keyword[if] identifier[self] . identifier[config] . identifier[accept_language] keyword[is] keyword[not] keyword[None] :
identifier[header_parameters] [ literal[string] ]= identifier[self] . identifier[_serialize] . identifier[header] ( literal[string] , identifier[self] . identifier[config] . identifier[accept_language] , literal[string] )
identifier[body_content] = identifier[self] . identifier[_serialize] . identifier[body] ( identifier[parameters] , literal[string] )
identifier[request] = identifier[self] . identifier[_client] . identifier[post] ( identifier[url] , identifier[query_parameters] , identifier[header_parameters] , identifier[body_content] )
identifier[response] = identifier[self] . identifier[_client] . identifier[send] ( identifier[request] , identifier[stream] = keyword[False] ,** identifier[operation_config] )
keyword[if] identifier[response] . identifier[status_code] keyword[not] keyword[in] [ literal[int] ]:
keyword[raise] identifier[models] . identifier[GraphErrorException] ( identifier[self] . identifier[_deserialize] , identifier[response] )
keyword[return] identifier[response]
identifier[deserialized] = identifier[models] . identifier[StrPaged] ( identifier[internal_paging] , identifier[self] . identifier[_deserialize] . identifier[dependencies] )
keyword[if] identifier[raw] :
identifier[header_dict] ={}
identifier[client_raw_response] = identifier[models] . identifier[StrPaged] ( identifier[internal_paging] , identifier[self] . identifier[_deserialize] . identifier[dependencies] , identifier[header_dict] )
keyword[return] identifier[client_raw_response]
keyword[return] identifier[deserialized] | def get_member_groups(self, object_id, security_enabled_only, additional_properties=None, custom_headers=None, raw=False, **operation_config):
"""Gets a collection that contains the object IDs of the groups of which
the user is a member.
:param object_id: The object ID of the user for which to get group
membership.
:type object_id: str
:param security_enabled_only: If true, only membership in
security-enabled groups should be checked. Otherwise, membership in
all groups should be checked.
:type security_enabled_only: bool
:param additional_properties: Unmatched properties from the message
are deserialized into this collection
:type additional_properties: dict[str, object]
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of str
:rtype: ~azure.graphrbac.models.StrPaged[str]
:raises:
:class:`GraphErrorException<azure.graphrbac.models.GraphErrorException>`
"""
parameters = models.UserGetMemberGroupsParameters(additional_properties=additional_properties, security_enabled_only=security_enabled_only)
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = self.get_member_groups.metadata['url']
path_format_arguments = {'objectId': self._serialize.url('object_id', object_id, 'str'), 'tenantID': self._serialize.url('self.config.tenant_id', self.config.tenant_id, 'str')}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query('self.api_version', self.api_version, 'str') # depends on [control=['if'], data=[]]
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) # depends on [control=['if'], data=[]]
if custom_headers:
header_parameters.update(custom_headers) # depends on [control=['if'], data=[]]
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header('self.config.accept_language', self.config.accept_language, 'str') # depends on [control=['if'], data=[]]
# Construct body
body_content = self._serialize.body(parameters, 'UserGetMemberGroupsParameters')
# Construct and send request
request = self._client.post(url, query_parameters, header_parameters, body_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.GraphErrorException(self._deserialize, response) # depends on [control=['if'], data=[]]
return response
# Deserialize response
deserialized = models.StrPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.StrPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response # depends on [control=['if'], data=[]]
return deserialized |
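A library-free sketch of the internal_paging closure pattern from get_member_groups above: the first call builds the full request, later calls simply follow next_link. The fake page data and fetch callable are stand-ins for the msrest client.

def make_pager(fetch, base_url):
    def internal_paging(next_link=None):
        # First page: construct the URL; later pages: follow the link as-is.
        url = next_link if next_link else base_url
        return fetch(url)
    return internal_paging

pages = {'/getMemberGroups': {'value': ['g1'], 'next': '/page2'},
         '/page2': {'value': ['g2'], 'next': None}}
pager = make_pager(pages.__getitem__, '/getMemberGroups')

groups, page = [], pager()
groups += page['value']
while page['next']:
    page = pager(page['next'])
    groups += page['value']
assert groups == ['g1', 'g2']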
def create_connection(self, session=None):
"""
Create connection in the Connection table, according to whether it uses
proxy, TCP, UNIX sockets, SSL. Connection ID will be randomly generated.
:param session: Session of the SQL Alchemy ORM (automatically generated with
decorator).
"""
connection = Connection(conn_id=self.db_conn_id)
uri = self._generate_connection_uri()
self.log.info("Creating connection %s", self.db_conn_id)
connection.parse_from_uri(uri)
session.add(connection)
session.commit() | def function[create_connection, parameter[self, session]]:
constant[
Create connection in the Connection table, according to whether it uses
proxy, TCP, UNIX sockets, SSL. Connection ID will be randomly generated.
:param session: Session of the SQL Alchemy ORM (automatically generated with
decorator).
]
variable[connection] assign[=] call[name[Connection], parameter[]]
variable[uri] assign[=] call[name[self]._generate_connection_uri, parameter[]]
call[name[self].log.info, parameter[constant[Creating connection %s], name[self].db_conn_id]]
call[name[connection].parse_from_uri, parameter[name[uri]]]
call[name[session].add, parameter[name[connection]]]
call[name[session].commit, parameter[]] | keyword[def] identifier[create_connection] ( identifier[self] , identifier[session] = keyword[None] ):
literal[string]
identifier[connection] = identifier[Connection] ( identifier[conn_id] = identifier[self] . identifier[db_conn_id] )
identifier[uri] = identifier[self] . identifier[_generate_connection_uri] ()
identifier[self] . identifier[log] . identifier[info] ( literal[string] , identifier[self] . identifier[db_conn_id] )
identifier[connection] . identifier[parse_from_uri] ( identifier[uri] )
identifier[session] . identifier[add] ( identifier[connection] )
identifier[session] . identifier[commit] () | def create_connection(self, session=None):
"""
Create connection in the Connection table, according to whether it uses
proxy, TCP, UNIX sockets, SSL. Connection ID will be randomly generated.
:param session: Session of the SQL Alchemy ORM (automatically generated with
decorator).
"""
connection = Connection(conn_id=self.db_conn_id)
uri = self._generate_connection_uri()
self.log.info('Creating connection %s', self.db_conn_id)
connection.parse_from_uri(uri)
session.add(connection)
session.commit() |
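The create_connection() docstring says the session is generated automatically by a decorator; a generic sketch of that injection pattern follows. The _Session class and the decorator are simplified stand-ins, not the real SQLAlchemy or Airflow objects.

import functools

class _Session:
    # Stand-in for a SQLAlchemy session.
    def add(self, obj): pass
    def commit(self): pass
    def close(self): pass

def provide_session(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        if kwargs.get('session') is not None:
            return func(*args, **kwargs)   # caller supplied a session
        kwargs['session'] = _Session()     # otherwise create one...
        try:
            return func(*args, **kwargs)
        finally:
            kwargs['session'].close()      # ...and always clean it up
    return wrapper

@provide_session
def create_connection(conn_id, session=None):
    session.add(conn_id)
    session.commit()

create_connection('my_db_conn')  # session injected by the decorator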
def Audio(self, run, tag):
"""Retrieve the audio events associated with a run and tag.
Args:
run: A string name of the run for which values are retrieved.
tag: A string name of the tag for which values are retrieved.
Raises:
KeyError: If the run is not found, or the tag is not available for
the given run.
Returns:
An array of `event_accumulator.AudioEvents`.
"""
accumulator = self.GetAccumulator(run)
return accumulator.Audio(tag) | def function[Audio, parameter[self, run, tag]]:
constant[Retrieve the audio events associated with a run and tag.
Args:
run: A string name of the run for which values are retrieved.
tag: A string name of the tag for which values are retrieved.
Raises:
KeyError: If the run is not found, or the tag is not available for
the given run.
Returns:
An array of `event_accumulator.AudioEvents`.
]
variable[accumulator] assign[=] call[name[self].GetAccumulator, parameter[name[run]]]
return[call[name[accumulator].Audio, parameter[name[tag]]]] | keyword[def] identifier[Audio] ( identifier[self] , identifier[run] , identifier[tag] ):
literal[string]
identifier[accumulator] = identifier[self] . identifier[GetAccumulator] ( identifier[run] )
keyword[return] identifier[accumulator] . identifier[Audio] ( identifier[tag] ) | def Audio(self, run, tag):
"""Retrieve the audio events associated with a run and tag.
Args:
run: A string name of the run for which values are retrieved.
tag: A string name of the tag for which values are retrieved.
Raises:
KeyError: If the run is not found, or the tag is not available for
the given run.
Returns:
An array of `event_accumulator.AudioEvents`.
"""
accumulator = self.GetAccumulator(run)
return accumulator.Audio(tag) |
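A self-contained mini version of the run/tag lookup in Audio(): the multiplexer resolves the run, then delegates the tag lookup, so a missing run or tag surfaces as KeyError exactly as documented. Class names and the event payload are illustrative.

class _Accumulator:
    def __init__(self, audio_by_tag):
        self._audio = audio_by_tag
    def Audio(self, tag):
        return self._audio[tag]         # KeyError if the tag is unknown

class _Multiplexer:
    def __init__(self, accumulators):
        self._accumulators = accumulators
    def GetAccumulator(self, run):
        return self._accumulators[run]  # KeyError if the run is unknown
    def Audio(self, run, tag):
        accumulator = self.GetAccumulator(run)
        return accumulator.Audio(tag)

mux = _Multiplexer({'run1': _Accumulator({'input_audio': ['event0']})})
assert mux.Audio('run1', 'input_audio') == ['event0']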
def process_ip_frame(self,
id=None,
msg=None):
"""process_ip_frame
Convert a complex nested json dictionary
to a flattened dictionary and capture
all unique keys for table construction
:param id: key for this msg
:param msg: ip frame for packet
"""
# normalize into a dataframe
df = json_normalize(msg)
# convert to a flattened dictionary
dt = json.loads(df.to_json())
flat_msg = {}
for k in dt:
new_key = "ip_{}".format(k)
flat_msg[new_key] = dt[k]["0"]
if new_key not in self.ip_keys:
self.ip_keys[new_key] = k
# end of capturing all unique keys
dt["ip_id"] = id
self.all_ip.append(dt)
log.debug("IP data updated:")
log.debug(self.ip_keys)
log.debug(self.all_ip)
log.debug("")
return flat_msg | def function[process_ip_frame, parameter[self, id, msg]]:
constant[process_ip_frame
Convert a complex nested json dictionary
to a flattened dictionary and capture
all unique keys for table construction
:param id: key for this msg
:param msg: ip frame for packet
]
variable[df] assign[=] call[name[json_normalize], parameter[name[msg]]]
variable[dt] assign[=] call[name[json].loads, parameter[call[name[df].to_json, parameter[]]]]
variable[flat_msg] assign[=] dictionary[[], []]
for taget[name[k]] in starred[name[dt]] begin[:]
variable[new_key] assign[=] call[constant[ip_{}].format, parameter[name[k]]]
call[name[flat_msg]][name[new_key]] assign[=] call[call[name[dt]][name[k]]][constant[0]]
if compare[name[new_key] <ast.NotIn object at 0x7da2590d7190> name[self].ip_keys] begin[:]
call[name[self].ip_keys][name[new_key]] assign[=] name[k]
call[name[dt]][constant[ip_id]] assign[=] name[id]
call[name[self].all_ip.append, parameter[name[dt]]]
call[name[log].debug, parameter[constant[IP data updated:]]]
call[name[log].debug, parameter[name[self].ip_keys]]
call[name[log].debug, parameter[name[self].all_ip]]
call[name[log].debug, parameter[constant[]]]
return[name[flat_msg]] | keyword[def] identifier[process_ip_frame] ( identifier[self] ,
identifier[id] = keyword[None] ,
identifier[msg] = keyword[None] ):
literal[string]
identifier[df] = identifier[json_normalize] ( identifier[msg] )
identifier[dt] = identifier[json] . identifier[loads] ( identifier[df] . identifier[to_json] ())
identifier[flat_msg] ={}
keyword[for] identifier[k] keyword[in] identifier[dt] :
identifier[new_key] = literal[string] . identifier[format] ( identifier[k] )
identifier[flat_msg] [ identifier[new_key] ]= identifier[dt] [ identifier[k] ][ literal[string] ]
keyword[if] identifier[new_key] keyword[not] keyword[in] identifier[self] . identifier[ip_keys] :
identifier[self] . identifier[ip_keys] [ identifier[new_key] ]= identifier[k]
identifier[dt] [ literal[string] ]= identifier[id]
identifier[self] . identifier[all_ip] . identifier[append] ( identifier[dt] )
identifier[log] . identifier[debug] ( literal[string] )
identifier[log] . identifier[debug] ( identifier[self] . identifier[ip_keys] )
identifier[log] . identifier[debug] ( identifier[self] . identifier[all_ip] )
identifier[log] . identifier[debug] ( literal[string] )
keyword[return] identifier[flat_msg] | def process_ip_frame(self, id=None, msg=None):
"""process_ip_frame
Convert a complex nested json dictionary
to a flattened dictionary and capture
all unique keys for table construction
:param id: key for this msg
:param msg: ip frame for packet
"""
# normalize into a dataframe
df = json_normalize(msg)
# convert to a flattened dictionary
dt = json.loads(df.to_json())
flat_msg = {}
for k in dt:
new_key = 'ip_{}'.format(k)
flat_msg[new_key] = dt[k]['0']
if new_key not in self.ip_keys:
self.ip_keys[new_key] = k # depends on [control=['if'], data=['new_key']] # depends on [control=['for'], data=['k']]
# end of capturing all unique keys
dt['ip_id'] = id
self.all_ip.append(dt)
log.debug('IP data updated:')
log.debug(self.ip_keys)
log.debug(self.all_ip)
log.debug('')
return flat_msg |
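The interesting step in process_ip_frame() is the normalize-then-flatten round trip; here is a minimal sketch, assuming pandas >= 1.0 (where json_normalize is a top-level import) and an invented nested frame.

import json
from pandas import json_normalize

msg = {'src': '10.0.0.1', 'flags': {'df': 1, 'mf': 0}}
df = json_normalize(msg)          # one-row frame, dotted column names
dt = json.loads(df.to_json())     # {'src': {'0': '10.0.0.1'}, ...}
# Row index '0' is peeled off and keys are prefixed, as in the code above.
flat_msg = {'ip_{}'.format(k): v['0'] for k, v in dt.items()}
print(flat_msg)  # {'ip_src': '10.0.0.1', 'ip_flags.df': 1, 'ip_flags.mf': 0}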
def sample_train_batch(self):
"""Sample a training batch (data and label)."""
batch = []
labels = []
num_groups = self.batch_size // self.batch_k
# For CUB200, we use the first 100 classes for training.
sampled_classes = np.random.choice(100, num_groups, replace=False)
for i in range(num_groups):
img_fnames = np.random.choice(self.train_image_files[sampled_classes[i]],
self.batch_k, replace=False)
batch += [self.get_image(img_fname, is_train=True) for img_fname in img_fnames]
labels += [sampled_classes[i] for _ in range(self.batch_k)]
return nd.concatenate(batch, axis=0), labels | def function[sample_train_batch, parameter[self]]:
constant[Sample a training batch (data and label).]
variable[batch] assign[=] list[[]]
variable[labels] assign[=] list[[]]
variable[num_groups] assign[=] binary_operation[name[self].batch_size <ast.FloorDiv object at 0x7da2590d6bc0> name[self].batch_k]
variable[sampled_classes] assign[=] call[name[np].random.choice, parameter[constant[100], name[num_groups]]]
for taget[name[i]] in starred[call[name[range], parameter[name[num_groups]]]] begin[:]
variable[img_fnames] assign[=] call[name[np].random.choice, parameter[call[name[self].train_image_files][call[name[sampled_classes]][name[i]]], name[self].batch_k]]
<ast.AugAssign object at 0x7da18fe900d0>
<ast.AugAssign object at 0x7da18fe93d60>
return[tuple[[<ast.Call object at 0x7da18fe90400>, <ast.Name object at 0x7da18fe92d40>]]] | keyword[def] identifier[sample_train_batch] ( identifier[self] ):
literal[string]
identifier[batch] =[]
identifier[labels] =[]
identifier[num_groups] = identifier[self] . identifier[batch_size] // identifier[self] . identifier[batch_k]
identifier[sampled_classes] = identifier[np] . identifier[random] . identifier[choice] ( literal[int] , identifier[num_groups] , identifier[replace] = keyword[False] )
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[num_groups] ):
identifier[img_fnames] = identifier[np] . identifier[random] . identifier[choice] ( identifier[self] . identifier[train_image_files] [ identifier[sampled_classes] [ identifier[i] ]],
identifier[self] . identifier[batch_k] , identifier[replace] = keyword[False] )
identifier[batch] +=[ identifier[self] . identifier[get_image] ( identifier[img_fname] , identifier[is_train] = keyword[True] ) keyword[for] identifier[img_fname] keyword[in] identifier[img_fnames] ]
identifier[labels] +=[ identifier[sampled_classes] [ identifier[i] ] keyword[for] identifier[_] keyword[in] identifier[range] ( identifier[self] . identifier[batch_k] )]
keyword[return] identifier[nd] . identifier[concatenate] ( identifier[batch] , identifier[axis] = literal[int] ), identifier[labels] | def sample_train_batch(self):
"""Sample a training batch (data and label)."""
batch = []
labels = []
num_groups = self.batch_size // self.batch_k
# For CUB200, we use the first 100 classes for training.
sampled_classes = np.random.choice(100, num_groups, replace=False)
for i in range(num_groups):
img_fnames = np.random.choice(self.train_image_files[sampled_classes[i]], self.batch_k, replace=False)
batch += [self.get_image(img_fname, is_train=True) for img_fname in img_fnames]
labels += [sampled_classes[i] for _ in range(self.batch_k)] # depends on [control=['for'], data=['i']]
return (nd.concatenate(batch, axis=0), labels) |
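A numpy-only sketch of the class-then-images sampling in sample_train_batch(); the file lists are fabricated and the actual image loading/concatenation is elided.

import numpy as np

batch_size, batch_k = 8, 2
train_image_files = {c: ['img_{}_{}.jpg'.format(c, i) for i in range(5)]
                     for c in range(100)}

num_groups = batch_size // batch_k
sampled_classes = np.random.choice(100, num_groups, replace=False)

fnames, labels = [], []
for c in sampled_classes:
    # batch_k distinct images per sampled class, all sharing that label.
    fnames += list(np.random.choice(train_image_files[int(c)], batch_k,
                                    replace=False))
    labels += [int(c)] * batch_k
assert len(fnames) == len(labels) == batch_size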
def set_preshared_key(self, new_key):
"""
Set the preshared key for this VPN. A pre-shared key is only
present when the tunnel type is 'VPN' or the encryption mode
is 'transport'.
:return: None
"""
if self.data.get('preshared_key'):
self.update(preshared_key=new_key) | def function[set_preshared_key, parameter[self, new_key]]:
constant[
Set the preshared key for this VPN. A pre-shared key is only
present when the tunnel type is 'VPN' or the encryption mode
is 'transport'.
:return: None
]
if call[name[self].data.get, parameter[constant[preshared_key]]] begin[:]
call[name[self].update, parameter[]] | keyword[def] identifier[set_preshared_key] ( identifier[self] , identifier[new_key] ):
literal[string]
keyword[if] identifier[self] . identifier[data] . identifier[get] ( literal[string] ):
identifier[self] . identifier[update] ( identifier[preshared_key] = identifier[new_key] ) | def set_preshared_key(self, new_key):
"""
Set the preshared key for this VPN. A pre-shared key is only
present when the tunnel type is 'VPN' or the encryption mode
is 'transport'.
:return: None
"""
if self.data.get('preshared_key'):
self.update(preshared_key=new_key) # depends on [control=['if'], data=[]] |

def identify(self, text, **kwargs):
"""
Identify language.
Identifies the language of the input text.
:param str text: Input text in UTF-8 format.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
"""
if text is None:
raise ValueError('text must be provided')
headers = {}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
sdk_headers = get_sdk_headers('language_translator', 'V3', 'identify')
headers.update(sdk_headers)
params = {'version': self.version}
data = text
headers['content-type'] = 'text/plain'
url = '/v3/identify'
response = self.request(
method='POST',
url=url,
headers=headers,
params=params,
data=data,
accept_json=True)
return response | def function[identify, parameter[self, text]]:
constant[
Identify language.
Identifies the language of the input text.
:param str text: Input text in UTF-8 format.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
]
if compare[name[text] is constant[None]] begin[:]
<ast.Raise object at 0x7da1b1b47400>
variable[headers] assign[=] dictionary[[], []]
if compare[constant[headers] in name[kwargs]] begin[:]
call[name[headers].update, parameter[call[name[kwargs].get, parameter[constant[headers]]]]]
variable[sdk_headers] assign[=] call[name[get_sdk_headers], parameter[constant[language_translator], constant[V3], constant[identify]]]
call[name[headers].update, parameter[name[sdk_headers]]]
variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da1b1b463b0>], [<ast.Attribute object at 0x7da1b1b45ae0>]]
variable[data] assign[=] name[text]
call[name[headers]][constant[content-type]] assign[=] constant[text/plain]
variable[url] assign[=] constant[/v3/identify]
variable[response] assign[=] call[name[self].request, parameter[]]
return[name[response]] | keyword[def] identifier[identify] ( identifier[self] , identifier[text] ,** identifier[kwargs] ):
literal[string]
keyword[if] identifier[text] keyword[is] keyword[None] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[headers] ={}
keyword[if] literal[string] keyword[in] identifier[kwargs] :
identifier[headers] . identifier[update] ( identifier[kwargs] . identifier[get] ( literal[string] ))
identifier[sdk_headers] = identifier[get_sdk_headers] ( literal[string] , literal[string] , literal[string] )
identifier[headers] . identifier[update] ( identifier[sdk_headers] )
identifier[params] ={ literal[string] : identifier[self] . identifier[version] }
identifier[data] = identifier[text]
identifier[headers] [ literal[string] ]= literal[string]
identifier[url] = literal[string]
identifier[response] = identifier[self] . identifier[request] (
identifier[method] = literal[string] ,
identifier[url] = identifier[url] ,
identifier[headers] = identifier[headers] ,
identifier[params] = identifier[params] ,
identifier[data] = identifier[data] ,
identifier[accept_json] = keyword[True] )
keyword[return] identifier[response] | def identify(self, text, **kwargs):
"""
Identify language.
Identifies the language of the input text.
:param str text: Input text in UTF-8 format.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
"""
if text is None:
raise ValueError('text must be provided') # depends on [control=['if'], data=[]]
headers = {}
if 'headers' in kwargs:
headers.update(kwargs.get('headers')) # depends on [control=['if'], data=['kwargs']]
sdk_headers = get_sdk_headers('language_translator', 'V3', 'identify')
headers.update(sdk_headers)
params = {'version': self.version}
data = text
headers['content-type'] = 'text/plain'
url = '/v3/identify'
response = self.request(method='POST', url=url, headers=headers, params=params, data=data, accept_json=True)
return response |
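A library-free sketch of the request assembly in identify(): validate, merge headers, force text/plain, POST the raw text. The URL and header names mirror the code above, but the SDK-header helper is replaced by an invented constant.

def build_identify_request(text, version, custom_headers=None):
    if text is None:
        raise ValueError('text must be provided')
    headers = {'Accept': 'application/json',
               'x-sdk-info': 'stand-in for get_sdk_headers()'}
    if custom_headers:
        headers.update(custom_headers)
    headers['content-type'] = 'text/plain'   # body is raw text, not JSON
    return {'method': 'POST', 'url': '/v3/identify',
            'params': {'version': version}, 'headers': headers, 'data': text}

req = build_identify_request('Hello world', '2018-05-01')
assert req['headers']['content-type'] == 'text/plain'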
def view_plugins(category=None):
""" return a view of the loaded plugin names and descriptions
Parameters
----------
category : None or str
if str, apply for single plugin category
Examples
--------
>>> from pprint import pprint
>>> pprint(view_plugins())
{'decoders': {}, 'encoders': {}, 'parsers': {}}
>>> class DecoderPlugin(object):
... plugin_name = 'example'
... plugin_descript = 'a decoder for dicts containing _example_ key'
... dict_signature = ('_example_',)
...
>>> errors = load_plugin_classes([DecoderPlugin])
>>> pprint(view_plugins())
{'decoders': {'example': 'a decoder for dicts containing _example_ key'},
'encoders': {},
'parsers': {}}
>>> view_plugins('decoders')
{'example': 'a decoder for dicts containing _example_ key'}
>>> unload_all_plugins()
"""
if category is not None:
if category == 'parsers':
return {
name: {"descript": klass.plugin_descript,
"regex": klass.file_regex}
for name, klass in _all_plugins[category].items()
}
return {
name: klass.plugin_descript
for name, klass in _all_plugins[category].items()
}
else:
return {cat: {name: klass.plugin_descript
for name, klass in plugins.items()}
for cat, plugins in _all_plugins.items()} | def function[view_plugins, parameter[category]]:
constant[ return a view of the loaded plugin names and descriptions
Parameters
----------
category : None or str
if str, apply for single plugin category
Examples
--------
>>> from pprint import pprint
>>> pprint(view_plugins())
{'decoders': {}, 'encoders': {}, 'parsers': {}}
>>> class DecoderPlugin(object):
... plugin_name = 'example'
... plugin_descript = 'a decoder for dicts containing _example_ key'
... dict_signature = ('_example_',)
...
>>> errors = load_plugin_classes([DecoderPlugin])
>>> pprint(view_plugins())
{'decoders': {'example': 'a decoder for dicts containing _example_ key'},
'encoders': {},
'parsers': {}}
>>> view_plugins('decoders')
{'example': 'a decoder for dicts containing _example_ key'}
>>> unload_all_plugins()
]
if compare[name[category] is_not constant[None]] begin[:]
if compare[name[category] equal[==] constant[parsers]] begin[:]
return[<ast.DictComp object at 0x7da2054a4160>]
return[<ast.DictComp object at 0x7da2054a4be0>] | keyword[def] identifier[view_plugins] ( identifier[category] = keyword[None] ):
literal[string]
keyword[if] identifier[category] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[category] == literal[string] :
keyword[return] {
identifier[name] :{ literal[string] : identifier[klass] . identifier[plugin_descript] ,
literal[string] : identifier[klass] . identifier[file_regex] }
keyword[for] identifier[name] , identifier[klass] keyword[in] identifier[_all_plugins] [ identifier[category] ]. identifier[items] ()
}
keyword[return] {
identifier[name] : identifier[klass] . identifier[plugin_descript]
keyword[for] identifier[name] , identifier[klass] keyword[in] identifier[_all_plugins] [ identifier[category] ]. identifier[items] ()
}
keyword[else] :
keyword[return] { identifier[cat] :{ identifier[name] : identifier[klass] . identifier[plugin_descript]
keyword[for] identifier[name] , identifier[klass] keyword[in] identifier[plugins] . identifier[items] ()}
keyword[for] identifier[cat] , identifier[plugins] keyword[in] identifier[_all_plugins] . identifier[items] ()} | def view_plugins(category=None):
""" return a view of the loaded plugin names and descriptions
Parameters
----------
category : None or str
if str, apply for single plugin category
Examples
--------
>>> from pprint import pprint
>>> pprint(view_plugins())
{'decoders': {}, 'encoders': {}, 'parsers': {}}
>>> class DecoderPlugin(object):
... plugin_name = 'example'
... plugin_descript = 'a decoder for dicts containing _example_ key'
... dict_signature = ('_example_',)
...
>>> errors = load_plugin_classes([DecoderPlugin])
>>> pprint(view_plugins())
{'decoders': {'example': 'a decoder for dicts containing _example_ key'},
'encoders': {},
'parsers': {}}
>>> view_plugins('decoders')
{'example': 'a decoder for dicts containing _example_ key'}
>>> unload_all_plugins()
"""
if category is not None:
if category == 'parsers':
return {name: {'descript': klass.plugin_descript, 'regex': klass.file_regex} for (name, klass) in _all_plugins[category].items()} # depends on [control=['if'], data=['category']]
return {name: klass.plugin_descript for (name, klass) in _all_plugins[category].items()} # depends on [control=['if'], data=['category']]
else:
return {cat: {name: klass.plugin_descript for (name, klass) in plugins.items()} for (cat, plugins) in _all_plugins.items()} |
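A runnable mini-registry reproducing the two result shapes view_plugins() returns (parsers additionally expose their file regex); the plugin classes are stand-ins.

class _ExampleDecoder:
    plugin_descript = 'a decoder for dicts containing _example_ key'

class _FooParser:
    plugin_descript = 'parses .foo files'
    file_regex = '*.foo'

_all_plugins = {'decoders': {'example': _ExampleDecoder},
                'encoders': {},
                'parsers': {'foo': _FooParser}}

# Parsers carry an extra "regex" field; other categories map name->descript.
print({n: {'descript': k.plugin_descript, 'regex': k.file_regex}
       for n, k in _all_plugins['parsers'].items()})
print({cat: {n: k.plugin_descript for n, k in plugins.items()}
       for cat, plugins in _all_plugins.items()})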
def problem(problem_name, **kwargs):
"""Get possibly copied/reversed problem in `base_registry` or `env_registry`.
Args:
problem_name: string problem name. See `parse_problem_name`.
**kwargs: forwarded to env problem's initialize method.
Returns:
possibly reversed/copied version of base problem registered in the given
registry.
"""
spec = parse_problem_name(problem_name)
try:
return Registries.problems[spec.base_name](
was_copy=spec.was_copy, was_reversed=spec.was_reversed)
except KeyError:
# If name is not found in base problems then try creating an env problem
return env_problem(problem_name, **kwargs) | def function[problem, parameter[problem_name]]:
constant[Get possibly copied/reversed problem in `base_registry` or `env_registry`.
Args:
problem_name: string problem name. See `parse_problem_name`.
**kwargs: forwarded to env problem's initialize method.
Returns:
possibly reversed/copied version of base problem registered in the given
registry.
]
variable[spec] assign[=] call[name[parse_problem_name], parameter[name[problem_name]]]
<ast.Try object at 0x7da20c7cadd0> | keyword[def] identifier[problem] ( identifier[problem_name] ,** identifier[kwargs] ):
literal[string]
identifier[spec] = identifier[parse_problem_name] ( identifier[problem_name] )
keyword[try] :
keyword[return] identifier[Registries] . identifier[problems] [ identifier[spec] . identifier[base_name] ](
identifier[was_copy] = identifier[spec] . identifier[was_copy] , identifier[was_reversed] = identifier[spec] . identifier[was_reversed] )
keyword[except] identifier[KeyError] :
keyword[return] identifier[env_problem] ( identifier[problem_name] ,** identifier[kwargs] ) | def problem(problem_name, **kwargs):
"""Get possibly copied/reversed problem in `base_registry` or `env_registry`.
Args:
problem_name: string problem name. See `parse_problem_name`.
**kwargs: forwarded to env problem's initialize method.
Returns:
possibly reversed/copied version of base problem registered in the given
registry.
"""
spec = parse_problem_name(problem_name)
try:
return Registries.problems[spec.base_name](was_copy=spec.was_copy, was_reversed=spec.was_reversed) # depends on [control=['try'], data=[]]
except KeyError:
# If name is not found in base problems then try creating an env problem
return env_problem(problem_name, **kwargs) # depends on [control=['except'], data=[]] |
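A minimal sketch of the try-base-registry-then-env-fallback in problem(); the registry content, the spec parsing, and the env_problem stand-in are all invented.

_problems = {'algorithmic_identity': lambda was_copy, was_reversed:
             ('base', was_copy, was_reversed)}

def _env_problem(name, **kwargs):
    return ('env', name, kwargs)

def lookup_problem(name, **kwargs):
    # Base registry first; KeyError falls through to the env-problem path.
    try:
        return _problems[name](was_copy=False, was_reversed=False)
    except KeyError:
        return _env_problem(name, **kwargs)

assert lookup_problem('algorithmic_identity')[0] == 'base'
assert lookup_problem('tic_tac_toe_env', batch_size=4)[0] == 'env'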
def decode_transformer(encoder_output, encoder_decoder_attention_bias, targets,
hparams, name):
"""Original Transformer decoder."""
with tf.variable_scope(name):
targets = common_layers.flatten4d3d(targets)
decoder_input, decoder_self_bias = (
transformer.transformer_prepare_decoder(targets, hparams))
decoder_input = tf.nn.dropout(decoder_input,
1.0 - hparams.layer_prepostprocess_dropout)
decoder_output = transformer.transformer_decoder(
decoder_input, encoder_output, decoder_self_bias,
encoder_decoder_attention_bias, hparams)
decoder_output = tf.expand_dims(decoder_output, axis=2)
decoder_output_shape = common_layers.shape_list(decoder_output)
decoder_output = tf.reshape(
decoder_output, [decoder_output_shape[0], -1, 1, hparams.hidden_size])
# Expand since t2t expects 4d tensors.
return decoder_output | def function[decode_transformer, parameter[encoder_output, encoder_decoder_attention_bias, targets, hparams, name]]:
constant[Original Transformer decoder.]
with call[name[tf].variable_scope, parameter[name[name]]] begin[:]
variable[targets] assign[=] call[name[common_layers].flatten4d3d, parameter[name[targets]]]
<ast.Tuple object at 0x7da1b1e14d00> assign[=] call[name[transformer].transformer_prepare_decoder, parameter[name[targets], name[hparams]]]
variable[decoder_input] assign[=] call[name[tf].nn.dropout, parameter[name[decoder_input], binary_operation[constant[1.0] - name[hparams].layer_prepostprocess_dropout]]]
variable[decoder_output] assign[=] call[name[transformer].transformer_decoder, parameter[name[decoder_input], name[encoder_output], name[decoder_self_bias], name[encoder_decoder_attention_bias], name[hparams]]]
variable[decoder_output] assign[=] call[name[tf].expand_dims, parameter[name[decoder_output]]]
variable[decoder_output_shape] assign[=] call[name[common_layers].shape_list, parameter[name[decoder_output]]]
variable[decoder_output] assign[=] call[name[tf].reshape, parameter[name[decoder_output], list[[<ast.Subscript object at 0x7da18ede5f60>, <ast.UnaryOp object at 0x7da18ede6fe0>, <ast.Constant object at 0x7da18ede4c70>, <ast.Attribute object at 0x7da18ede5210>]]]]
return[name[decoder_output]] | keyword[def] identifier[decode_transformer] ( identifier[encoder_output] , identifier[encoder_decoder_attention_bias] , identifier[targets] ,
identifier[hparams] , identifier[name] ):
literal[string]
keyword[with] identifier[tf] . identifier[variable_scope] ( identifier[name] ):
identifier[targets] = identifier[common_layers] . identifier[flatten4d3d] ( identifier[targets] )
identifier[decoder_input] , identifier[decoder_self_bias] =(
identifier[transformer] . identifier[transformer_prepare_decoder] ( identifier[targets] , identifier[hparams] ))
identifier[decoder_input] = identifier[tf] . identifier[nn] . identifier[dropout] ( identifier[decoder_input] ,
literal[int] - identifier[hparams] . identifier[layer_prepostprocess_dropout] )
identifier[decoder_output] = identifier[transformer] . identifier[transformer_decoder] (
identifier[decoder_input] , identifier[encoder_output] , identifier[decoder_self_bias] ,
identifier[encoder_decoder_attention_bias] , identifier[hparams] )
identifier[decoder_output] = identifier[tf] . identifier[expand_dims] ( identifier[decoder_output] , identifier[axis] = literal[int] )
identifier[decoder_output_shape] = identifier[common_layers] . identifier[shape_list] ( identifier[decoder_output] )
identifier[decoder_output] = identifier[tf] . identifier[reshape] (
identifier[decoder_output] ,[ identifier[decoder_output_shape] [ literal[int] ],- literal[int] , literal[int] , identifier[hparams] . identifier[hidden_size] ])
keyword[return] identifier[decoder_output] | def decode_transformer(encoder_output, encoder_decoder_attention_bias, targets, hparams, name):
"""Original Transformer decoder."""
with tf.variable_scope(name):
targets = common_layers.flatten4d3d(targets)
(decoder_input, decoder_self_bias) = transformer.transformer_prepare_decoder(targets, hparams)
decoder_input = tf.nn.dropout(decoder_input, 1.0 - hparams.layer_prepostprocess_dropout)
decoder_output = transformer.transformer_decoder(decoder_input, encoder_output, decoder_self_bias, encoder_decoder_attention_bias, hparams)
decoder_output = tf.expand_dims(decoder_output, axis=2)
decoder_output_shape = common_layers.shape_list(decoder_output)
decoder_output = tf.reshape(decoder_output, [decoder_output_shape[0], -1, 1, hparams.hidden_size])
# Expand since t2t expects 4d tensors.
return decoder_output # depends on [control=['with'], data=[]] |
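A numpy stand-in for the final shape juggling in decode_transformer(): the 3-D decoder output is expanded and reshaped into the [batch, length, 1, hidden] 4-D layout T2T expects; the dimensions here are arbitrary.

import numpy as np

batch, length, hidden = 2, 7, 16
decoder_output = np.zeros((batch, length, hidden))       # [B, L, H]
decoder_output = np.expand_dims(decoder_output, axis=2)  # [B, L, 1, H]
decoder_output = decoder_output.reshape(batch, -1, 1, hidden)
assert decoder_output.shape == (2, 7, 1, 16)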
def _to_api_value(self, attribute_type, value):
"""Return a parsed value for the API."""
if not value:
return None
if isinstance(attribute_type, properties.Instance):
return value.to_api()
if isinstance(attribute_type, properties.List):
return self._parse_api_value_list(value)
return attribute_type.serialize(value) | def function[_to_api_value, parameter[self, attribute_type, value]]:
constant[Return a parsed value for the API.]
if <ast.UnaryOp object at 0x7da204623730> begin[:]
return[constant[None]]
if call[name[isinstance], parameter[name[attribute_type], name[properties].Instance]] begin[:]
return[call[name[value].to_api, parameter[]]]
if call[name[isinstance], parameter[name[attribute_type], name[properties].List]] begin[:]
return[call[name[self]._parse_api_value_list, parameter[name[value]]]]
return[call[name[attribute_type].serialize, parameter[name[value]]]] | keyword[def] identifier[_to_api_value] ( identifier[self] , identifier[attribute_type] , identifier[value] ):
literal[string]
keyword[if] keyword[not] identifier[value] :
keyword[return] keyword[None]
keyword[if] identifier[isinstance] ( identifier[attribute_type] , identifier[properties] . identifier[Instance] ):
keyword[return] identifier[value] . identifier[to_api] ()
keyword[if] identifier[isinstance] ( identifier[attribute_type] , identifier[properties] . identifier[List] ):
keyword[return] identifier[self] . identifier[_parse_api_value_list] ( identifier[value] )
keyword[return] identifier[attribute_type] . identifier[serialize] ( identifier[value] ) | def _to_api_value(self, attribute_type, value):
"""Return a parsed value for the API."""
if not value:
return None # depends on [control=['if'], data=[]]
if isinstance(attribute_type, properties.Instance):
return value.to_api() # depends on [control=['if'], data=[]]
if isinstance(attribute_type, properties.List):
return self._parse_api_value_list(value) # depends on [control=['if'], data=[]]
return attribute_type.serialize(value) |
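A self-contained sketch of the type dispatch in _to_api_value(): falsy values map to None, Instance values serialize themselves, lists get their own helper, and everything else defers to the property type. The property classes below are simplified stand-ins.

class _Instance:          # stand-ins for properties.Instance / List / etc.
    pass

class _List:
    pass

class _Str:
    def serialize(self, value):
        return str(value)

def to_api_value(attribute_type, value):
    if not value:
        return None                        # falsy values are dropped
    if isinstance(attribute_type, _Instance):
        return value.to_api()              # nested models serialize themselves
    if isinstance(attribute_type, _List):
        return [str(v) for v in value]     # simplified list handling
    return attribute_type.serialize(value)

assert to_api_value(_Str(), 42) == '42'
assert to_api_value(_Str(), 0) is None
assert to_api_value(_List(), [1, 2]) == ['1', '2']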