def auc(y, z, round=True):
"""Compute area under the ROC curve."""
if round:
y = y.round()
if len(y) == 0 or len(np.unique(y)) < 2:
return np.nan
return skm.roc_auc_score(y, z) | 2,300 |
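# Usage sketch (illustrative) for auc() above: assumes the module-level aliases
# it relies on, i.e. numpy as np and sklearn.metrics as skm.
import numpy as np
import sklearn.metrics as skm

y_true = np.array([0.0, 0.0, 1.0, 1.0])
scores = np.array([0.1, 0.4, 0.35, 0.8])
print(auc(y_true, scores))  # 0.75 for this classic ranking example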
def test_die_init() -> None:
"""tests the use of a single die"""
die = Die()
assert die.sides == 6
assert die.max == 6
assert die.min == 1
assert die.rolls == 0
assert die.net_sides == die.sides
assert str(die) == "<d6 Die>"
assert repr(die) == "<d6 Die>"
assert not die > die
assert not die < die
assert die >= die
assert die <= die | 2,301 |
def get_routing_attributes(obj, modify_doc=False, keys=None):
"""
Loops through the provided object (using the dir() function) and
finds any callables which match the name signature (e.g.
get_foo()) AND have a docstring beginning with a path-like char
string.
This processes things in alphabetical order (rather than
the unpredictable __dict__ attribute), so take this into
consideration if certain routes should be checked before others.
Unfortunately, this is a problem because the 'all' method will
always come before others, so there is no capturing one type
followed by a catch-all 'all'. Until a solution is found, just
make a router by hand.
"""
if keys is None:
keys = dir(obj)
for val, method_str in _find_routeable_attributes(obj, keys):
path, *doc = val.__doc__.split(maxsplit=1) or ('', '')
if not path:
continue
if modify_doc:
val.__doc__ = ''.join(doc)
method = HTTPMethod[method_str]
yield method, path, val | 2,302 |
def copy_jce_policy_files(cluster):
"""" Copy JCE unlimited strength policy files to all nodes. """
source_folder = cluster.get_config(KEY_JCE_POLICY_FILES_LOCATION)
if not source_folder:
raise KerberosConfigError(
'The location of JCE Unlimited Strength Policy files was not found in {}'.format(
cluster.get_config_file()))
jar_files_pattern = os.path.join(source_folder, "*.jar")
if not glob.glob(jar_files_pattern):
raise KerberosConfigError(
'No policy jar files found in {}'.format(source_folder))
target_dir = os.path.join(cluster.get_config(KEY_JAVA_HOME),
'jre', 'lib', 'security')
execute(copy, hosts=cluster.get_all_hosts(),
source_file=jar_files_pattern, remote_file=target_dir) | 2,303 |
def get_tmp_filepath(_file):
"""生成一个针对_file的临时文件名"""
_path = os.path.dirname(_file)
_tmp_filename = os.path.basename(_file)
if not _tmp_filename.startswith('.'):
_tmp_filename = '.' + _tmp_filename
_tmp_filename += '_tmp'
_tmp_filepath = os.path.join(_path, _tmp_filename)
if os.path.exists(_tmp_filepath):
return get_tmp_filepath(_tmp_filepath + '_1')
return _tmp_filepath | 2,304 |
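# Usage sketch (illustrative) for get_tmp_filepath() above, using a hypothetical
# path; assumes os is imported as in the function.
print(get_tmp_filepath('/data/config.yml'))
# -> '/data/.config.yml_tmp' (assuming that path does not already exist)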
def upperLeftOrigin( largeSize, smallSize ):
"""
The upper left coordinate (tuple) of a small rectangle in a larger rectangle (centered)
"""
origin = tuple( map( lambda x: int( ( (x[0]-x[1])/2 ) ), zip( largeSize, smallSize )) )
return origin | 2,305 |
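# Usage sketch (illustrative) for upperLeftOrigin() above: centre a 200x100
# rectangle inside an 800x600 one.
print(upperLeftOrigin((800, 600), (200, 100)))  # -> (300, 250)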
def get_supported_events():
"""Returns the list of available _local_ templates.
If a template exists in the local app, it will take precedence
over the default trello_webhooks template. The base assumption
for this function is that _if_ a local template exists, then this
is an event we are interested in.
"""
app_template_path = path.join(
path.realpath(path.dirname(__file__)),
'templates/trello_webhooks'
)
return [t.split('.')[0] for t in listdir(app_template_path)] | 2,306 |
def test_unwrap_spans() -> None:
"""It unwraps span.pre elements."""
tree = parse_html("<span class='pre'>Test</span>")
postprocess._remove_span_pre(tree)
span = tree("span")
assert len(span) == 0
assert str(tree) == "Test" | 2,307 |
def from_ext(ext: str) -> S:
"""Get a SignedObject by file extension."""
object_types: typing.List[S] = [RpkiGhostbusters,
RpkiManifest,
RouteOriginAttestation]
entry_point_name = "rpkimancer.sigobj"
entry_points = importlib.metadata.entry_points()
for entry_point in entry_points.get(entry_point_name, []):
log.info(f"trying to load signed object {entry_point.value}")
cls = entry_point.load()
if issubclass(cls, SignedObject):
object_types.append(typing.cast(S, cls))
else:
log.warning(f"signed objects must inherit from {SignedObject}")
lookup_map = {cls.econtent_type.file_ext: cls
for cls in object_types}
try:
return lookup_map[ext]
except KeyError:
return lookup_map[ext.lstrip(".")] | 2,308 |
def plot_audio(audio,time,ResultPath,title):
"""Plot and save an audio file amplitude over time"""
plt.figure()
plt.plot(time,audio, linewidth=0.01)
plt.ylabel("Amplitude")
plt.xlabel("Time (s)")
plt.title(title)
pathname=ResultPath + title
plt.savefig(pathname)
plt.show()
return() | 2,309 |
def dataSet():
"""
Test data set
"""
x = [np.array([[1], [2], [3]]),
np.array([[2], [3], [4]])]
d = np.array([[1], [2]])
return x, d | 2,310 |
def config():
"""
Get the OpenAPI Document configuration
:returns: OpenAPI configuration YAML dict
"""
with open(get_test_file_path('pygeoapi-test-openapi-config.yml')) as config_file: # noqa
return yaml_load(config_file) | 2,311 |
def save_data(df, database_filename):
"""Saves Data into Database
Args:
df: cleaned dataframe
database_filename: database file name
"""
engine = create_engine('sqlite:///' + database_filename)
df.to_sql('Disasters', engine, if_exists='replace',index=False) | 2,312 |
def convert_broadcast_lesser(node, **kwargs):
"""Map MXNet's broadcast_lesser operator attributes to onnx's Less operator
and return the created node.
"""
return create_basic_op_node('Less', node, kwargs) | 2,313 |
def main(mt_input: str, panelapp_path: str, config_path: str, out_vcf: str):
"""
Read the MT from disk
Do filtering and class annotation
Export as a VCF
:param mt_input: path to the MT directory
:param panelapp_path: path to the panelapp data dump
:param config_path: path to the config json
:param out_vcf: path to write the VCF out to
"""
# initiate Hail with defined driver spec.
init_batch()
# get the run configuration JSON
logging.info(f'Reading config dict from "{config_path}"')
with open(AnyPath(config_path), encoding='utf-8') as handle:
config_dict = json.load(handle)
# find the config area specific to hail operations
hail_config = config_dict.get('filter')
# read the parsed panelapp data
logging.info(f'Reading PanelApp data from "{panelapp_path}"')
with open(AnyPath(panelapp_path), encoding='utf-8') as handle:
panelapp = json.load(handle)
# pull green and new genes from the panelapp data
green_expression, new_expression = green_and_new_from_panelapp(panelapp)
logging.info(
f'Starting Hail with reference genome "{hail_config.get("ref_genome")}"'
)
# if we already generated the annotated output, load instead
if not AnyPath(mt_input.rstrip('/') + '/').exists():
raise Exception(f'Input MatrixTable doesn\'t exist: {mt_input}')
matrix = hl.read_matrix_table(mt_input)
logging.debug(
f'Loaded annotated MT from {mt_input}, size: {matrix.count_rows()}',
)
# running global quality filter steps
matrix = filter_matrix_by_ac(matrix_data=matrix, config=hail_config)
matrix = filter_matrix_by_variant_attributes(matrix_data=matrix)
# pull annotations into info and update if missing
logging.info('Pulling VEP annotations into INFO field')
matrix = extract_annotations(matrix)
# # checkpoint after applying all these operations
matrix = informed_repartition(matrix=matrix)
# filter on row annotations
logging.info('Filtering Variant rows')
matrix = filter_rows_for_rare(matrix=matrix, config=hail_config)
logging.info(f'Variants remaining after Rare filter: {matrix.count_rows()}')
matrix = filter_benign_or_non_genic(matrix=matrix)
logging.info(f'Variants remaining after Benign filter: {matrix.count_rows()}')
matrix = filter_to_green_genes_and_split(
matrix=matrix, green_genes=green_expression
)
logging.info(f'Variants remaining after Green-Gene filter: {matrix.count_rows()}')
matrix = filter_by_consequence(matrix=matrix, config=hail_config)
logging.info(f'Variants remaining after Consequence filter: {matrix.count_rows()}')
logging.info('Repartitioning after consequence filtration')
matrix = informed_repartition(matrix=matrix)
# add Classes to the MT
logging.info('Applying categories to variant consequences')
matrix = annotate_category_1(matrix)
matrix = annotate_category_2(matrix, hail_config, new_expression)
matrix = annotate_category_3(matrix, hail_config)
matrix = annotate_category_4(matrix, hail_config)
# filter to class-annotated only prior to export
logging.info('Filter variants to leave only classified')
matrix = filter_to_categorised(matrix)
logging.info(f'Variants remaining after Category filter: {matrix.count_rows()}')
# another little repartition after heavy filtering
matrix = informed_repartition(matrix=matrix)
# add an additional annotation, if the variant is Category4 only
# obtain the massive CSQ string using method stolen from the Broad's Gnomad library
# also take the single gene_id (from the exploded attribute)
matrix = matrix.annotate_rows(
info=matrix.info.annotate(
CSQ=vep_struct_to_csq(
matrix.vep, csq_fields=config_dict['variant_object'].get('csq_string')
),
gene_id=matrix.geneIds,
),
category_4_only=hl.if_else(
(matrix.info.Category1 == 0)
& (matrix.info.Category2 == 0)
& (matrix.info.Category3 == 0)
& (matrix.info.Category4 == 1),
ONE_INT,
MISSING_INT,
),
)
# write to MT
matrix.write(f'{out_vcf}.mt', overwrite=True)
# parse out the compound het details (after pulling gene_id above)
comp_het_details = extract_comp_het_details(matrix=matrix)
# transform the vcf output path into a json path
out_json = f'{out_vcf.split(".", maxsplit=1)[0]}.json'
# and write the comp-het JSON file
with AnyPath(out_json).open('w') as handle:
json.dump(comp_het_details, handle, indent=True, default=str)
logging.info('comp-het data written to cloud')
# write the results to a VCF path
logging.info(f'Write variants out to "{out_vcf}"')
write_matrix_to_vcf(matrix=matrix, output_path=out_vcf) | 2,314 |
def test_simulator_setup_space_quoted():
"""run_and_pytest() parses quoted --setup TEXT argument."""
command = (
'phmdoctest doc/setup.md --setup "import math" --teardown LAST'
" --report --outfile discarded.py"
)
simulator_status = phmdoctest.simulator.run_and_pytest(
well_formed_command=command, pytest_options=["--doctest-modules", "-v"]
)
assert simulator_status.runner_status.exit_code == 0
assert simulator_status.pytest_exit_code == 0
stdout = simulator_status.runner_status.stdout
assert 'python 9 setup "import math"' in stdout | 2,315 |
def sliding_window(image, step_size, window_size):
"""给定一副图像,返回一个从左向右滑动的窗口,直至覆盖整个图像"""
for y in range(0, image.shape[0], step_size):
for x in range(0, image.shape[1], step_size):
yield (x, y, image[y:y + window_size[1], x:x + window_size[0]]) | 2,316 |
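# Usage sketch (illustrative) for sliding_window() above: iterate 32x32 windows
# over a dummy grayscale image. Assumes numpy as np; window_size is (width, height).
import numpy as np

image = np.zeros((64, 64), dtype=np.uint8)
for x, y, window in sliding_window(image, step_size=32, window_size=(32, 32)):
    print(x, y, window.shape)  # 0 0 (32, 32), 32 0 (32, 32), 0 32 (32, 32), 32 32 (32, 32)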
def check_federated_type(
type_spec: computation_types.Type,
member: Optional[computation_types.Type] = None,
placement: Optional[placement_literals.PlacementLiteral] = None,
all_equal: Optional[bool] = None):
"""Checks that `type_spec` is a federated type with the given parameters.
Args:
type_spec: The `tff.Type` to check.
member: The expected member type, or `None` if unspecified.
placement: The desired placement, or `None` if unspecified.
all_equal: The desired result of accessing the property
`tff.FederatedType.all_equal` of `type_spec`, or `None` if left
unspecified.
Raises:
TypeError: if `type_spec` is not a federated type of the given kind.
"""
py_typecheck.check_type(type_spec, computation_types.FederatedType)
if member is not None:
py_typecheck.check_type(member, computation_types.Type)
member.check_assignable_from(type_spec.member)
if placement is not None:
py_typecheck.check_type(placement, placement_literals.PlacementLiteral)
if type_spec.placement is not placement:
raise TypeError(
'Expected federated type placed at {}, got one placed at {}.'.format(
placement, type_spec.placement))
if all_equal is not None:
py_typecheck.check_type(all_equal, bool)
if type_spec.all_equal != all_equal:
raise TypeError(
'Expected federated type with all_equal {}, got one with {}.'.format(
all_equal, type_spec.all_equal)) | 2,317 |
def randomize_examples(verbalize, path='prompts/',example_filenames="example_" , n_examples=3, onlyverbal=False):
"""
Randomizes the examples for the initial prompt.
Parameters
----------
verbalize : bool
If true, examples contain reasoning for the answer, e.g. "because I do not believe in conspiracies."
path : str, optional
Filepath. The default is 'prompts/'.
example_filenames : str, optional
How the files containing the examples are named. The default is "example_".
n_examples : int, optional
How many examples are sampled. The default is 3.
onlyverbal : bool, optional
If True, examples where there are no numerical answers are used. The default is False.
Returns
-------
examples_string : str
The sampled examples joined into a single string to be added to the prompt.
"""
# Read all the examples in a directory
examples_list = []
for file in os.listdir(path):
if file.startswith(example_filenames):
examples_list.append(open(path + file, "r").read())
# Remove verbalization
if verbalize == False:
if onlyverbal == False:
for i, value in enumerate(examples_list):
temp = examples_list[i]
examples_list[i] = temp[0:temp.find("Participant:") + 14] + '.'
if onlyverbal == True:
for i, value in enumerate(examples_list):
temp = examples_list[i]
findend = temp.find("Participant:")
end_i = temp.find(",", findend)
examples_list[i] = temp[0:end_i] + '.'
# Randomize the order
examples_list = random.sample(examples_list, k=n_examples)
# Add to a string that will be added to the prompt
for i in range(n_examples):
if i == 0:
examples_string = examples_list[i]
else:
examples_string = examples_string + "\n\n" + examples_list[i]
return examples_string | 2,318 |
def histogram2d(x, y, bins_x, bins_y):
"""Histogram 2d between two continuous row vectors.
Parameters
----------
x : array_like
Vector array of shape (N,) and of type np.float32
y : array_like
Vector array of shape (N,) and of type np.float32
bins_x, bins_y : int64
Number of bins respectively for the x and y variables
Returns
-------
hist : array_like
Array of shape (bins, bins) and of type int64
"""
# x-range
x_max, x_min = x.max(), x.min()
delta_x = 1 / ((x_max - x_min) / bins_x)
# y-range
y_max, y_min = y.max(), y.min()
delta_y = 1 / ((y_max - y_min) / bins_y)
# compute histogram 2d
xy_bin = np.zeros((np.int64(bins_x), np.int64(bins_y)), dtype=np.int64)
for t in range(len(x)):
i = (x[t] - x_min) * delta_x
j = (y[t] - y_min) * delta_y
if 0 <= i < bins_x and 0 <= j < bins_y:
xy_bin[int(i), int(j)] += 1
return xy_bin | 2,319 |
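# Usage sketch (illustrative) for histogram2d() above: 2-D histogram of two random
# float32 vectors. Assumes numpy as np; note that samples exactly at the maximum
# fall outside the last bin because of the strict upper bound in the loop.
import numpy as np

rng = np.random.default_rng(0)
x = rng.random(1000).astype(np.float32)
y = rng.random(1000).astype(np.float32)
hist = histogram2d(x, y, bins_x=10, bins_y=10)
print(hist.shape, hist.sum())  # (10, 10) and a total count close to 1000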
def xy2latlong(x: float, y: float, ds: Any) -> Tuple[float, float]:
"""Return lat long coordinate by x, y
>>> import gdal
>>> path = "../../../tests/data/raster_for_test.tif"
>>> ds = gdal.Open(path)
>>> xy2latlong(3715171, 2909857, ds)
(1.7036231518576481, 48.994284431891565)
"""
old_cs = osr.SpatialReference()
old_cs.ImportFromWkt(ds.GetProjectionRef())
# create the new coordinate system
wgs84_wkt = """
GEOGCS["WGS 84",
DATUM["WGS_1984",
SPHEROID["WGS 84",6378137,298.257223563,
AUTHORITY["EPSG","7030"]],
AUTHORITY["EPSG","6326"]],
PRIMEM["Greenwich",0,
AUTHORITY["EPSG","8901"]],
UNIT["degree",0.01745329251994328,
AUTHORITY["EPSG","9122"]],
AUTHORITY["EPSG","4326"]]"""
new_cs = osr.SpatialReference()
new_cs.ImportFromWkt(wgs84_wkt)
# create a transform object to convert between coordinate systems
transform = osr.CoordinateTransformation(old_cs, new_cs)
# get the coordinates in lat long
latlong = transform.TransformPoint(x, y)
return latlong[0], latlong[1] | 2,320 |
def relative_date(r='12m', end_date='today', date_format='%Y-%m-%d',
as_string=False, unixtimestamp=False):
"""
Relative Date function
Calculates a datetime from a given end date and a relative reference.
INPUT:
r - relative date reference as '-12d' accepts d, w, m or y
end_date - 'today' (default), date string, datetime object
date_format - input format of string & output if requested
as_string - True | False (default)
decides if output is converted to string from datetime
unixtimestamp - converts datetime to an INTEGER unixtimestamp
"""
# Create Datetime object end_date based on supplied end_date
# If not string or 'today' assume already in datetime format
if end_date == 'today':
end_date = dt.datetime.today()
elif isinstance(end_date, str):
end_date = dt.datetime.strptime(end_date, date_format)
# Breakdown Relative Reference into type (i.e. d, w, m, y) & number
r = r[1::] if r[0] == '-' else r
dtype, dnum = str(r[-1]).lower(), float(r[0:-1])
# Manipulate based on relative Days, Weeks, Months or Years
if dtype == 'd': start_date = end_date - dt.timedelta(days=dnum)
elif dtype == 'w': start_date = end_date - dt.timedelta(weeks=dnum)
elif dtype == 'm': start_date = end_date - dt.timedelta(weeks=dnum*4)
elif dtype == 'y': start_date = end_date - dt.timedelta(weeks=dnum*52.143)
# Output as Strings if desirable
if as_string is True:
start_date = dt.datetime.strftime(start_date, date_format)
end_date = dt.datetime.strftime(end_date, date_format)
elif unixtimestamp is True:
start_date = int(dt.datetime.timestamp(start_date))
end_date = int(dt.datetime.timestamp(end_date))
return start_date, end_date | 2,321 |
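# Usage sketch (illustrative) for relative_date() above: start/end dates 3 months
# back from a fixed end date. Assumes datetime is imported as dt, as in the function.
start, end = relative_date('3m', end_date='2024-06-30', as_string=True)
print(start, end)  # '2024-04-07' '2024-06-30' (months are approximated as 4 weeks)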
def get_iexist_vdw_bond(ipt):
"""
Check if a given mol pair contains any vdw bond which exists
in the query mol. Note that input mol pairs must have cc=0.
"""
obj, mi, mj = ipt
iok = F
if np.any( [ set(b) <= set(mi.iasq+mj.iasq) for b in obj.ncbs ] ):
iok = T
return iok | 2,322 |
def getSVG(shape, opts=None, view_vector=(-0, 0, 20.0)):
"""
Export a shape to SVG
"""
d = {"width": 800, "height": 800, "marginLeft": 20, "marginTop": 20}
if opts:
d.update(opts)
# need to guess the scale and the coordinate center
uom = guessUnitOfMeasure(shape)
width = float(d["width"])
height = float(d["height"])
marginLeft = float(d["marginLeft"])
marginTop = float(d["marginTop"])
# TODO: provide option to give 3 views
viewVector = FreeCAD.Base.Vector(view_vector)
(visibleG0, visibleG1, hiddenG0, hiddenG1) = Drawing.project(shape, viewVector)
(hiddenPaths, visiblePaths) = getPaths(
Drawing.projectToSVG(shape, viewVector, "")
) # this param is totally undocumented!
# get bounding box -- these are all in 2-d space
bb = visibleG0.BoundBox
bb.add(visibleG1.BoundBox)
bb.add(hiddenG0.BoundBox)
bb.add(hiddenG1.BoundBox)
# width pixels for x, height pixels for y
# massive hack convert pixels to mm
unitScale = (
3.779527559
) # min( width / bb.XLength * 0.75 , height / bb.YLength * 0.75 )
# compute amount to translate-- move the top left into view
(xTranslate, yTranslate) = (
(0 - bb.XMin) + marginLeft / unitScale,
(0 - bb.YMax) - marginTop / unitScale,
)
# compute paths ( again -- had to strip out freecad crap )
hiddenContent = ""
for p in hiddenPaths:
hiddenContent += PATHTEMPLATE % p
visibleContent = ""
for p in visiblePaths:
visibleContent += PATHTEMPLATE % p
svg = SVG_TEMPLATE % (
{
"unitScale": str(unitScale),
"strokeWidth": "0.1",
"hiddenContent": visibleContent,
"xTranslate": str(xTranslate),
"yTranslate": str(yTranslate),
"width": str(width),
"height": str(height),
"textboxY": str(height - 30),
"uom": str(uom),
}
)
# svg = SVG_TEMPLATE % (
# {"content": projectedContent}
# )
return svg | 2,323 |
def test_process(ase3_code):
"""Test running a calculation
note this does not test that the expected outputs are created or that output parsing works"""
# Prepare input parameters
Ase3Parameters = DataFactory('ase3')
input_file = SinglefileData(
file=os.path.join(TEST_DIR, 'input_files', 'run_gpaw.py'))
# set up calculation
inputs = {
'code': ase3_code,
'input_file': input_file,
'operation_mode':Str('inout'),
'metadata': {
'options': {
'max_wallclock_seconds': 30
},
},
}
result = run(CalculationFactory('ase3'), **inputs)
computed_diff = result['ase3'].get_content()
assert 'ase3_output' in computed_diff | 2,324 |
def load_data(messages_filepath, categories_filepath):
"""Loads messages and categories data and creates a merged dataframe
Args:
messages_filepath (str): Path to the messages file
categories_filepath (str): Path to the categories file
Returns:
(pd.DataFrame): A messages and categories dataframe
"""
messages = pd.read_csv(messages_filepath)
categories = pd.read_csv(categories_filepath)
return messages.merge(categories, on='id') | 2,325 |
def is_square_inside(row, col, rows, cols):
"""Check if row and col is square inside grid having rows and cols."""
return row not in (0, rows - 1) and col not in (0, cols - 1) | 2,326 |
def demo(ks=(1, 2, 3, 4), N=20, azimuths=(0, 20), elevations=(90, 30), colors=get_colors(), verbose=True, savefig=False,
showfig=True, elements=True):
"""
:param tuple ks : Orders of the Enneper surface
:param int N : Resolution of each plot
:param tuple azimuths : Azimuths for each plot
:param tuple elevations: Elevations for each plot
:param tuple colors : RGBA tuples for colors of the surface, planes, lines, and point
:param bool verbose : Whether to output auxiliary information
:param bool savefig : Whether to save the figure to a file
:param bool showfig : Whether to show the figure
:param bool elements : Whether to display elements
:return:
"""
c_surfs, c_plane, c_line, c_point = colors
for k in ks:
if verbose:
print(f"Enneper surface of order k={k}")
# Collect meshes of the Enneper surface, its symmetry planes, symmetry axes, and symmetry point
X, Y, Z, C = Enneper_mesh(k=k, N=N, colors=c_surfs)
if elements:
u_plane = max(np.max(X), np.max(Y))
u_line = 0.6*np.max(np.sqrt(X**2 + Y**2))
u_zfactor = np.max(Z) / u_plane
Xs, Ys, Zs, Cs = symmetry_element_meshes(k, N, u_plane, u_line, u_zfactor, c_plane, c_line, c_point)
X, Y, Z, C = bridge_meshes([X] + Xs, [Y] + Ys, [Z] + Zs, [C] + Cs)
# We plot the surface from various view points
for azimuth, elevation in zip(azimuths, elevations):
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.view_init(elev=elevation, azim=azimuth)
ax.plot_surface(X, Y, Z, antialiased=True, rstride=1, cstride=1, facecolors=C,
shade=True, linewidth=1)
ax.set_xlim(np.min(X), np.max(X))
ax.set_ylim(np.min(Y), np.max(Y))
ax.set_zlim(np.min(Z), np.max(Z))
plt.axis('off')
if savefig:
if elements:
fname = f"figs/Enneper-k-{k}-elements-elevation-{elevation}-azimuth-{azimuth}-N-{N}.png"
else:
fname = f"figs/Enneper-k-{k}-elevation-{elevation}-azimuth-{azimuth}-N-{N}.png"
plt.savefig(fname, bbox_inches='tight', dpi=300)
if showfig:
plt.show() | 2,327 |
def reshapeLabel(label):
"""
Reshape 1-D [0,1,...] to 2-D [[1,-1],[-1,1],...].
"""
n = label.size(0)
y = FloatTensor(n, 2)
y[:, 0] = 2 * (0.5 - label)
y[:, 1] = - y[:, 0]
return y.long() | 2,328 |
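# Usage sketch (illustrative) for reshapeLabel() above: assumes torch and that
# FloatTensor refers to torch.FloatTensor, matching the name used in the function.
import torch
FloatTensor = torch.FloatTensor

label = torch.tensor([0.0, 1.0, 0.0])
print(reshapeLabel(label))  # rows: [1, -1], [-1, 1], [1, -1]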
def _coo_scipy2torch(adj, coalesce=True, use_cuda=False):
"""
convert a scipy sparse COO matrix to torch
"""
values = adj.data
indices = np.vstack((adj.row, adj.col))
i = torch.LongTensor(indices)
v = torch.FloatTensor(values)
ans = torch.sparse.FloatTensor(i, v, torch.Size(adj.shape))
if use_cuda:
ans = ans.cuda()
if coalesce:
ans = ans.coalesce()
return ans | 2,329 |
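# Usage sketch (illustrative) for _coo_scipy2torch() above: convert a small scipy
# COO matrix and check the dense round-trip. Assumes scipy, numpy and torch.
import numpy as np
import scipy.sparse as sp
import torch

adj = sp.coo_matrix(np.array([[0.0, 1.0], [2.0, 0.0]]))
t = _coo_scipy2torch(adj, coalesce=True, use_cuda=False)
print(t.to_dense())  # dense tensor equal to the original [[0., 1.], [2., 0.]]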
def behavior_by_delta_temp(db_dict: dict, bins: np.ndarray):
"""
Computes frequency of behavior by delta-temperature achieved during the preceding bout
:param db_dict: Debug dictionary created during simulation run
:param bins: The bin-edges for dividing the bout delta temperature space
:return: A dictionary with behaviors as keys and probability in each bin as values
"""
selector = np.logical_and(db_dict["sel_behav"] != '', db_dict["sel_behav"] != 'N')
behavior_types = np.unique(db_dict["sel_behav"][selector])
all_behavs = db_dict["sel_behav"][selector]
all_btemps = db_dict["curr_temp"][selector]
all_deltas = np.zeros(all_btemps.size)
for i in range(1, all_btemps.size):
all_deltas[i] = all_btemps[i] - all_btemps[i-1]
ad_counts = np.histogram(all_deltas, bins)[0].astype(np.float)
result = {k: np.zeros(bins.size - 1) for k in behavior_types}
for behav in behavior_types:
b_dtemps = all_deltas[all_behavs == behav]
result[behav] = np.histogram(b_dtemps, bins)[0].astype(np.float) / ad_counts
return result | 2,330 |
def _gen_efficientnet(channel_multiplier=1.0, depth_multiplier=1.0, num_classes=1000, **kwargs):
"""Creates an EfficientNet model.
Ref impl: https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/efficientnet_model.py
Paper: https://arxiv.org/abs/1905.11946
EfficientNet params
name: (channel_multiplier, depth_multiplier, resolution, dropout_rate)
'efficientnet-b0': (1.0, 1.0, 224, 0.2),
'efficientnet-b1': (1.0, 1.1, 240, 0.2),
'efficientnet-b2': (1.1, 1.2, 260, 0.3),
'efficientnet-b3': (1.2, 1.4, 300, 0.3),
'efficientnet-b4': (1.4, 1.8, 380, 0.4),
'efficientnet-b5': (1.6, 2.2, 456, 0.4),
'efficientnet-b6': (1.8, 2.6, 528, 0.5),
'efficientnet-b7': (2.0, 3.1, 600, 0.5),
Args:
channel_multiplier: multiplier to number of channels per layer
depth_multiplier: multiplier to number of repeats per stage
"""
# ORIGINAL
# arch_def = [
# ['ds_r1_k3_s1_e1_c16_se0.25'],
# ['ir_r2_k3_s2_e6_c24_se0.25'],
# ['ir_r2_k5_s2_e6_c40_se0.25'],
# ['ir_r3_k3_s2_e6_c80_se0.25'],
# ['ir_r3_k5_s1_e6_c112_se0.25'],
# ['ir_r4_k5_s2_e6_c192_se0.25'],
# ['ir_r1_k3_s1_e6_c320_se0.25'],
# ]
# DEBUG
arch_def = [
['ds_r1_k3_s1_e1_c16'],
['ir_r2_k3_s2_e6_c24'],
['ir_r2_k5_s2_e6_c40'],
['ir_r3_k3_s2_e6_c80'],
['ir_r3_k5_s1_e6_c112'],
['ir_r4_k5_s2_e6_c192'],
['ir_r1_k3_s1_e6_c320'],
]
bn_momentum, bn_eps = _resolve_bn_params(kwargs)
# NOTE: other models in the family didn't scale the feature count
num_features = _round_channels(1280, channel_multiplier, 8, None)
model = GenEfficientNet(
_decode_arch_def(arch_def, depth_multiplier),
num_classes=num_classes,
stem_size=32,
channel_multiplier=channel_multiplier,
channel_divisor=8,
channel_min=None,
num_features=num_features,
bn_momentum=bn_momentum,
bn_eps=bn_eps,
act_fn=swish,
**kwargs
)
return model | 2,331 |
def test_rescan_file(test_microvm_with_ssh, network_config):
"""Verify that rescan works with a file-backed virtio device."""
test_microvm = test_microvm_with_ssh
test_microvm.spawn()
# Set up the microVM with 1 vCPUs, 256 MiB of RAM, 0 network ifaces and
# a root file system with the rw permission. The network interface is
# added after we get a unique MAC and IP.
test_microvm.basic_config()
_tap, _, _ = test_microvm_with_ssh.ssh_network_config(network_config, '1')
block_size = 2
# Add a scratch block device.
fs = drive_tools.FilesystemFile(
os.path.join(test_microvm.fsfiles, 'scratch'),
size=block_size
)
test_microvm.add_drive(
'scratch',
fs.path,
)
test_microvm.start()
ssh_connection = net_tools.SSHConnection(test_microvm.ssh_config)
_check_block_size(ssh_connection, '/dev/vdb', fs.size())
# Check if reading from the entire disk results in a file of the same size
# or errors out, after a truncate on the host.
truncated_size = block_size//2
utils.run_cmd(f"truncate --size {truncated_size}M {fs.path}")
block_copy_name = "dev_vdb_copy"
_, _, stderr = ssh_connection.execute_command(
f"dd if=/dev/vdb of={block_copy_name} bs=1M count={block_size}")
assert "dd: error reading '/dev/vdb': Input/output error" in stderr.read()
_check_file_size(ssh_connection, f'{block_copy_name}',
truncated_size * MB)
response = test_microvm.drive.patch(
drive_id='scratch',
path_on_host=test_microvm.create_jailed_resource(fs.path),
)
assert test_microvm.api_session.is_status_no_content(response.status_code)
_check_block_size(
ssh_connection,
'/dev/vdb',
fs.size()
) | 2,332 |
def do_add_application_type(request):
"""定义
dict_class=models.CharField(u"字典类别",max_length=255)
dict_type=models.CharField(u"字典类型",max_length=255)
dict_name=models.CharField(u"字典名称",max_length=255)
dict_value=models.CharField(u"字典值",max_length=255)
dict_status=models.IntegerField(u"字典状态")
dict_mark=models.CharField(u"字典备注",max_length=1000,null=True,blank=True)
"""
dict_class=request.POST.get("dict_class")
dict_type=request.POST.get("dict_type")
dict_name=request.POST.get("dict_name")
dict_code=request.POST.get("dict_code")
dict_status=0
dict_mark=request.POST.get("dict_mark")
try:
dicts = Dicts.objects.filter(dict_class=dict_class,dict_type=dict_type,dict_name=dict_name,dict_code=dict_code)
if dicts.exists():
return render_json({'code':True, 'msg':u"已存在相同记录信息"})
Dicts.objects.create(dict_class=dict_class,dict_type=dict_type
,dict_name=dict_name,dict_code=dict_code
,dict_status=dict_status,dict_mark=dict_mark)
logger.info('insert object to Dicts is success')
return render_json({'code':True, 'msg':u"数据保存成功"})
except Exception as e:
logger.error('insert object to Dicts is error:{}'.format(repr(e)))
return render_json({'code':False, 'msg':u"数据保存失败"}) | 2,333 |
def setup_app(command, conf, vars):
"""Place any commands to setup axantaddressbook here"""
conf = base_config.configure(conf.global_conf, conf.local_conf)
base_config.setup(conf)
setup_schema(command, conf, vars)
bootstrap(command, conf, vars) | 2,334 |
def eval_single_grid_node(iden, counter, phases, maxiter, start_index):
"""
Evaluating randomly generated spotty single system model.
:param iden: str; node ID
:param counter: int; current number of already calculeted nodes
:param phases: numpy.array; desired phases of observations
:param maxiter: int; total number of nodes in this batch
:param start_index: int; number of iterations already calculated before interruption
:return: None
"""
aug_counter = counter + start_index
print(f'Processing node: {aug_counter}/{maxiter}, {100.0 * aug_counter / maxiter:.2f}%')
while True:
params = aux.draw_single_star_params()
try:
s = SingleSystem.from_json(params)
except ValueError as e:
continue
o = Observer(passband=config.PASSBANDS, system=s)
try:
o.lc(phases=phases, normalize=True)
# o.plot.lc()
except (LimbDarkeningError, AtmosphereError) as e:
# print(f'Parameters: {params} produced system outside grid coverage.')
continue
dtb.insert_observation(
config.DATABASE_NAME, o, iden, config.PARAMETER_COLUMNS_SINGLE, config.PARAMETER_TYPES_SINGLE
)
break | 2,335 |
def assert_data_frame_almost_equal(left, right):
"""Raise AssertionError if ``pd.DataFrame`` objects are not "almost equal".
Wrapper of ``pd.util.testing.assert_frame_equal``. Floating point values
are considered "almost equal" if they are within a threshold defined by
``assert_frame_equal``. This wrapper uses a number of
checks that are turned off by default in ``assert_frame_equal`` in order to
perform stricter comparisons (for example, ensuring the index and column
types are the same). It also does not consider empty ``pd.DataFrame``
objects equal if they have a different index.
Other notes:
* Index (row) and column ordering must be the same for objects to be equal.
* NaNs (``np.nan``) in the same locations are considered equal.
This is a helper function intended to be used in unit tests that need to
compare ``pd.DataFrame`` objects.
Parameters
----------
left, right : pd.DataFrame
``pd.DataFrame`` objects to compare.
Raises
------
AssertionError
If `left` and `right` are not "almost equal".
See Also
--------
pandas.util.testing.assert_frame_equal
"""
# pass all kwargs to ensure this function has consistent behavior even if
# `assert_frame_equal`'s defaults change
pdt.assert_frame_equal(left, right,
check_dtype=True,
check_index_type=True,
check_column_type=True,
check_frame_type=True,
check_less_precise=False,
check_names=True,
by_blocks=False,
check_exact=False)
# this check ensures that empty DataFrames with different indices do not
# compare equal. exact=True specifies that the type of the indices must be
# exactly the same
pdt.assert_index_equal(left.index, right.index,
exact=True,
check_names=True) | 2,336 |
def exponential_decay_function(distance: np.ndarray) -> np.ndarray:
"""Calculate exponential discount factor for action interaction weight matrix.
Parameters
-----------
distance: array-like, shape (len_list, )
Distance between two slots.
"""
if not isinstance(distance, np.ndarray) or distance.ndim != 1:
raise ValueError("distance must be 1-dimensional ndarray")
return np.exp(-distance) | 2,337 |
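# Usage sketch (illustrative) for exponential_decay_function() above; assumes numpy as np.
import numpy as np

print(exponential_decay_function(np.array([0.0, 1.0, 2.0])))
# -> approximately [1.0, 0.3679, 0.1353]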
def line_length(line, ellipsoid='WGS-84',shipping=True):
"""Length of a line in meters, given in geographic coordinates
Adapted from https://gis.stackexchange.com/questions/4022/looking-for-a-pythonic-way-to-calculate-the-length-of-a-wkt-linestring#answer-115285
Arguments:
line {Shapely LineString} -- a shapely LineString object with WGS-84 coordinates
ellipsoid {String} -- string name of an ellipsoid that `geopy` understands (see
http://geopy.readthedocs.io/en/latest/#module-geopy.distance)
Returns:
Length of line in meters
"""
if shipping == True:
if line.geometryType() == 'MultiLineString':
return sum(line_length(segment) for segment in line)
return sum(
vincenty(tuple(reversed(a)), tuple(reversed(b)), ellipsoid=ellipsoid).kilometers
for a, b in pairwise(line.coords)
)
else:
if line.geometryType() == 'MultiLineString':
return sum(line_length(segment) for segment in line)
return sum(
vincenty(a, b, ellipsoid=ellipsoid).kilometers
for a, b in pairwise(line.coords)
) | 2,338 |
def corr2_coeff(x, y):
"""A magic function for computing correlation between matrices and arrays.
This code is 640x+ faster on large datasets compared to np.corrcoef().
------------------------------------------------------------------
author: Divakar (https://stackoverflow.com/users/3293881/divakar)
url: https://stackoverflow.com/questions/42677677
------------------------------------------------------------------
"""
# input arrays subtract row-wise mean
x_sub_mx = x - x.mean(1)[:, None]
y_sub_my = y - y.mean(1)[:, None]
# sum of squares across rows
ssx = (x_sub_mx ** 2).sum(1)
ssy = (y_sub_my ** 2).sum(1)
return np.dot(x_sub_mx, y_sub_my.T) / np.sqrt(np.dot(ssx[:, None], ssy[None])) | 2,339 |
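# Usage sketch (illustrative) for corr2_coeff() above: row-wise correlations between
# two matrices. Assumes numpy as np; rows are variables, columns are observations.
import numpy as np

x = np.array([[1.0, 2.0, 3.0, 4.0]])
y = np.array([[2.0, 4.0, 6.0, 8.0],
              [4.0, 3.0, 2.0, 1.0]])
print(corr2_coeff(x, y))  # [[ 1. -1.]] -- perfectly correlated / anti-correlated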
def get_database_url(track: str) -> Optional[URL]:
"""
Get the database URL based on the environment
How the database URL is selected:
1. If a predefined URL for the track is set, use that
2. If no predefined URL is set, generate one based on the preferred database type
"""
database_default_port_mapping = {MYSQL: 3306, POSTGRES: 5432}
uppercase_track = track.upper()
track_database_url = env.str(f"K8S_{uppercase_track}_DATABASE_URL", "")
if track_database_url:
return make_url(track_database_url)
database_type = get_database_type()
if not database_type:
return None
deploy_name = get_deploy_name(track)
database_port = database_default_port_mapping[database_type]
database_host = f"{deploy_name}-db-{database_type}"
database_url = (
f""
f"{database_type}://{settings.DATABASE_USER}:{settings.DATABASE_PASSWORD}"
f"@{database_host}:{database_port}"
f"/{settings.DATABASE_DB}"
)
return make_url(database_url) | 2,340 |
def create_membership_push_to_timeline(sender, instance, created, **kwargs):
"""
Creates a new membership with an associated user. If the user is the project owner we don't
do anything because that info will be shown in the created-project timeline entry
@param sender: Membership model
@param instance: Membership object
"""
# That info is already shown in the created-project timeline entry
if created and instance.user and instance.user != instance.project.owner:
created_datetime = instance.created_at
_push_to_timelines(instance.project, instance.user, instance, "create", created_datetime) | 2,341 |
def build_suite():
"""A function."""
#suite = unittest.TestSuite()
#suite.addTest(WidgetTestCase('test_default_size'))
#suite.addTest(WidgetTestCase('test_resize'))
suite = unittest.TestLoader().loadTestsFromTestCase(WidgetTestCase)
return suite | 2,342 |
def get_quantile(data, percentage, **kwargs):
"""
Assuming the dataset is loaded as type `np.array`, and has shape
(num_samples, num_features).
:param data: Provided dataset, assume each row is a data sample and \
each column is one feature.
:type data: `np.ndarray`
:param percentage: Quantile or sequence of quantiles to compute, \
which must be between 0 and 1 inclusive.
:type percentage: `float` or `np.array` of `float`
:param kwargs: Dictionary of differential privacy arguments \
for computing the specified quantile of each feature across all samples, \
e.g., epsilon, etc.
:type kwargs: `dict`
:return: A vector of shape (1, num_features) that stores the
specified quantile of each feature across all samples.
:rtype: `np.array` of `float`
"""
try:
quantile_vec = np.quantile(data, q=percentage, axis=0)
except Exception as ex:
raise FLException('Error occurred when calculating '
'the quantile. ' + str(ex))
return quantile_vec | 2,343 |
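# Usage sketch (illustrative) for get_quantile() above: per-feature median of a
# small dataset. Assumes numpy as np; extra kwargs (e.g. DP parameters) are omitted here.
import numpy as np

data = np.array([[1.0, 10.0],
                 [2.0, 20.0],
                 [3.0, 30.0]])
print(get_quantile(data, 0.5))  # -> [ 2. 20.]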
def clean_repository_clone_url( repository_clone_url ):
"""Return a URL that can be used to clone a tool shed repository, eliminating the protocol and user if either exists."""
if repository_clone_url.find( '@' ) > 0:
# We have an url that includes an authenticated user, something like:
# http://[email protected]:9009/repos/some_username/column
items = repository_clone_url.split( '@' )
tmp_url = items[ 1 ]
elif repository_clone_url.find( '//' ) > 0:
# We have an url that includes only a protocol, something like:
# http://bx.psu.edu:9009/repos/some_username/column
items = repository_clone_url.split( '//' )
tmp_url = items[ 1 ]
else:
tmp_url = repository_clone_url
return tmp_url | 2,344 |
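# Usage sketch (illustrative) for clean_repository_clone_url() above, using a
# hypothetical URL: the protocol and authenticated user are stripped.
print(clean_repository_clone_url('http://user@toolshed.example.org:9009/repos/some_username/column'))
# -> 'toolshed.example.org:9009/repos/some_username/column'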
def calculate_uncertainty_ins_seg(logits, classes):
"""
We estimate uncertainty as L1 distance between 0.0 and the logit prediction in 'logits' for the
foreground class in `classes`.
Args:
logits (Tensor): A tensor of shape (R, C, ...) or (R, 1, ...) for class-specific or
class-agnostic, where R is the total number of predicted masks in all images and C is
the number of foreground classes. The values are logits.
classes (list): A list of length R that contains either the predicted or ground truth class
for each predicted mask.
Returns:
scores (Tensor): A tensor of shape (R, 1, ...) that contains uncertainty scores with
the most uncertain locations having the highest uncertainty score.
"""
if logits.shape[1] == 1:
gt_class_logits = logits.clone()
else:
gt_class_logits = logits[
torch.arange(logits.shape[0], device=logits.device), classes
].unsqueeze(1)
return -(torch.abs(gt_class_logits)) | 2,345 |
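# Usage sketch (illustrative) for calculate_uncertainty_ins_seg() above:
# class-specific logits for 2 masks and 3 classes. Assumes torch; the most
# confident logits receive the most negative (least uncertain) score.
import torch

logits = torch.tensor([[[0.1], [2.0], [-3.0]],
                       [[0.5], [0.2], [4.0]]])  # shape (R=2, C=3, 1)
classes = [1, 2]                                # foreground class per mask
scores = calculate_uncertainty_ins_seg(logits, classes)
print(scores.shape, scores.flatten())  # (2, 1, 1), values -2.0 and -4.0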
def euclidean_distance(x, y):
"""
Compute Euclidean distance between two Variable matrices.
---
param:
x: PyTorch Variable with shape (m, d)
y: PyTorch Variable with shape (n, d)
return:
distance: PyTorch Variable with shape (m, n)
"""
m, n = x.size(0), y.size(0)
xx = torch.pow(x, 2).sum(1, keepdim=True).expand(m, n)
yy = torch.pow(y, 2).sum(1, keepdim=True).expand(n, m).t()
distance = xx + yy
distance.addmm_(1, -2, x, y.t())
distance = distance.clamp(min=1e-12).sqrt()
return distance | 2,346 |
def gsettings_set(schema, path, key, value):
"""Set value of gsettings schema"""
if path is None:
gsettings = Gio.Settings.new(schema)
else:
gsettings = Gio.Settings.new_with_path(schema, path)
if isinstance(value, list):
return gsettings.set_strv(key, value)
if isinstance(value, int):
return gsettings.set_int(key, value)
if isinstance(value, str):
return gsettings.set_string(key, value) | 2,347 |
def _get_permutations_draw(draw):
"""Helper to get all permutations of a draw (list of letters), hint:
use itertools.permutations (order of letters matters)"""
for length in range(1, 8):
yield from itertools.permutations(draw, r=length) | 2,348 |
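# Usage sketch (illustrative) for _get_permutations_draw() above; assumes itertools
# is imported, as in the helper.
import itertools

perms = list(_get_permutations_draw(['a', 'b', 'c']))
print(len(perms))  # 15 = 3 + 6 + 6 (lengths 1, 2 and 3; longer lengths yield nothing)
print(perms[:3])   # [('a',), ('b',), ('c',)]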
def test_constructor_mm2gamma(setup_mm2gamma_zarr):
"""
test that constructor parses metadata properly
no data extraction in this test
"""
src = setup_mm2gamma_zarr
mmr = ZarrReader(src)
assert(mmr.mm_meta is not None)
assert(mmr.z_step_size is not None)
assert(mmr.width != 0)
assert(mmr.height != 0)
assert(mmr.frames != 0)
assert(mmr.slices != 0)
assert(mmr.channels != 0)
assert(mmr.rows is not None)
assert(mmr.columns is not None)
assert(mmr.wells is not None)
assert(mmr.hcs_meta is not None)
# Check HCS metadata copy
meta = mmr.hcs_meta
assert('plate' in meta.keys())
assert('well' in meta.keys())
assert(len(meta['well']) == mmr.get_num_positions())
assert('images' in meta['well'][0])
assert(len(meta['well'][0]['images']) != 0)
assert('path' in meta['well'][0]['images'][0])
assert(meta['well'][0]['images'][0]['path'] == 'Pos_000') | 2,349 |
def initialize_parameters_deep(layer_dims):
"""
Arguments:
layer_dims -- python array (list) containing the dimensions of each layer in our network
Returns:
parameters -- python dictionary containing your parameters "W1", "b1", ..., "WL", "bL":
Wl -- weight matrix of shape (layer_dims[l], layer_dims[l-1])
bl -- bias vector of shape (layer_dims[l], 1)
"""
np.random.seed(3)
parameters = {}
L = len(layer_dims) # number of layers in the network
for l in range(1, L):
### START CODE HERE ### (≈ 2 lines of code)
parameters['W' + str(l)] = np.random.randn(layer_dims[l], layer_dims[l - 1]) * 0.01
parameters['b' + str(l)] = np.zeros(shape=(layer_dims[l], 1))
### END CODE HERE ###
assert(parameters['W' + str(l)].shape == (layer_dims[l], layer_dims[l-1]))
assert(parameters['b' + str(l)].shape == (layer_dims[l], 1))
return parameters | 2,350 |
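# Usage sketch (illustrative) for initialize_parameters_deep() above; assumes numpy as np.
import numpy as np

parameters = initialize_parameters_deep([5, 4, 3])
print(parameters["W1"].shape, parameters["b1"].shape)  # (4, 5) (4, 1)
print(parameters["W2"].shape, parameters["b2"].shape)  # (3, 4) (3, 1)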
def get_nblocks_ntraces(f,nblocks,ntraces,pts,nbheaders,dt,read_blockhead):
"""
Read n blocks from a Varian binary file which may have multiple traces
per block.
Parameters:
* f File object of Varian binary file to read from.
* nblocks Number of blocks to read.
* ntraces Number of traces per block.
* pts Number of points per trace.
* nbheaders Number of block headers in each block.
* dt Data type of data in binary file (real).
* read_blockhead Set to True to read the varian blockheaders(s) into
the returned dictionary. False ignores them.
Returns: dic,data if read_blockhead is True, data if False
"""
# create an empty array to hold data
data = np.empty( (nblocks*ntraces,pts), dtype=dt)
if read_blockhead:
bdic = [0] * nblocks
# read the data
for i in xrange(nblocks):
if read_blockhead:
bdic[i],bdata = get_block_ntraces(f,ntraces,pts,nbheaders,dt,True)
data[i*ntraces:(i+1)*ntraces] = bdata
else:
bdata = get_block_ntraces(f,ntraces,pts,nbheaders,dt,False)
data[i*ntraces:(i+1)*ntraces] = bdata
if read_blockhead:
return bdic,data
else:
return data | 2,351 |
def register():
"""Run multi_pyspin constructor and register multi_pyspin destructor. Should be called once when first imported"""
multi_pyspin.register() | 2,352 |
def calcMedian(list_o_tuples):
"""Given a list of tuples (A, B), where A = category, and B = counts,
returns A that represents the median count value"""
#calc total
ct = 0
for (a, b) in list_o_tuples:
ct += float(b)
med = ct / 2
#find A
ct = 0
for (i, (a, b)) in enumerate(list_o_tuples):
ct += float(b)
if ct > med:
break
#print (i, a, b)
return a | 2,353 |
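# Usage sketch (illustrative) for calcMedian() above: returns the category whose
# cumulative count first exceeds half of the total.
print(calcMedian([("low", 10), ("mid", 30), ("high", 20)]))  # -> 'mid'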
def add(n1, n2):
"""Adds the 2 given numbers"""
return n1 + n2 | 2,354 |
def kwarg_any(kwarg_functions):
"""Resolve kwarg predicates with short-circuit evaluation. This optimization
technique means we do not have to evaluate every predicate if one is already
true.
"""
return any(kwarg_function() for kwarg_function in kwarg_functions) | 2,355 |
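# Usage sketch (illustrative) for kwarg_any() above: the second predicate is never
# evaluated because the first one already returns True (short-circuit evaluation).
checks = [lambda: True, lambda: 1 / 0]
print(kwarg_any(checks))  # True, and no ZeroDivisionError is raised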
def _behler_parrinello_cutoff_fn(dr: Array,
cutoff_distance: float=8.0) -> Array:
"""Function of pairwise distance that smoothly goes to zero at the cutoff."""
# Also returns zero if the pairwise distance is zero,
# to prevent a particle from interacting with itself.
return jnp.where((dr < cutoff_distance) & (dr > 1e-7),
0.5 * (jnp.cos(jnp.pi * dr / cutoff_distance) + 1), 0) | 2,356 |
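# Usage sketch (illustrative) for _behler_parrinello_cutoff_fn() above; assumes
# jax.numpy as jnp, as in the function.
import jax.numpy as jnp

dr = jnp.array([0.0, 4.0, 8.0, 12.0])
print(_behler_parrinello_cutoff_fn(dr, cutoff_distance=8.0))
# -> [0, 0.5, 0, 0]: zero at dr=0 (self-interaction) and at or beyond the cutoff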
def test_get_models_list():
"""Assert that the right models are returned when the parameter is a list."""
atom = ATOMClassifier(X_bin, y_bin, random_state=1)
atom.run(["LR1", "LR2", "LR3"])
assert atom._get_models(["LR1", "LR2"]) == ["LR1", "LR2"] | 2,357 |
def _get_build_to_download(build: str) -> Tuple[str, Optional[str]]:
"""Get the build version to download.
If the passed value is not an explicit build number (e.g. 15.0) then
the build for the current day of that major/minor will be downloaded.
:param build: The target build number.
:return: The target build information.
"""
components = build.split(".")
num_components = len(components)
if num_components == 1:
components.append("0")
if num_components == 2:
return ".".join(components), None
# Always treat the last component as the 'build'. Unlike Houdini itself
# which would treat a release candidate version as part of the build number
# the web api will treat the candidate version as the build number and
# the 3 main components as the version.
return ".".join(components[: num_components - 1]), components[-1] | 2,358 |
def test(program, mode):
"""
function to do tests
"""
print('Program {}, mode {}'.format(program, mode))
print('Doing tests...') | 2,359 |
def label(type=None,
is_emphasis=True,
is_label_show=False,
label_pos=None,
label_text_color="#000",
label_text_size=12,
formatter=None,
**kwargs):
""" Text label of , to explain some data information about graphic item like value, name and so on.
In ECharts 3, to make the configuration structure flatter,
labelis taken to be at the same level with itemStyle, and has two status normal and emphasis as itemStyle does.
:param type:
Chart type
:param is_emphasis:
It specifies whether to show laebl in emphasis status.
:param is_label_show:
It specifies whether to show laebl in normal status.
:param label_pos:
Label position.It can be 'top', 'left', 'right', 'bottom', 'inside','outside'
:param label_text_color:
Label text color.
:param label_text_size:
Label font size.
:param formatter:
Data label formatter,it can be 'series', 'name', 'value', 'precent'
:param kwargs:
:return:
"""
if label_pos is None:
label_pos = "outside" if type in ["pie", "graph"] else "top"
_label = {
"normal": {"show": is_label_show,
"position": label_pos,
"textStyle": {"color": label_text_color,
"fontSize": label_text_size}},
"emphasis": {"show": is_emphasis}
}
fmat = {"series": "{a} ", "name": "{b} ", "value": "{c} ", "percent": "{d}% "}
if formatter is None:
_formatter = "{b} {d}%" if type == "pie" else None
else:
_formatter = "".join([fmat.get(f) for f in formatter if fmat.get(f, None)])
if type != "graph":
_label.get("normal").update(formatter=_formatter)
return _label | 2,360 |
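# Usage sketch (illustrative) for label() above: label options for a pie chart
# with visible labels and the default pie formatter.
print(label(type="pie", is_label_show=True))
# {'normal': {'show': True, 'position': 'outside',
#             'textStyle': {'color': '#000', 'fontSize': 12},
#             'formatter': '{b} {d}%'},
#  'emphasis': {'show': True}}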
def MPC_ComputeCrc(card_type: TechnologyType, frame: bytes) -> bytes:
"""Computes frame CRC
Parameters
----------
card_type : TechnologyType
Technology type
frame : bytes
Input frame
Returns
-------
bytes
CRC bytes
"""
if not isinstance(card_type, TechnologyType):
raise TypeError('card_type must be an instance of '
'TechnologyType IntEnum')
if not isinstance(frame, bytes):
raise TypeError('frame must be an instance of bytes')
_check_limits(c_uint32, len(frame), 'frame')
crc1 = c_uint8()
crc2 = c_uint8()
CTS3Exception._check_error(_MPuLib.MPC_ComputeCrc(
c_uint8(0),
c_int32(card_type),
frame,
c_uint32(len(frame)),
byref(crc1),
byref(crc2)))
return bytes([crc1.value, crc2.value]) | 2,361 |
def max_contiguous(input, value, _builder=None):
"""
Let the compiler know that the first `value` values in :code:`input` are contiguous.
"""
value = _constexpr_to_value(value)
return semantic.max_contiguous(input, value) | 2,362 |
def kl_reverse(logu: torch.Tensor) -> torch.Tensor:
"""
Log-space Csiszar function for reverse KL-divergence D_f(p,q) = KL(q||p).
Also known as the exclusive KL-divergence and negative ELBO, minimizing
results in zero-forcing / mode-seeking behavior.
Args:
logu (torch.Tensor): ``p.log_prob``s evaluated at samples from q.
"""
return -logu | 2,363 |
def twos_comp_to_signed(val: int, n_bits: int) -> int:
"""
Convert a "two's complement" representation (as an integer) to its signed
version.
Args:
val: positive integer representing a number in two's complement format
n_bits: number of bits (which must reflect a whole number of bytes)
Returns:
signed integer
See http://stackoverflow.com/questions/1604464/twos-complement-in-python
"""
assert n_bits % 8 == 0, "Must specify a whole number of bytes"
n_bytes = n_bits // 8
b = val.to_bytes(n_bytes, byteorder=sys.byteorder, signed=False)
return int.from_bytes(b, byteorder=sys.byteorder, signed=True) | 2,364 |
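# Usage sketch (illustrative) for twos_comp_to_signed() above: 8-bit two's complement values.
print(twos_comp_to_signed(0xFF, 8))  # -1
print(twos_comp_to_signed(0x7F, 8))  # 127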
def main():
"""main entry point for module execution
"""
argument_spec = dict(
lines=dict(type='list', aliases=['commands'], required=True),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True
)
lines = module.params['lines']
result = {
'changed': False
}
if lines and not module.check_mode:
r = edit_config(module, lines)
result['changed'] = True
result['commands'] = lines
result['updates'] = lines
result['result'] = r
# "<ERROR> Need to do reset after execute the save command."
# When this message comes back, emit a warning
commit_resp = r.get('commit_response')
if commit_resp:
result['warnings'] = commit_resp
module.exit_json(**result) | 2,365 |
def delete_file(subject_file_id):
"""deletes a particular file
:rtype tuple
:return (subject_file_id, deleted_file_path)
"""
file_entity = SubjectFileEntity.query.filter_by(id=subject_file_id).one()
file_path = file_entity.get_full_path(app.config['REDIDROPPER_UPLOAD_SAVED_DIR'])
os.remove(file_path)
file_entity.delete()
db.session.commit()
return (subject_file_id, file_path) | 2,366 |
def export_chart_avg_age_by_country(data_file_path):
"""
Exports a line chart with average age observed for each country.
Args:
data_file_path (str):
File path of the exported CSV file
"""
# Read csv as pandas dataframe
fight_data = pd.read_csv(data_file_path, sep=';')
fight_data_df = pd.DataFrame(fight_data)
# Group data for average age of each country and plot a line chart
fight_data_df.groupby([' country'])[' age'].mean().plot(linewidth=10)
plt.xlabel("Country")
plt.ylabel("Average Age observed for each country")
plt.title("Distribution of average age observed for each country")
plt.savefig('saved_figure_avg_age.png', dpi=300, bbox_inches = "tight")
plt.close() | 2,367 |
def test_command_line_slide_info_file_not_found(sample_svs):
"""Test CLI slide info file not found error."""
runner = CliRunner()
slide_info_result = runner.invoke(
cli.main,
[
"slide-info",
"--img-input",
str(sample_svs)[:-1],
"--file-types",
"*.ndpi, *.svs",
"--mode",
"show",
],
)
assert slide_info_result.output == ""
assert slide_info_result.exit_code == 1
assert isinstance(slide_info_result.exception, FileNotFoundError) | 2,368 |
def do_lint() -> str:
"""
Execute pylint
"""
check_command_exists("pylint")
with safe_cd(SRC):
lint_output_file_name = f"{PROBLEMS_FOLDER}/lint.txt"
if os.path.isfile(lint_output_file_name):
os.remove(lint_output_file_name)
if IS_DJANGO:
django_bits = "--load-plugins pylint_django "
else:
django_bits = ""
command_text = (
f"{PIPENV} pipenv run pylint {django_bits} "
f"--rcfile=.pylintrc {PROJECT_NAME} "
f"--output-format=parseable".strip().replace(" ", " ")
)
print(command_text)
command = shlex.split(command_text)
with open(lint_output_file_name, "w") as outfile:
env = config_pythonpath()
subprocess.call(command, stdout=outfile, env=env)
full_text = open(lint_output_file_name, "r").read()
lint_did_indeed_run = "Your code has been rated at" in full_text
fatal_errors = sum(
1
for line in open(lint_output_file_name)
if "no-member" in line
or "no-name-in-module" in line
or "import-error" in line
or ": E" in line
or ": F" in line
)
if fatal_errors > 0:
for line in open(lint_output_file_name):
if (
"no-member" in line
or "no-name-in-module" in line
or "import-error" in line
or ": E" in line
or ": F" in line
):
print(line)
message = f"Fatal lint errors : {fatal_errors}"
if IS_GITLAB:
with open(lint_output_file_name) as error_file:
print(error_file.read())
say_and_exit(message, "lint")
return message
for line in [
line
for line in open(lint_output_file_name)
if not (
"*************" in line
or "---------------------" in line
or "Your code has been rated at" in line
or line == "\n"
)
]:
print(line)
if total_loc() > SMALL_CODE_BASE_CUTOFF:
cutoff = MAXIMUM_LINT
else:
cutoff = 0
num_lines = sum(
1
for line in open(lint_output_file_name)
if not (
"*************" in line
or "---------------------" in line
or "Your code has been rated at" in line
or line == "\n"
)
)
if num_lines > cutoff:
print(f"Too many lines of lint : {num_lines}, max {cutoff}")
exit(-1)
num_lines_all_output = sum(1 for _ in open(lint_output_file_name))
if not lint_did_indeed_run and num_lines_all_output == 0:
# should always have at least 'found 0 errors' in output
if os.path.isfile(lint_output_file_name):
# force lint to re-run, because empty file will be missing
os.remove(lint_output_file_name)
print("No lint messages at all, did pylint fail to run or is it installed?")
exit(-1)
return "pylint succeeded" | 2,369 |
def json_configs(type, name):
"""
Base method that extracts the configuration info from the json file defined
in SETTINGS
Args:
type - the name of the type of configuration object to look in
name - the name of the object whose configs will be extracted
Returns:
a dict containing the settings for the object of type and name
Raises:
a value error if type or name are not defined in the SETTINGS json file
"""
f = open(SETTINGS)
configs = json.load(f)[type]
f.close()
if name not in configs:
raise ValueError('Unable to find configuration for %s %s' % (type, name))
return configs[name] | 2,370 |
def reload() -> bool:
"""Gracefully reloads uWSGI.
* http://uwsgi.readthedocs.io/en/latest/Management.html#reloading-the-server
"""
return False | 2,371 |
def predict_imagen(titulo=None,
grados=None,
ano_lanzamiento=None,
paginas=None,
codbarras=None):
""" Predictor for Imagen from model/5a143f443980b50a74003699
Created using BigMLer
"""
import re
tm_tokens = 'tokens_only'
tm_full_term = 'full_terms_only'
tm_all = 'all'
def term_matches(text, field_name, term):
""" Counts the number of occurences of term and its variants in text
"""
forms_list = term_forms[field_name].get(term, [term])
options = term_analysis[field_name]
token_mode = options.get('token_mode', tm_tokens)
case_sensitive = options.get('case_sensitive', False)
first_term = forms_list[0]
if token_mode == tm_full_term:
return full_term_match(text, first_term, case_sensitive)
else:
# In token_mode='all' we will match full terms using equals and
# tokens using contains
if token_mode == tm_all and len(forms_list) == 1:
pattern = re.compile(r'^.+\b.+$', re.U)
if re.match(pattern, first_term):
return full_term_match(text, first_term, case_sensitive)
return term_matches_tokens(text, forms_list, case_sensitive)
def full_term_match(text, full_term, case_sensitive):
"""Counts the match for full terms according to the case_sensitive
option
"""
if not case_sensitive:
text = text.lower()
full_term = full_term.lower()
return 1 if text == full_term else 0
def get_tokens_flags(case_sensitive):
"""Returns flags for regular expression matching depending on text
analysis options
"""
flags = re.U
if not case_sensitive:
flags = (re.I | flags)
return flags
def term_matches_tokens(text, forms_list, case_sensitive):
""" Counts the number of occurrences of the words in forms_list in
the text
"""
flags = get_tokens_flags(case_sensitive)
expression = r'(\b|_)%s(\b|_)' % '(\\b|_)|(\\b|_)'.join(forms_list)
pattern = re.compile(expression, flags=flags)
matches = re.findall(pattern, text)
return len(matches)
term_analysis = {
"titulo": {
"case_sensitive": False,
"token_mode": 'all',
},
}
term_forms = {
"titulo": {
"fantásticos": ['fantásticos', 'fantásticas'],
"gigante": ['gigante', 'gigantes'],
},
}
if (codbarras is None):
return {"prediction": 1.82, "error": 5.53698}
if (codbarras > 9789872414340):
if (ano_lanzamiento is None):
return {"prediction": 9, "error": 7.02326}
if (ano_lanzamiento > 2008):
if (paginas is None):
return {"prediction": 10.5, "error": 5.88884}
if (paginas > 90):
if (titulo is None):
return {"prediction": 9, "error": 5.08228}
if (term_matches(titulo, "titulo", u"fantásticos") > 0):
return {"prediction":8, "error":5.08228}
if (term_matches(titulo, "titulo", u"fantásticos") <= 0):
if (grados is None):
return {"prediction": 9.5, "error": 5.26764}
if (grados == "Elsa Pizzi"):
return {"prediction":9, "error":5.26764}
if (grados != "Elsa Pizzi"):
return {"prediction":10, "error":5.26764}
if (paginas <= 90):
if (titulo is None):
return {"prediction": 12, "error": 5.08228}
if (term_matches(titulo, "titulo", u"gigante") > 0):
return {"prediction":11, "error":5.08228}
if (term_matches(titulo, "titulo", u"gigante") <= 0):
if (grados is None):
return {"prediction": 12.5, "error": 5.26764}
if (grados == "Patricia Roggio"):
return {"prediction":13, "error":5.26764}
if (grados != "Patricia Roggio"):
return {"prediction":12, "error":5.26764}
if (ano_lanzamiento <= 2008):
if (grados is None):
return {"prediction": 6, "error": 5.08228}
if (grados == "4°, 5°"):
return {"prediction":7, "error":5.08228}
if (grados != "4°, 5°"):
if (grados == "5°, 6°"):
return {"prediction":5, "error":5.26764}
if (grados != "5°, 6°"):
return {"prediction":6, "error":5.26764}
if (codbarras <= 9789872414340):
if (codbarras > 9789872414309):
if (paginas is None):
return {"prediction": 3, "error": 5.08228}
if (paginas > 100):
if (grados is None):
return {"prediction": 2.5, "error": 5.26764}
if (grados == "4°, 5°"):
return {"prediction":2, "error":5.26764}
if (grados != "4°, 5°"):
return {"prediction":3, "error":5.26764}
if (paginas <= 100):
return {"prediction":4, "error":5.08228}
if (codbarras <= 9789872414309):
if (codbarras > 9789871989852):
return {"prediction":1, "error":0.26071}
if (codbarras <= 9789871989852):
return {"prediction":0, "error":0.04286} | 2,372 |
def traducir_texto(texto, lenguaje_destino):
"""
Translates an input text.
.. note::
It is important to keep the following points in mind when using the \
**traducir_texto** function:
* The function uses the googletrans library, which relies on the Google \
Translate API. An internet connection is therefore required for it \
to work.
* The maximum length of a single input text is 15,000 characters.
* Due to the limitations of the web version of Google Translate, \
using the API does not guarantee that the library will work \
correctly at all times.
* If you need a stable API, the official \
`Google Translate API <https://cloud.google.com/translate/docs>`_ is recommended.
* If you receive an HTTP 5xx error, it is probably because Google has \
blocked your IP address.
* For more information, see the \
`googletrans documentation <https://py-googletrans.readthedocs.io/en/latest/>`_.
:param texto: Input text.
:type texto: str
:param lenguaje_destino: Indicates the language into which the text \
should be translated. For more information, see the \
:ref:`Supported languages <seccion_lenguajes_soportados>` section.
:type lenguaje_destino: {'es', 'en', 'fr', 'ge'}
:return: (str) Translated text.
"""
traductor = Translator()
    # Map the target language to the format expected by the API
lenguaje_destino = dict_lenguajes[lenguaje_destino]
lenguaje_destino = dict_lenguajes_simplificado[lenguaje_destino]
salida = traductor.translate(texto, dest=lenguaje_destino)
if isinstance(texto, str):
return salida.text
else:
return [i.text for i in salida] | 2,373 |
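# Usage sketch for traducir_texto, assuming the module-level Translator import
# and the dict_lenguajes / dict_lenguajes_simplificado mappings referenced
# above are available and Google Translate is reachable (outputs indicative).
texto_en = traducir_texto("Esta es una prueba.", lenguaje_destino="en")
print(texto_en)      # e.g. "This is a test."
lista_en = traducir_texto(["hola", "adiós"], lenguaje_destino="en")
print(lista_en)      # e.g. ["hello", "goodbye"]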
def euler_to_axis_angle(roll: float, pitch: float, yaw: float) -> np.ndarray:
"""Converts Euler angle to Axis-angle format.
Args:
roll: rotation angle.
pitch: up/down angle.
yaw: left/right angle.
Returns:
Equivalent Axis-angle format.
"""
r = Rotation.from_euler('xyz', [roll, pitch, yaw])
return r.as_rotvec() | 2,374 |
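# Worked example, assuming numpy and scipy.spatial.transform.Rotation are
# imported at module level as the function requires: a pure 90-degree yaw
# (in radians) becomes a rotation vector along z with magnitude pi/2.
import numpy as np

rotvec = euler_to_axis_angle(0.0, 0.0, np.pi / 2)
print(np.round(rotvec, 6))  # ~ [0, 0, 1.570796]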
def predict():
"""
An example of how to load a trained model and use it
to predict labels.
"""
# load the saved model
    classifier = pickle.load(open('best_model.pkl', 'rb'))
# compile a predictor function
predict_model = theano.function(
inputs=[classifier.input],
outputs=classifier.y_pred)
    # We can test it on some examples from the test set
dataset='mnist.pkl.gz'
datasets = load_data(dataset)
test_set_x, test_set_y = datasets[2]
test_set_x = test_set_x.get_value()
predicted_values = predict_model(test_set_x[:10])
print("Predicted values for the first 10 examples in test set:")
print(predicted_values) | 2,375 |
def test_sanitize_query_params_only_month():
"""
GIVEN Empty year and correct month params.
WHEN sanitize_query_params() fun is invoked.
THEN Result is tuple with empty year and correct month.
"""
result = sanitize_query_params(year='', month='1')
assert result == ('', '01') | 2,376 |
def add_scalebar(axis, matchx=True, matchy=True, hidex=True, hidey=True, unitsx=None, unitsy=None, scalex=1.0, scaley=1.0, xmax=None, ymax=None, space=None, **kwargs):
"""
Add scalebars to axes
    Adds a set of scale bars to *axis*, matching the size to the ticks of the plot and optionally hiding the x and y axes
- axis : the axis to attach ticks to
- matchx,matchy : if True, set size of scale bars to spacing between ticks, if False, set size using sizex and sizey params
- hidex,hidey : if True, hide x-axis and y-axis of parent
- **kwargs : additional arguments passed to AnchoredScaleBars
Returns created scalebar object
"""
def get_tick_size(subaxis):
tick_size = None
tick_locs = subaxis.get_majorticklocs()
if len(tick_locs)>1:
tick_size = np.abs(tick_locs[1] - tick_locs[0])
return tick_size
if matchx:
sizex = get_tick_size(axis.xaxis)
if matchy:
sizey = get_tick_size(axis.yaxis)
if 'sizex' in kwargs:
sizex = kwargs['sizex']
if 'sizey' in kwargs:
sizey = kwargs['sizey']
def autosize(value, maxvalue, scale, n=1, m=10):
round_to_n = lambda value, n, m: int(np.ceil(round(value, -int(np.floor(np.log10(abs(value)))) + (n - 1)) / m)) * m
while value > maxvalue:
try:
value = round_to_n(0.8 * maxvalue * scale, n, m) / scale
except:
value /= 10.0
m /= 10.0
return value
if ymax is not None and sizey>ymax:
sizey = autosize(sizey, ymax, scaley)
if xmax is not None and sizex>xmax:
sizex = autosize(sizex, xmax, scalex)
kwargs['sizex'] = sizex
kwargs['sizey'] = sizey
if unitsx is None:
unitsx = ''
if unitsy is None:
unitsy = ''
if 'labelx' not in kwargs or kwargs['labelx'] is None:
kwargs['labelx'] = '%.3g %s'%(kwargs['sizex'] * scalex, unitsx)
if 'labely' not in kwargs or kwargs['labely'] is None:
kwargs['labely'] = '%.3g %s'%(kwargs['sizey'] * scaley, unitsy)
# add space for scalebar
if space is not None:
ylim0, ylim1 = axis.get_ylim()
ylim = (ylim0 - space, ylim1)
if ylim0 > ylim1: # if y axis is inverted
ylim = (ylim0 + space, ylim1)
axis.set_ylim(ylim)
scalebar = AnchoredScaleBar(axis, **kwargs)
axis.add_artist(scalebar)
if hidex:
axis.xaxis.set_visible(False)
if hidey:
axis.yaxis.set_visible(False)
if hidex and hidey:
axis.set_frame_on(False)
return scalebar | 2,377 |
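# Hypothetical usage of add_scalebar, assuming the AnchoredScaleBar class it
# instantiates is defined or imported elsewhere in this module. Bar sizes are
# taken from the tick spacing; the given units are appended to the labels.
import numpy as np
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
t = np.linspace(0, 1, 500)
ax.plot(t, np.sin(2 * np.pi * 5 * t))
add_scalebar(ax, unitsx='s', unitsy='a.u.')
plt.show()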
def test_cross_validate():
"""Assert that the cross_validate method works as intended."""
atom = ATOMClassifier(X_bin, y_bin, random_state=1)
atom.run("LR")
assert isinstance(atom.lr.cross_validate(), dict)
assert isinstance(atom.lr.cross_validate(scoring="AP"), dict) | 2,378 |
def parse_args():
"""Parse and enforce command-line arguments."""
try:
options, args = getopt(sys.argv[1:], "l:dh", ["listen=", "debug", "help"])
except GetoptError as e:
print("error: %s." % e, file=sys.stderr)
print_usage()
sys.exit(1)
listen = {"host": "127.0.0.1", "port": "8080"}
debug = False
for option, value in options:
if option in ("-h", "--help"):
print_usage()
sys.exit(0)
elif option in ("-l", "--listen"):
fields = value.split(":")
listen = {"host": fields[0].strip(),
"port": int(fields[1]) if len(fields) > 1 else "8080"}
elif option in ("-d", "--debug"):
debug = True
return (listen, debug) | 2,379 |
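# Quick check of the parser with a hypothetical command line; getopt and sys
# are already required by parse_args itself.
import sys

sys.argv = ["server.py", "--listen", "0.0.0.0:9000", "--debug"]
listen, debug = parse_args()
print(listen, debug)  # {'host': '0.0.0.0', 'port': 9000} True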
def test_status_no_total(monkeypatch):
""" Have not created a job with .set_total() yet
"""
monkeypatch.setenv('WorkTopic', 'abc123')
with patch('redis.StrictRedis', mock_strict_redis_client):
with pytest.raises(JobDoesNotExist):
RedisProgress().status('123') | 2,380 |
def create_medoids_summary(season, country, result, d, names):
"""
    Create a cluster-based summary of the medoids' descriptions
    Parameters:
        season: str, season used for clustering
        country: str, country used for clustering
        result: cluster results joined to customer features
        d: trajectory clustering results
        names: list of cluster names
"""
results_with_clusters=result[pd.notnull(result['cluster'])]
season_name=seasons[season.lower()]
num_visitors_thousand=calc_num_visitors_in_thousands(result)
num_clusters=calc_num_clusters(d)
summary_text=f"""In the last {season_name} roughly {num_visitors_thousand} thousand tourists visited Tuscany from {country.title()}. The data shows us {num_clusters} clusters. Each line in the graph above represents a cluster's typical path that tourists from {country.title()} followed. These paths are displayed as differently-coloured lines in the map here above. \n"""
summary_text=summary_text+create_medoid_basic_description(results_with_clusters, names, num_clusters)
print(summary_text)
write_file(country, season, summary_text, 'summary')
return summary_text | 2,381 |
def backtest(
strategy,
    data,  # Treated as a csv path if str, or as a pandas DataFrame
commission=COMMISSION_PER_TRANSACTION,
init_cash=INIT_CASH,
data_format="c",
plot=True,
verbose=True,
sort_by="rnorm",
**kwargs
):
"""
Backtest financial data with a specified trading strategy
{0}
"""
    # Setting initial support for 1 cpu
# Return the full strategy object to get all run information
cerebro = bt.Cerebro(stdstats=False, maxcpus=1, optreturn=False)
cerebro.addobserver(bt.observers.Broker)
cerebro.addobserver(bt.observers.Trades)
cerebro.addobserver(bt.observers.BuySell)
# Convert all non iterables and strings into lists
kwargs = {
k: v if isinstance(v, Iterable) and not isinstance(v, str) else [v]
for k, v in kwargs.items()
}
cerebro.optstrategy(
STRATEGY_MAPPING[strategy],
init_cash=[init_cash],
transaction_logging=[verbose],
**kwargs
)
# Apply Total, Average, Compound and Annualized Returns calculated using a logarithmic approach
cerebro.addanalyzer(btanalyzers.Returns, _name="returns")
cerebro.addanalyzer(btanalyzers.SharpeRatio, _name="mysharpe")
cerebro.broker.setcommission(commission=commission)
# Treat `data` as a path if it's a string; otherwise, it's treated as a pandas dataframe
if isinstance(data, str):
if verbose:
print("Reading path as pandas dataframe ...")
data = pd.read_csv(data, header=0, parse_dates=["dt"])
# If data has `dt` as the index, set `dt` as the first column
# This means `backtest` supports the dataframe whether `dt` is the index or a column
if data.index.name == "dt":
data = data.reset_index()
pd_data = bt.feeds.PandasData(
dataname=data, **DATA_FORMAT_MAPPING[data_format]
)
cerebro.adddata(pd_data)
cerebro.broker.setcash(init_cash)
# Allows us to set buy price based on next day closing
# (technically impossible, but reasonable assuming you use all your money to buy market at the end of the next day)
cerebro.broker.set_coc(True)
if verbose:
print("Starting Portfolio Value: %.2f" % cerebro.broker.getvalue())
# clock the start of the process
tstart = time.time()
stratruns = cerebro.run()
# clock the end of the process
tend = time.time()
params = []
metrics = []
if verbose:
print("==================================================")
for stratrun in stratruns:
if verbose:
print("**************************************************")
for strat in stratrun:
p = strat.p._getkwargs()
p = {
k: v
for k, v in p.items()
if k not in ["periodic_logging", "transaction_logging"]
}
returns = strat.analyzers.returns.get_analysis()
sharpe = strat.analyzers.mysharpe.get_analysis()
# Combine dicts for returns and sharpe
m = {
**returns,
**sharpe,
"pnl": strat.pnl,
"final_value": strat.final_value,
}
params.append(p)
metrics.append(m)
if verbose:
print("--------------------------------------------------")
print(p)
print(returns)
print(sharpe)
params_df = pd.DataFrame(params)
metrics_df = pd.DataFrame(metrics)
# Get indices based on `sort_by` metric
optim_idxs = np.argsort(metrics_df[sort_by].values)[::-1]
sorted_params_df = params_df.iloc[optim_idxs].reset_index(drop=True)
sorted_metrics_df = metrics_df.iloc[optim_idxs].reset_index(drop=True)
sorted_combined_df = pd.concat(
[sorted_params_df, sorted_metrics_df], axis=1
)
# print out the result
print("Time used (seconds):", str(tend - tstart))
# Save optimal parameters as dictionary
optim_params = sorted_params_df.iloc[0].to_dict()
optim_metrics = sorted_metrics_df.iloc[0].to_dict()
print("Optimal parameters:", optim_params)
print("Optimal metrics:", optim_metrics)
if plot:
has_volume = DATA_FORMAT_MAPPING[data_format]["volume"] is not None
# Plot only with the optimal parameters when multiple strategy runs are required
if params_df.shape[0] == 1:
# This handles the Colab Plotting
# Simple Check if we are in Colab
try:
from google.colab import drive
iplot=False
except:
iplot=True
cerebro.plot(volume=has_volume, figsize=(30, 15), iplot=iplot)
else:
print("=============================================")
print("Plotting backtest for optimal parameters ...")
backtest(
strategy,
                data,  # Treated as a csv path if str, or as a pandas DataFrame
commission=commission,
data_format=data_format,
plot=plot,
verbose=verbose,
sort_by=sort_by,
**optim_params
)
return sorted_combined_df | 2,382 |
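# Hypothetical call, assuming "smac" is a key in STRATEGY_MAPPING, `df` is an
# OHLCV dataframe with a `dt` column, and fast_period/slow_period are valid
# parameters of that strategy; keyword arguments given as lists are swept as
# a parameter grid.
results = backtest(
    "smac",
    df,
    init_cash=100000,
    fast_period=[10, 15],
    slow_period=40,
    plot=False,
)
print(results.head())  # parameter/metric grid sorted by `rnorm`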
def mapflatdeep(iteratee, *seqs):
"""
    Map an `iteratee` to each element of each iterable in `seqs` and recursively flatten the
results.
Examples:
>>> list(mapflatdeep(lambda n: [[n, n]], [1, 2]))
[1, 1, 2, 2]
Args:
iteratee (object): Iteratee applied per iteration.
*seqs (Iterable): Iterables to iterate over.
Yields:
Elements result from recursive flatten + map operations.
"""
return flattendeep(map(iteratee, *seqs)) | 2,383 |
def eHealthClass_getSkinConductanceVoltage():
"""eHealthClass_getSkinConductanceVoltage() -> float"""
return _ehealth.eHealthClass_getSkinConductanceVoltage() | 2,384 |
def construct_item(passage: str, labels):
"""
    Build an item from the input passage and its label sequence, e.g.
    ['B-ASP', 'I-ASP', 'I-ASP', 'I-ASP', ..., 'I-OPI', 'I-OPI', 'O'].
    An example of the constructed result:
    {
        'passage': '使用一段时间才来评价,淡淡的香味,喜欢!',
        'aspects': [['香味', 14, 16]],
        'opinions': [['喜欢', 17, 19]]
}
:return:
"""
assert len(passage) == len(labels)
aspects, opinions = [], []
for i, char, label in zip(range(len(passage)), passage, labels):
if label == "O":
continue
elif label.startswith("B"):
if label.endswith("ASP"):
aspects.append([char, i])
elif label.endswith("OPI"):
opinions.append([char, i])
else:
raise Exception("label must be in set {'B-ASP', 'I-ASP', 'B-OPI', 'I-OPI', 'O'}.")
elif label.endswith("ASP"):
if (i==0 or not labels[i-1].endswith("ASP")):
aspects.append([char, i])
else:
aspects[-1][0] += char
elif label.endswith("OPI"):
if (i==0 or not labels[i-1].endswith("OPI")):
opinions.append([char, i])
else:
opinions[-1][0] += char
else:
raise Exception("label must be in set {'B-ASP', 'I-ASP', 'B-OPI', 'I-OPI', 'O'}.")
aspects = [[aspect[0], aspect[1], aspect[1]+len(aspect[0])] for aspect in aspects]
opinions = [[opinion[0], opinion[1], opinion[1] + len(opinion[0])] for opinion in opinions]
result = {
"passage": passage,
"aspects": aspects,
"opinions": opinions
}
return result | 2,385 |
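# A worked example with a short hypothetical passage and BIO label sequence.
item = construct_item("味道很好", ["B-ASP", "I-ASP", "B-OPI", "I-OPI"])
print(item)
# {'passage': '味道很好', 'aspects': [['味道', 0, 2]], 'opinions': [['很好', 2, 4]]}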
def for_b():
""" *'s printed in the Shape of Small b """
for row in range(9):
for col in range(6):
if col ==0 or row in (4,8) and col !=5 or row in (5,6,7) and col ==5:
print('*',end=' ')
else:
print(' ',end=' ')
print() | 2,386 |
def close_client(client):
"""Saves the recoded responses to a temp file if the config file allows.
This should be called in the unit test's tearDown method.
Checks to see if settings.CACHE_RESPONSES is True, to make sure we only
save sessions to repeat if the user desires.
"""
if client and settings.CACHE_RESPONSES:
# If this was a live request, save the recording.
client.http_client.close_session() | 2,387 |
def getStartingAddress(packet):
"""Get the address of a modbus request"""
return ((ord(packet[8]) << 8) + ord(packet[9])) | 2,388 |
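# Worked example: a Modbus/TCP "read holding registers" request whose starting
# address sits in bytes 8-9 of the packet. With 0x00 0x6B there, the helper
# returns 107. Note it expects a byte string indexed char-by-char (ord()).
packet = "\x00\x01\x00\x00\x00\x06\x01\x03\x00\x6b\x00\x03"
print(getStartingAddress(packet))  # 107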
def rf_local_unequal(left_tile_col, rhs: Union[float, int, Column_type]) -> Column:
"""Cellwise inequality comparison between two tiles, or with a scalar value"""
if isinstance(rhs, (float, int)):
rhs = lit(rhs)
return _apply_column_function('rf_local_unequal', left_tile_col, rhs) | 2,389 |
def save_dump_to_file(dump):
"""
Saves dump information and activities in csv files.
"""
assert isinstance(dump, Dump)
with open("dumps.csv", "a") as dumps_file:
dumps_file.write(dump.to_csv())
dumps_file.write("\n")
print("Saved {0} activities between {1} and {2}...".format(
len(dump.activities),
dump.from_datetime.isoformat(" "),
dump.to_datetime.isoformat(" "))) | 2,390 |
def get_normalized_star_spectrum(spectral_type, magnitude, filter_name):
"""
spec_data = get_normalized_star_spectrum(spectral_type, magnitude, filter_name)
Returns a structure containing the synthetic spectrum of the star having the spectral type and magnitude
in the specified input filter. Magnitude is in VEGAMAG-F(lambda) system.
Spectra are from PICKLES, PASP, 110, 863 (1998)
Absolute flux spectra, no effect of atmospheric and instrument transmission
Parameters
----------
spectral_type: string.
spectral type and luminosity class (e.g. G2V or M4III) or 'vega'
magnitude: float.
magnitude in the filter_name filter
filter_name: string.
Name of the filter. See Filters.get() for the list of available filters
Returns
-------
spectrum: synphot.SourceSpectrum object defining the spectrum
Examples
--------
    Plot the spectra of Vega, A0V and G2V stars of mag=8 defined on the Johnson R filter
>>> sp= get_normalized_star_spectrum('vega', 8, Filters.JOHNSON_R)
>>> spA0V= get_normalized_star_spectrum('A0V', 8, Filters.JOHNSON_R)
>>> spG2V= get_normalized_star_spectrum('G2V', 8, Filters.JOHNSON_R)
>>> plt.plot(sp.waveset, sp(sp.waveset), label='Vega')
>>> plt.plot(spA0V.waveset, spA0V(spA0V.waveset), label='A0V')
>>> plt.plot(spG2V.waveset, spG2V(spG2V.waveset), label='G2V')
>>> plt.grid(True)
>>> plt.xlabel('nm')
>>> plt.ylabel('FLAM')
>>> plt.xlim(0, 10000)
>>> plt.legend()
"""
# read the sourcespectrum
if spectral_type == 'vega':
spectrum = SourceSpectrum.from_vega()
else:
spectrum = SourceSpectrum.from_file(
PickelsLibrary.filename(spectral_type))
bandpass = Filters.get(filter_name)
spectrum_norm = spectrum.normalize(
magnitude * synphot.units.VEGAMAG,
bandpass,
vegaspec=SourceSpectrum.from_vega())
return spectrum_norm | 2,391 |
def header(**kwargs):
"""
Create header node and return it.
Equivalent to :code:`return Element("header", attributes...)`.
"""
return Element("header", **kwargs) | 2,392 |
def bump_func(xs_arg, low, high, btype):
"""
    Set up the initial displacement distribution of a bump,
either sine or triangular.
"""
# check the case of a single float as input
if isinstance(xs_arg, (int, float)):
xs_in = np.array([float(xs_arg)])
scalar = True
else:
xs_in = xs_arg
scalar = False
Ys_out = 0.0 * xs_in
mid = (low + high) / 2.0
diff = high - low
for i in range(0, len(Ys_out)):
if ((xs_in[i] > low) and (xs_in[i] < high)):
if (btype == 1):
# triangle shape
Ys_out[i] = 1.0 - abs((xs_in[i] - mid) / (0.5 * diff))
else:
# sine bump
Ys_out[i] = (
1.0 + m.cos(2. * m.pi / diff * (xs_in[i] - mid))) / 2.
if scalar is True:
return Ys_out[0]
else:
return Ys_out | 2,393 |
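# Spot checks, assuming numpy (np) and math (m) are imported at module level
# as bump_func requires: both bump types peak at the midpoint and vanish
# outside [low, high].
print(bump_func(0.50, 0.0, 1.0, btype=1))  # 1.0  triangle peak
print(bump_func(0.50, 0.0, 1.0, btype=2))  # 1.0  sine peak
print(bump_func(0.25, 0.0, 1.0, btype=1))  # 0.5  halfway up the triangle
print(bump_func(1.50, 0.0, 1.0, btype=1))  # 0.0  outside the bump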
def main(debug_function: bool = False):
"""
the main function for the script
"""
# region def main(...):
# region debug_function
if debug_function:
print("[def] sort_by_number_of_videos_and_size.main()")
print("{")
# endregion
current_directory_path = os.getcwd()
# region debug_function
if debug_function:
print("> current_directory_path : \n (type) {0} = \n{1}"\
.format(type(current_directory_path),
str(current_directory_path)))
print()
# endregion
all_files_and_folders = os.listdir(current_directory_path)
# region debug_function
if debug_function:
print("> all_files_and_folders:")
for index, item in enumerate(all_files_and_folders):
print("> all_files_and_folders[{0}] :\n (type) {1} = \n {2}" \
.format(str(index), type(item), str(item)))
# print()
print()
# endregion
only_folders : list = []
for item in all_files_and_folders:
if is_folder(item):
only_folders.append(item)
...
# region debug_function
if debug_function:
print("> only_folders:")
for index, item in enumerate(only_folders):
print("> only_folders[{0}] :\n (type) {1} = \n {2}" \
.format(str(index), type(item), str(item)))
# print()
print()
# endregion
for folder in only_folders:
process_folder(current_directory_path, folder, debug_function=True)
...
# region debug_function
if debug_function:
print("}")
# endregion
... | 2,394 |
def set_kallisto_binary_path(path):
"""Helper function to set the KALLISTO_PATH variable. Automatically finds the
full path to the executable and sets that as KALLISTO_PATH.
"""
global KALLISTO_PATH
shutil_path = shutil.which(path)
actual_path = None
# First, check if it is an executable in the user's PATH
if shutil_path:
actual_path = os.path.abspath(shutil_path)
elif os.path.isfile(path):
actual_path = os.path.abspath(path)
else:
raise Exception(f'Unable to resolve path {path}')
# Check that it is executable
if not os.access(actual_path, os.X_OK):
raise NotExecutableException(f'{actual_path} is not executable')
KALLISTO_PATH = actual_path | 2,395 |
def setup_session(username, password, check_url=None,
session=None, verify=True):
"""
A special call to get_cookies.setup_session that is tailored for
URS EARTHDATA at NASA credentials.
"""
if session is not None:
# URS connections cannot be kept alive at the moment.
session.headers.update({'Connection': 'close'})
session = get_cookies.setup_session('https://urs.earthdata.nasa.gov',
username=username,
password=password,
session=session,
check_url=check_url,
verify=verify)
return session | 2,396 |
def is_number(item):
"""Check if the item is a number."""
return isinstance(item, numbers.Number) | 2,397 |
def from_list(commands):
"""
    Given a list of dicts with "level", "description" and "id" keys
    that represents a DFS traversal of a command tree,
    returns a list of nested dicts representing the command tree.
"""
def subtrees(commands, level):
if not commands:
return
acc = []
parent, *commands = commands
for command in commands:
if command["level"] > level:
acc.append(command)
else:
yield (parent, acc)
parent = command
acc.clear()
yield (parent, acc)
def walk(commands, level=0):
return [
{
"description": key["description"],
"children": walk(subtree, level + 1),
"id": key["id"],
}
for key, subtree in subtrees(commands, level)
]
return walk(commands) | 2,398 |
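# A minimal sketch of from_list on a flat DFS listing: two level-0 commands,
# the first with two level-1 children.
flat = [
    {"level": 0, "description": "file", "id": 1},
    {"level": 1, "description": "open", "id": 2},
    {"level": 1, "description": "save", "id": 3},
    {"level": 0, "description": "edit", "id": 4},
]
tree = from_list(flat)
print(tree)
# [{'description': 'file',
#   'children': [{'description': 'open', 'children': [], 'id': 2},
#                {'description': 'save', 'children': [], 'id': 3}],
#   'id': 1},
#  {'description': 'edit', 'children': [], 'id': 4}]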
def get_cassandra_config_options(config):
"""Parse Cassandra's Config class to get all possible config values.
    Unfortunately, some are hidden from the default cassandra.yaml file, so this appears to be the only way to do this."""
return _get_config_options(config=config, config_class='org.apache.cassandra.config.Config') | 2,399 |