content (string, lengths 22–815k) | id (int64, 0–4.91M)
---|---
def getFileList(*args, **kwargs):
"""
Returns a list of files matching an optional wildcard pattern. Note that this command works directly on raw system files
and does not go through standard Maya file path resolution.
Flags:
- filespec : fs (unicode) [create]
wildcard specifier for search. Flag can appear in Create mode of command. Flag can have multiple
arguments, passed either as a tuple or a list.
- folder : fld (unicode) [create]
return a directory listing
Derived from mel command `maya.cmds.getFileList`
"""
pass | 5,358,100 |
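A short usage sketch based only on the flags documented above; it assumes a running Maya session where `maya.cmds` is importable, and the folder path is purely illustrative.

import maya.cmds as cmds

# List Maya ASCII scenes in an illustrative folder; the path is an assumption.
scene_files = cmds.getFileList(folder='/projects/scenes/', filespec='*.ma')
# With no arguments the command lists files in the current directory.
all_files = cmds.getFileList()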
def export_descriptor(config, output_dir, args):
"""
# input 2 images, output keypoints and correspondence
save prediction:
pred:
'image': np(320,240)
'prob' (keypoints): np (N1, 2)
'desc': np (N2, 256)
'warped_image': np(320,240)
'warped_prob' (keypoints): np (N2, 2)
'warped_desc': np (N2, 256)
'homography': np (3,3)
'matches': np [N3, 4]
"""
from utils.loader import get_save_path
from utils.var_dim import squeezeToNumpy
# basic settings
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
logging.info("export on device: %s", device)
with open(os.path.join(output_dir, "config.yml"), "w") as f:
yaml.dump(config, f, default_flow_style=False)
writer = SummaryWriter(getWriterPath(task=args.command, date=True))
save_path = get_save_path(output_dir)
save_output = save_path / "../predictions"
os.makedirs(save_output, exist_ok=True)
## parameters
outputMatches = True
subpixel = config["model"]["subpixel"]["enable"]
patch_size = config["model"]["subpixel"]["patch_size"]
# data loading
from utils.loader import dataLoader_test as dataLoader
task = config["data"]["dataset"]
data = dataLoader(config, dataset=task)
test_set, test_loader = data["test_set"], data["test_loader"]
from utils.print_tool import datasize
datasize(test_loader, config, tag="test")
# model loading
from utils.loader import get_module
Val_model_heatmap = get_module("", config["front_end_model"])
## load pretrained
val_agent = Val_model_heatmap(config["model"], device=device)
val_agent.loadModel()
## tracker
tracker = PointTracker(max_length=2, nn_thresh=val_agent.nn_thresh)
###### check!!!
count = 0
for i, sample in tqdm(enumerate(test_loader)):
img_0, img_1 = sample["image"], sample["warped_image"]
# first image, no matches
# img = img_0
def get_pts_desc_from_agent(val_agent, img, device="cpu"):
"""
pts: list [numpy (3, N)]
desc: list [numpy (256, N)]
"""
heatmap_batch = val_agent.run(
img.to(device)
) # heatmap: numpy [batch, 1, H, W]
# heatmap to pts
pts = val_agent.heatmap_to_pts()
# print("pts: ", pts)
if subpixel:
pts = val_agent.soft_argmax_points(pts, patch_size=patch_size)
# heatmap, pts to desc
desc_sparse = val_agent.desc_to_sparseDesc()
# print("pts[0]: ", pts[0].shape, ", desc_sparse[0]: ", desc_sparse[0].shape)
# print("pts[0]: ", pts[0].shape)
outs = {"pts": pts[0], "desc": desc_sparse[0]}
return outs
def transpose_np_dict(outs):
for entry in list(outs):
outs[entry] = outs[entry].transpose()
outs = get_pts_desc_from_agent(val_agent, img_0, device=device)
pts, desc = outs["pts"], outs["desc"] # pts: np [3, N]
if outputMatches:
tracker.update(pts, desc)
# save keypoints
pred = {"image": squeezeToNumpy(img_0)}
pred.update({"prob": pts.transpose(), "desc": desc.transpose()})
# second image, output matches
outs = get_pts_desc_from_agent(val_agent, img_1, device=device)
pts, desc = outs["pts"], outs["desc"]
if outputMatches:
tracker.update(pts, desc)
pred.update({"warped_image": squeezeToNumpy(img_1)})
# print("total points: ", pts.shape)
pred.update(
{
"warped_prob": pts.transpose(),
"warped_desc": desc.transpose(),
"homography": squeezeToNumpy(sample["homography"]),
}
)
if outputMatches:
matches = tracker.get_matches()
print("matches: ", matches.transpose().shape)
pred.update({"matches": matches.transpose()})
print("pts: ", pts.shape, ", desc: ", desc.shape)
# clean last descriptor
tracker.clear_desc()
filename = str(count)
path = Path(save_output, "{}.npz".format(filename))
np.savez_compressed(path, **pred)
# print("save: ", path)
count += 1
print("output pairs: ", count) | 5,358,101 |
def test_left_right_transitions_linear():
"""Set a transition matrix generated by a left-right topology on a linear HMM"""
hmm = deepcopy(hmm_lr)
topology = _LinearTopology(n_states=5, random_state=rng)
transitions = topology.random_transitions()
hmm.transitions = transitions
assert_equal(hmm.transitions, transitions) | 5,358,102 |
def LinkSigningChain(*certs):
"""Sets up the parent cert ID property values for a chain of certs."""
for i, cert in enumerate(certs):
if i == len(certs) - 1:
cert.parent_certificate_id = 0
else:
cert.parent_certificate_id = certs[i + 1].id | 5,358,103 |
def meta_caption(meta) -> str:
"""makes text from metadata for captioning video"""
caption = ""
try:
caption += meta.title + " - "
except (TypeError, LookupError, AttributeError):
pass
try:
caption += meta.artist
except (TypeError, LookupError, AttributeError):
pass
return caption | 5,358,104 |
def ticket_id_correctly_formatted(s: str) -> bool:
"""Checks if Ticket ID is in the form of 'PROJECTNAME-1234'"""
return matches(r"^\w+-\d+$|^---$|^-$")(s) | 5,358,105 |
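The regular expression accepts a normal PROJECTNAME-1234 id plus the two placeholder forms '---' and '-'. A quick check of the same pattern with the standard re module (the surrounding `matches` helper is defined elsewhere):

import re

_TICKET_RE = r"^\w+-\d+$|^---$|^-$"
assert re.match(_TICKET_RE, "PROJ-1234")      # regular ticket id
assert re.match(_TICKET_RE, "---")            # placeholder for "no ticket"
assert re.match(_TICKET_RE, "-")              # short placeholder
assert not re.match(_TICKET_RE, "PROJ1234")   # missing the dash separator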
def test_1():
"""
f(x) = max(.2, sin(x)^2)
"""
test_graph = FunctionTree('Test_1')
max_node = Max('max')
const_node = Constant('0.2', .2)
square_node = Square('square')
sin_node = Sin('sin')
test_graph.insert_node(max_node, 'Output', 'x')
test_graph.insert_node(square_node, 'max', 'x')
test_graph.insert_node(const_node, 'max')
test_graph.insert_node(sin_node, 'square', 'x')
return test_graph | 5,358,106 |
def export_vector(vector, description, output_name, output_method='asset'):
"""Exports vector to GEE Asset in GEE or to shapefile
in Google Drive.
Parameters
----------
vector : ee.FeatureCollection
Classified vector segments/clusters.
description : str
Description of the exported layer.
output_name : str
Path for the output file. Path must exist within
Google Earth Engine Assets path or Google Drive.
output_method : str
Export method/destination. Options include 'asset' for
export to Google Earth Engine Assets or 'drive' for
export to Google Drive.
Returns
-------
output_message : str
Message indicating location of the exported layer.
Example
-------
>>> import ee
>>> peak_green = ee.Image('LANDSAT/LC08/C01/T1_SR/LC08_008057_20170602')
>>> post_harvest = ee.Image('LANDSAT/LC08/C01/T1_SR/LC08_008057_20170906')
>>> image_collection = ee.ImageCollection([peak_green, post_harvest])
>>> ndvi_diff = ndvi_diff_landsat8(image_collection, 1, 0)
>>> study_area_boundary = ee.FeatureCollection("users/calekochenour/vegetation-change/drtt_study_area_boundary")
>>> ndvi_change_thresholds = [-2.0, -0.5, -0.5, -0.35]
>>> change_features = segment_snic(ndvi_diff, study_area_boundary, ndvi_change_thresholds)
>>> change_primary_vector = raster_to_vector(change_features.get('primary'), study_area_boundary)
>>> change_secondary_vector = raster_to_vector(change_features.get('secondary'), study_area_boundary)
>>> change_primary_export = export_vector(vector=change_primary_vector, description='Primary Change', output_name=change_primary_asset_name, output_method='asset'
>>> change_secondary_export = export_vector(vector=change_secondary_vector, description='Secondary Change', output_name=change_secondary_asset_name, output_method='asset')
"""
# Create export task for Google Drive
if output_method.lower() == "drive":
# Export vectors as shapefile to Google Drive
task = ee.batch.Export.table.toDrive(**{
'collection': vector,
'description': output_name,
'fileFormat': 'SHP'})
# Assign output message
output_message = f"Exporting {output_name.split('/')[-1]} to Google Drive..."
# Create task for GEE Asset
elif output_method.lower() == "asset":
# Export vectors to GEE Asset
task = ee.batch.Export.table.toAsset(**{
'collection': vector,
'description': description,
'assetId': output_name})
# Assign output message
output_message = f"Exporting {output_name.split('/')[-1]} to GEE Asset..."
else:
# Raise error
raise ValueError("Invalid export method. Please specify 'Drive' or 'Asset'.")
# Start export task
task.start()
# Return output message
return print(output_message) | 5,358,107 |
def gather_competition_data(comp_url, target_directory=None):
"""
gather all competition data from the url:
"https://www.digitalrock.de/dav_calendar.php?no_dav=1&year=2019"
:return: - nothing - but the side effect is the competition
files in the target directory
"""
print("Fetching competition data from %r" % comp_url)
response = requests.get(comp_url)
html_content = response.text
# try to create the target directory if it did not exist
if not target_directory:
target_directory = Directory
if not os.path.exists(target_directory):
raise Exception('directory does not exist: %r' % target_directory)
for href in get_links_from_html(html_content):
if not href or not href.startswith('/egroupware'):
continue
_, _, params = href.partition('!')
if not params:
continue
competition = params.replace("&", "::") + '.json'
filename = os.path.join(target_directory, competition)
if os.path.exists(filename):
print("File already exists: %r" % filename)
continue
print("fetching competition %r" % params)
response = requests.get(json_url + params)
if not response.ok:
print("Error while fetchin data")
continue
jresp = json.loads(response.text)
with open(filename, "w") as f:
f.write(json.dumps(jresp, indent=4)) | 5,358,108 |
def fetch_configs(config=''):
"""
Fetch config files from the remote, via rsync. Specify a config
directory, such as 'cylinder' to copy just one config. Config files
are stored as, e.g. cylinder/config.dat and cylinder/config.xml
Local path to use is specified in machines_user.json, and should
normally point to a mount on entropy, i.e.
/store4/blood/username/config_files
This method is not intended for normal use, but is useful when the
local machine cannot have an entropy mount, so that files can be
copied to a local machine from entropy, and then transferred to the
compute machine, via 'fab entropy fetch_configs; fab legion
put_configs'
"""
with_config(config)
if env.manual_gsissh:
local(
template(
"globus-url-copy -cd -r -sync \
gsiftp://$remote/$job_config_path/ \
file://$job_config_path_local/"
)
)
else:
local(
template(
"rsync -pthrvz $username@$remote:$job_config_path/ \
$job_config_path_local"
)
) | 5,358,109 |
def test_bytes_head(path):
"""It should get the first lines of a file as bytes"""
assert bytes_head(path('0_0.csv'), 1) == b'a,b\n'
assert bytes_head(path('0_0.csv'), 100) == b'a,b\n0,0\n0,1' | 5,358,110 |
def reminder() -> None:
"""Send reminder if no entry as of certian times"""
utc_now = pytz.utc.localize(datetime.datetime.utcnow())
# Heroku scheduler usually runs a couple of minutes after the hour, not exactly on it, so only the hour is compared
now = datetime.datetime.strftime(utc_now.astimezone(pytz.timezone("America/Chicago")), '%I: %p')
if now in ["11: AM", "02: PM", "05: PM", "08: PM"]:
if entry_today() == "no":
sms_sender("I haven't heard from you today. How are you feeling? Reply with a # for: sleep, stress, joints,"
" energy, and your mood.", "+12064036747") | 5,358,111 |
def optimize_centers_mvuiq(A, B, Q, centers, keep_sparsity=True):
""" minimize reconstruction error after weighting by matrix A and make it unbiased
min_{c_i} \|A.(\sum_i Q_i c_i) - B\|_F^2 such that sum(B-A(\sum_i Q_i c_i)) = 0
"""
num_levels = len(centers)
thr = sla.norm(A) * 1e-6
# 1- compute A*(Q==i) and store it. find the non-empty quantization bins in the process
valid_idx = []
AQ = [np.zeros(1) for _ in range(num_levels)]
for i in range(num_levels):
AQ[i] = np.matmul(A, Q == i)
if (sla.norm(AQ[i]) >= thr) and ((centers[i] != 0) or not keep_sparsity):
# check whether the i-th bin has any effect on the quantization performance and
# do not consider sparse values (center=0)
valid_idx += [i]
if not valid_idx:
return
# 2- find the optimum reconstruction points for the non-empty quantization bins
# 2.a- create matrix M, used in the optimization problem
num_valid = len(valid_idx)
d = np.sum(B)
f = np.zeros(num_valid)
M = np.zeros(shape=(num_valid, num_valid))
e = np.zeros(shape=num_valid)
for r in range(num_valid):
f[r] = np.sum(AQ[valid_idx[r]])
for c in range(r, num_valid):
# trace(AQ[valid_idx[c]].T @ AQ[valid_idx[r]])
M[r, c] = np.sum(AQ[valid_idx[c]] * AQ[valid_idx[r]])
M[c, r] = M[r, c]
# trace(B.T @ AQ[valid_idx[r]])
e[r] = np.sum(AQ[valid_idx[r]] * B)
# 2.b- solve for min |Mx-e| such that fx=d
if num_valid == 0:
v = 0
elif num_valid == 1:
v = d / f[0]
elif num_valid == 2:
# for the special binary case, the solution can be found easily
scale = sla.norm(f) + 1e-12
f /= scale
d /= scale
u = np.array([-f[1], f[0]])
a = (e - d * M.dot(f)).dot(u) / (M.dot(u).dot(u) + 1e-12)
v = d * f + a * u
else:
# use quadratic programming (Goldfarb-Idnani algorithm) to solve the problem
d = np.array([d]).astype(float)
f = np.reshape(f, newshape=(-1, 1))
v = quadprog.solve_qp(M, e, f, d, 1)[0]
# 3- copy the found center points
centers[valid_idx] = v
return centers | 5,358,112 |
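A synthetic call sketch; shapes, bin assignments and initial centers are illustrative, and it assumes numpy, scipy.linalg (as sla) and quadprog are importable as in the function above.

import numpy as np

rng = np.random.RandomState(0)
A = rng.randn(8, 16)                        # weighting matrix
W = rng.randn(16, 4)                        # weights to be quantized
Q = rng.randint(0, 4, size=W.shape)         # quantization bin index per weight
centers = np.array([-0.5, 0.0, 0.5, 1.0])   # initial reconstruction points (0 kept sparse)
B = A @ W                                   # target to reconstruct
centers = optimize_centers_mvuiq(A, B, Q, centers, keep_sparsity=True)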
def container_instance_task_arns(cluster, instance_arn):
"""Fetch tasks for a container instance ARN."""
arns = ecs.list_tasks(cluster=cluster, containerInstance=instance_arn)['taskArns']
return arns | 5,358,113 |
def sierpinski_triangle(order, length, upper_left_x, upper_left_y):
"""
:param order: int, the order of Sierpinski Triangle
:param length: int, the length of order n Sierpinski Triangle
:param upper_left_x: int or float, the upper left x coordinate of order n Sierpinski Triangle
:param upper_left_y: int or float, the upper left y coordinate of order n Sierpinski Triangle
:return: no return value in this function
"""
if order == 0:
pass
else:
draw_tri(length, upper_left_x, upper_left_y)
# upper left triangle
sierpinski_triangle(order-1, length/2, upper_left_x, upper_left_y)
# upper right triangle
sierpinski_triangle(order-1, length/2, upper_left_x+length*0.5, upper_left_y)
# bottom triangle
sierpinski_triangle(order-1, length/2, upper_left_x+length*0.25, upper_left_y+length*0.433) | 5,358,114 |
def control_logic(center_x,center_y):
"""
Purpose:
---
This function should implement the control logic to balance the ball at a particular setpoint on the table.
The orientation of the top table should "ONLY" be controlled by the servo motor as we would expect in a
practical scenario.
Hence "ONLY" the shaft of the servo motor or in other words the revolute joint between servo and servo fin
should have 'motor enabled' and 'control loop enabled option' checked. Refer documentation for further understanding of
these options.
This function should use the necessary Legacy Python Remote APIs to control the revolute joints.
NOTE: In real life, a 180 degree servo motor can rotate between -90 to +90 degrees or -1.57 to 1.57 radians only.
Hence the command to be sent to servo motor should be between this range only. When the top plate is parallel to
base plate, the revolute joint between servo and servo fin should be at 0 degrees orientation. Refer documentation
for further understanding.
NOTE: Since the simulation is dynamic in nature there should not be any bottlenecks in this code due to which the
processing may take a lot of time. As a result the 'control_logic' function should be called in every iteration of
the while loop. Use global variables instead of reinitialising the variables used in this function.
Input Arguments:
---
`center_x` : [ int ]
the x centroid of the ball
`center_y` : [ int ]
the y centroid of the ball
Returns:
---
None
Example call:
---
control_logic(center_x,center_y)
"""
global setpoint, client_id, initial_error,setflag,setflag2,setflag3
############## ADD YOUR CODE HERE ##############
#posarray = [0,0,0,0]
returnarray2 = [-1,-1,-1,-1]
##---##
##Initialising streaming operations and getting joint properties
#for i in range(4):
#while returnarray2!=[0,0,0,0]:
#_,posarray[0]=sim.simxGetJointPosition(client_id, handle_arr[0], sim.simx_opmode_streaming)
#sim.simxSynchronousTrigger(client_id)
#sim.simxSetJointTargetVelocity(client_id,handle_arr[0],360*np.pi/180,sim.simx_opmode_oneshot)
#sim.simxSetJointMaxForce(client_id,handle_arr[0],10000,sim.simx_opmode_oneshot)
#returnarray2[0]=sim.simxSetJointTargetPosition(client_id,handle_arr[0],0,sim.simx_opmode_oneshot)
#_,posarray[1]=sim.simxGetJointPosition(client_id, handle_arr[1], sim.simx_opmode_streaming)
#sim.simxSynchronousTrigger(client_id)
#sim.simxSetJointTargetVelocity(client_id,handle_arr[0],360*np.pi/180,sim.simx_opmode_oneshot)
#sim.simxSetJointMaxForce(client_id,handle_arr[0],10000,sim.simx_opmode_oneshot)
#returnarray2[1]=sim.simxSetJointTargetPosition(client_id,handle_arr[1],0,sim.simx_opmode_oneshot)
#_,posarray[2]=sim.simxGetJointPosition(client_id, handle_arr[2], sim.simx_opmode_streaming)
##sim.simxSynchronousTrigger(client_id)
#sim.simxSetJointTargetVelocity(client_id,handle_arr[0],360*np.pi/180,sim.simx_opmode_oneshot)
#sim.simxSetJointMaxForce(client_id,handle_arr[0],10000,sim.simx_opmode_oneshot)
#returnarray2[2]=sim.simxSetJointTargetPosition(client_id,handle_arr[2],0,sim.simx_opmode_oneshot)
#_,posarray[3]=sim.simxGetJointPosition(client_id, handle_arr[3], sim.simx_opmode_streaming)
#sim.simxSynchronousTrigger(client_id)
#sim.simxSetJointTargetVelocity(client_id,handle_arr[0],360*np.pi/180,sim.simx_opmode_oneshot)
#sim.simxSetJointMaxForce(client_id,handle_arr[0],10000,sim.simx_opmode_oneshot)
#returnarray2[3]=sim.simxSetJointTargetPosition(client_id,handle_arr[3],0,sim.simx_opmode_oneshot)
#print(_,"pos code")
print("out")
#sim.simxGetPingTime(client_id)
initial_error.append(center_x)
initial_error.append(center_y)
setflag=False
setflag2=False
setflag3=False
perror_x=0
perror_y=0
control_angle(handle_arr,center_x,center_y)
#print(sim.simxGetPingTime(client_id))
#sim.simxSynchronousTrigger(client_id)
#center_x, center_y =
################################################## | 5,358,115 |
def benchmark(pipelines=None, datasets=None, hyperparameters=None, metrics=METRICS, rank='f1',
distributed=False, test_split=False, detrend=False, output_path=None):
"""Evaluate pipelines on the given datasets and evaluate the performance.
The pipelines are used to analyze the given signals and later on the
detected anomalies are scored against the known anomalies using the
indicated metrics.
Finally, the scores obtained with each metric are averaged across all the signals,
ranked by the indicated metric and returned on a ``pandas.DataFrame``.
Args:
pipelines (dict or list): dictionary with pipeline names as keys and their
JSON paths as values. If a list is given, it should be of JSON paths,
and the paths themselves will be used as names. If not given, all verified
pipelines will be used for evaluation.
datasets (dict or list): dictionary of dataset name as keys and list of signals as
values. If a list is given then it will be under a generic name ``dataset``.
If not given, all benchmark datasets will be used.
hyperparameters (dict or list): dictionary with pipeline names as keys
and their hyperparameter JSON paths or dictionaries as values. If a list is
given, it should be of corresponding order to pipelines.
metrics (dict or list): dictionary with metric names as keys and
scoring functions as values. If a list is given, it should be of scoring
functions, and their ``__name__`` value will be used as the metric name.
If not given, all the available metrics will be used.
rank (str): Sort and rank the pipelines based on the given metric.
If not given, rank using the first metric.
distributed (bool): Whether to use dask for distributed computing. If not given,
use ``False``.
test_split (bool or float): Whether to use the prespecified train-test split. If
float, then it should be between 0.0 and 1.0 and represent the proportion of
the signal to include in the test split. If not given, use ``False``.
detrend (bool): Whether to use ``scipy.detrend``. If not given, use ``False``.
output_path (str): Location to save the intermediary results. If not given,
intermediary results will not be saved.
Returns:
pandas.DataFrame: Table containing the scores obtained with
each scoring function across all the signals for each pipeline.
"""
pipelines = pipelines or VERIFIED_PIPELINES
datasets = datasets or BENCHMARK_DATA
if isinstance(pipelines, list):
pipelines = {pipeline: pipeline for pipeline in pipelines}
if isinstance(datasets, list):
datasets = {'dataset': datasets}
if isinstance(hyperparameters, list):
hyperparameters = {pipeline: hyperparameter for pipeline, hyperparameter in
zip(pipelines.keys(), hyperparameters)}
if isinstance(metrics, list):
metrics_ = dict()
for metric in metrics:
if callable(metric):
metrics_[metric.__name__] = metric
elif metric in METRICS:
metrics_[metric] = METRICS[metric]
else:
raise ValueError('Unknown metric: {}'.format(metric))
metrics = metrics_
results = _evaluate_datasets(
pipelines, datasets, hyperparameters, metrics, distributed, test_split, detrend)
if output_path:
LOGGER.info('Saving benchmark report to %s', output_path)
results.to_csv(output_path)
return _sort_leaderboard(results, rank, metrics) | 5,358,116 |
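A hedged call sketch; the pipeline paths, dataset name and signal names are placeholders, and it assumes 'f1' is one of the keys in the module's METRICS dictionary.

scores = benchmark(
    pipelines=['pipelines/arima.json', 'pipelines/lstm.json'],  # placeholder paths
    datasets={'demo': ['signal-1', 'signal-2']},                # placeholder signals
    metrics=['f1'],                                             # assumes 'f1' is in METRICS
    rank='f1',
    output_path='benchmark_results.csv',
)
print(scores.head())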
def notNone(arg,default=None):
""" Returns arg if not None, else returns default. """
return default if arg is None else arg
def get_scorer(scoring):
"""Get a scorer from string
"""
if isinstance(scoring, str) and scoring in _SCORERS:
scoring = _SCORERS[scoring]
return _metrics.get_scorer(scoring) | 5,358,118 |
def triangle_num(value: int) -> int:
"""Returns triangular number for a given value.
Parameters
----------
value : int
Integer value to use in triangular number calculation.
Returns
-------
int
Triangular number.
Examples:
>>> triangle_num(0)
0
>>> triangle_num(1)
1
>>> triangle_num(4)
10
>>> triangle_num(10)
55
>>> triangle_num("A")
Traceback (most recent call last):
...
TypeError: '>=' not supported between instances of 'str' and 'int'
>>> triangle_num(-1)
Traceback (most recent call last):
...
ValueError: Please use positive integer value.
"""
if value >= 0:
tot : list = [0]
def recur(n: int, t: list) -> Iterator:
if n > 0:
t[0] += n
n -= 1
return recur(n, t)
recur(value, tot)
return tot[0]
raise ValueError("Please use positive integer value.") | 5,358,119 |
def show_whole_td_msg(whole_td_msg):
"""
Display the complete td_msg.
:param whole_td_msg: np.ndarray, shape=(20, 16, 200, 1000)
:return:
"""
img_path = '../tmp/td_msg'
for tag in range(20):
final = np.zeros((200, 1000), dtype=np.uint8)
for i in range(16):
pic = np.zeros((200, 1000), dtype=np.uint8)
for r, bur in enumerate(whole_td_msg[tag, i]):
for c, buc in enumerate(bur):
if buc >= 1:
pic[r, c] += buc
final += pic
for r, bur in enumerate(final):
for c, buc in enumerate(bur):
final[r, c] = int(255 * buc)
img_name = img_path+'/'+str(tag)+'.bmp'
cv.imwrite(img_name, final) | 5,358,120 |
def locations_sim_euclidean(image:DataBunch, **kwargs):
"""
A locations similarity function that uses euclidean similarity between vectors. Predicts the anatomical locations of
the input image, and then returns the euclidean similarity between the input embryo's locations vector and the
locations vectors of the database embryos.
Euclidean similarity and distance are computed between unnormalized, one-hot locations vectors. The euclidean
similarity between two locations vectors is defined as 1/(1 + euclidean distance).
Arguments:
- image: The input image DataBunch
Returns:
A tensor of similarity values (one for each database image). Each similarity score is the euclidean similarity between
locations vectors.
"""
locations_pred = run_inference(image, do_stage=False)[0]
_, database_image_locations = retrieve_predictions()
euclidean_distance = torch.norm(database_image_locations-locations_pred, dim=1).unsqueeze(1)
return 1/(1+euclidean_distance) | 5,358,121 |
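The similarity definition above (1 / (1 + euclidean distance) on unnormalized vectors) can be checked on toy tensors:

import torch

pred = torch.tensor([1.0, 0.0, 0.0])                      # predicted locations vector
database = torch.tensor([[1.0, 0.0, 0.0],                 # identical vector
                         [0.0, 1.0, 0.0]])                # disjoint vector
distance = torch.norm(database - pred, dim=1).unsqueeze(1)
similarity = 1 / (1 + distance)                           # ~1.0 and ~0.41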
def assert_almost_equal(
actual: Tuple[numpy.float64, numpy.float64],
desired: List[numpy.float64],
decimal: int,
err_msg: Literal["(4, 200)normal"],
):
"""
usage.statsmodels: 1
"""
... | 5,358,122 |
def _butter_bandpass_filter(data, low_cut, high_cut, fs, axis=0, order=5):
"""Apply a bandpass butterworth filter with zero-phase filtering
Args:
data: (np.array)
low_cut: (float) lower bound cutoff for high pass filter
high_cut: (float) upper bound cutoff for low pass filter
fs: (float) sampling frequency in Hz
axis: (int) axis to perform filtering.
order: (int) filter order for butterworth bandpass
Returns:
bandpass filtered data.
"""
nyq = 0.5 * fs
b, a = butter(order, [low_cut / nyq, high_cut / nyq], btype="band")
return filtfilt(b, a, data, axis=axis) | 5,358,123 |
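A usage sketch on a synthetic signal, assuming scipy.signal's butter/filtfilt are imported at module level as used above:

import numpy as np

fs = 250.0                                   # sampling rate in Hz
t = np.arange(0, 10, 1 / fs)
signal = np.sin(2 * np.pi * 1 * t) + np.sin(2 * np.pi * 40 * t)   # 1 Hz + 40 Hz components
filtered = _butter_bandpass_filter(signal, low_cut=0.5, high_cut=4.0, fs=fs)
# The 40 Hz component is strongly attenuated; the ~1 Hz component passes through.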
def transform_and_normalize(vecs, kernel, bias):
"""应用变换,然后标准化
"""
if not (kernel is None or bias is None):
vecs = (vecs + bias).dot(kernel)
return vecs / (vecs**2).sum(axis=1, keepdims=True)**0.5 | 5,358,124 |
def mpesa_response(r):
"""
Create MpesaResponse object from requests.Response object
Arguments:
r (requests.Response) -- The response to convert
"""
r.__class__ = MpesaResponse
json_response = r.json()
r.response_description = json_response.get('ResponseDescription', '')
r.error_code = json_response.get('errorCode')
r.error_message = json_response.get('errorMessage', '')
return r | 5,358,125 |
def swap_flies(dataset, indices, flies1=0, flies2=1):
"""Swap flies in dataset.
Caution: data variables are currently hard-coded!
Caution: The swap *may* be done in place and so *might* alter the original dataset.
Args:
dataset ([type]): Dataset for which to swap flies
indices ([type]): List of indices at which to swap flies.
flies1 (int or list/tuple, optional): Either a single value for all indices or a list with one value per item in indices. Defaults to 0.
flies2 (int or list/tuple, optional): Either a single value for all indices or a list with one value per item in indices. Defaults to 1.
Returns:
dataset with flies swapped at the given indices
"""
for cnt, index in enumerate(indices):
if isinstance(flies1, (list, tuple)) and isinstance(flies2, (list, tuple)):
fly1, fly2 = flies1[cnt], flies2[cnt]
else:
fly1, fly2 = flies1, flies2
if 'pose_positions_allo' in dataset:
dataset.pose_positions_allo.values[index:, [fly2, fly1], ...] = dataset.pose_positions_allo.values[index:, [fly1, fly2], ...]
if 'pose_positions' in dataset:
dataset.pose_positions.values[index:, [fly2, fly1], ...] = dataset.pose_positions.values[index:, [fly1, fly2], ...]
if 'body_positions' in dataset:
dataset.body_positions.values[index:, [fly2, fly1], ...] = dataset.body_positions.values[index:, [fly1, fly2], ...]
return dataset | 5,358,126 |
def get_log_events(logGroupName=None, logStreamName=None, startTime=None, endTime=None, nextToken=None, limit=None, startFromHead=None):
"""
Lists log events from the specified log stream. You can list all the log events or filter using a time range.
By default, this operation returns as many log events as can fit in a response size of 1MB (up to 10,000 log events). If the results include tokens, there are more log events available. You can get additional log events by specifying one of the tokens in a subsequent call.
See also: AWS API Documentation
:example: response = client.get_log_events(
logGroupName='string',
logStreamName='string',
startTime=123,
endTime=123,
nextToken='string',
limit=123,
startFromHead=True|False
)
:type logGroupName: string
:param logGroupName: [REQUIRED]
The name of the log group.
:type logStreamName: string
:param logStreamName: [REQUIRED]
The name of the log stream.
:type startTime: integer
:param startTime: The start of the time range, expressed as the number of milliseconds since Jan 1, 1970 00:00:00 UTC. Events with a timestamp earlier than this time are not included.
:type endTime: integer
:param endTime: The end of the time range, expressed as the number of milliseconds since Jan 1, 1970 00:00:00 UTC. Events with a timestamp later than this time are not included.
:type nextToken: string
:param nextToken: The token for the next set of items to return. (You received this token from a previous call.)
:type limit: integer
:param limit: The maximum number of log events returned. If you don't specify a value, the maximum is as many log events as can fit in a response size of 1MB, up to 10,000 log events.
:type startFromHead: boolean
:param startFromHead: If the value is true, the earliest log events are returned first. If the value is false, the latest log events are returned first. The default value is false.
:rtype: dict
:return: {
'events': [
{
'timestamp': 123,
'message': 'string',
'ingestionTime': 123
},
],
'nextForwardToken': 'string',
'nextBackwardToken': 'string'
}
"""
pass | 5,358,127 |
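The snippet above is a documentation stub; against the real CloudWatch Logs API the operation is usually paginated by following nextForwardToken until it stops changing. A hedged boto3 sketch (group and stream names are placeholders):

import boto3

client = boto3.client('logs')
kwargs = {'logGroupName': '/my/app', 'logStreamName': 'stream-1', 'startFromHead': True}
while True:
    response = client.get_log_events(**kwargs)
    for event in response['events']:
        print(event['timestamp'], event['message'])
    # The end of the stream is reached when the forward token stops changing.
    if kwargs.get('nextToken') == response['nextForwardToken']:
        break
    kwargs['nextToken'] = response['nextForwardToken']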
def test_event_data(create_main_loop):
"""Test after_epoch and after_batch event args match the expectation."""
recording_hook = DataRecordingHook()
model, dataset, mainloop = create_main_loop(epochs=3, model_class=RecordingModel,
extra_hooks=[recording_hook], extra_streams=['valid'])
mainloop.run_training()
# check the epoch ids
assert recording_hook.epoch_ids == [1, 2, 3]
assert model.is_train_data == [True]*_DATASET_ITERS+[False]*_DATASET_ITERS+[True]*_DATASET_ITERS + \
[False]*_DATASET_ITERS+[True]*_DATASET_ITERS+[False]*_DATASET_ITERS
# now the model recorded its outputs as a list of all the batches regardless the stream and epoch, i.e.:
# [train_e1_b1, train_e1_b2, ..., valid_e1_b1, ... train_e2,b1, ...]
# while the DataRecordingHook has the following structure:
# {'train': [train_e1_b1, train_e1,b2, ..., train_e2,b1, ...], 'valid': [...]}
# we will convert the 'model structure' to the 'hook structure' so that they are comparable
def chunks(list_, size):
"""Split the given list_ into chunks of size consecutive elements."""
for i in range(0, len(list_), size):
yield list_[i:i + size]
output_data = model.output_data # pylint: disable=no-member
input_data = model.input_data # pylint: disable=no-member
model_outputs_by_stream_list = list(zip(*[(epoch[:len(epoch)//2], epoch[len(epoch)//2:])
for epoch in chunks(output_data, _DATASET_ITERS*2)]))
model_inputs_by_stream_list = list(zip(*[(epoch[:len(epoch)//2], epoch[len(epoch)//2:])
for epoch in chunks(input_data, _DATASET_ITERS*2)]))
model_outputs_by_stream = {'train': sum(model_outputs_by_stream_list[0], []),
'valid': sum(model_outputs_by_stream_list[1], [])}
model_inputs_by_stream = {'train': sum(model_inputs_by_stream_list[0], []),
'valid': sum(model_inputs_by_stream_list[1], [])}
# for all the streams
for stream_name in ['valid', 'train']:
assert stream_name in recording_hook.batch_data
io_data = zip(recording_hook.batch_data[stream_name],
model_outputs_by_stream[stream_name],
model_inputs_by_stream[stream_name],
dataset.batches[stream_name])
for hook_data, model_outputs, model_inputs, batches in io_data:
# check if the hook_data and model_inputs contain correct stream sources
for source_name in dataset.source_names:
assert source_name in hook_data
assert source_name in model_inputs
assert np.alltrue(hook_data[source_name] == batches[source_name])
assert np.alltrue(model_inputs[source_name] == batches[source_name])
# check if the hook_data contains correct model outputs
for output_name in model.output_names:
assert output_name in hook_data
assert np.alltrue(hook_data[output_name] == model_outputs[output_name]) | 5,358,128 |
def periodic_kernel(avetoas, log10_sigma=-7, log10_ell=2,
log10_gam_p=0, log10_p=0):
"""Quasi-periodic kernel for DM"""
r = np.abs(avetoas[None, :] - avetoas[:, None])
# convert units to seconds
sigma = 10**log10_sigma
l = 10**log10_ell * 86400
p = 10**log10_p * 3.16e7
gam_p = 10**log10_gam_p
d = np.eye(r.shape[0]) * (sigma/500)**2
K = sigma**2 * np.exp(-r**2/2/l**2 - gam_p*np.sin(np.pi*r/p)**2) + d
return K | 5,358,129 |
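A quick evaluation sketch; the epochs below are illustrative and given in seconds, matching the unit conversion inside the kernel.

import numpy as np

avetoas = np.array([0.0, 10.0, 30.0, 100.0]) * 86400   # illustrative TOAs in seconds
K = periodic_kernel(avetoas, log10_sigma=-7, log10_ell=2, log10_gam_p=0, log10_p=0)
print(K.shape)   # (4, 4) covariance matrix, symmetric with a small diagonal jitter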
def emails(request):
"""
A view to send emails out to hunt participants upon receiving a valid post request as well as
rendering the staff email form page
"""
teams = Hunt.objects.get(is_current_hunt=True).real_teams
people = []
for team in teams:
people = people + list(team.person_set.all())
email_list = [person.user.email for person in people]
if request.method == 'POST':
email_form = EmailForm(request.POST)
if email_form.is_valid():
subject = email_form.cleaned_data['subject']
message = email_form.cleaned_data['message']
email_to_chunks = [email_list[x: x + 80] for x in range(0, len(email_list), 80)]
for to_chunk in email_to_chunks:
email = EmailMessage(subject, message, '[email protected]', [], to_chunk)
email.send()
return HttpResponseRedirect('')
else:
email_form = EmailForm()
context = {'email_list': (', ').join(email_list), 'email_form': email_form}
return render(request, 'email.html', add_apps_to_context(context, request)) | 5,358,130 |
def _stochastic_universal_sampling(parents: Population, prob_distribution: list, n: int):
"""
Stochastic universal sampling (SUS) algorithm. Whenever more than one sample is to be drawn from the distribution,
stochastic universal sampling is preferred over the roulette wheel algorithm.
Parameters
----------
:param parents: beagle.Population
Population from which n individuals are going to be selected.
:param prob_distribution: list
Cumulative probability distribution.
:param n: int
Length of the selected population.
Returns
-------
:return: list of beagle.Individual
Selected individuals.
Exceptions
-----------
:raise Exception
If the algorithm enters an infinite loop because random_num is greater than 1 an exception will occur.
"""
current_member, i = 0, 0
mating_pool = [None] * n
random_num = np.random.uniform(low=0, high=(1/n))
while current_member < n:
while random_num <= prob_distribution[i]:
mating_pool[current_member] = parents[i]
random_num += 1 / n
current_member += 1
if random_num > 1:
raise Exception(
'The SUS algorithm has entered an infinite loop. Verify that the selected population '
'sizes are suitable for this type of operator.')
i += 1
mating_pool = [deepcopy(individual) for individual in mating_pool] # Make a deepcopy of each selected individual
return mating_pool | 5,358,131 |
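A minimal call sketch; the fitness values are illustrative, the cumulative distribution is built from fitness-proportional probabilities, and `parents` is assumed to be an indexable beagle.Population of at least four individuals.

import numpy as np

fitness = np.array([4.0, 3.0, 2.0, 1.0])          # illustrative fitness values
probabilities = fitness / fitness.sum()
cumulative = np.cumsum(probabilities).tolist()    # cumulative probability distribution
# `parents` is assumed to be an existing beagle.Population of matching size.
mating_pool = _stochastic_universal_sampling(parents, cumulative, n=4)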
def format_exception_with_frame_info(e_type, e_value, e_traceback, shorten_filenames=False):
"""Need to suppress thonny frames to avoid confusion"""
_traceback_message = "Traceback (most recent call last):\n"
_cause_message = getattr(
traceback,
"_cause_message",
("\nThe above exception was the direct cause " + "of the following exception:") + "\n\n",
)
_context_message = getattr(
traceback,
"_context_message",
("\nDuring handling of the above exception, " + "another exception occurred:") + "\n\n",
)
def rec_format_exception_with_frame_info(etype, value, tb, chain=True):
# Based on
# https://www.python.org/dev/peps/pep-3134/#enhanced-reporting
# and traceback.format_exception
if etype is None:
etype = type(value)
if tb is None:
tb = value.__traceback__
if chain:
if value.__cause__ is not None:
yield from rec_format_exception_with_frame_info(None, value.__cause__, None)
yield (_cause_message, None, None, None)
elif value.__context__ is not None and not value.__suppress_context__:
yield from rec_format_exception_with_frame_info(None, value.__context__, None)
yield (_context_message, None, None, None)
if tb is not None:
yield (_traceback_message, None, None, None)
tb_temp = tb
for entry in traceback.extract_tb(tb):
assert tb_temp is not None # actual tb doesn't end before extract_tb
if "cpython_backend" not in entry.filename and (
not entry.filename.endswith(os.sep + "ast.py")
or entry.name != "parse"
or etype is not SyntaxError
):
fmt = ' File "{}", line {}, in {}\n'.format(
entry.filename, entry.lineno, entry.name
)
if entry.line:
fmt += " {}\n".format(entry.line.strip())
yield (fmt, id(tb_temp.tb_frame), entry.filename, entry.lineno)
tb_temp = tb_temp.tb_next
assert tb_temp is None # tb was exhausted
for line in traceback.format_exception_only(etype, value):
if etype is SyntaxError and line.endswith("^\n"):
# for some reason it may add several empty lines before ^-line
partlines = line.splitlines()
while len(partlines) >= 2 and partlines[-2].strip() == "":
del partlines[-2]
line = "\n".join(partlines) + "\n"
yield (line, None, None, None)
items = rec_format_exception_with_frame_info(e_type, e_value, e_traceback)
return list(items) | 5,358,132 |
def _add_simple_procparser(subparsers, name, helpstr, func, defname='proc',
xd=False, yd=False, dualy=False, other_ftypes=True):
"""Add a simple subparser."""
parser = _add_procparser(subparsers, name, helpstr, func, defname=defname)
_add_def_args(parser, xd=xd, yd=yd, dualy=dualy)
return parser | 5,358,133 |
def save_instrument_data_reference(msg_struct):
"""Checks to see if there is already an instrument data reference for this event code, and if there isn't, creates
one. Instrument data references are used to allow the servers to track which events are pertinent to a particular
instrument (some events are for all instruments, some only for specific instrument types). If an instrument data
reference is to be added, this function also determines whether the reference is 'special' or not. If there is an
entire special table devoted to the event (where 'description' is the table name), then it is classified as
'special'.
Parameters
----------
msg_struct: dictionary
Decoded version of msg, converted to python dictionary.
"""
db_refs = db.session.query(InstrumentDataReference)\
.filter(InstrumentDataReference.instrument_id == msg_struct['data']['instrument_id'])\
.filter(InstrumentDataReference.description == msg_struct['data']['description']).all()
if not db_refs:
special = "false"
# "special" indicates whether this particular data description has its own table
rows = db.session.execute('''SELECT column_name FROM information_schema.columns WHERE table_name = :table''',
dict(table=msg_struct['data']['description'])).fetchall()
if rows:
special = "true"
new_instrument_data_ref = InstrumentDataReference()
new_instrument_data_ref.instrument_id = msg_struct['data']['instrument_id']
new_instrument_data_ref.description = msg_struct['data']['description']
new_instrument_data_ref.special = special
db.session.add(new_instrument_data_ref)
db.session.commit()
EM_LOGGER.info("Saved new instrument data reference") | 5,358,134 |
def addSortMethod(handle, sortMethod):
"""A stub implementation of the xbmcplugin addSortMethod() function"""
return | 5,358,135 |
def get_volumes(fn):
"""Return number of volumes in nifti"""
return int(subprocess.check_output(['fslnvols', fn])) | 5,358,136 |
async def _execSubprocess(command: str) -> tuple[int, bytes]:
"""Execute a command and check for errors.
Args:
command (str): commands as a string
Returns:
tuple[int, bytes]: tuple of return code (int) and stdout (bytes)
"""
async with SEM:
process = await asyncio.create_subprocess_shell(
command,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
out = await process.communicate()
exitCode = process.returncode
return exitCode, out[0] | 5,358,137 |
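A usage sketch; SEM is assumed to be a module-level asyncio.Semaphore bounding the number of concurrent subprocesses.

import asyncio

async def main():
    # Run two shell commands concurrently, bounded by SEM.
    results = await asyncio.gather(
        _execSubprocess("echo hello"),
        _execSubprocess("uname -a"),
    )
    for exit_code, output in results:
        print(exit_code, output.decode().strip())

asyncio.run(main())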
def submit_job(app_id, app_key, task_id, mimetype, text_col, dedupe):
"""Submit a job to the queue for the Celery worker. Create the required JSON message and post it to RabbitMQ."""
# These are the args that the Python function in the adjunct processor will use.
kwargs = {"app_id": app_id,
"app_key": app_key,
"task_id": task_id,
"format": mimetype,
"text_col": text_col,
"dedupe": dedupe,
"s3_endpoint": S3ENDPOINT,
"bucket": BUCKET,
"redis_port": REDISPORT,
"redis_host": REDISHOST}
# Recreate a celery message manually so that we don't need to import celery_tasks.py which has heavy dependencies.
job = {"id": task_id,
# "task": "synapsify_adjunct.celery_tasks.synapsify_master",
"task": "dc2_master",
"kwargs": kwargs}
# Connect to RabbitMQ and post.
conn = amqp.Connection(host=RMQHOST, port=RMQPORT, userid=RMQUSERNAME, password=RMQPASSWORD, virtual_host=RMQVHOST, insist=False)
cha = conn.channel()
msg = amqp.Message(json.dumps(job))
msg.properties["content_type"] = "application/json"
cha.basic_publish(routing_key=RMQEXCHANGE,
msg=msg)
cha.close()
conn.close() | 5,358,138 |
def comprspaces(*args):
"""
.. function:: comprspaces(text1, [text2,...]) -> text
This function strips (from the beginning and the end) and compresses
the spaces in its input.
Examples:
>>> table1('''
... ' an example with spaces ' 'another example with spaces '
... ''')
>>> sql("select comprspaces(a,b) from table1")
comprspaces(a,b)
--------------------------------------------------
an example with spaces another example with spaces
"""
if len(args) == 1:
return reduce_spaces.sub(' ', strip_remove_newlines.sub('', args[0]))
out=[]
for i in args:
o=reduce_spaces.sub(' ', strip_remove_newlines.sub('', i))
out+=[o]
return ' '.join(out) | 5,358,139 |
def find_available_port():
"""Find an available port.
Simple trick: open a socket to localhost, see what port was allocated.
Could fail in highly concurrent setups, though.
"""
s = socket.socket()
s.bind(('localhost', 0))
_address, port = s.getsockname()
s.close()
return port | 5,358,140 |
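A usage sketch; as the docstring warns, another process can grab the port between the check and the bind, so treat the result as a best-effort hint.

import http.server
import socketserver

port = find_available_port()
with socketserver.TCPServer(("localhost", port), http.server.SimpleHTTPRequestHandler) as httpd:
    print(f"serving on http://localhost:{port}")
    # httpd.serve_forever()   # left commented so the sketch does not block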
def redirect_std():
"""
Connect stdin/stdout to the controlling terminal even if the script's input and output
were redirected. This is useful in utilities based on termenu.
"""
stdin = sys.stdin
stdout = sys.stdout
if not sys.stdin.isatty():
sys.stdin = open_raw("/dev/tty", "r", 0)
if not sys.stdout.isatty():
sys.stdout = open_raw("/dev/tty", "w", 0)
return stdin, stdout | 5,358,141 |
def merge_deep(dct1, dct2, merger=None):
"""
Deep merge dct2 into dct1 using the strategy spec below
:param dct1: base dict
:param dct2: dict whose values take precedence on conflict
:param merger: Optional deepmerge.Merger to use instead of the default
:return: the merged dict
"""
my_merger = merger or Merger(
# pass in a list of tuples,with the
# strategies you are looking to apply
# to each type.
[
(list, ["append"]),
(dict, ["merge"])
],
# next, choose the fallback strategies,
# applied to all other types:
["override"],
# finally, choose the strategies in
# the case where the types conflict:
["override"]
)
return my_merger.merge(dct1, dct2) | 5,358,142 |
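A small example of the strategies configured above: lists are appended, nested dicts are merged recursively, and any other conflicting value is overridden by dct2.

base = {"tags": ["a"], "db": {"host": "localhost", "port": 5432}}
override = {"tags": ["b"], "db": {"port": 5433}, "debug": True}
merged = merge_deep(base, override)
# merged == {"tags": ["a", "b"], "db": {"host": "localhost", "port": 5433}, "debug": True}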
def step(
context, bind_to, data, title='', area=False, x_is_category=False,
labels=False, vertical_grid_line=False, horizontal_grid_line=False,
show_legend=True, zoom=False, group_tooltip=True, height=None,
width=None
):
"""Generates javascript code to show a 'step' chart.
Args:
context: Context of template.
bind_to: A string that specifies an HTML element (eg: id or class)
that chart will be shown in that. (like: '#chart')
data: A dictionary that contains the data of the chart, some
information about extra lines, grouping of data and
chart axis labels. eg:
{
'x': ['2017-5-19', '2017-5-20', '2017-5-21', '2017-5-22'],
'horizontal_lines': [40],
# 'vertical_lines': [40],
'data': [
{'title': 'A', 'values': [26, 35, 52, 34, 45, 74],
'color': '#FF34FF'},
# {'title': 'B', 'values': [54, 25, 52, 26, 20, 89]},
],
# 'groups': [('A', 'B')]
}
vertical_lines only works if x_is_category is set to False.
title: A string that will be shown on top of the chart.
area: It's a boolean option. If true, the area under the curve
will be colored.
x_is_category: It's a boolean option. If false, labels of X axis
will be considered as real number and sortable. (they will
be sorted automatically)
labels: It's a boolean option. If true, value of record will be
shown on column.
vertical_grid_line: It's boolean option, If true some vertical rows
will be drawn in chart. (grid lines)
horizontal_grid_line: It's boolean option, If true some horizontal
rows will be drawn in chart. (grid lines)
show_legend: It's boolean option, If false, legends of the chart
will be hidden.
zoom: It's boolean option, If true, end user can scroll on
chart to zoom in and zoom out.
group_tooltip: It's boolean option, If true, data of all records
in that point whill be shown to gather.
height: It's an integer option, it will determine the height of the chart
in pixels.
width: It's an integer option, it will determine the width of the chart
in pixels.
Returns:
A string contains chart js code and import code of C3 static files, if
it did not imported yet.
You can see structure of chart in chart_structur variable.
"""
# step chart structure in JS
chart_structur = (
'\n<script type="text/javascript">'
'\n var chart = c3.generate({'
'\n bindto: "%s",'
'\n data: {'
'\n x: %s,'
'\n columns: ['
'\n %s'
'\n ],'
'\n type : "%s",'
'\n colors: {'
'\n %s'
'\n },'
'\n groups: ['
'\n %s'
'\n ],'
'\n labels : %s'
'\n },'
'\n title: { text: "%s"},'
'\n axis: { x: { type: "%s" } },'
'\n grid: {'
'\n x: { show: %s ,lines: [%s] },'
'\n y: { show: %s ,lines: [%s] },'
'\n },'
'\n legend: { show: %s },'
'\n zoom: { enabled: %s },'
'\n tooltip: { grouped: %s },'
'\n size: { height: %s, width: %s }'
'\n });'
'\n</script>'
)
# convert parameters to strings to be acceptable in JS and C3 syntax.
_type = 'area-step' if area else 'step'
x_type = 'category' if x_is_category else ''
labels = 'true' if labels else 'false'
vertical_grid_line = 'true' if vertical_grid_line else 'false'
horizontal_grid_line = 'true' if horizontal_grid_line else 'false'
show_legend = 'true' if show_legend else 'false'
zoom = 'true' if zoom else 'false'
group_tooltip = 'true' if group_tooltip else 'false'
height = int(height) if height is not None else 'null'
width = int(width) if width is not None else 'null'
# read horizontal line points from data
horizontal_lines = str()
if 'horizontal_lines' in data.keys():
for line in data['horizontal_lines']:
horizontal_lines = ''.join([horizontal_lines,
'{ value: %s}' % line, ','])
# read vertical line points from data
# raise an exception if x_is_category set to true and vertical_lines exists
vertical_lines = str()
if 'vertical_lines' in data.keys():
if x_is_category:
raise Exception(
"It's meaningless to use vertical_lines with x_is_category."
)
for line in data['vertical_lines']:
vertical_lines = ''.join(
[vertical_lines, '{ value: %s}' % line, ','])
# reads 'x' field of data and creates X axis labels.
# a hash is used for naming X axis labels
x_labels = str()
if 'x' in data.keys():
if x_is_category:
x_labels = data['x']
else:
x_labels = list(filter(lambda x: int(x), data['x']))
x_labels = ','.join([repr(str(label)) for label in x_labels])
x_labels = '["2d2014226823e74c2accfcce8e0ca141", %s],' % x_labels
x_label_list_name = '"2d2014226823e74c2accfcce8e0ca141"'
else:
x_labels = ''
x_label_list_name = "null"
# read records points to draw on chart
data_title_list = list()
chart_data = str()
for item in data['data']:
values = ','.join([str(v) for v in item['values']])
item_data = '["%s", %s], ' % (item['title'], values)
chart_data = ' '.join([chart_data, item_data])
data_title_list.append(item['title'])
# add X axis labels to chart data
chart_data = ''.join([chart_data, x_labels])
# read colors of data
chart_color = str()
for item in data['data']:
if 'color' in item.keys():
item_color = '"%s": "%s", ' % (item['title'], item['color'])
chart_color = ' '.join([chart_color, item_color])
# read grouping details of data
total_group_string = str()
if 'groups' in data.keys():
for group in data['groups']:
group_string = str()
for item in group:
# raise an exception if the mentioned key does not exist in data
if item not in data_title_list:
raise ValueError("%s does not exist in your data!" % item)
group_string = ''.join([group_string, ',', repr(item)])
total_group_string = ''.join(
[total_group_string, '[', group_string, ']', ','])
# pass arguments to chart structure
chart = chart_structur % (
bind_to, x_label_list_name,
chart_data, _type, chart_color, total_group_string, labels,
title, x_type, vertical_grid_line, vertical_lines,
horizontal_grid_line, horizontal_lines, show_legend, zoom,
group_tooltip, height, width
)
# add the C3 import elements to it, if they have not been imported yet, and return it.
if not ('import_js_c3' in context and context['import_js_c3']):
context['import_js_c3'] = True
return mark_safe('%s\n%s' % (import_c3(), chart))
else:
return mark_safe(chart) | 5,358,143 |
def get(user_request, url, **kwargs):
""" A wrapper of requests.get.
This method will automatically add user's session key as the cookie to enable sso
Sends a GET request. Returns :class:`Response` object.
:param user_request: The http request contains the authentication key and is triggered by user.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
"""
_set_session_key(user_request, kwargs)
if debug:
log(user_request, url, "GET", kwargs=kwargs)
return requests.get(url, **kwargs) | 5,358,144 |
def get_baseline_y(line: PageXMLTextLine) -> List[int]:
"""Return the Y/vertical coordinates of a text line's baseline."""
if line_starts_with_big_capital(line):
return [point[1] for point in line.baseline.points if point[1] < line.baseline.bottom - 20]
else:
return [point[1] for point in line.baseline.points] | 5,358,145 |
def get_device_type(dev, num_try=1):
""" Tries to get the device type with delay """
if num_try >= MAX_DEVICE_TYPE_CHECK_RETRIES:
return
time.sleep(1) # if devtype is checked too early it is reported as 'unknown'
iface = xwiimote.iface(dev)
device_type = iface.get_devtype()
if not device_type or device_type == 'unknown':
return get_device_type(dev, num_try + 1)
return device_type | 5,358,146 |
def register_barrier():
"""So we don't have multiple jobs running simulateously"""
register_job(
user="vogels",
project="sgd",
experiment=experiment,
job="barrier",
priority=priority,
n_workers=16,
config_overrides={},
runtime_environment={"clone": {"code_package": code_package}, "script": "barrier.py"},
annotations={"description": description},
) | 5,358,147 |
def index():
"""
This is the grocery list.
Concatenates the ingredients from all the upcoming recipes
The ingredients dict that we pass to the template has this structure
{
"carrot": {
"g": 200,
"number": 4,
"str": "200g, 4number",
},
"salt": {
"g": 20,
"pinch": 3,
"str": "20g, 3pinch",
},
}
If two ingredients have the same unit, I add the quantities, but trying to
unify all the different ways of expressing ingredient units would be a lost cause.
We add the str key because doing formatting work in the template is so much fun
"""
recipes = Recipe.query.filter_by(upcoming=True)
ingredients = dict()
for recipe in recipes:
recipe_d = recipe.to_dict()
for ingredient in recipe_d["ingredients"]:
#worth changing the ingredients to a named tuple ?
#would be better at least here
name, unit, quantity = (ingredient["name"],
ingredient["unit"],
ingredient["quantity"])
quantity = quantity * recipe.upcoming_servings / recipe.servings
if name in ingredients:
if unit in ingredients[name]:
ingredients[name][unit] += quantity
else:
ingredients[name][unit] = quantity
else:
ingredients[name] = {
unit: quantity,
}
for name, d in ingredients.items():
s = ", ".join("{:g}{}".format(
round(quantity, 2), unit) for unit, quantity in d.items())
ingredients[name]["str"] = s
return render_template("grocery_list.html",
title="Grocery list",
recipes=recipes,
ingredients=ingredients) | 5,358,148 |
def test_Fit_MinFunc():
"""
There are times where I don't pass just a simple function to the fitting algorithm.
Instead I need to calculate the error myself and pass that to the model. This tests
that ability.
"""
init = {
'm': 20,
'b': -10
}
def func(X, *args):
vecLinear = np.vectorize(funcs.linear)
yThr = vecLinear(linearData['X'], *args)
return np.sqrt(np.sum((linearData['Y'] - yThr) ** 2))
LinMod = model(func)
LinMod.setParams(init)
LinMod.fit(linearData['X'], linearData['Y'])
results = LinMod.parameters.copy()
for key in linearParams.keys():
error = np.abs((results[key]-linearParams[key])/linearParams[key])*100
assert error < 15 | 5,358,149 |
def BOP(data):
"""
Balance of Power Indicator
:param pd.DataFrame data: pandas DataFrame with open, high, low, close data
:return pd.Series: with indicator data calculation results
"""
fn = Function('BOP')
return fn(data) | 5,358,150 |
def get_infinite(emnist_client_data, num_pseudo_clients):
"""Converts a Federated EMNIST dataset into an Infinite Federated EMNIST set.
Infinite Federated EMNIST expands each writer from the EMNIST dataset into
some number of pseudo-clients each of whose characters are the same but apply
a fixed random affine transformation to the original user's characters. The
distribution over affine transformation is approximately equivalent to the one
described at https://www.cs.toronto.edu/~tijmen/affNIST/. It applies the
following transformations in this order:
1. A random rotation chosen uniformly between -20 and 20 degrees.
2. A random shearing adding between -0.2 to 0.2 of the x coordinate to the
y coordinate (after centering).
3. A random scaling between 0.8 and 1.25 (sampled log uniformly).
4. A random translation between -5 and 5 pixels in both the x and y axes.
Args:
emnist_client_data: The `tff.simulation.ClientData` to convert.
num_pseudo_clients: How many pseudo-clients to generate for each real
client. Each pseudo-client is formed by applying a given random affine
transformation to the characters written by a given real user. The first
pseudo-client for a given user applies the identity transformation, so the
original users are always included.
Returns:
An expanded `tff.simulation.ClientData`.
"""
num_client_ids = len(emnist_client_data.client_ids)
return transforming_client_data.TransformingClientData(
raw_client_data=emnist_client_data,
make_transform_fn=_make_transform_fn,
num_transformed_clients=(num_client_ids * num_pseudo_clients)) | 5,358,151 |
def extractHavingSubDomain():
"""
[Passive] Sets the having_Sub_Domain feature after checking how many sub-domains the hostname has.
This number includes the "www." prefix and the top level domain like ".com" or ".uk"
1) -1 if the hostname has more than 3 parts after splitting along '.' ie "www." + some name + ".com".
2) 1 if the hostname has 3 or fewer parts after splitting along '.'
"""
parts = elements.hostname.split(".")
if len(parts) > 3:
features["having_Sub_Domain"] = -1
else:
features["having_Sub_Domain"] = 1 | 5,358,152 |
def exec_crossover_once(ind1, ind2, cxpoint, n_degree):
"""single point crossover for two individuals
Parameters
----------
ind1: individual 1
ind2: individual 2
cxpoint: crossover point
n_degree: number of degree
"""
g1 = ind1.G
g2 = ind2.G
x = np.min(cxpoint)
y = np.max(cxpoint)
edge1 = set(sorted(ind1.G.edges))
edge2 = set(sorted(ind2.G.edges))
edge1_lower = {(i, j) for i, j in edge1 if i < x or (i == x and j <= y)}
edge1_upper = edge1 - edge1_lower
edge2_lower = {(i, j) for i, j in edge2 if i < x or (i == x and j <= y)}
edge2_upper = edge2 - edge2_lower
g1.remove_edges_from(edge1_lower)
g1.add_edges_from(edge2_lower)
g2.remove_edges_from(edge2_lower)
g2.add_edges_from(edge1_lower)
ind1.G = keep_regularity(g1, n_degree, random_state=0)
ind2.G = keep_regularity(g2, n_degree, random_state=0)
ind1.fitness = None
ind2.fitness = None | 5,358,153 |
def create_plot(df, title, carbon_unit, cost_unit, ylimit=None):
"""
:param df:
:param title: string, plot title
:param carbon_unit: string, the unit of carbon emissions used in the
database/model, e.g. "tCO2"
:param cost_unit: string, the unit of cost used in the database/model,
e.g. "USD"
:param ylimit: float/int, upper limit of y-axis; optional
:return:
"""
if df.empty:
return figure()
# Set up data source
source = ColumnDataSource(data=df)
# Determine column types for plotting, legend and colors
# Order of stacked_cols will define order of stacked areas in chart
x_col = "period"
line_col = "carbon_cap"
stacked_cols = ["in_zone_project_emissions", "import_emissions_degen"]
# Stacked Area Colors
colors = ["#666666", "#999999"]
# Set up the figure
plot = figure(
plot_width=800,
plot_height=500,
tools=["pan", "reset", "zoom_in", "zoom_out", "save", "help"],
title=title,
x_range=df[x_col]
# sizing_mode="scale_both"
)
# Add stacked bar chart to plot
bar_renderers = plot.vbar_stack(
stackers=stacked_cols,
x=x_col,
source=source,
color=colors,
width=0.5,
)
# Add Carbon Cap target line chart to plot
target_renderer = plot.circle(
x=x_col,
y=line_col,
source=source,
size=20,
color="black",
fill_alpha=0.2,
line_width=2,
)
# Create legend items
legend_items = [
("Project Emissions", [bar_renderers[0]]),
("Import Emissions", [bar_renderers[1]]),
("Carbon Target", [target_renderer]),
]
# Add Legend
legend = Legend(items=legend_items)
plot.add_layout(legend, "right")
plot.legend[0].items.reverse() # Reverse legend to match stacked order
plot.legend.click_policy = "hide" # Add interactivity to the legend
# Note: Doesn't rescale the graph down, simply hides the area
# Note2: There's currently no way to auto-size legend based on graph size(?)
# except for maybe changing font size automatically?
show_hide_legend(plot=plot) # Hide legend on double click
# Format Axes (labels, number formatting, range, etc.)
plot.xaxis.axis_label = "Period"
plot.yaxis.axis_label = "Emissions ({})".format(carbon_unit)
plot.yaxis.formatter = NumeralTickFormatter(format="0,0")
plot.y_range.end = ylimit # will be ignored if ylimit is None
# Add delivered RPS HoverTool
r_delivered = bar_renderers[0] # renderer for delivered RPS
hover = HoverTool(
tooltips=[
("Period", "@period"),
(
"Project Emissions",
"@%s{0,0} %s (@fraction_of_project_emissions{0%%})"
% (stacked_cols[0], carbon_unit),
),
],
renderers=[r_delivered],
toggleable=False,
)
plot.add_tools(hover)
# Add curtailed RPS HoverTool
r_curtailed = bar_renderers[1] # renderer for curtailed RPS
hover = HoverTool(
tooltips=[
("Period", "@period"),
(
"Import Emissions",
"@%s{0,0} %s (@fraction_of_import_emissions{0%%})"
% (stacked_cols[1], carbon_unit),
),
],
renderers=[r_curtailed],
toggleable=False,
)
plot.add_tools(hover)
# Add RPS Target HoverTool
hover = HoverTool(
tooltips=[
("Period", "@period"),
("Carbon Target", "@%s{0,0} %s" % (line_col, carbon_unit)),
(
"Marginal Cost",
"@carbon_cap_marginal_cost_per_emission{0,0} %s/%s"
% (cost_unit, carbon_unit),
),
],
renderers=[target_renderer],
toggleable=False,
)
plot.add_tools(hover)
return plot | 5,358,154 |
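A hedged usage sketch for create_plot: the column names mirror the ones the function and its hover tools reference, but the values, periods and units below are invented.

import pandas as pd
from bokeh.io import show

df = pd.DataFrame({
    "period": ["2020", "2030"],  # x_range expects categorical values
    "carbon_cap": [100.0, 80.0],
    "in_zone_project_emissions": [60.0, 50.0],
    "import_emissions_degen": [30.0, 20.0],
    "fraction_of_project_emissions": [0.67, 0.71],
    "fraction_of_import_emissions": [0.33, 0.29],
    "carbon_cap_marginal_cost_per_emission": [5.0, 12.0],
})
plot = create_plot(df, title="Carbon Cap", carbon_unit="tCO2", cost_unit="USD")
show(plot)  # opens the interactive figure in a browser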
def get_app(name, **kwargs):
"""Returns an instantiated Application based on the name.
Args:
name (str): The name of the application
kwargs (dict): Keyword arguments used for application instantiation
Returns:
deepcell.applications.Application: The instantiated application
"""
name = str(name).lower()
app_map = dca.settings.VALID_APPLICATIONS
try:
return app_map[name]['class'](**kwargs)
except KeyError:
raise ValueError('{} is not a valid application name. '
'Valid applications: {}'.format(
name, list(app_map.keys()))) | 5,358,155 |
def test_subscriber__CreatorAnnotator__1(person):
"""`CreatorAnnotator` sets the creator on adding a person with browser."""
assert u'global editor' == IEditor(person).creator | 5,358,156 |
def worker_complete():
"""Complete worker."""
participant_id = request.args.get('participant_id')
if not participant_id:
return error_response(
error_type="bad request",
error_text='participantId parameter is required'
)
try:
_worker_complete(participant_id)
except KeyError:
return error_response(error_type='ParticipantId not found: {}'.format(participant_id))
return success_response(status="success") | 5,358,157 |
def combine(connected_events):
"""
Combine connected events into a graph.
:param connected_events: see polychronous.filter
:return: graph_of_connected_events
"""
graph_of_connected_events = nx.Graph()
graph_of_connected_events.add_edges_from(connected_events)
return (graph_of_connected_events) | 5,358,158 |
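Usage sketch: after combining pairwise-connected events into a graph, the connected components give the event groups (the event tuples below are made up).

import networkx as nx

connected_events = [((0, 1), (0, 3)), ((0, 3), (1, 7)), ((2, 2), (2, 5))]
graph = combine(connected_events)
groups = [sorted(component) for component in nx.connected_components(graph)]
print(groups)  # two components: [(0, 1), (0, 3), (1, 7)] and [(2, 2), (2, 5)]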
def set_config(**kwargs):
"""
Set SleepECG preferences and store them to the user configuration file.
If a value is `None`, the corresponding key is deleted from the user
configuration. See :ref:`configuration` for a list of possible
settings.
Parameters
----------
**kwargs: dict, optional
The configuration keys and values to set.
Examples
--------
>>> set_config(data_dir='~/.sleepecg/datasets')
"""
default_config = _read_yaml(_DEFAULT_CONFIG_PATH)
user_config = _read_yaml(_USER_CONFIG_PATH)
# validate all parameters before setting anything
for key, value in kwargs.items():
if key not in default_config:
options = ', '.join(default_config)
raise ValueError(f'Trying to set invalid config key: {key!r}, possible options: {options}') # noqa: E501
for key, value in kwargs.items():
if value is None:
user_config.pop(key, None)
else:
user_config[key] = value
_USER_CONFIG_PATH.parent.mkdir(parents=True, exist_ok=True)
with open(_USER_CONFIG_PATH, 'w') as user_config_file:
yaml.dump(user_config, user_config_file) | 5,358,159 |
def calculate_hash(filepath, hash_name):
"""Calculate the hash of a file. The available hashes are given by the hashlib module. The available hashes can be listed with hashlib.algorithms_available."""
hash_name = hash_name.lower()
if not hasattr(hashlib, hash_name):
raise Exception('Hash algorithm not available : {}'\
.format(hash_name))
with open(filepath, 'rb') as f:
checksum = getattr(hashlib, hash_name)()
for chunk in iter(lambda: f.read(4096), b''):
checksum.update(chunk)
return checksum.hexdigest() | 5,358,160 |
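Example use, e.g. to verify a download against a published checksum; the file name and expected digest below are placeholders.

digest = calculate_hash("release.tar.gz", "sha256")
expected = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"  # placeholder digest
if digest != expected:
    raise ValueError("checksum mismatch: {}".format(digest))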
def specify_run_step(
args: RunConfig,
aml_resources: AMLResources,
run_script_path: Path,
loop_config_class: Type[OptimizerConfig],
check_consistency: bool = True,
) -> Tuple[List[PythonScriptStep], List[PipelineData], Dict[str, List[str]], List[str]]:
"""
Create the pipeline step(s) to run the simulation.
Args:
aml_resources: an instance of AMLResources which contains the necessary information on
AML resources to instantiate pipeline steps
run_script_path: script that the run step should invoke
loop_config_class: (subclass of) OptimizerConfig that should be instantiated
check_consistency: whether to run data_and_simulation_are_consistent; normally we do, but
this may be set to False for tests that check other parts of this functionality.
Returns: A list of PythonScriptSteps, with one for each expansion, a list of output data locations in AML,
a dictionary of styled subsets for plotting, and a list of the temporary spec files that have been created
"""
# Expand config
selections_and_configs = list(load_resolutions_from_command(args, loop_config_class))
temp_spec_files = []
parallel_steps = []
all_run_outputs = []
styled_subsets: Dict[str, List[str]] = {}
# For each expansion, create a PythonScriptStep to run the simulator script.
num_selections = len(selections_and_configs)
for index, pair_list in enumerate(selections_and_configs, 1):
config0 = pair_list[0][1]
if (not check_consistency) or data_and_simulation_are_consistent(config0):
logging.info(
f"Config resolution {index} of {num_selections} will have {len(pair_list)} runs included in pipeline"
)
else: # pragma: no cover
logging.error(f"Dropping config resolution {index} of {num_selections} from pipeline")
continue
for config_dct, config in pair_list:
batch_strategy = config_dct["bayesopt"]["batch_strategy"]
acquisition = config_dct["bayesopt"]["acquisition"]
experiment_label = f"{batch_strategy} - {acquisition}"
# TODO: what about acquisition, optimization_strategy?
if batch_strategy not in styled_subsets:
styled_subsets[batch_strategy] = [experiment_label]
else:
styled_subsets[batch_strategy].append(experiment_label) # pragma: no cover
# set up the run configuration
aml_run_config = RunConfiguration(_name=f"Parallel run combination {config.resolution_spec}.{config.seed}")
aml_run_config.target = aml_resources.compute_target
aml_run_config.environment = aml_resources.env # type: ignore # auto
# create different versions of args for each combination
temp_config_path = spec_file_basename(config.resolution_spec, config.seed or 0, suffix="yml")
temp_spec_files.append(temp_config_path)
with Path(temp_config_path).open("w") as fp:
yaml.dump(config_dct, fp, Dumper=CustomDumper)
args.spec_file = temp_config_path
original_arg_list = sys.argv[1:]
simulator_args = original_arg_list
spec_file_index = simulator_args.index("--spec_file")
simulator_args[spec_file_index + 1] = temp_config_path
            if "--num_runs" in simulator_args:
                num_runs_index = simulator_args.index("--num_runs")
                simulator_args[num_runs_index + 1] = "1"  # pragma: no cover
            else:
                simulator_args += ["--num_runs", "1"]
# create PipelineData to consume the output of this step in the next (plotting) step
step_output = PipelineData(
name=f"outputs_batch_{config.resolution_spec}_{config.seed}",
output_name=f"outputs_batch_{config.resolution_spec}_{config.seed}",
datastore=aml_resources.datastore,
is_directory=True,
)
all_run_outputs += [step_output]
simulator_args += ["--output_dir", step_output]
step = PythonScriptStep(
script_name=str(run_script_path.absolute().relative_to(ROOT_DIR)),
source_directory=ROOT_DIR,
arguments=simulator_args,
outputs=[step_output],
compute_target=aml_resources.compute_target,
runconfig=aml_run_config,
)
parallel_steps.append(step)
return parallel_steps, all_run_outputs, styled_subsets, temp_spec_files | 5,358,161 |
def upgrade_to_blender2910(nodes):
"""
Blender 2.91 adds a new input to the BSDF node, emit strength in slot 18, moving the previous slot 18 up etc.
    Anything that connects to slot 18 or above from before 2.91 will have its slot number increased by one.
The input default values will also be updated to match
:param nodes: node tree as dict, nodes or groups
"""
_BSDF_node_names = []
print('Upgrading nodes to Blender 2.91...')
for n in nodes:
node = nodes[n]
if node['bl_idname'] == 'ShaderNodeBsdfPrincipled':
            # Save the node name so that connections can be updated
_BSDF_node_names.append(node['name'])
# Shift the input default values slots up
for i in reversed(range(18, 22 + 1)):
nodes[n]['inputs'][str(i)] = node['inputs'][str(i-1)]
del nodes[n]['inputs']['18']
for n in nodes:
node = nodes[n]
try:
for output, targets in node['outputs'].items():
for name, ids in targets.items():
if name in _BSDF_node_names:
# increment if the slot is 18 or higher
if isinstance(ids, int) and ids >= 18:
nodes[n]['outputs'][output][name] = ids + 1
elif isinstance(ids, list):
tmp_ids = ids.copy()
for pos, i in enumerate(ids):
if i >= 18:
tmp_ids[pos] = i + 1
nodes[n]['outputs'][output][name] = tmp_ids
except KeyError:
print('No outputs in node: {}'.format(node['name']))
print('Nodes upgraded to comply with Blender 2.91') | 5,358,162 |
def default_mp_value_parameters():
"""Set the different default parameters used for mp-values.
Returns
-------
dict
A default parameter set with keys: rescale_pca (whether the PCA should be
scaled by variance explained) and nb_permutations (how many permutations to
calculate empirical p-value). Defaults to True and 100, respectively.
"""
params = {"rescale_pca": True, "nb_permutations": 100}
return params | 5,358,163 |
def change_auth_keys(server, user, auth_keys):
"""
    Update authorized keys. auth_keys is a list of keys.
    Gets the current auth keys, removes keys tagged with auth_tag, and adds the new
    auth_keys with auth_tag appended.
    return: (0, None, None) on success; otherwise a dict: { stdout: xxx, stderr: yyy }
"""
auth_tag = os.getenv('AUTH_KEY_TAG')
retcode, out, err = get_auth_keys(server, user)
if retcode != 0:
return {'stdout': out, 'stderr': err}
current_keys = [x for x in out.strip().split('\n') if auth_tag not in x]
for key in auth_keys:
current_keys.append(f'{key} {auth_tag}')
retcode, out, err = set_auth_keys(server, user, ':'.join(current_keys))
if retcode != 0:
return {'stdout': out, 'stderr': err}
return 0, None, None | 5,358,164 |
def svn_wc_diff(*args):
"""
svn_wc_diff(svn_wc_adm_access_t anchor, char target, svn_wc_diff_callbacks_t callbacks,
void callback_baton,
svn_boolean_t recurse, apr_pool_t pool) -> svn_error_t
"""
return _wc.svn_wc_diff(*args) | 5,358,165 |
def plot3d_gmphd_track(model, grid, gm_s_list=None, gm_list_list=None,
observation_list=None, prediction_list=None, truth=None,
title=None, contours=4, log_plot=True):
""" Animate GM-PHD Filter result on 3D plot with mayavi
Args:
model (:obj:`pymrt.tracking.models.CVModel`):
grid (:obj:`numpy.ndarray`): 3D mesh generated by :func:`numpy.mgrid` or
:func:`numpy.meshgrid`.
gm_s_list (:obj:`list`): List of PHD scalars at each time step.
gm_list_list (:obj:`list`): List of Gaussian Mixtures at each time step.
If ``gm_s_list`` is None, it is used along with ``grid`` to generate
the PHD scalar at each time step.
observation_list (:obj:`list`): List of observations at each time step.
prediction_list (:obj:`list`): List of predictions at each time step.
truth (:obj:`tuple`): A tuple of truth states (by step)
title (:obj:`string`): Plot title.
contours (:obj:`int`): Number of contour surfaces to draw.
log_plot (:obj:`bool`): Plot ``gm_s`` in log scale.
"""
global frame
frame = 0
if gm_s_list is None:
if gm_list_list is None:
raise ValueError("Must provide 3D sampled GM scalar gm_s or a "
"Gaussian Mixture list")
else:
print('Sampling PHD in 3D space')
from ...tracking.utils import gm_calculate
gm_s_list = []
i = 0
for gm_list in gm_list_list:
sys.stdout.write('calculate gm_scalar for step %d' % i)
gm_s_list.append(gm_calculate(
gm_list=gm_list, grid=grid
))
i += 1
if title is None:
title = 'PHD'
print('Start Plotting with Mayavi')
class Controller(HasTraits):
run_calculation = Button('Next Frame')
view = View(Item(name='run_calculation'))
def _run_calculation_changed(self, value):
# action = ThreadedAction(self.data, self.figure)
# action.start()
global frame
print("Update 3D plots calculation in Frame %d" % frame, end=' ')
truth_points, obs_points, pred_points, contour = self.data
if log_plot:
            contour_s = np.log(gm_s_list[frame] + np.finfo(float).tiny)
else:
contour_s = gm_s_list[frame]
contour.mlab_source.scalars = contour_s
for i in range(frame, max(0, frame-8), -1):
opacity = 1. - 0.1 * (frame - i)
truth_points[i].actor.property.opacity = opacity
obs_points[i].actor.property.opacity = opacity
pred_points[i].actor.property.opacity = opacity
print('done.')
mlab.draw()
frame += 1
@mayavi2.standalone
def mayavi_main():
"""Example showing how to view a 3D numpy array in mayavi2.
"""
figure = mlab.figure(title)
if log_plot:
            contour_s = np.log(gm_s_list[frame] + np.finfo(float).tiny)
else:
contour_s = gm_s_list[frame]
contour = mlab.contour3d(
grid[0], grid[1], grid[2],
contour_s,
transparent=True,
opacity=0.5
)
truth_points, obs_points, pred_points = plot3d_data_preparation(
prediction=prediction_list,
observation=observation_list,
truth=truth,
model=model
)
for points in truth_points:
points.actor.property.opacity = 0.
for points in obs_points:
points.actor.property.opacity = 0.
for points in pred_points:
points.actor.property.opacity = 0.
truth_points[0].actor.property.opacity = 1.
obs_points[0].actor.property.opacity = 1.
pred_points[0].actor.property.opacity = 1.
mlab.colorbar(contour, title='PHD', orientation='vertical')
mlab.outline(contour)
plot_data = (truth_points, obs_points, pred_points, contour)
computation = Controller(data=plot_data, figure=figure)
computation.edit_traits()
mayavi_main() | 5,358,166 |
def _FindResourceIds(header, resource_names):
"""Returns the numerical resource IDs that correspond to the given resource
names, as #defined in the given header file."
"""
pattern = re.compile(
r'^#define (%s) _Pragma\S+ (\d+)$' % '|'.join(resource_names))
with open(header, 'r') as f:
res_ids = [ int(pattern.match(line).group(2))
for line in f if pattern.match(line) ]
if len(res_ids) != len(resource_names):
raise Exception('Find resource id failed: the result is ' +
', '.join(str(i) for i in res_ids))
return set(res_ids) | 5,358,167 |
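A hedged illustration of the header format this regex expects: each #define carries a _Pragma token followed by the numeric ID. The resource names, IDs and file name below are invented.

header = "\n".join([
    '#define IDR_MAIN_HTML _Pragma("whitelisted_resource_101") 101',
    '#define IDR_MAIN_JS _Pragma("whitelisted_resource_102") 102',
]) + "\n"
with open("resources.h", "w") as f:
    f.write(header)

print(_FindResourceIds("resources.h", ["IDR_MAIN_HTML", "IDR_MAIN_JS"]))  # {101, 102}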
def resolve_request_path(requested_uri):
"""
Check for any aliases and alter the path accordingly.
Returns resolved_uri
"""
for key, val in PATH_ALIASES.items():
if re.match(key, requested_uri):
return re.sub(key, val, requested_uri)
return requested_uri | 5,358,168 |
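A minimal, self-contained sketch of how the alias table is consulted; PATH_ALIASES here is a hypothetical regex-to-replacement mapping, not the project's actual configuration.

import re

PATH_ALIASES = {r"^/docs/latest/": "/docs/v2.1/"}

def resolve(requested_uri):
    for key, val in PATH_ALIASES.items():
        if re.match(key, requested_uri):
            return re.sub(key, val, requested_uri)
    return requested_uri

print(resolve("/docs/latest/install.html"))  # /docs/v2.1/install.html
print(resolve("/about"))                     # unchanged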
async def test_import_invalid_ip(hass: HomeAssistant) -> None:
"""Test that invalid IP error is handled during import."""
name = "Vallox 90 MV"
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_IMPORT},
data={"host": "vallox90mv.host.name", "name": name},
)
await hass.async_block_till_done()
assert result["type"] == RESULT_TYPE_ABORT
assert result["reason"] == "invalid_host" | 5,358,169 |
def apim_api_delete(
client, resource_group_name, service_name, api_id, delete_revisions=None, if_match=None, no_wait=False):
"""Deletes an existing API. """
cms = client.api
return sdk_no_wait(
no_wait,
cms.delete,
resource_group_name=resource_group_name,
service_name=service_name,
api_id=api_id,
if_match="*" if if_match is None else if_match,
delete_revisions=delete_revisions if delete_revisions is not None else False) | 5,358,170 |
def deep_copy(obj):
"""Make deep copy of VTK object."""
copy = obj.NewInstance()
copy.DeepCopy(obj)
return copy | 5,358,171 |
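A usage sketch with a concrete VTK data object; any class derived from vtkObject exposes NewInstance/DeepCopy, so vtkPoints is just one example.

import vtk

points = vtk.vtkPoints()
points.InsertNextPoint(0.0, 0.0, 0.0)
points.InsertNextPoint(1.0, 2.0, 3.0)

points_copy = deep_copy(points)          # independent copy
points_copy.SetPoint(0, 5.0, 5.0, 5.0)   # does not affect the original
print(points.GetPoint(0))                # (0.0, 0.0, 0.0)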
def search_exception(n, num_incr=1000):
""" search for exception to perfect mirsky for
given n, checking pairs
checks in reversed order to check pairs with permutations
of less fixed points first
"""
eigvals = []
in_rad = np.cos(np.pi/n)
x_ranges, lines = pm_boundary(n)
for P in reversed(list(cycle_types(n))):
for Q in rev_symmetric_group(n):
vals = [val for t in np.linspace(0,1,num=num_incr)
for val in np.linalg.eigvals(t*P + (1-t)*Q)]
eigvals = list(filter(
lambda val: val.imag > 0 and val.real != 0 and
abs(val)>in_rad, vals))
if len(eigvals) > 0:
exc_lst = list(filter(lambda val: not in_region(val, x_ranges,lines),
eigvals))
if len(exc_lst) > 0:
print(f"EXCEPTION FOUND FOR n = {n}", "\n")
print("eigenvalue:", exc_lst[0], "\n")
print(P, "\n")
print(Q, "\n")
return
print(f"No exception: n = {n}") | 5,358,172 |
def get_dunn_index(fdist, *clusters):
"""
Returns the Dunn index for the given selection of nodes.
J.C. Dunn. Well separated clusters and optimal fuzzy
partitions. 1974. J.Cybern. 4. 95-104.
"""
    if len(clusters) < 2:
        raise ValueError("At least 2 clusters are required")
intra_dist = []
for c in clusters:
for i in c.get_leaves():
if i is not None:
                # item intra-cluster dist -> Centroid Diameter
a = fdist(i.profile, c.profile)*2
intra_dist.append(a)
max_a = numpy.max(intra_dist)
inter_dist = []
for i, ci in enumerate(clusters):
for cj in clusters[i+1:]:
            # inter-cluster dist -> Centroid Linkage
b = fdist(ci.profile, cj.profile)
inter_dist.append(b)
min_b = numpy.min(inter_dist)
if max_a == 0.0:
D = 0.0
else:
D = min_b / max_a
return D | 5,358,173 |
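In words, D = (minimum centroid-linkage distance between clusters) / (maximum centroid diameter within a cluster); higher means better separated. A standalone sketch of the same ratio over plain numpy point arrays, with Euclidean distance assumed in place of fdist:

import numpy as np

def dunn_index(clusters):
    """clusters: list of (n_i, d) point arrays; centroid-based variant as above."""
    centroids = [c.mean(axis=0) for c in clusters]
    intra = [2 * np.max(np.linalg.norm(c - centroids[i], axis=1)) for i, c in enumerate(clusters)]
    inter = [np.linalg.norm(centroids[i] - centroids[j])
             for i in range(len(clusters)) for j in range(i + 1, len(clusters))]
    max_a = max(intra)
    return 0.0 if max_a == 0.0 else min(inter) / max_a

rng = np.random.default_rng(0)
print(dunn_index([rng.normal(size=(20, 2)), rng.normal(size=(20, 2)) + 8]))  # typically > 1 for well-separated clusters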
def sample(words, n=10) -> list:
"""Sample n random words from a list of words."""
return [random.choice(words) for _ in range(n)] | 5,358,174 |
def extract_peaks(
imzml_path,
db,
tol_ppm=DEFAULT_TOL_PPM,
tol_mode=DEFAULT_TOL_MODE,
base_mz=DEFAULT_BASE_MZ,
):
"""
Extract all peaks from the given imzML file for the supplied database of molecules.
:param imzml_path:
:param db: A pandas DataFrame containing an 'mz' column. Additional metadata columns are also allowed.
:param tol_ppm:
The maximum distance from a theoretical m/z to search for peaks. e.g. 3 means +/- 3ppm
:param tol_mode:
The model for adjusting tol_ppm based on the area of the mass range.
To match METASPACE, specify 'tof', which means 1ppm is always mz * 1e-6 (i.e. 1ppm at every mass)
See the `ppm_to_daltons` function for more examples.
:param base_mz:
The base m/z for tolerance calculations. Doesn't matter with 'tof'.
See the `ppm_to_daltons` function for more details.
:return:
coords_df - a DataFrame mapping spectrum idx to x,y values.
Needed for converting 'peaks_df' values to images
peaks - A list of dicts. Each dict contains:
'mol': A NamedTuple of the DB peak row. Access fields with e.g. peak['mol'].formula
'peaks_df': a DataFrame with one row per found peak. Columns:
'sp': Spectrum idx
'mz': m/z
'ints': Intensity value
"""
assert 'mz' in db.columns, 'db must have an "mz" column'
assert tol_mode in TOL_MODES, f'invalid tol_mode: {tol_mode}'
p = ImzMLParser(str(imzml_path))
coords_df = pd.DataFrame(p.coordinates, columns=['x', 'y', 'z'][:len(p.coordinates[0])], dtype='i')
coords_df['x'] -= np.min(coords_df.x)
coords_df['y'] -= np.min(coords_df.y)
mz_tol_lo, mz_tol_hi = tol_edges(db.mz, tol_ppm, tol_mode, base_mz)
# Uncomment this to add the tolerance boundaries to db for debugging:
# db['mz_tol_lo'], db['mz_tol_hi'] = mz_tol_lo, mz_tol_hi
    mol_peaks = [[] for _ in range(len(db))]  # one peak list per database row, indexed by 'peak' below
for sp, x, y in coords_df[['x', 'y']].itertuples(True, None):
mzs, ints = p.getspectrum(sp)
mz_range_lo = np.searchsorted(mzs, mz_tol_lo, 'left')
mz_range_hi = np.searchsorted(mzs, mz_tol_hi, 'right')
mask = mz_range_lo != mz_range_hi
for peak, idx_lo, idx_hi in zip(np.flatnonzero(mask), mz_range_lo[mask], mz_range_hi[mask]):
for i in range(idx_lo, idx_hi):
mol_peaks[peak].append((sp, mzs[i], ints[i]))
empty_peaks_df = pd.DataFrame({
'sp': pd.Series(dtype='i'),
'mz': pd.Series(dtype='f'),
'ints': pd.Series(dtype='f'),
})
result = [{
'mol': db_row,
'peaks_df': pd.DataFrame(peaks, columns=['sp', 'mz', 'ints']) if peaks else empty_peaks_df
} for db_row, peaks in zip(db.itertuples(), mol_peaks)]
return coords_df, result | 5,358,175 |
def cov_dense(n_features=100, scale=0.5,
edges='ones', pos=True, force_psd=True, random_state=None):
"""
    Returns a covariance matrix with a constant diagonal whose off-diagonal elements are obtained from adj_mats.complete_graph()
Parameters
----------
n_features: int
scale: float
Scale of the off diagonal entries.
edges: str
How the edges should be sampled. See adj_mats.complete_graph()
pos: bool
Should the off-diagonal entries be all positive.
force_psd: bool
Make sure the covariance matrix is positive semi-definite zeroing out all negative eigenvalues.
random_state: None, int
Random seed for sampling.
Output
------
cov: array-like, (n_features, n_features)
The sampled covariance matrix.
"""
cov = complete_graph(n_nodes=n_features, edges=edges,
pos=pos, random_state=random_state)
cov = cov * scale
np.fill_diagonal(cov, 1.0)
if force_psd:
cov = project_psd(cov)
return cov | 5,358,176 |
def measure_fwhm(array):
"""Fit a Gaussian2D model to a PSF and return the FWHM
Parameters
----------
array : numpy.ndarray
Array containing PSF
Returns
-------
x_fwhm : float
FWHM in x direction in units of pixels
y_fwhm : float
FWHM in y direction in units of pixels
"""
yp, xp = array.shape
    y, x = np.mgrid[:yp, :xp]
p_init = models.Gaussian2D()
fit_p = fitting.LevMarLSQFitter()
fitted_psf = fit_p(p_init, x, y, array)
return fitted_psf.x_fwhm, fitted_psf.y_fwhm | 5,358,177 |
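Quick self-check on a synthetic PSF: a Gaussian with sigma = 2 px should come back with FWHM close to 2.355 * 2 = 4.71 px. This sketch assumes astropy.modeling is available as in the function above.

import numpy as np
from astropy.modeling import models

y, x = np.mgrid[:51, :51]
psf = models.Gaussian2D(amplitude=1.0, x_mean=25, y_mean=25,
                        x_stddev=2.0, y_stddev=2.0)(x, y)

x_fwhm, y_fwhm = measure_fwhm(psf)
print(x_fwhm, y_fwhm)  # both close to 4.71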
def test_not_impartial(g):
"""
Test that these games are not impartial.
"""
assert not g.is_impartial | 5,358,178 |
def exists(profile, bucket, name):
"""Check if a file exists in an S3 bucket.
Args:
profile
A profile to connect to AWS with.
bucket
The name of the bucket you want to find the file in.
name
The name of a file.
Returns:
True if it exists, False if it doesn't.
"""
result = fetch_by_name(profile, bucket, name)
return len(result) > 0 | 5,358,179 |
def fetchquota(adr):
"""Retrieves the account quota information and passes the interesting
part of the json object along to the request source.
Arguments:
adr (str): The email account address of interest.
Returns:
The quota part of the json object for the response.
"""
debuginfo("Fetching quota info for account.")
return apirequest("quota", {'emailaccount': adr})["response"]["quota"] | 5,358,180 |
def saslocal():
"""Set sasurl to local."""
set_sasurl(loc='local') | 5,358,181 |
def print_basic_csv(file_name, delimiter=','):
"""This function extracts and prints csv content from given filename
Details: https://docs.python.org/2/library/csv.html
Args:
file_name (str): file path to be read
delimiter (str): delimiter used in csv. Default is comma (',')
Returns:
None
"""
csv_rows = list()
csv_attr_dict = dict()
csv_reader = None
# read csv
csv_reader = csv.reader(open(file_name, 'r'), delimiter=delimiter)
# iterate and extract data
for row in csv_reader:
print(row)
csv_rows.append(row)
# prepare attribute lists
for col in csv_rows[0]:
csv_attr_dict[col]=list()
    # iterate and add data to attribute lists, keyed by the header columns
    for row in csv_rows[1:]:
        for col, value in zip(csv_rows[0], row):
            csv_attr_dict[col].append(value)
# print the result
print("\n\n")
print("CSV Attributes::")
pprint(csv_attr_dict) | 5,358,182 |
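A quick way to exercise the function above with a throwaway CSV (the file name and rows are arbitrary):

import csv

rows = [["sno", "fruit", "color", "price"],
        ["1", "apple", "red", "110.85"],
        ["2", "banana", "yellow", "50.12"]]
with open("tabular_csv.csv", "w", newline="") as f:
    csv.writer(f).writerows(rows)

print_basic_csv("tabular_csv.csv")  # prints each row, then the per-column dict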
def project_login(driver):
"""
針對多綫程執行設定不同樣本編號,若修改問卷,也許提供該問卷樣本編號的第一順位號碼。
"""
SAMPLE_NUMBER = 20200101+sample_add
try:
WebDriverWait(driver, 3).until(EC.presence_of_element_located((By.XPATH, '//*[@name="{}"][1]'
                                                                       .format(str(SAMPLE_NUMBER))))).click()  # select the sample number to answer
sleep(1)
        driver.find_element_by_class_name('btn.btn-blue').click()  # click "start interview"
print("STEP 3[project_login]: Project Login Successfully !")
form_basic_info(driver)
except NoSuchElementException:
        driver.find_element_by_xpath('//*[@id="case_in_prj_next"]/a').click()  # if the sample number is not found, click the next-page button
return project_login(driver)
else:
return "STEP 3[project_login]: Loading took too much time !" | 5,358,183 |
def set_directory_request_data(directory, key, value):
"""Allows to associate a value to a key on the data object of a request
:param directory: The name of the directory of the request to which the data will be associated
:type directory: str
:param key: The key to which the data will be associated
:type key: str
:param value: The value returned if the data does not exist
:type value: str
"""
normalize()
request_directory = normalize_request_directory(directory)
request_directory["data"][key] = value
write_dex() | 5,358,184 |
def convert_convolutionfunction_to_image(cf):
""" Convert ConvolutionFunction to an image
:param cf:
:return:
"""
return create_image_from_array(cf.data, cf.grid_wcs, cf.polarisation_frame) | 5,358,185 |
def macro(libname):
"""Decorator for macros (Moya callables)."""
def deco(f):
exposed_elements[libname] = f
return f
return deco | 5,358,186 |
def product(*generators):
"""generate the cartesian product of infinite generators."""
generators = list(map(GenCacher, generators))
for distance in itertools.count(0):
for idxs in _summations(distance, len(generators)):
yield tuple(gen[idx] for gen, idx in zip(generators, idxs)) | 5,358,187 |
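Usage sketch: the generator enumerates the infinite product by total index distance, so finite prefixes are well-defined. It assumes GenCacher and _summations from the same module are available.

import itertools

first = list(itertools.islice(product(itertools.count(0), itertools.count(0)), 6))
print(first)  # (0, 0), then the pairs with index sum 1, then those with sum 2 (order within a group depends on _summations)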
def get_dataloaders(dataset, mode='train', root=None, shuffle=True, pin_memory=True,
batch_size=8, logger=logging.getLogger(__name__), normalize=False, **kwargs):
"""A generic data loader
Parameters
----------
dataset : {"openimages", "jetimages", "evaluation"}
Name of the dataset to load
root : str
Path to the dataset root. If `None` uses the default one.
kwargs :
Additional arguments to `DataLoader`. Default values are modified.
"""
    pin_memory = pin_memory and torch.cuda.is_available()  # only pin if a GPU is available
Dataset = get_dataset(dataset)
if root is None:
dataset = Dataset(logger=logger, mode=mode, normalize=normalize, **kwargs)
else:
dataset = Dataset(root=root, logger=logger, mode=mode, normalize=normalize, **kwargs)
return DataLoader(dataset,
batch_size=batch_size,
shuffle=shuffle,
num_workers=NUM_DATASET_WORKERS,
collate_fn=exception_collate_fn,
pin_memory=pin_memory) | 5,358,188 |
def ie():
"""Interest Expense Function"""
ww = float(input("for Short-Term Loan Press 1:\nfor Long-Term Loan Press 2:\nfor Bonds-Payable Press 3: "))
if(ww == 1):
e = float(input("Please Enter The Principal Value: "))
ew = float(input("Please Enter Interest rate %: "))
ea = float(input("Please Enter Time in Months: "))
cc = ew/100
v = (e * cc) * (ea /12)
l = round(v)
jj = float(l) + float(e)
oo = l / (ea * 30)
print ">> Your Interest Expense for ",ea,"Months is ",l
print ">> Total Amount Paid in Maturity Date is ",jj
print ">> Your Interest Expense Per Day is",oo
elif(ww == 2):
spp = float(input(" for Single Payment Loan Press 1\n for Installment Payment Loan Press 2: "))
if(spp == 1):
pv = float(input("Please Enter Principal Value: "))
ir = float(input("Please Enter Interest rate %: "))
lp = float(input("Please Enter The Loan Period (Years): "))
mp = (float(pv) * (float(ir) / float(100))) * (float(1) / float(12))
yp = float(mp) * float(12)
semi = float(yp)/float(2)
ap = float(yp) * float(lp)
md = float(ap) + float(pv)
print ">> Your Monthly Interest Expense is ",mp
print ">> Your Semi-Annual Interest Expense is ",semi
print ">> Your Interest Expense Per Year is ",yp
print ">> Total Interest will be Paid is ",ap
print ">> Principal Value at Maturity Date is ",md
elif(spp == 2):
            pip = [float(x) for x in input("Please Enter Each Installment Payment (comma-separated): ").split(",")]
iir = float(input("Please Enter Interest rate %: "))
su = sum(pip)
le = len(pip)
n = 0
tie = 0
while le != 0:
iex = (float(su)*(float(iir)/float(100)))*(float(1)/float(12))
sm = float(iex)*float(6)
an = float(iex)*float(12)
ey = pip[0 + n]
dr = float(ey)+float(an)
n += 1
le -= 1
tie += float(an)
tot = float(su)+float(tie)
print "Information for Installment no.",n,"with Value Of ",ey
print ">> Your Monthly Interest Expense is",iex
print ">> Your Semi-Annual Interest Expense is",sm
print ">> Your Annual Interest Expense is",an
print ">> Total Amount Will Be Paid for The Installment is",dr,"\n"
print ">> Total Interest Expense for The Loan is ",tie
print ">> Your Total Payment for The Loan is",tot
elif(ww == 3):
from accountingpy import bp
bp() | 5,358,189 |
def stats_file(filename, shape, dtype=None, file_format='raw',
out_of_core=True, buffer_size=None, max_memory=None,
progress_frequency=None):
"""stats_file(filename, shape, dtype=None, file_format='raw',
out_of_core=True, buffer_size=None, max_memory=None,
progress_frequency=None) -> StatsInfo object
returns a StatsInfo about the content of 'filename', which is a cube with 'shape'.
If 'out_of_core' (out-of-core) is True, process 'buffer_size' elements at a time.
"""
shape = Shape(shape)
filename = interpolate_filename(filename, shape=shape, file_format=file_format, dtype=dtype)
if out_of_core and file_format == 'raw':
stats_info = stats_info_out_of_core(filename, shape=shape, dtype=dtype,
buffer_size=buffer_size, max_memory=max_memory,
progress_frequency=progress_frequency)
else:
cube = read_cube(file=filename, shape=shape, dtype=dtype, file_format=file_format)
stats_info = StatsInfo.stats_info(cube)
return stats_info | 5,358,190 |
def get_info(args):
"""
Loads todo.txt, sets up file paths, loads in any available star information, saves the
relevant parameters for each of the two main routines and sets the plotting parameters.
Parameters
----------
args : argparse.Namespace
command-line arguments
parallel : bool
if pysyd will be running in parallel mode
CLI : bool, optional
if CLI is not being used (i.e. `False`), the modules draw default values from a different location
Returns
-------
args : argparse.Namespace
the updated command-line arguments
"""
# Get parameters for all modules
args = get_parameters(args)
# Get invidual/specific star info from csv file (if it exists)
args = get_csv_info(args)
if args.cli:
# Check the input variables
check_input_args(args)
args = get_command_line(args)
set_plot_params()
return args | 5,358,191 |
def test_isupport_getattr():
"""Test using ISUPPORT parameters as read-only attributes."""
instance = isupport.ISupport(awaylen=50)
assert hasattr(instance, 'AWAYLEN')
assert not hasattr(instance, 'awaylen'), 'attributes are ALL_UPPERCASE'
assert not hasattr(instance, 'UNKNOWN')
assert instance.AWAYLEN == 50
# you can't set attributes yourself
with pytest.raises(AttributeError):
instance.AWAYLEN = 20
with pytest.raises(AttributeError):
instance.awaylen = 20
with pytest.raises(AttributeError):
instance.UNKNOWN = 'not possible' | 5,358,192 |
def _weight_func(dist):
"""Weight function to replace lambda d: d ** -2.
The lambda function is not valid because:
if d==0 then 0^-2 is not valid."""
# Dist could be multidimensional, flatten it so all values
# can be looped
with np.errstate(divide="ignore"):
retval = 1.0 / dist
return retval**2 | 5,358,193 |
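This kind of callable can be passed as the weights argument of a scikit-learn nearest-neighbours estimator, where a plain lambda d: d ** -2 would misbehave on zero distances; a hedged sketch with made-up data:

import numpy as np
from sklearn.neighbors import KNeighborsRegressor

rng = np.random.default_rng(0)
X = rng.uniform(size=(50, 1))
y = np.sin(2 * np.pi * X).ravel()

model = KNeighborsRegressor(n_neighbors=5, weights=_weight_func).fit(X, y)
print(model.predict([[0.15], [0.5]]))  # inverse-square-distance weighted predictions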
def enable_logging(
debug=False, http_debug=False, path=None, stream=None,
format_stream=False,
format_template='%(asctime)s %(levelname)s: %(name)s %(message)s',
handlers=None,
):
"""Enable logging output.
Helper function to enable logging. This function is available for
debugging purposes and for folks doing simple applications who want an
easy 'just make it work for me'. For more complex applications or for
those who want more flexibility, the standard library ``logging`` package
will receive these messages in any handlers you create.
:param bool debug:
Set this to ``True`` to receive debug messages.
:param bool http_debug:
Set this to ``True`` to receive debug messages including
HTTP requests and responses. This implies ``debug=True``.
:param str path:
If a *path* is specified, logging output will written to that file
in addition to sys.stderr.
The path is passed to logging.FileHandler, which will append messages
the file (and create it if needed).
:param stream:
        One of ``None``, ``sys.stdout`` or ``sys.stderr``.
If it is ``None``, nothing is logged to a stream.
If it isn't ``None``, console output is logged to this stream.
:param bool format_stream:
If format_stream is False, the default, apply ``format_template`` to
``path`` but not to ``stream`` outputs. If True, apply
``format_template`` to ``stream`` outputs as well.
:param str format_template:
Template to pass to :class:`logging.Formatter`.
:rtype: None
"""
if not stream and not path:
stream = sys.stdout
if http_debug:
debug = True
if debug:
level = logging.DEBUG
else:
level = logging.INFO
formatter = logging.Formatter(format_template)
if handlers:
for handler in handlers:
handler.setFormatter(formatter)
else:
handlers = []
if stream is not None:
console = logging.StreamHandler(stream)
if format_stream:
console.setFormatter(formatter)
handlers.append(console)
if path is not None:
file_handler = logging.FileHandler(path)
file_handler.setFormatter(formatter)
handlers.append(file_handler)
setup_logging('openstack', handlers=handlers, level=level)
setup_logging('keystoneauth', handlers=handlers, level=level)
# Turn off logging on these so that if loggers higher in the tree
# are more verbose we only get what we want out of the SDK. This is
# particularly useful when combined with tools like ansible which set
# debug logging level at the logging root.
# If more complex logging is desired including stevedore debug logging,
# enable_logging should not be used and instead python logging should
# be configured directly.
setup_logging(
'urllib3', handlers=[logging.NullHandler()], level=logging.INFO)
setup_logging(
'stevedore', handlers=[logging.NullHandler()], level=logging.INFO)
# Suppress warning about keystoneauth loggers
setup_logging('keystoneauth.discovery')
setup_logging('keystoneauth.identity.base')
setup_logging('keystoneauth.identity.generic.base') | 5,358,194 |
def two_poles(time_limit=_DEFAULT_TIME_LIMIT, random=None,
environment_kwargs=None):
"""Returns the Cartpole Balance task with two poles."""
physics = Physics.from_xml_string(*get_model_and_assets(num_poles=2))
task = Balance(swing_up=True, sparse=False, random=random)
environment_kwargs = environment_kwargs or {}
return control.Environment(
physics, task, time_limit=time_limit, **environment_kwargs) | 5,358,195 |
def process_topic_entity(entity: dict, language: str) -> bool:
"""
Given a topic entity, gather its metadata
:param entity
:param language:
:type entity dict
:type language str
:returns bool
"""
try:
# Get ID
remote_id = entity["title"]
print("%s\t%s" % ("ID".ljust(16), remote_id))
# Get name from label
name = entity["labels"][language]["value"].lower()
print("%s\t%s" % ("name".ljust(16), name))
# Get brief
brief = entity["descriptions"][language]["value"].lower()
print("%s\t%s" % ("description".ljust(16), brief))
print_end()
except Exception as err:
print_err("%s error: %s" % (remote_id, err))
return False
return True | 5,358,196 |
def _no_grad_trunc_normal_(tensor: Tensor, mean: float, std: float, a: float, b: float) -> Tensor:
"""Cut & paste from PyTorch official master until it's in a few official
releases - RW Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
Args:
tensor (Tensor):
An n-dimensional `Tensor`.
mean (float):
Mean of the normal distribution.
std (float):
Standard deviation of the normal distribution.
a (float):
Minimum cutoff value.
b (float):
Maximum cutoff value.
"""
def norm_cdf(x):
# Computes standard normal cumulative distribution function
return (1. + math.erf(x / math.sqrt(2.))) / 2.
if (mean < a - 2 * std) or (mean > b + 2 * std):
error_console.log(
"mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
"Fdistribution of values may be incorrect.", stacklevel=2
)
with torch.no_grad():
# Values are generated by using a truncated uniform distribution and
# then using the inverse CDF for the normal distribution.
# Get upper and lower cdf values
l = norm_cdf((a - mean) / std)
u = norm_cdf((b - mean) / std)
# Uniformly fill image with values from [l, u], then translate to
# [2l-1, 2u-1].
tensor.uniform_(2 * l - 1, 2 * u - 1)
# Use inverse cdf transform for normal distribution to get truncated
# standard normal
tensor.erfinv_()
# Transform to proper mean, std
tensor.mul_(std * math.sqrt(2.))
tensor.add_(mean)
# Clamp to ensure it's in the proper range
tensor.clamp_(min=a, max=b)
return tensor | 5,358,197 |
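Typical use, mirroring how nn.init.trunc_normal_ is applied: fill a weight tensor in place with values clipped to about two standard deviations (the shape and numbers below are just an example).

import torch

w = torch.empty(3, 5)
_no_grad_trunc_normal_(w, mean=0.0, std=0.02, a=-0.04, b=0.04)
print(w.min().item(), w.max().item())  # both within [-0.04, 0.04]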
def inverse_update(C, m, return_drop=False):
"""
Compute the inverse of a matrix with the m-th row and column dropped given knowledge of the inverse of the original
matrix.
C = inv(A)
B = drop_col(drop_row(A, m),m)
computes inv(B) given only C
Args:
        C: inverse of the full matrix
m: row and col to drop
return_drop: whether to also return the array used to drop the m-th row/col.
Returns:
B
if return_drop:
the array to drop row/col using jnp.take(v, drop_array)
"""
drop = drop_array(C.shape[0], m)
_a = jnp.take(C, drop, axis=0) # drop m row
a = jnp.take(_a, drop, axis=1)
c = jnp.take(C, drop, axis=1)[None, m, :] # drop m col
b = _a[:, m, None]
d = C[m, m]
res = a - (b @ c) / d
if return_drop:
return res, drop
return res | 5,358,198 |
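A quick numerical check of the identity the function implements (inverse of a matrix with one row/column removed, recovered from the full inverse), under the assumption that drop_array from the same module builds the index set used by jnp.take:

import numpy as np
import jax.numpy as jnp

rng = np.random.default_rng(0)
A = rng.normal(size=(5, 5)) + 5 * np.eye(5)   # well-conditioned test matrix
C = jnp.linalg.inv(jnp.asarray(A))

m = 2
B = np.delete(np.delete(A, m, axis=0), m, axis=1)
print(jnp.allclose(inverse_update(C, m), jnp.linalg.inv(jnp.asarray(B)), atol=1e-4))  # expected: True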
def loss_calc(settings, all_batch, market_batch):
""" Calculates nn's NEGATIVE loss.
Args:
settings: contains the neural net
all_batch: the inputs to neural net
market_batch: [open close high low] used to calculate loss
Returns:
        cost: the negative of the computed loss
"""
loss = settings['nn'].loss_np(all_batch, market_batch)
return -loss | 5,358,199 |