def test_mremove_misspelled_component(ac_dc_network, caplog):
"""
GIVEN the AC DC exemplary pypsa network.
WHEN a misspelled component is removed with mremove
THEN the function should not change anything in the Line component
dataframe and an error should be logged.
"""
network = ac_dc_network
len_lines = len(network.lines.index)
network.mremove("Liness", ["0", "1"])
assert len_lines == len(network.lines.index)
    assert caplog.records[-1].levelname == "ERROR"
def _f7(seq: Sequence) -> List:
"""order preserving de-duplicate sequence"""
seen = set()
seen_add = seen.add
    return [x for x in seq if not (x in seen or seen_add(x))]
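
# Usage sketch (not part of the original corpus): _f7 de-duplicates while
# preserving first-occurrence order, unlike set().
assert _f7([3, 1, 3, 2, 1]) == [3, 1, 2]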
def mir_snp(fpath, mirna_fa, out_dir, transcript_fa, gtf,
flank=DEFAULT_FLANK, run_rnahybrid=True):
"""
fpath: variants file
mirna_fa: miRNA sequences, FASTA
out_dir: main output directory
    transcript_fa: human genome exon region fasta, created by gffread
    gtf: gtf
    flank: flank sequence length from variant position
"""
fpath = os.path.abspath(fpath)
out_dir = os.path.abspath(out_dir)
seq_dir = os.path.join(out_dir, 'seq')
if not os.path.exists(seq_dir):
os.makedirs(seq_dir)
print('##start {}'.format(time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())))
print('='*40)
print('##input file: {}'.format(fpath))
print('##ref fasta: {}'.format(transcript_fa))
print('##gtf: {}'.format(gtf))
print('##miRNA fasta: {}'.format(mirna_fa))
print('##flank length: {}'.format(flank))
print('##output directory: {}'.format(out_dir))
print('='*40)
print('transform variants file: ')
variants_file = os.path.join(out_dir, 'variants.txt')
if not os.path.exists(variants_file):
trans_input(fpath, variants_file, gtf)
print('flank length: {}'.format(flank))
print('extract transcript sequence - ref: ')
ref_fa = os.path.join(seq_dir, "ref.fa")
fetch_transcript_fasta(variants_file, transcript_fa, ref_fa)
check_outputf(ref_fa)
print('extract transcript sequence - alt: ')
alt_log = os.path.join(seq_dir, "alt.log")
if not os.path.exists(alt_log):
run_sub_variant_bases(variants_file, ref_fa, seq_dir)
check_outputf(alt_log)
# run RNAhybrid
if run_rnahybrid:
if not os.path.exists(mirna_fa):
sys.exit('error: query miRNA fasta not found: {}'.format(mirna_fa))
for gp in _fa_groups:
for tp in _direct_groups:
target_fa = os.path.join(seq_dir, f"{gp}-flank{flank}bp-{tp}.fa")
output_pt = os.path.join(seq_dir, f"{gp}-flank{flank}bp-{tp}-pattern.txt")
if not os.path.exists(output_pt):
print('run RNAhybrid {} - {}'.format(gp, tp))
cmd = 'RNAhybrid -f 2,7 -e {} -s 3utr_human -t "{}" -q {} > "{}"'.format(DEFAULT_ENERGY, target_fa, mirna_fa, output_pt)
run_subshell(cmd)
trans_info = parse_gtf(gtf)
stats_dir = os.path.join(out_dir, 'stats')
if not os.path.exists(stats_dir):
os.mkdir(stats_dir)
df_lst = []
for tp, tpnm in _direct_groups.items():
ref_pt = os.path.join(seq_dir, f"ref-flank{flank}bp-{tp}-pattern.txt")
alt_pt = os.path.join(seq_dir, f"alt-flank{flank}bp-{tp}-pattern.txt")
print(f"parse RNAhybrid pattern, compare ref and alt - {tp}")
dfm = merge_refalt(ref_pt, alt_pt, stats_dir, trans_info, name=tp, td=tpnm)
df_lst.append(dfm)
result_file = os.path.join(out_dir, 'result.txt')
df_cat = pd.concat(df_lst, sort=False)
df_f1 = df_cat.index.to_frame()['flag'].str.split(':', expand=True)
df_f1.columns = ['target_name', 'miRNA', 'gene', 'strand', 'target_position']
df_f2 = df_f1['target_name'].str.split('_', expand=True)
df_f2.columns = ['transcript', 'rsid', 'chr', 'start_position', 'ref', 'alt', 'variant_position']
df_res = df_f2.join(df_f1).join(df_cat)
df_res.to_csv(result_file, sep='\t', index=False)
check_outputf(result_file)
    print('##end {}'.format(time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())))
def get_config(cfg):
"""
    Sets the hyperparameters for the optimizer and experiment using the config file.
Args:
cfg: A YACS config object.
"""
config_params = {
"train_params": {
"adapt_lambda": cfg.SOLVER.AD_LAMBDA,
"adapt_lr": cfg.SOLVER.AD_LR,
"lambda_init": cfg.SOLVER.INIT_LAMBDA,
"nb_adapt_epochs": cfg.SOLVER.MAX_EPOCHS,
"nb_init_epochs": cfg.SOLVER.MIN_EPOCHS,
"init_lr": cfg.SOLVER.BASE_LR,
"batch_size": cfg.SOLVER.TRAIN_BATCH_SIZE,
"optimizer": {
"type": cfg.SOLVER.TYPE,
"optim_params": {
"momentum": cfg.SOLVER.MOMENTUM,
"weight_decay": cfg.SOLVER.WEIGHT_DECAY,
"nesterov": cfg.SOLVER.NESTEROV
}
}
},
"data_params": {
"dataset_group": cfg.DATASET.NAME,
"dataset_name": cfg.DATASET.SOURCE + '2' + cfg.DATASET.TARGET,
"source": cfg.DATASET.SOURCE,
"target": cfg.DATASET.TARGET,
"size_type": cfg.DATASET.SIZE_TYPE,
"weight_type": cfg.DATASET.WEIGHT_TYPE
}
}
    return config_params
def get_nas_transforms():
""" Returns trajectory transformations for NAS. """
return [
PadActions(),
AsArray(),
RewardsAsValueTargets(),
TileValueTargets()
    ]
def epoch_to_datetime(epoch):
"""
    :param epoch: str of epoch time in milliseconds
:return: converted datetime type
"""
    return datetime.datetime.fromtimestamp(float(epoch) / 1000)
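
# Usage sketch (hypothetical value): the epoch is expected in milliseconds,
# hence the division by 1000. 1609459200000 ms is 2021-01-01 00:00:00 UTC,
# rendered in the local timezone by fromtimestamp().
epoch_to_datetime("1609459200000")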
def qe_m4(px,mlmax,Talm=None,fTalm=None):
"""
px is a pixelization object, initialized like this:
px = pixelization(shape=shape,wcs=wcs) # for CAR
px = pixelization(nside=nside) # for healpix
output: curved sky multipole=4 estimator
"""
ells = np.arange(mlmax)
#prepare temperature map
rmapT=px.alm2map(np.stack((Talm,Talm)),spin=0,ncomp=1,mlmax=mlmax)[0]
#find tbarf
t_alm=cs.almxfl(fTalm,np.sqrt((ells-3.)*(ells-2.)*(ells-1.)*ells*(ells+1.)*(ells+2.)*(ells+3.)*(ells+4.)))
alms=np.stack((t_alm,t_alm))
rmap=px.alm2map_spin(alms,0,4,ncomp=2,mlmax=mlmax)
#multiply the two fields together
rmap=np.nan_to_num(rmap)
prodmap=rmap*rmapT
prodmap=np.nan_to_num(prodmap)
if not(px.hpix): prodmap=enmap.enmap(prodmap,px.wcs)
    realsp2=prodmap[0] #spin +4 real-space field
if not(px.hpix): realsp2 = enmap.enmap(realsp2,px.wcs)
#convert the above spin4 fields to spin pm 4 alms
res1 = px.map2alm_spin(realsp2,mlmax,4,4) #will return pm4
#spin 4 ylm
ttalmsp2=rot2dalm(res1,4)[0] #pick up the spin 4 alm of the first one
ttalmsm2=rot2dalm(res1,4)[1] #pick up the spin -4 alm of the second one
m4_alm=ttalmsp2+ttalmsm2
    return m4_alm
def recorded_run(self, run_id, tale_id):
"""Start a recorded run for a tale version"""
cli = docker.from_env(version='1.28')
run = self.girder_client.get('/run/{}'.format(run_id))
tale = self.girder_client.get('/tale/{}'.format(tale_id))
user = self.girder_client.get('/user/me')
def set_run_status(run, status):
self.girder_client.patch(
"/run/{_id}/status".format(**run), parameters={'status': status}
)
# UNKNOWN = 0 STARTING = 1 RUNNING = 2 COMPLETED = 3 FAILED = 4 CANCELLED = 5
set_run_status(run, RunStatus.STARTING)
self.job_manager.updateProgress(
message='Preparing volumes', total=RECORDED_RUN_STEP_TOTAL,
current=1, forceFlush=True)
# Create Docker volume
vol_name = "%s_%s_%s" % (run_id, user['login'], new_user(6))
mountpoint = _create_docker_volume(cli, vol_name)
# Create fuse directories
_make_fuse_dirs(mountpoint, ['data', 'workspace'])
# Mount data and workspace for the run
api_key = _get_api_key(self.girder_client)
session = _get_session(self.girder_client, version_id=run['runVersionId'])
if session['_id'] is not None:
_mount_girderfs(mountpoint, 'data', 'wt_dms', session['_id'], api_key, hostns=True)
_mount_girderfs(mountpoint, 'workspace', 'wt_run', run_id, api_key)
# Build the image for the run
self.job_manager.updateProgress(
message='Building image', total=RECORDED_RUN_STEP_TOTAL,
current=2, forceFlush=True)
# Setup image tag
build_time = int(time.time())
tag = '{}/{}/{}'.format(urlparse(DEPLOYMENT.registry_url).netloc,
run_id, str(build_time))
work_dir = os.path.join(mountpoint, 'workspace')
image = self.girder_client.get('/image/%s' % tale['imageId'])
env_json = _write_env_json(work_dir, image)
# TODO: What should we use here? Latest? What the tale was built with?
repo2docker_version = REPO2DOCKER_VERSION
print(f"Using repo2docker {repo2docker_version}")
# Build currently assumes tmp directory, in this case mount the run workspace
container_config = _get_container_config(self.girder_client, tale)
work_target = os.path.join(container_config.target_mount, 'workspace')
extra_volume = {
work_dir: {
'bind': work_target,
'mode': 'rw'
}
}
try:
print("Building mage for recorded run " + tag)
ret = _build_image(cli, tale_id, image, tag, work_target, repo2docker_version, extra_volume)
if ret['StatusCode'] != 0:
raise ValueError('Image build failed for recorded run {}'.format(run_id))
# TODO: Do we push the image? Delete it at the end?
self.job_manager.updateProgress(
message='Recording run', total=RECORDED_RUN_STEP_TOTAL,
current=3, forceFlush=True)
set_run_status(run, RunStatus.RUNNING)
_recorded_run(cli, mountpoint, container_config, tag)
set_run_status(run, RunStatus.COMPLETED)
self.job_manager.updateProgress(
message='Finished recorded run', total=RECORDED_RUN_STEP_TOTAL,
current=4, forceFlush=True)
except Exception as e:
logging.error("Recorded run failed. %s", e)
logging.exception(e)
set_run_status(run, RunStatus.FAILED)
finally:
# Remove the environment.json
os.remove(env_json)
# TODO: _cleanup_volumes
for suffix in ['data', 'workspace']:
dest = os.path.join(mountpoint, suffix)
logging.info("Unmounting %s", dest)
subprocess.call("umount %s" % dest, shell=True)
# Delete the session
try:
self.girder_client.delete('/dm/session/{}'.format(session['_id']))
except Exception as e:
logging.error("Unable to remove session. %s", e)
# Delete the Docker volume
try:
volume = cli.volumes.get(vol_name)
try:
logging.info("Removing volume: %s", volume.id)
volume.remove()
except Exception as e:
logging.error("Unable to remove volume [%s]: %s", volume.id, e)
except docker.errors.NotFound:
logging.info("Volume not present [%s].", vol_name) | 407 |
def seasonality_plot_df(m, ds):
"""Prepare dataframe for plotting seasonal components.
Parameters
----------
m: Prophet model.
ds: List of dates for column ds.
Returns
-------
A dataframe with seasonal components on ds.
"""
df_dict = {'ds': ds, 'cap': 1., 'floor': 0.}
for name in m.extra_regressors:
df_dict[name] = 0.
# Activate all conditional seasonality columns
for props in m.seasonalities.values():
if props['condition_name'] is not None:
df_dict[props['condition_name']] = True
df = pd.DataFrame(df_dict)
df = m.setup_dataframe(df)
    return df
async def response(request: DiscoveryRequest, xds_type: DiscoveryTypes, host: str = 'none'):
"""
A Discovery **Request** typically looks something like:
.. code-block:: json
{
"version_info": "0",
"node": {
"cluster": "T1",
"build_version": "<revision hash>/<version>/Clean/RELEASE",
"metadata": {
"auth": "..."
}
}
}
When we receive this, we give the client the latest configuration via a
Discovery **Response** that looks something like this:
.. code-block:: json
{
"version_info": "abcdef1234567890",
"resources": []
}
The version_info is derived from :func:`sovereign.discovery.version_hash`
:param request: An envoy Discovery Request
:param xds_type: what type of XDS template to use when rendering
:param host: the host header that was received from the envoy client
:return: An envoy Discovery Response
"""
template: XdsTemplate = XDS_TEMPLATES.get(request.envoy_version, default_templates)[xds_type]
context = make_context(
node_value=extract_node_key(request.node),
template=template,
)
# If the discovery request came from a mock, it will
# typically contain this metadata key.
# This means we should prevent any decryptable data
# from ending up in the response.
if request.node.metadata.get('hide_private_keys'):
context['crypto'] = disabled_suite
config_version = '0'
if config.cache_strategy == 'context':
config_version = version_hash(context, template.checksum, request.node.common, request.resources)
if config_version == request.version_info:
return {'version_info': config_version}
kwargs = dict(
discovery_request=request,
host_header=host,
resource_names=request.resources,
**context
)
if template.is_python_source:
content = {'resources': list(template.code.call(**kwargs))}
else:
content = await template.content.render_async(**kwargs)
if config.cache_strategy == 'content':
config_version = version_hash(content)
if config_version == request.version_info:
return {'version_info': config_version}
# This is the most expensive operation, I think, so it's performed as late as possible.
if not template.is_python_source:
content = deserialize_config(content)
content['version_info'] = config_version
    return remove_unwanted_resources(content, request.resources)
def count_inner_bags(content, start_color):
"""Count inner bags"""
rules = process_content(content)
bags = rules[start_color]
count = len(bags)
while len(bags) != 0:
new_bags = []
for bag in bags:
count += len(rules[bag])
new_bags += rules[bag]
bags = new_bags
    return count
def build_generation_data(
egrid_facilities_to_include=None, generation_years=None
):
"""
Build a dataset of facility-level generation using EIA923. This
function will apply filters for positive generation, generation
efficiency within a given range, and a minimum percent of generation
from the primary fuel (if set in the config file). The returned
dataframe also includes the balancing authority for every power
plant.
Parameters
----------
egrid_facilities_to_include : list, optional
List of plant codes to include (default is None, which builds a list)
generation_years : list, optional
Years of generation data to include in the output (default is None,
which builds a list from the inventories of interest and eia_gen_year
parameters)
Returns
----------
DataFrame
Dataframe columns include:
['FacilityID', 'Electricity', 'Year']
"""
if not generation_years:
# Use the years from inventories of interest
generation_years = set(
list(inventories_of_interest.values()) + [eia_gen_year]
)
df_list = []
for year in generation_years:
gen_fuel_data = eia923_download_extract(year)
primary_fuel = eia923_primary_fuel(gen_fuel_data)
gen_efficiency = calculate_plant_efficiency(gen_fuel_data)
final_gen_df = gen_efficiency.merge(primary_fuel, on="Plant Id")
if not egrid_facilities_to_include:
if include_only_egrid_facilities_with_positive_generation:
final_gen_df = final_gen_df.loc[
final_gen_df["Net Generation (Megawatthours)"] >= 0, :
]
if filter_on_efficiency:
final_gen_df = efficiency_filter(final_gen_df)
if filter_on_min_plant_percent_generation_from_primary_fuel and not keep_mixed_plant_category:
final_gen_df = final_gen_df.loc[
final_gen_df["primary fuel percent gen"]
>= min_plant_percent_generation_from_primary_fuel_category,
:,
]
# if filter_non_egrid_emission_on_NAICS:
# # Check with Wes to see what the filter here is supposed to be
# final_gen_df = final_gen_df.loc[
# final_gen_df['NAICS Code'] == '22', :
# ]
else:
final_gen_df = final_gen_df.loc[
final_gen_df["Plant Id"].isin(egrid_facilities_to_include), :
]
ba_match = eia860_balancing_authority(year)
ba_match["Plant Id"] = ba_match["Plant Id"].astype(int)
final_gen_df["Plant Id"] = final_gen_df["Plant Id"].astype(int)
final_gen_df = final_gen_df.merge(ba_match, on="Plant Id", how="left")
final_gen_df["Year"] = int(year)
df_list.append(final_gen_df)
all_years_gen = pd.concat(df_list)
all_years_gen = all_years_gen.rename(
columns={
"Plant Id": "FacilityID",
"Net Generation (Megawatthours)": "Electricity",
}
)
all_years_gen = all_years_gen.loc[:, ["FacilityID", "Electricity", "Year"]]
all_years_gen.reset_index(drop=True, inplace=True)
all_years_gen["Year"] = all_years_gen["Year"].astype("int32")
    return all_years_gen
def get_nessus_scans():
"""Return a paginated list of Nessus scan reports.
**Example request**:
.. sourcecode:: http
GET /api/1.0/analysis/nessus?page=1 HTTP/1.1
Host: do.cert.europa.eu
Accept: application/json
**Example response**:
.. sourcecode:: http
HTTP/1.0 200 OK
Content-Type: application/json
Link: <.../api/1.0/analysis/nessus?page=1&per_page=20>; rel="First",
<.../api/1.0/analysis/nessus?page=0&per_page=20>; rel="Last"
{
"count": 3,
"items": [
{
"created": "2016-03-21T16:52:52",
"id": 4,
"report": "...",
"type": "Nessus scan"
},
{
"created": "2016-03-21T16:51:49",
"id": 3,
"report": "...",
"type": "Nessus scan"
},
{
"created": "2016-03-20T17:09:03",
"id": 2,
"report": "...",
"type": "Nessus scan"
}
],
"page": 1
}
:reqheader Accept: Content type(s) accepted by the client
:resheader Content-Type: this depends on `Accept` header or request
:resheader Link: Describe relationship with other resources
:>json array items: Nessus scan reports
:>jsonarr integer id: Scan unique ID
:>jsonarr object report: Scan report
:>json integer page: Current page number
:>json integer count: Total number of items
:status 200: Reports found
:status 404: Resource not found
"""
    return ApiPagedResponse(Report.query.filter_by(type_id=4))
def count_failures(runner):
"""Count number of failures in a doctest runner.
Code modeled after the summarize() method in doctest.
"""
try:
from doctest import TestResults
    except ImportError:
from _doctest26 import TestResults
    return [TestResults(f, t) for f, t in runner._name2ft.values() if f > 0]
def fnv1_64(data, hval_init=FNV1_64_INIT):
"""
Returns the 64 bit FNV-1 hash value for the given data.
"""
    return fnv(data, hval_init, FNV_64_PRIME, 2**64)
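
# Usage sketch, assuming the standard FNV-1 64-bit constants
# (FNV1_64_INIT = 0xcbf29ce484222325, FNV_64_PRIME = 0x100000001b3):
# the hash is deterministic for a given byte string.
assert fnv1_64(b"hello") == fnv1_64(b"hello")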
def recongnize_image_file(args, image_file, output_dir):
"""
    Recognize a single image file.
    :param args: command-line arguments
    :param image_file: path to the image file to recognize
    :param output_dir: directory containing the label files and outputs
    :return: the parsed textract JSON, or None
"""
temp_name = image_file.split('/')[-1].split('.')[0]
textract_json = None
label_file = os.path.join( output_dir, temp_name + '.txt')
#print("label_file ", label_file)
#print("output_dir {} label_file {}".format(output_dir, label_file))
if os.path.exists(label_file):
try:
textract_json = recongnize_sub_image_file(args, image_file, label_file, output_dir)
except Exception as exception:
print("【Error】 图片[{}] 没有解析成功 ".format(image_file))
print('【Error】 exception [{}]'.format(exception))
traceback.print_exc()
else:
print("【Error】 图片[{}] 没有生成对应的label文件 [{}]".format(image_file, label_file))
    return textract_json
def redirectLoggerStreamHandlers(oldStream, newStream):
"""Redirect the stream of a stream handler to a different stream
"""
for handler in list(logger.handlers): #Remove old handlers
if handler.stream == oldStream:
handler.close()
logger.removeHandler(handler)
for handler in logger.handlers: #Do not add a duplicate handler
if handler.stream == newStream:
return
    logger.addHandler(logging.StreamHandler(newStream))
def parse(file_path, out):
"""
parse the header file:
- recursively call includes if not parsed yet
- write the rest of the file to the output file
"""
print("parsing: " + file_path)
with open(file_path) as f:
for line in f:
            include_match = re.search(r'#include[\s]*<[a-zA-Z./_]*>', line)
pragma_once = re.search('#pragma once', line)
if (include_match is not None) and (pragma_once is None):
rel_path = FileNameFromInclude(include_match.group())
if (rel_path not in ignore_headers) and (rel_path not in dont_touch_headers):
header_path = path.join(include_path, rel_path)
parse(header_path, out)
ignore_headers.append(rel_path)
elif pragma_once is None:
                out.write(line)
def parse_data_sp(source_root, parallel_roots, glob_str="**/*.wav", add_source=False):
"""
    Assumes that each parallel_root will contain folders of the following structure:
PARALLEL_ROOT/record_17/IPhone 12 Pro Max/JBL CLIP3/distance=60-loudness=15-recording_mode=default/RELATIVE_PATH_TO_WAV_FROM_SOURCE
"""
data = defaultdict(list)
source_root = Path(source_root).resolve()
parallel_roots = [Path(parallel_root) for parallel_root in parallel_roots]
# print(parallel_roots)
_class_ind_maps = defaultdict(list)
source_pathes = list(source_root.glob(glob_str))
if add_source:
_class_ind_maps["spoofing"] = ["genuine", "spoof"]
for source_path in tqdm(source_pathes):
for parallel_root in parallel_roots:
playback_device = parallel_root.parts[-2].lower().replace(" ", "")
recording_device = parallel_root.parts[-3].lower().replace(" ", "")
# print(f"{playback_device}, {recording_device}")
if not (playback_device in _class_ind_maps["playback_device"]):
_class_ind_maps["playback_device"].append(playback_device)
if not (recording_device in _class_ind_maps["recording_device"]):
_class_ind_maps["recording_device"].append(recording_device)
source_rlp = source_path.relative_to(source_root)
parallel_path = parallel_root / source_rlp
if parallel_path.exists():
data[source_path].append({
"path": parallel_path,
"spoofing": "spoof",
"playback_device": playback_device,
"recording_device": recording_device
})
if add_source:
if len(data[source_path]) > 0:
data[source_path].insert(
0, {
"path": source_path,
"spoofing": "genuine",
"playback_device": None,
"recording_device": None
})
class_ind_maps = defaultdict(dict)
print(_class_ind_maps)
for task_name, task_classes in _class_ind_maps.items():
for cls_ind, cls_name in enumerate(sorted(task_classes)):
class_ind_maps[task_name][cls_name] = cls_ind
    return data, class_ind_maps
def heat_diffusion(A, t, L, k, eps=0.0001):
"""
Computes the heat diffusion equation
Parameters
----------
A : Tensor or SparseTensor
the (N,N,) density matrix
t : float
the diffusion time
L : Tensor or SparseTensor
the (N,N,) Laplacian matrix
k : Tensor
the (N,D,) initial heat tensor
eps : float (optional)
a regularizer value (default is 0.0001)
Returns
-------
Tensor
the (N,D,) heat tensor
"""
    return poisson_equation(A+t*L, k, eps=eps)
def help():
"""print cache help"""
log.info(log.YELLOW +
"fips cache\n"
"fips cache [config]\n" + log.DEF +
" open the CMakeCache file with your default text editor") | 420 |
def unpack_ad_info(ad_info: dict, param_name: str) -> bytes:
"""Проверяет наличие ожидаемой структуры и возвращает значение."""
# Красиво не сработает, потому что применение условий должно быть последовательным
if (
isinstance(ad_info, dict)
and ad_info.get(param_name) # noqa: W503
and isinstance(ad_info[param_name], list) # noqa: W503
and isinstance(ad_info[param_name][0], bytes) # noqa: W503
):
return ad_info[param_name][0]
    return None
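
# Usage sketch (hypothetical attribute dict, e.g. raw LDAP attributes):
ad_info = {"objectSid": [b"\x01\x02\x03"]}
assert unpack_ad_info(ad_info, "objectSid") == b"\x01\x02\x03"
assert unpack_ad_info(ad_info, "missing") is None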
def _read_file(file, sheet_name=0):
"""
Helper function used to read the file and return a pandas dataframe.
Checks if file type is a .csv or excel. If not,
returns a ValueError.
Parameters
----------
file : str
the name of the file, including the filetype extension
sheet_name : int, optional
if passing an excel file, the name of the sheet to analyze,
by default 0
Returns
-------
pandas.Dataframe
pandas dataframe containing data from file
"""
if file.endswith('.csv'):
df = pd.read_csv(file)
else:
try:
df = pd.read_excel(file, sheet_name=sheet_name)
except XLRDError:
raise ValueError("Please use a valid csv or excel file.")
    return df
def get_school_total_students(school_id, aug_school_info):
"""
Gets total number of students associated with a school.
Args:
district_id (str): NCES ID of target district (e.g. '0100005').
aug_school_info (pandas.DataFrame): Target augmented school information
(as formatted by `auxiliary.data_handler.DataHandler`).
Returns:
int: Single number comprising school-level data.
"""
    return int(aug_school_info.loc[school_id]["total_students"])
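
# Usage sketch with hypothetical data; aug_school_info is assumed to be a
# pandas DataFrame indexed by school ID with a "total_students" column:
#   import pandas as pd
#   info = pd.DataFrame({"total_students": [500]}, index=["0100005"])
#   get_school_total_students("0100005", info)  # -> 500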
def loci_adjust(ds, *, group, thresh, interp):
"""LOCI: Adjust on one block.
Dataset variables:
hist_thresh : Hist's equivalent thresh from ref
sim : Data to adjust
"""
sth = u.broadcast(ds.hist_thresh, ds.sim, group=group, interp=interp)
factor = u.broadcast(ds.af, ds.sim, group=group, interp=interp)
with xr.set_options(keep_attrs=True):
scen = (factor * (ds.sim - sth) + thresh).clip(min=0)
    return scen.rename("scen").to_dataset()
def output_thread():
""" output thread function
for getting inference results from Movidius NCS
running graph specific post processing of inference result
queuing the results for main thread callbacks
"""
global gRunning
try:
while gRunning:
try:
inference_result, user_data = gGraph.GetResult()
gUpdateq.put((postprocess(inference_result), user_data))
except KeyError:
# This error occurs when GetResult can't access the user param from the graph, we're just ignoring it for now
#print("KeyError")
pass
except Exception as e:
print(e)
pass
print("Output thread terminating") | 425 |
def _ClientThread(client_ip, client_user, client_pass, mvip, username, password, purge):
"""delete the volumes for a client, run as a thread"""
log = GetLogger()
SetThreadLogPrefix(client_ip)
log.info("Connecting to client")
client = SFClient(client_ip, client_user, client_pass)
account_name = client.HostnameToAccountName()
cluster = SFCluster(mvip, username, password)
try:
match_volumes = cluster.SearchForVolumes(accountName=account_name)
except UnknownObjectError:
log.passed("Account is already deleted")
return True
if len(list(match_volumes.keys())) <= 0:
log.passed("No volumes to delete")
return True
log.info("Deleting {} volumes".format(len(list(match_volumes.keys()))))
cluster.DeleteVolumes(volumeIDs=list(match_volumes.keys()), purge=purge)
log.passed("Successfully deleted volumes") | 426 |
def objective(args: Namespace, trial: optuna.trial._trial.Trial) -> float:
"""Objective function for optimization trials.
Args:
args (Namespace): Input arguments for each trial (see `config/args.json`) for argument names.
trial (optuna.trial._trial.Trial): Optuna optimization trial.
Returns:
F1 score from evaluating the trained model on the test data split.
"""
    # Parameters (to tune)
args.embedding_dim = trial.suggest_int("embedding_dim", 128, 512)
args.num_filters = trial.suggest_int("num_filters", 128, 512)
args.hidden_dim = trial.suggest_int("hidden_dim", 128, 512)
args.dropout_p = trial.suggest_uniform("dropout_p", 0.3, 0.8)
args.lr = trial.suggest_loguniform("lr", 5e-5, 5e-4)
# Train (can move some of these outside for efficiency)
logger.info(f"\nTrial {trial.number}:")
logger.info(json.dumps(trial.params, indent=2))
artifacts = run(args=args, trial=trial)
# Set additional attributes
args = artifacts["args"]
performance = artifacts["performance"]
logger.info(json.dumps(performance["overall"], indent=2))
trial.set_user_attr("threshold", args.threshold)
trial.set_user_attr("precision", performance["overall"]["precision"])
trial.set_user_attr("recall", performance["overall"]["recall"])
trial.set_user_attr("f1", performance["overall"]["f1"])
    return performance["overall"]["f1"]
def read_gdwarfs(file=_GDWARFALLFILE,logg=False,ug=False,ri=False,sn=True,
ebv=True,nocoords=False):
"""
NAME:
read_gdwarfs
PURPOSE:
read the spectroscopic G dwarf sample
INPUT:
logg= if True, cut on logg, if number, cut on logg > the number (>4.2)
ug= if True, cut on u-g, if list/array cut to ug[0] < u-g< ug[1]
ri= if True, cut on r-i, if list/array cut to ri[0] < r-i< ri[1]
sn= if False, don't cut on SN, if number cut on SN > the number (15)
ebv= if True, cut on E(B-V), if number cut on EBV < the number (0.3)
nocoords= if True, don't calculate distances or transform coordinates
OUTPUT:
cut data, returns numpy.recarray
HISTORY:
2011-07-08 - Written - Bovy@MPIA (NYU)
"""
raw= _load_fits(file)
#First cut on r
indx= (raw.field('dered_r') < 20.2)*(raw.field('dered_r') > 14.5)
raw= raw[indx]
#Then cut on g-r
indx= ((raw.field('dered_g')-raw.field('dered_r')) < 0.55)\
*((raw.field('dered_g')-raw.field('dered_r')) > .48)
raw= raw[indx]
#Cut on velocity errs
indx= (raw.field('pmra_err') > 0.)*(raw.field('pmdec_err') > 0.)\
*(raw.field('vr_err') > 0.)
raw= raw[indx]
#Cut on logg?
if (isinstance(logg,bool) and logg):
indx= (raw.field('logga') > 4.2)
raw= raw[indx]
elif not isinstance(logg,bool):
indx= (raw.field('logga') > logg)
raw= raw[indx]
if isinstance(ug,bool) and ug:
indx= ((raw.field('dered_u')-raw.field('dered_g')) < 2.)\
*((raw.field('dered_u')-raw.field('dered_g')) > .6)
raw= raw[indx]
if not isinstance(ug,bool):
indx= ((raw.field('dered_u')-raw.field('dered_g')) < ug[1])\
*((raw.field('dered_u')-raw.field('dered_g')) > ug[0])
raw= raw[indx]
if isinstance(ri,bool) and ri:
indx= ((raw.field('dered_r')-raw.field('dered_i')) < .4)\
*((raw.field('dered_r')-raw.field('dered_i')) > -.1)
raw= raw[indx]
elif not isinstance(ri,bool):
indx= ((raw.field('dered_r')-raw.field('dered_i')) < ri[1])\
*((raw.field('dered_r')-raw.field('dered_i')) > ri[0])
raw= raw[indx]
if (isinstance(sn,bool) and sn):
indx= (raw.field('sna') > 15.)
raw= raw[indx]
elif not isinstance(sn,bool):
indx= (raw.field('sna') > sn)
raw= raw[indx]
if isinstance(ebv,bool) and ebv:
indx= (raw.field('ebv') < .3)
raw= raw[indx]
elif not isinstance(ebv,bool):
indx= (raw.field('ebv') < ebv)
raw= raw[indx]
if nocoords: return raw
raw= _add_distances(raw)
raw= _add_velocities(raw)
    return raw
def add_item(category_slug=None):
"""
Add a new Item Form.
:param category_slug: The category slug
"""
# Get the current category using the slug
current_category = Category.where('slug', category_slug).first()
return render_template(
'items/add.html',
categories=Category.all(),
current_category=current_category
    )
def test_list_overlays_when_dir_missing(chdir_fixture): # NOQA
"""
This test simulates checking out a frozen dataset from Git that has no
overlays written to it, i.e. where the ``.dtool/overlays`` directory is
missing.
See also:
https://github.com/jic-dtool/dtoolcore/issues/3
"""
from dtoolcore import ProtoDataSet, generate_admin_metadata
from dtoolcore import DataSet
from dtoolcore.storagebroker import DiskStorageBroker
name = "my_dataset"
admin_metadata = generate_admin_metadata(name)
dest_uri = DiskStorageBroker.generate_uri(
name=name,
uuid=admin_metadata["uuid"],
base_uri="file://.")
sample_data_path = os.path.join(TEST_SAMPLE_DATA)
local_file_path = os.path.join(sample_data_path, 'tiny.png')
# Create a minimal dataset
proto_dataset = ProtoDataSet(
uri=dest_uri,
admin_metadata=admin_metadata,
config_path=None)
proto_dataset.create()
proto_dataset.put_item(local_file_path, 'tiny.png')
proto_dataset.freeze()
# Simulate the missing overlay directory.
assert os.path.isdir(proto_dataset._storage_broker._overlays_abspath)
os.rmdir(proto_dataset._storage_broker._overlays_abspath)
assert not os.path.isdir(proto_dataset._storage_broker._overlays_abspath)
dataset = DataSet.from_uri(proto_dataset.uri)
# This call caused the bug.
overlay_names = dataset.list_overlay_names()
    assert overlay_names == []
def track_type(time, lat, tmax=1):
"""
Determines ascending and descending tracks.
Defines unique tracks as segments with time breaks > tmax,
and tests whether lat increases or decreases w/time.
"""
# Generate track segment
tracks = np.zeros(lat.shape)
# Set values for segment
tracks[0:np.argmax(np.abs(lat))] = 1
# Output index array
i_asc = np.zeros(tracks.shape, dtype=bool)
    # Loop through individual tracks
for track in np.unique(tracks):
# Get all points from an individual track
i_track, = np.where(track == tracks)
# Test tracks length
if len(i_track) < 2:
continue
# Test if lat increases (asc) or decreases (des) w/time
i_min = time[i_track].argmin()
i_max = time[i_track].argmax()
lat_diff = lat[i_track][i_max] - lat[i_track][i_min]
# Determine track type
if lat_diff > 0:
i_asc[i_track] = True
    # Output index vectors
    return i_asc, np.invert(i_asc)
def get_installed_procnames():
"""Get a list of procs currently on the file system."""
    return set(get_procs())
def check(model_opt: onnx.ModelProto, model_ori: onnx.ModelProto, n_times: int = 5) -> None:
"""
Warning: Some models (e.g., MobileNet) may fail this check by a small magnitude.
Just ignore if it happens.
:param model_opt: The simplified ONNX model
:param model_ori: The original ONNX model
:param n_times: Generate n random inputs
"""
onnx.checker.check_model(model_opt)
for _ in range(n_times):
rand_input = generate_rand_input(model_opt)
res_opt = forward(model_opt, inputs=rand_input)
res_ori = forward(model_ori, inputs=rand_input)
for name in res_opt.keys():
            assert np.allclose(res_opt[name], res_ori[name], rtol=1e-4, atol=1e-5)
def pam_bw_as_matrix(buff, border):
"""\
Returns the QR code as list of [0, 1] lists.
:param io.BytesIO buff: Buffer to read the matrix from.
:param int border: The QR code border
"""
res = []
data, size = _image_data(buff)
for i, offset in enumerate(range(0, len(data), size)):
if i < border:
continue
if i >= size - border:
break
row_data = bytearray(data[offset + border:offset + size - border])
# Invert bytes since PAM uses 0x0 = black, 0x1 = white
res.append([b ^ 0x1 for b in row_data])
    return res
def delete_task(task_id: int):
"""Remove task with associated ID from the database."""
send_to_login = ensure_login()
if send_to_login:
return send_to_login
else:
old_task = Task.delete(task_id)
flash(f'You deleted "{old_task.title}"', "info")
        return redirect(url_for("task.view_task_list"))
def KDPReboot(cmd_args=None):
""" Restart the remote target
"""
if "kdp" != GetConnectionProtocol():
print "Target is not connected over kdp. Nothing to do here."
return False
print "Rebooting the remote machine."
lldb.debugger.HandleCommand('process plugin packet send --command 0x13')
lldb.debugger.HandleCommand('detach')
    return True
def get_version(): # noqa: E501
"""API version
The API version # noqa: E501
:rtype: str
"""
    return '1.0.0'
def bert_evaluate(model, eval_dataloader, device):
"""Evaluation of trained checkpoint."""
model.to(device)
model.eval()
predictions = []
true_labels = []
data_iterator = tqdm(eval_dataloader, desc="Iteration")
for step, batch in enumerate(data_iterator):
input_ids, input_mask, labels = batch
input_ids = input_ids.to(device)
input_mask = input_mask.to(device)
with torch.no_grad():
outputs = model(input_ids, token_type_ids=None, attention_mask=input_mask)
        # the model returns a loss only when labels are passed in; here we only need the logits
        logits = outputs[0]
logits = logits.to('cpu').numpy()
label_ids = labels.to('cpu').numpy()
for label, logit in zip(label_ids, logits):
true_labels.append(label)
predictions.append(np.argmax(logit))
#print(predictions)
#print(true_labels)
metrics = get_metrics(true_labels, predictions)
    return metrics
def test_sign_and_recover_message():
"""Test the signing and the recovery of a message."""
account = FetchAICrypto(FETCHAI_PRIVATE_KEY_PATH)
sign_bytes = account.sign_message(message=b"hello")
assert len(sign_bytes) > 0, "The len(signature) must not be 0"
recovered_addresses = FetchAIApi.recover_message(
message=b"hello", signature=sign_bytes
)
assert (
account.address in recovered_addresses
), "Failed to recover the correct address." | 439 |
def get_cached_scts(hex_ee_hash):
""" get_cached_scts returns previously fetched valid SCT from this certificate. The key to perform this search is
the hex-encoded hash of the end-entity certificate
:param hex_ee_hash: the hex-encoded hash of the end-entity certificate
:return: a dictionary of SCTs where the keys are the log URL
"""
c = dbconn.cursor()
c.execute('''
SELECT logs.log, scts.sct
FROM certs
INNER JOIN scts
ON certs.id = scts.cert_id
INNER JOIN logs
ON scts.log_id = logs.id
WHERE certs.ee_hash = ?
AND scts.valid = 1
''', (hex_ee_hash,))
return {
log: {'sct': sct, 'valid': True}
for (log, sct) in c.fetchall()
    }
def manual_init(board: Board, initial_cells: int):
"""
Click to add/remove cells.
Stop when you've added configured number.
"""
live_cells = 0
while live_cells < initial_cells:
print(f"Cells remaining to input {initial_cells - live_cells}")
grid = plt.imshow(board.binary_grid, cmap='binary')
click_input = plt.ginput(1, timeout=-1)
input_col = int(round(click_input[0][1], 0))
input_row = int(round(click_input[0][0], 0))
if board.is_cell_alive(input_col, input_row):
board.kill(input_col, input_row)
live_cells -= 1
else:
board.make_alive(input_col, input_row)
live_cells += 1
board.set_grid()
grid.set_data(board.binary_grid)
        plt.draw()
def set_gps_model(gps):
"""Updates current gps_model and publishes merged model."""
try:
g['gps_model'] = np.squeeze(
bridge.imgmsg_to_cv2(gps, desired_encoding='mono8'))
merge_and_publish()
except CvBridgeError:
rospy.loginfo("Error converting gps model to cv2")
        raise
def bin_to_hex() -> None:
"""Convierte un binario a hexadecimal."""
n = str_input("> ")
result = hex(int(n, 2))[2:]
    textbox(str(result))
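
# The core conversion used above, shown standalone (usage sketch):
assert hex(int("1010", 2))[2:] == "a"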
def all_gather_multigpu(
output_tensor_lists, input_tensor_list, group=None, async_op=False
):
"""
Gathers tensors from the whole group in a list.
Each tensor in ``tensor_list`` should reside on a separate GPU
Only nccl backend is currently supported
tensors should only be GPU tensors
Complex tensors are supported.
Args:
output_tensor_lists (List[List[Tensor]]): Output lists. It should
contain correctly-sized tensors on each GPU to be used for output
of the collective, e.g. ``output_tensor_lists[i]`` contains the
all_gather result that resides on the GPU of
``input_tensor_list[i]``.
Note that each element of ``output_tensor_lists`` has the size of
``world_size * len(input_tensor_list)``, since the function all
gathers the result from every single GPU in the group. To interpret
each element of ``output_tensor_lists[i]``, note that
            ``input_tensor_list[j]`` of rank k will appear in
``output_tensor_lists[i][k * world_size + j]``
Also note that ``len(output_tensor_lists)``, and the size of each
element in ``output_tensor_lists`` (each element is a list,
therefore ``len(output_tensor_lists[i])``) need to be the same
for all the distributed processes calling this function.
input_tensor_list (List[Tensor]): List of tensors(on different GPUs) to
be broadcast from current process.
Note that ``len(input_tensor_list)`` needs to be the same for
all the distributed processes calling this function.
group (ProcessGroup, optional): The process group to work on. If None,
the default process group will be used.
async_op (bool, optional): Whether this op should be an async op
Returns:
Async work handle, if async_op is set to True.
None, if not async_op or if not part of the group
"""
if _rank_not_in_group(group):
return
output_tensor_lists = [
[t if not t.is_complex() else torch.view_as_real(t) for t in l]
for l in output_tensor_lists
]
input_tensor_list = [
t if not t.is_complex() else torch.view_as_real(t) for t in input_tensor_list
]
if group is None:
default_pg = _get_default_group()
work = default_pg.allgather(output_tensor_lists, input_tensor_list)
else:
work = group.allgather(output_tensor_lists, input_tensor_list)
if async_op:
return work
else:
        work.wait()
def test_rejects_invalid_year() -> None:
"""It rejects invalid years."""
x = pd.Series(
[
"nan",
"x",
"1.2",
]
)
error = parse_year(x)
    pd.testing.assert_series_equal(x, pd.Series(error["values"]))
def get_uuid_from_str(input_id: str) -> str:
"""
    Returns a uuid3 string representation generated from an input string.
:param input_id:
:return: uuid3 string representation
"""
    return str(uuid.uuid3(uuid.NAMESPACE_DNS, input_id))
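
# Usage sketch: uuid3 hashing is deterministic, so equal inputs map to
# equal IDs and different inputs map to different ones.
assert get_uuid_from_str("example") == get_uuid_from_str("example")
assert get_uuid_from_str("example") != get_uuid_from_str("other")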
def extend_request(request_id=None, workload_id=None, lifetime=30):
"""
extend an request's lifetime.
:param request_id: The id of the request.
:param workload_id: The workload_id of the request.
    :param lifetime: The lifetime as a number of days.
"""
    return requests.extend_request(request_id=request_id, workload_id=workload_id, lifetime=lifetime)
def from_bytes(buf: bytes) -> str:
"""Return MIME type from content in form of bytes-like type.
Example:
>>> import defity
>>> defity.from_bytes(b'some-binary-content')
'image/png'
"""
_guard_buf_arg(buf)
# We accept many input data types just for user's convenience. We still convert
# it to immutable bytes to pass down to Rust function.
    return _mod.from_bytes(bytes(buf))
def pl__5__create_train_frame_sequences(ctvusts_by_tcp__lte_1, frame_sequences__by__tcpctvustsfs, train_tcpctvustsfs__gt__1):
"""
returns:
train_tcpctvustsfs__all
(
<TokenID>,
<CameraPerspective>,
<ASLConsultantID>,
<TargetVideoFilename>,
<UtteranceSequence>,
<TokenSequence>,
<FrameSequence>
)
"""
train__ctvusts_by_tcp__lte_1__keys = (
ctvusts_by_tcp__lte_1
| "Beam PL: extract ((TokenID,CameraPerspective,ASLConsultantID,TargetVideoFilename,UtteranceSequence,TokenSequence), '<ctvusts_by_tcp__lte_1_tpl__has_key>') for join to tcpctvustsfs" >> beam.Map(
lambda ctvusts_by_tcp__lte_1_tpl : (
(
ctvusts_by_tcp__lte_1_tpl[0], # TokenID
ctvusts_by_tcp__lte_1_tpl[1], # CameraPerspective
ctvusts_by_tcp__lte_1_tpl[2], # ASLConsultantID
ctvusts_by_tcp__lte_1_tpl[3], # TargetVideoFilename
ctvusts_by_tcp__lte_1_tpl[4], # UtteranceSequence
ctvusts_by_tcp__lte_1_tpl[5] # TokenSequence
),
"<ctvusts_by_tcp__lte_1_tpl__has_key>"
)
)
)
train_tcpctvustsfs__lte_1 = (
({
'has_key': train__ctvusts_by_tcp__lte_1__keys,
'frame_sequences': frame_sequences__by__tcpctvustsfs
})
| "Beam PL: join ctvusts_by_tcp__lte_1 to tcpctvustsfs" >> beam.CoGroupByKey()
# the above produces tuples of the form:
# (
# (
# <TokenID>,
# <CameraPerspective>,
# <ASLConsultantID>,
# <TargetVideoFilename>,
# <UtteranceSequence>,
# <TokenSequence>
# ),
# {
# 'has_key': listof('<ctvusts_by_tcp__lte_1_tpl__has_key>'), # should have only one/single element
# 'frame_sequences': listof(<FrameSequence>) # many
# }
# )
| "Beam PL: filter out mismatches from joined train__ctvusts_by_tcp__lte_1 to tcpctvustsfs" >> beam.Filter(
lambda joined__train__ctvusts_by_tcp__lte_1__to__tcpctvustsfs__tpl:
len(joined__train__ctvusts_by_tcp__lte_1__to__tcpctvustsfs__tpl[1]['has_key'])>0 and \
len(joined__train__ctvusts_by_tcp__lte_1__to__tcpctvustsfs__tpl[1]['frame_sequences'])>0
)
| "Beam PL: 'explode' listof(<FrameSequence>) from joined train__ctvusts_by_tcp__lte_1 to tcpctvustsfs to list of tuples" >> beam.Map(
lambda joined__train__ctvusts_by_tcp__lte_1__to__tcpctvustsfs__tpl: [
(
joined__train__ctvusts_by_tcp__lte_1__to__tcpctvustsfs__tpl[0][0], # TokenID
joined__train__ctvusts_by_tcp__lte_1__to__tcpctvustsfs__tpl[0][1], # CameraPerspective
joined__train__ctvusts_by_tcp__lte_1__to__tcpctvustsfs__tpl[0][2], # ASLConsultantID
joined__train__ctvusts_by_tcp__lte_1__to__tcpctvustsfs__tpl[0][3], # TargetVideoFilename
joined__train__ctvusts_by_tcp__lte_1__to__tcpctvustsfs__tpl[0][4], # UtteranceSequence
joined__train__ctvusts_by_tcp__lte_1__to__tcpctvustsfs__tpl[0][5], # TokenSequence
frame_seq
) for frame_seq in sorted(joined__train__ctvusts_by_tcp__lte_1__to__tcpctvustsfs__tpl[1]['frame_sequences'])
]
)
| "Beam PL: 'explode' listof((TokenID,CameraPerspective,ASLConsultantID,TargetVideoFilename,UtteranceSequence,TokenSequence, FrameSequence)) from joined ttrain__ctvusts_by_tcp__lte_1 to tcpctvustsfs" >> beam.FlatMap(
lambda list_joined__train__ctvusts_by_tcp__lte_1__to__tcpctvustsfs__tpl: list_joined__train__ctvusts_by_tcp__lte_1__to__tcpctvustsfs__tpl
)
)
train_tcpctvustsfs__all = (
(train_tcpctvustsfs__gt__1, train_tcpctvustsfs__lte_1)
| f"Beam PL: merge train_tcpctvustsfs__gt__1 with train_tcpctvustsfs__lte_1" >> beam.Flatten()
)
    return train_tcpctvustsfs__all
def createList(value, n):
"""
@param value: value to initialize the list
@param n: list size to be created
@return: size n list initialized to value
"""
    return [value for _ in range(n)]
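
# Usage sketch:
assert createList(0, 3) == [0, 0, 0]
# Caveat: for a mutable `value`, every element is the same object reference.
rows = createList([], 2)
rows[0].append(1)
assert rows == [[1], [1]]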
def label_class_num(label):
"""
    Number of label classes.
    :param label: label array of shape (n_samples, n_classes)
    :return: the number of classes
"""
    return label.shape[1]
def heur(puzzle, item_total_calc, total_calc):
"""
Heuristic template that provides the current and target position for each number and the
total function.
Parameters:
puzzle - the puzzle
item_total_calc - takes 4 parameters: current row, target row, current col, target col.
Returns int.
total_calc - takes 1 parameter, the sum of item_total_calc over all entries, and returns int.
This is the value of the heuristic function
"""
t = 0
for row in range(3):
for col in range(3):
val = puzzle.peek(row, col) - 1
target_col = val % 3
            target_row = val // 3  # integer division; val / 3 would yield a float in Python 3
# account for 0 as blank
if target_row < 0:
target_row = 2
t += item_total_calc(row, target_row, col, target_col)
    return total_calc(t)
def make_argparse_help_safe(s):
"""Make strings safe for argparse's help.
    Argparse supports %-style templates in help strings, which is sometimes not wanted.
    Make user-supplied strings safe for this.
"""
    return s.replace('%', '%%').replace('%%%', '%%')
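
# Usage sketch: a bare '%' would otherwise be interpreted by argparse as a
# format specifier in help text.
assert make_argparse_help_safe("100% sure") == "100%% sure"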
def _file_content_hash(file_name, encoding, database=None, newline=None):
"""
Returns the file content as well as the hash of the content
Use the database to keep a persistent cache of the last content
hash. If the file modification date has not changed assume the
hash is the same and do not re-open the file.
"""
if database is None:
content = read_file(file_name, encoding=encoding, newline=newline)
return content, hash_string(content)
key = f"cached._file_content_hash({file_name!s}, newline={newline!s})".encode()
if key not in database:
content = read_file(file_name, encoding=encoding, newline=newline)
content_hash = hash_string(content)
timestamp = os.path.getmtime(file_name)
database[key] = timestamp, content_hash
return content, content_hash
timestamp = os.path.getmtime(file_name)
last_timestamp, last_content_hash = database[key]
if timestamp != last_timestamp:
content = read_file(file_name, encoding=encoding, newline=newline)
content_hash = hash_string(content)
database[key] = timestamp, content_hash
return content, content_hash
    return None, last_content_hash
def leaderboard(avatars, usernames, levels):
"""
Draw the leaderboard. Return the path of the image.
The path points to a temporary file with extension png. The caller
is responsible for removing this temporary file.
avatars is 10 top users' avatar images. It should be a list of
BytesIO or path openable by PIL.Image.open()
usernames is 10 top users' usernames.
levels is 10 top users' levels.
"""
template = Image.open("assets/leaderboard_template.png")
canvas = ImageDraw.Draw(template)
iterator = enumerate(zip(avatars, usernames, levels))
for i, (avatar, username, level) in iterator:
offset_y = 75 * i
avatar_img = Image.open(avatar).resize((66, 66))
template.paste(avatar_img, (5, 99 + offset_y))
template.paste(AVATAR_MASK_66, (5, 99 + offset_y), AVATAR_MASK_66)
canvas.text((175, 113 + offset_y), username, font=UBUNTU_31)
canvas.text((565, 115 + offset_y), f"Level: {level}", font=UBUNTU_25)
fd, filename = tempfile.mkstemp(suffix=".png")
os.close(fd)
template.save(filename)
template.close()
    return filename
def create_pysm_commands(
mapfile,
nside,
bandcenter_ghz,
bandwidth_ghz,
beam_arcmin,
coord,
mpi_launch,
mpi_procs,
mpi_nodes,
):
"""
Return lines of shell code to generate the precomputed input sky map.
"""
mpistr = "{}".format(mpi_launch)
if mpi_procs != "":
mpistr = "{} {} 1".format(mpistr, mpi_procs)
if mpi_nodes != "":
mpistr = "{} {} 1".format(mpistr, mpi_nodes)
outstr = "# Create sky model\n"
outstr = '{}if [ ! -e "{}" ]; then\n'.format(outstr, mapfile)
outstr = '{} echo "Creating sky model {} ..."\n'.format(outstr, mapfile)
outstr = '{} {} ./pysm_sky.py --output "{}" --nside {} --bandcenter_ghz {} --bandwidth_ghz {} --beam_arcmin {} --coord {}\n'.format(
outstr,
mpistr,
mapfile,
nside,
bandcenter_ghz,
bandwidth_ghz,
beam_arcmin,
coord,
)
outstr = "{}fi\n".format(outstr)
outstr = "{}\n".format(outstr)
    return outstr
def edits1(word):
""" All edits that are one edit away from `word`. """
letters = 'abcdefghijklmnopqrstuvwxyz'
splits = [(word[:i], word[i:]) for i in range(len(word) + 1)]
deletes = [L + R[1:] for L, R in splits if R]
transposes = [L + R[1] + R[0] + R[2:] for L, R in splits if len(R)>1]
replaces = [L + c + R[1:] for L, R in splits if R for c in letters]
inserts = [L + c + R for L, R in splits for c in letters]
    return set(deletes + transposes + replaces + inserts)
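
# Usage sketch: deletions, transpositions, replacements and insertions of
# "cat" are all one edit away.
e = edits1("cat")
assert "at" in e    # delete
assert "act" in e   # transpose
assert "cut" in e   # replace
assert "cart" in e  # insert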
def test_umlaut_update_client_description():
"""
Tests umlaut on description string.
"""
pm = MockPymill('key')
clientid = "client_12345"
pm.update_client(client_id = clientid, email="[email protected]", description='Ümläüt')
assert pm.api_called
    assert pm.call_args['params'].get('description') == 'Ümläüt'
def process_text_cn(text: str):
"""中文文本处理"""
text = del_white_chars(text)
text = sub_punctuation(text)
    return text
def total_elastic_cross_section_browning1994_cm2(atomic_number, energy_keV):
"""
From browning1994
Valid in the range 100 eV to 30 keV for elements 1 to 92.
"""
Z = atomic_number
E = energy_keV
factor = 3.0e-18
power_z = math.pow(Z, 1.7)
power_e = math.pow(E, 0.5)
    numerator = factor*power_z
    denominator = E + 0.005 * power_z * power_e + 0.0007 * Z * Z / power_e
    cross_section_cm2 = numerator/denominator
    return cross_section_cm2
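
# Usage sketch: elastic cross section for carbon (Z = 6) at 10 keV, inside
# the stated validity range of the Browning (1994) formula.
sigma_cm2 = total_elastic_cross_section_browning1994_cm2(6, 10.0)
assert sigma_cm2 > 0.0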
def connector_setup():
"""
Fill the sandbox with fresh data by sending simple text-only messages.
"""
config = Config
now = datetime.datetime.now().timestamp()
if config.get_test_data()["last_update_timestamp"] + config.min_update_interval <= now:
data_filler: DataFiller = DataFiller(
from_emails=config.senders, to=config.recipients, number_of_messages=config.messages_per_sender
)
data_filler.fill()
config.update_timestamp(now)
    yield
def includeme(config):
"""
Initialize the model for a Pyramid app.
Activate this setup using ``config.include('datameta.models')``.
"""
settings = config.get_settings()
settings['tm.manager_hook'] = 'pyramid_tm.explicit_manager'
# use pyramid_tm to hook the transaction lifecycle to the request
config.include('pyramid_tm')
# use pyramid_retry to retry a request when transient exceptions occur
config.include('pyramid_retry')
session_factory = get_session_factory(get_engine(settings))
config.registry['dbsession_factory'] = session_factory
# make request.dbsession available for use in Pyramid
config.add_request_method(
# r.tm is the transaction manager used by pyramid_tm
lambda r: get_tm_session(session_factory, r.tm),
'dbsession',
reify=True
    )
def plot_fancy(nodes, elems, phi=None, charge=None, u=None, charge_max=None,
show=False, save=None, num_intp=100, title=None, clabel=None,
animation_mode=True, latex=False):
""" Plots fancily. """
if animation_mode:
fig = Figure(colorbar=False, tight_layout=True, show=show,
xlabel="", ylabel="", save=save, ticks=False, latex=latex)
else:
fig = Figure(colorbar=True, tight_layout=False, show=show,
xlabel=tex_escape("x"), ylabel=tex_escape("y"),
save=save, ticks=True, latex=latex)
if phi is None:
phi = -np.ones(len(nodes))
if charge is None:
charge = np.zeros(len(nodes))
if charge_max is None:
charge_max = max(np.max(np.abs(charge)), 1e-10)
cmap = plt.cm.get_cmap('Greys')
cmap._init()
cmap._lut[:, :] = 0.
length = len(cmap._lut[:, -1])
# cmap._lut[:, -1] = np.linspace(0., 1.0, length)
cmap._lut[:length//2, -1] = 0.
cmap._lut[length//2:, -1] = 1.
phi[phi > 1.] = 1.
phi[phi < -1.] = -1.
plt.tripcolor(nodes[:, 0], nodes[:, 1], elems, charge,
cmap=plt.get_cmap("coolwarm"), shading="gouraud",
vmin=-charge_max, vmax=charge_max)
plt.tricontourf(nodes[:, 0], nodes[:, 1], elems, phi,
cmap=cmap, levels=[-2.0, 0., 2.0], antialiased=True)
if u is not None:
Lx = nodes[:, 0].max()-nodes[:, 0].min()
Ly = nodes[:, 1].max()-nodes[:, 1].min()
dx = max(Lx, Ly)/num_intp
Nx = int(Lx/dx)
Ny = int(Ly/dx)
x_i, y_i = np.meshgrid(
np.linspace(dx+nodes[:, 0].min(),
nodes[:, 0].max()-dx, Nx),
np.linspace(dx+nodes[:, 1].min(),
nodes[:, 1].max()-dx, Ny))
triang = mtri.Triangulation(nodes[:, 0], nodes[:, 1], elems)
ux_interp = mtri.LinearTriInterpolator(triang, u[:, 0])
uy_interp = mtri.LinearTriInterpolator(triang, u[:, 1])
phi_interp = mtri.LinearTriInterpolator(triang, phi)
ux_i = ux_interp(x_i, y_i)
uy_i = uy_interp(x_i, y_i)
phi_i = phi_interp(x_i, y_i)
ux_i = np.array(ux_i.filled(0.))
uy_i = np.array(uy_i.filled(0.))
phi_i = np.array(phi_i.filled(0.))
u_norm = np.sqrt(ux_i**2 + uy_i**2)
lw = np.zeros_like(ux_i)
lw[:] += 5*u_norm/(u_norm.max() + 1e-10)
mask = np.zeros(ux_i.shape, dtype=bool)
mask[phi_i > 0.] = True
ux_i_2 = np.ma.array(ux_i, mask=mask)
fig.ax.streamplot(x_i, y_i,
ux_i_2, uy_i,
color="k",
density=0.6,
linewidth=lw)
mask = np.zeros(ux_i.shape, dtype=bool)
mask[phi_i < 0.] = True
ux_i_2 = np.ma.array(ux_i, mask=mask)
fig.ax.streamplot(x_i, y_i,
ux_i_2, uy_i,
color="w",
density=0.6,
linewidth=lw)
    return fig
def buildModelGPT(modelType='gpt2-medium'):
"""
    This function builds the GPT model and returns it together with its tokenizer.
"""
## Create Model
# Load pre-trained model tokenizer (vocabulary)
tokenizer = GPT2Tokenizer.from_pretrained(modelType)
# Load pre-trained model (weights)
model = GPT2LMHeadModel.from_pretrained(modelType)
# Set the model in evaluation mode to deactivate the DropOut modules
# This is IMPORTANT to have reproducible results during evaluation!
model.eval()
    return model, tokenizer
async def test_resume_all_workers_empty_json(aresponses):
"""Test resume_all_workers() method is handled correctly when given empty json."""
aresponses.add(
MATCH_HOST,
"/unmanic/api/v2/workers/worker/resume/all",
"POST",
aresponses.Response(
status=200,
headers={"Content-Type": "application/json"},
text="{}",
),
match_querystring=True,
)
async with ClientSession() as session:
unmanic = Unmanic(HOST, PORT, session=session)
with pytest.raises(UnmanicError):
            await unmanic.resume_all_workers()
def vid_to_list(filepath):
"""
Converts a video file to a list of 3d arrays of dim (h, w, c)
Input:
filepath: (str) full filepath of video
Output:
vid: (ndarray) list of 3d numpy arrays, of shape (height, width, color)
"""
cap = cv.VideoCapture(filepath)
list_of_frames = []
while True:
ret, frame = cap.read()
if ret:
frame = cv.cvtColor(frame, cv.COLOR_BGR2RGB)
list_of_frames.append(frame)
else:
break
    cap.release()
    return list_of_frames
def match_aws_gcp(gcp_index_path, aws_index_paths):
"""Match AWS and GCP metadata datasets
Note this assumes that if the _folder_ exists, then all files exist in the
COGS bucket. I.e. it doesn't check that all files are correctly there.
"""
gcp_df = load_google_metadata(gcp_index_path)
for aws_df in load_aws_metadata(aws_index_paths):
merged = aws_df.merge(
gcp_df,
left_index=True,
right_index=True,
how='left',
indicator=True)
        # TODO: indicator validation?
        # NOTE: the probes below are leftover debugging expressions with no
        # effect; the original referenced an undefined `test` variable, which
        # is assumed to be the `merged` frame.
        gcp_df[(gcp_df['mgrs_tile'] == '53RLP')]
        gcp_df[(gcp_df['mgrs_tile'] == '53RLP')
               & (gcp_df['sensing_day'] == pd.to_datetime('2017-01-14'))]
        len(merged[merged['_merge'] == 'left_only'])
        merged[merged['_merge'] == 'left_only']
def from_pickle(fname=interpolator_path):
"""Loads grid inperpolator from pickle located at `fname`.
"""
with open(fname, "rb") as f:
grid = pickle.load(f)
    return grid
def click_instance_center(instance_name:str, ocr_result:list):
"""鼠标单击instance_name
Args:
instance_name (str): 实例名称
ocr_result (list): 识别结果列表
"""
if len(ocr_result) == 0:
return
ret_list = [ line[0] for line in ocr_result if instance_name in line[1][0] ]
if len(ret_list) == 0:
        logger.info('{} not found on the current page'.format(instance_name))
return
else:
ret = ret_list[0]
x = int((ret[0][0]+ret[1][0])/2)
y = int((ret[1][1]+ret[2][1])/2)
        zoom_left_click(x, y)
def test_drop_privs():
"""
test that privileges are dropped when jsonp is requested
so that we cannot get private data
"""
tiddler = Tiddler('private')
tiddler.bag = 'foo_private'
tiddler.text = 'some text'
store.put(tiddler)
user_cookie = get_auth('foo', 'foobar')
callback = 'callback'
response, _ = http.request('http://foo.0.0.0.0:8080/bags/'
'foo_private/tiddlers/private?callback=%s' % callback,
method='GET',
headers={
'Cookie': 'tiddlyweb_user="%s"' % user_cookie,
'Accept': 'application/json'
})
assert response['status'] == '401'
response, _ = http.request('http://foo.0.0.0.0:8080/recipes/'
'foo_private/tiddlers/private?callback=%s' % callback,
method='GET',
headers={
'Cookie': 'tiddlyweb_user="%s"' % user_cookie,
'Accept': 'application/json'
})
assert response['status'] == '401'
response, _ = http.request('http://foo.0.0.0.0:8080/bags/foo_private?'
'callback=%s' % callback,
method='GET',
headers={
'Cookie': 'tiddlyweb_user="%s"' % user_cookie,
'Accept': 'application/json'
})
assert response['status'] == '401'
response, _ = http.request('http://foo.0.0.0.0:8080/recipes/foo_private?'
'callback=%s' % callback,
method='GET',
headers={
'Cookie': 'tiddlyweb_user="%s"' % user_cookie,
'Accept': 'application/json'
})
assert response['status'] == '401'
response, _ = http.request('http://foo.0.0.0.0:8080/bags/foo_private/'
'tiddlers?callback=%s' % callback,
method='GET',
headers={
'Cookie': 'tiddlyweb_user="%s"' % user_cookie,
'Accept': 'application/json'
})
assert response['status'] == '401'
response, _ = http.request('http://foo.0.0.0.0:8080/recipes/foo_private/'
'tiddlers?callback=%s' % callback,
method='GET',
headers={
'Cookie': 'tiddlyweb_user="%s"' % user_cookie,
'Accept': 'application/json'
})
assert response['status'] == '401'
response, _ = http.request('http://foo.0.0.0.0:8080/bags/'
'foo_private/tiddlers/private',
method='GET',
headers={
'Cookie': 'tiddlyweb_user="%s"' % user_cookie,
'Accept': 'application/json'
})
    assert response['status'] == '200'
def get_prob_and_custom_prob_per_crops(
logits_for_patches,
img_size_work_px_space,
n_pixels_in_crop,
descendent_specifier,
target_list,
rf,
DEVICE,
):
"""Determine the probability and the custom probability (i.e. the non-Deep-Learning "logit", cf. Appendix C.2) for crops according to the descendent_specifier, i.e. either each crop or only the four corner crops.
    Note that for the grouping of patches into one crop, each directly neighboring patch is considered (stride 1: logits_for_patches_reshaped[start_row:stop_row:stride_1, start_col:stop_col:stride_1]). This enables us to select either the data for all crops or only the data for the corner crops. This is in contrast to the value that was used to train and evaluate BagNet-33 (stride = 8).
Args:
logits_for_patches: logit predictions for each patch
torch tensor, dtype = torch.float32
np_array of dimensions n_patches x 1000
img_size_work_px_space: number of image pixels in latest parent
n_pixels_in_crop: size of child crop
descendent_specifier: choice between selecting all crops ("stride1") or only four corner crops ("Ullman4")
target_list: list of targets
rf: number of pixels in image crop for BagNet-33
rf stands for receptive field size
Returns:
prob_per_crop: list of length n_crops^2 containing the probabilities per relevant crop
custom_prob_per_crop: list of length n_crops^2 containing the custom probabilities per relevant crop
"""
    # When the crop is larger than 33x33 (or in fact 37x37, because that's the
    # next larger pixel size appearing in the decreasing order of pixel sizes
    # when shrinking each crop to 80% starting from 224 pixels), group patches
    # into crops to calculate the probabilities and the custom probabilities
if img_size_work_px_space > 37:
# calculate how many crops there are
n_crops = img_size_work_px_space - n_pixels_in_crop + 1
        # calculate how many patches contribute to one crop in one dimension
        # (i.e. width or height)
n_patches_contribute_to_crop = n_pixels_in_crop - rf + 1
# make matrix square instead of one-dimensional along the patch-axis
patch_square_length = int(np.sqrt(logits_for_patches.size()[0]))
logits_for_patches_reshaped = torch.reshape(
logits_for_patches,
(patch_square_length,
patch_square_length,
logits_for_patches.shape[1]),
)
# loop through each crop
prob_per_crop = []
custom_prob_per_crop = []
for start_row in range(n_crops):
stop_row = start_row + n_patches_contribute_to_crop
for start_col in range(n_crops):
stop_col = start_col + n_patches_contribute_to_crop
# average logits over patches
logit_avg_of_cur_patch = torch.mean(
torch.mean(
logits_for_patches_reshaped[
start_row:stop_row, start_col:stop_col
],
dim=0,
),
dim=0,
)
# calculate probabilities
prob_for_targets_summed = get_prob_for_logits(
logit_avg_of_cur_patch[None, :], target_list
)
prob_per_crop.append(prob_for_targets_summed)
# calculate custom probabilities
cur_custom_prob_per_crop = get_custom_prob(
logit_avg_of_cur_patch[None, :], target_list, DEVICE
)
custom_prob_per_crop.append(cur_custom_prob_per_crop[0])
# patches correspond to crops
else:
custom_prob_per_crop = get_custom_prob(
logits_for_patches, target_list, DEVICE)
prob_per_crop = list(
get_prob_for_logits(
logits_for_patches,
target_list))
# if only the four corner crops are of interest ("Ullman4"), get that data
# only
if descendent_specifier == "Ullman4":
prob_per_crop, custom_prob_per_crop = extract_corner_data_for_Ullman4(
prob_per_crop, custom_prob_per_crop
)
return prob_per_crop, custom_prob_per_crop | 471 |
def xywh_to_xyxy(boxes):
"""Convert [x y w h] box format to [x1 y1 x2 y2] format."""
if boxes is None or len(boxes) == 0:
return boxes
boxes = np.array(boxes)
return np.hstack((boxes[:, 0:2], boxes[:, 0:2] + boxes[:, 2:4] - 1)) | 472 |
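A quick check of xywh_to_xyxy (note the -1 convention for the inclusive bottom-right corner):

import numpy as np

boxes_xywh = [[10, 20, 30, 40]]     # x, y, width, height
print(xywh_to_xyxy(boxes_xywh))     # [[10 20 39 59]]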
def unionanlex():
    """Builds the lexical analyzer by taking the union of several NFAs
    """
    lst = []  # List that collects the IDs of the NFAs to union
    id = 0
    while id != -1:
        print("Chosen NFAs:\n", lst)
        print("Pick an ID from the available NFAs (finish with -1):")
        imprimirAFs()
        id = leerID("ID: ", -1)
        if id == -1:
            break
        lst.append(id)
    lst = list(set(lst))  # Remove duplicates
    # Union the NFAs and build the lexical analyzer
    analizador.union([afns[i] for i in lst])
    if analizador.afn is not None:
        print("Special union performed correctly")
        guardarAFN(copy.deepcopy(analizador.afn))
    else:
        print("The special union could not be performed") | 473
def vscat(a,fig=None,ls=None,marker='o',nmin=2,mhmin=-3,density=False,out=None) :
""" Make histograms of VSCATTER for different bins of Teff H], given min NVISITS, and min [M/H]
"""
if fig == None : fig,ax=plots.multi(4,6,hspace=0.001,wspace=0.4,figsize=(12,8))
else : fig,ax=fig
tbins=[3000,3500,4000,4500,5500,8000,30000]
hbins=[8,11,12,13,15]
try: snr = a['SNREV']
except: snr=a['SNR']
j=np.where(snr > 300) [0]
snr[j] = 300
for i in range(len(tbins)-1) :
ax[i,0].text(0.9,0.9,'{:d}<=RV_TEFF<{:d}'.format(tbins[i],tbins[i+1]),ha='right',transform=ax[i,0].transAxes,fontsize=8)
for j in range(len(hbins)-1) :
ax[0,j].set_title('{:d}<=H<{:d}'.format(hbins[j],hbins[j+1]))
gd = np.where((a['RV_TEFF']>=tbins[i]) & (a['RV_TEFF']<tbins[i+1]) &
(a['H']>=hbins[j]) & (a['H']<hbins[j+1]) &
(a['NVISITS']>nmin) & (a['RV_FEH']>mhmin) & (a['VSCATTER'] > 0)) [0]
print(tbins[i],tbins[i+1],hbins[j],hbins[j+1],nmin,len(gd))
try :
#plots.plotc(ax[i,2],snr[gd],a['VSCATTER'][gd],a['RV_FEH'][gd],marker=marker,xr=[0,310],yr=[0,1],xt='S/N',yt='VSCATTER')
ax[i,j].hist(a['VSCATTER'][gd],bins=np.arange(0,1,0.01),ls=ls,histtype='step',color=colors[j],normed=density)
ax[i,j].set_xlabel('VSCATTER (km/s)')
ax[i,j].plot([0.1,0.1],ax[i,j].get_ylim())
#ax[i,1].hist(a['VSCATTER'][gd],bins=np.arange(0,1,0.01),histtype='step',cumulative=True,normed=True,ls=ls,color=colors[j])
#ax[i,1].set_xlabel('VSCATTER')
except : pass
if out is not None :
fig.savefig(out+'.png')
plt.close()
fig.suptitle('NVISITS>{:d} [M/H]>{:6.2f}'.format(nmin,mhmin))
return fig,ax | 474 |
def install(ctx, platform, arch, java_version='8'):
"""
Download and extract the Java JDK.
"""
global java_home, java_dir
try:
url = azul_jdk[java_version][f'{platform}:{arch}']
except KeyError:
raise NotImplementedError(
f'Unsupported java version, platform or architecture: {java_version}, {platform}, {arch}')
file_name = url.split('/')[-1]
base_dir = Path.cwd()
downloaded_path = base_dir / file_name
if not downloaded_path.exists():
_download_url(url, str(downloaded_path))
java_home = java_dir / file_name
if not java_home.exists():
_extract_archive(downloaded_path, java_dir) | 475 |
def parse_unique_count_for_column(column_df, column):
"""
    returns column-specific distribution details.
    sample output:
    ```
    "<column>": {
        "<value>": 30
}
```
"""
return {column: get_unique_counts_of_column(column_df)} | 476 |
def transaction():
"""
Get database transaction object
:return: _TransactionContext object
usage:
with transaction():
# transactions operation
pass
>>> def update_profile(t_id, name, rollback):
... u = dict(id=t_id, name=name, email='%[email protected]' % name, password=name, last_modified=time.time())
... insert('testuser', **u)
... update('update testuser set password=%s where id=%s', name.upper(), t_id)
... if rollback:
... raise StandardError('will cause rollback...')
>>> with transaction():
... update_profile(900301, 'Python', False)
>>> select_one('select * from testuser where id=%s', 900301).name
u'Python'
>>> with transaction():
... update_profile(900302, 'Ruby', True)
Traceback (most recent call last):
...
StandardError: will cause rollback...
>>> select('select * from testuser where id=%s', 900302)
[]
"""
return _TransactionContext() | 477 |
def _BoundedIntRange(
description: str = "",
description_tooltip: str = None,
layout: Union[Dict[str, Any], Element[ipywidgets.widgets.widget_layout.Layout]] = {},
max: int = 100,
min: int = 0,
style: Union[Dict[str, Any], Element[ipywidgets.widgets.widget_description.DescriptionStyle]] = {},
value: tuple = (0, 1),
on_description: typing.Callable[[str], Any] = None,
on_description_tooltip: typing.Callable[[str], Any] = None,
on_layout: typing.Callable[[Union[Dict[str, Any], Element[ipywidgets.widgets.widget_layout.Layout]]], Any] = None,
on_max: typing.Callable[[int], Any] = None,
on_min: typing.Callable[[int], Any] = None,
on_style: typing.Callable[[Union[Dict[str, Any], Element[ipywidgets.widgets.widget_description.DescriptionStyle]]], Any] = None,
on_value: typing.Callable[[tuple], Any] = None,
) -> Element[ipywidgets.widgets.widget_int._BoundedIntRange]:
"""
:param description: Description of the control.
:param description_tooltip: Tooltip for the description (defaults to description).
:param max: Max value
:param min: Min value
:param style: Styling customizations
:param value: Tuple of (lower, upper) bounds
"""
kwargs: Dict[Any, Any] = without_default(_BoundedIntRange, locals())
if isinstance(kwargs.get("layout"), dict):
kwargs["layout"] = Layout(**kwargs["layout"])
if isinstance(kwargs.get("style"), dict):
kwargs["style"] = DescriptionStyle(**kwargs["style"])
widget_cls = ipywidgets.widgets.widget_int._BoundedIntRange
comp = react.core.ComponentWidget(widget=widget_cls)
return Element(comp, **kwargs) | 478 |
def load_static_mnist(args, **kwargs):
"""
Dataloading function for static mnist. Outputs image data in vectorized form: each image is a vector of size 784
"""
args.dynamic_binarization = False
args.input_type = 'binary'
args.input_size = [1, 28, 28]
# start processing
def lines_to_np_array(lines):
return np.array([[int(i) for i in line.split()] for line in lines])
with open(os.path.join('data', 'MNIST_static', 'binarized_mnist_train.amat')) as f:
lines = f.readlines()
x_train = lines_to_np_array(lines).astype('float32')
with open(os.path.join('data', 'MNIST_static', 'binarized_mnist_valid.amat')) as f:
lines = f.readlines()
x_val = lines_to_np_array(lines).astype('float32')
with open(os.path.join('data', 'MNIST_static', 'binarized_mnist_test.amat')) as f:
lines = f.readlines()
x_test = lines_to_np_array(lines).astype('float32')
# shuffle train data
np.random.shuffle(x_train)
# idle y's
y_train = np.zeros((x_train.shape[0], 1))
y_val = np.zeros((x_val.shape[0], 1))
y_test = np.zeros((x_test.shape[0], 1))
# pytorch data loader
train = data_utils.TensorDataset(torch.from_numpy(x_train), torch.from_numpy(y_train))
train_loader = data_utils.DataLoader(train, batch_size=args.batch_size, shuffle=True, **kwargs)
validation = data_utils.TensorDataset(torch.from_numpy(x_val).float(), torch.from_numpy(y_val))
val_loader = data_utils.DataLoader(validation, batch_size=args.batch_size, shuffle=False, **kwargs)
test = data_utils.TensorDataset(torch.from_numpy(x_test).float(), torch.from_numpy(y_test))
test_loader = data_utils.DataLoader(test, batch_size=args.batch_size, shuffle=False, **kwargs)
return train_loader, val_loader, test_loader, args | 479 |
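A usage sketch, under the assumption that args only needs batch_size (the loader fills in the rest) and that the .amat files live under data/MNIST_static/:

from types import SimpleNamespace

args = SimpleNamespace(batch_size=100)  # hypothetical stand-in for the argparse namespace
train_loader, val_loader, test_loader, args = load_static_mnist(args)
# args now also carries input_type='binary' and input_size=[1, 28, 28]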
def get_benchmark_snapshot(benchmark_df,
threshold=_MIN_FRACTION_OF_ALIVE_TRIALS_AT_SNAPSHOT):
"""Finds the latest time where |threshold| fraction of the trials were still
running. In most cases, this is the end of the experiment. However, if less
than |threshold| fraction of the trials reached the end of the experiment,
then we will use an earlier "snapshot" time for comparing results.
Returns a data frame that only contains the measurements of the picked
snapshot time.
"""
# Allow overriding threshold with environment variable as well.
threshold = environment.get('BENCHMARK_SAMPLE_NUM_THRESHOLD', threshold)
num_trials = benchmark_df.trial_id.nunique()
trials_running_at_time = benchmark_df.time.value_counts()
criteria = trials_running_at_time >= threshold * num_trials
ok_times = trials_running_at_time[criteria]
latest_ok_time = ok_times.index.max()
benchmark_snapshot_df = benchmark_df[benchmark_df.time == latest_ok_time]
return benchmark_snapshot_df | 480 |
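A worked example of the snapshot rule, assuming no BENCHMARK_SAMPLE_NUM_THRESHOLD override is set in the environment:

import pandas as pd

benchmark_df = pd.DataFrame({
    'trial_id': [1, 2, 1, 2, 1],
    'time':     [0, 0, 1, 1, 2],
})
# With threshold=0.8 and 2 trials, a time qualifies only while both trials
# are alive, so the snapshot is taken at time=1 rather than time=2.
snapshot = get_benchmark_snapshot(benchmark_df, threshold=0.8)
assert set(snapshot.time) == {1}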
def configChromeDriver(webVisible: bool = True, filePathDownload: str = None, filePathWebDriver: str = None) -> WebDriver:
    """
    Configure your Chrome Driver:
    - webVisible ==> Defaults to True, which hides the webDriver (headless mode).
    - filePathDownload ==> If no folder for downloads is given ("nameFolder\\folderDownload"), a Downloads folder is created at the project root by default.
    - filePathWebDriver ==> Pass the full path, including the executable ("nameFolder\\91\\chromedriver.exe").
      By default the root folder (webDriver) is used; if it does not exist, create it and place the driver folder inside, named after the version number.
    """
from selenium.webdriver.chrome.options import Options
from selenium import webdriver
from os import getcwd
filePathDownload = filePathDownload or pathDownload()
filePathWebDriver = filePathWebDriver or fr"{getcwd()}\webDriver\{lastWebDriver()}\chromedriver.exe"
options = Options()
options.headless = webVisible
prefs = {"download.default_directory": filePathDownload}
options.add_experimental_option("prefs", prefs)
options.add_experimental_option('excludeSwitches', ['enable-logging'])
options.add_argument("--lang=pt")
return webdriver.Chrome(executable_path=filePathWebDriver, options=options) | 481 |
def create_trigger_function_with_trigger(server, db_name, schema_name,
func_name):
"""This function add the trigger function to schema"""
try:
connection = utils.get_db_connection(db_name,
server['username'],
server['db_password'],
server['host'],
server['port'],
server['sslmode'])
pg_cursor = connection.cursor()
query = "CREATE FUNCTION " + schema_name + "." + func_name + \
"()" \
" RETURNS trigger LANGUAGE 'plpgsql' STABLE LEAKPROOF" \
" SECURITY DEFINER SET enable_sort=true AS $BODY$ BEGIN" \
" NULL; END; $BODY$"
pg_cursor.execute(query)
connection.commit()
# Get 'oid' from newly created function
pg_cursor.execute("SELECT pro.oid, pro.proname FROM"
" pg_proc pro WHERE pro.proname='%s'" %
func_name)
functions = pg_cursor.fetchone()
connection.close()
return functions
except Exception:
traceback.print_exc(file=sys.stderr) | 482 |
def get_results_seq_len(given_true_eig,
hidden_dim,
input_dim,
min_seq_len,
max_seq_len,
num_sampled_seq_len,
num_repeat,
input_mean,
input_stddev,
output_noise_stddev,
init_state_mean=0.0,
init_state_stddev=0.0,
generate_diagonalizable_only=False,
random_seed=0):
"""Get results for varying sequence lengths.
Args:
given_true_eig: Ground truth of eigenvalues. If None, generate random
eigenvalues from uniform [-1,1] in each repeat of experiment.
hidden_dim: Assumed hidden dim. If 0, use true hidden dim.
input_dim: The input dim.
min_seq_len: Min seq len in experiments.
max_seq_len: Max seq len in experiments.
num_sampled_seq_len: Number of sampled seq len values in between min and max
seq len.
num_repeat: Number of repeated experiments for each seq_len.
input_mean: Scalar or 1D array of length hidden state dim.
    input_stddev: Scalar or 1D array of length hidden state dim.
output_noise_stddev: Scalar.
init_state_mean: Scalar or 1D array of length hidden state dim.
    init_state_stddev: Scalar or 1D array of length hidden state dim.
generate_diagonalizable_only: Whether to only use diagonalizable LDSs in
simulations.
random_seed: Random seed, integer.
  Returns:
    A pandas DataFrame with columns `method`, `seq_len`, `t_secs`,
    `l2_a_error`, `l2_r_error`, and `failed_convg`.
    The same method and seq_len will appear in num_repeat many rows.
"""
np.random.seed(random_seed)
progress_bar = tqdm.tqdm(total=num_repeat * num_sampled_seq_len)
gen = lds.SequenceGenerator(
input_mean=input_mean,
input_stddev=input_stddev,
output_noise_stddev=output_noise_stddev,
init_state_mean=init_state_mean,
init_state_stddev=init_state_stddev)
# seq_len_vals = np.linspace(min_seq_len, max_seq_len, num_sampled_seq_len)
# seq_len_vals = [int(round(x)) for x in seq_len_vals]
min_inv_sqrt_seq_len = 1. / np.sqrt(max_seq_len)
max_inv_sqrt_seq_len = 1. / np.sqrt(min_seq_len)
inv_sqrt_seq_len_vals = np.linspace(min_inv_sqrt_seq_len,
max_inv_sqrt_seq_len, num_sampled_seq_len)
seq_len_vals = [int(round(1. / (x * x))) for x in inv_sqrt_seq_len_vals]
learning_fns = create_learning_fns(hidden_dim)
metric_dict = {
k: [] for k in [
'method', 'seq_len', 't_secs', 'l2_a_error', 'l2_r_error',
'failed_convg'
]
}
  for _ in range(num_repeat):
if given_true_eig is not None:
ground_truth = lds.generate_linear_dynamical_system(
hidden_dim, input_dim, eigvalues=given_true_eig)
else:
ground_truth = lds.generate_linear_dynamical_system(
hidden_dim, input_dim, diagonalizable=generate_diagonalizable_only)
true_eig = ground_truth.get_spectrum()
for seq_len in seq_len_vals:
seq = gen.generate_seq(ground_truth, seq_len=seq_len)
      for k, fn in learning_fns.items():
start_t = timeit.default_timer()
with warnings.catch_warnings(record=True) as caught:
warnings.filterwarnings(
'always', category=sm_exceptions.ConvergenceWarning)
if FLAGS.hide_inputs:
eig_pred = fn(seq.outputs, None)
else:
eig_pred = fn(seq.outputs, seq.inputs)
t_elapsed = timeit.default_timer() - start_t
metric_dict['seq_len'].append(seq_len)
metric_dict['method'].append(k)
metric_dict['t_secs'].append(t_elapsed)
metric_dict['l2_a_error'].append(np.linalg.norm(true_eig - eig_pred))
metric_dict['l2_r_error'].append(
np.linalg.norm(true_eig - eig_pred) / np.linalg.norm(true_eig))
metric_dict['failed_convg'].append(False)
for w in caught:
if w.category in [
RuntimeWarning, sm_exceptions.ConvergenceWarning,
sm_exceptions.HessianInversionWarning
]:
metric_dict['failed_convg'][-1] = True
else:
warnings.warn(w.message, w.category)
progress_bar.update(1)
progress_bar.close()
return pd.DataFrame(data=metric_dict) | 483 |
def test_basic_cli(tmp_path, scope_files):
"""Test that basic cli works."""
cmd = ["mokapot", scope_files[0], "--dest_dir", tmp_path]
subprocess.run(cmd, check=True)
assert Path(tmp_path, "mokapot.psms.txt").exists()
assert Path(tmp_path, "mokapot.peptides.txt").exists() | 484 |
def episode_to_timestep_batch(
episode: rlds.BatchedStep,
return_horizon: int = 0,
drop_return_horizon: bool = False,
flatten_observations: bool = False,
calculate_episode_return: bool = False) -> tf.data.Dataset:
"""Converts an episode into multi-timestep batches.
Args:
episode: Batched steps as provided directly by RLDS.
return_horizon: int describing the horizon to which we should accumulate the
return.
drop_return_horizon: bool whether we should drop the last `return_horizon`
steps to avoid mis-calculated returns near the end of the episode.
flatten_observations: bool whether we should flatten dict-based observations
into a single 1-d vector.
calculate_episode_return: Whether to calculate episode return. Can be an
expensive operation on datasets with many episodes.
Returns:
    rl_dataset.DatasetType of 3-batched transitions, with scalar rewards
    expanded to 1D rewards.
This means that for every step, the corresponding elements will be a batch of
size 3, with the first batched element corresponding to *_t-1, the second to
*_t and the third to *_t+1, e.g. you can access the previous observation as:
```
o_tm1 = el[types.OBSERVATION][0]
```
Two additional keys can be added: 'R_t' which corresponds to the undiscounted
return for horizon `return_horizon` from time t (always present), and
'R_total' which corresponds to the total return of the associated episode (if
`calculate_episode_return` is True). Rewards are converted to be (at least)
one-dimensional, prior to batching (to avoid ()-shaped elements).
  In this example, 0-valued observations correspond to o_{t-1}, 1-valued
  observations correspond to o_t, and 2-valued observations correspond to
  o_{t+1}. This same structure is true for all keys, except 'R_t' and 'R_total'
  which are both scalars.
```
ipdb> el[types.OBSERVATION]
<tf.Tensor: shape=(3, 11), dtype=float32, numpy=
array([[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.],
[2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2.]], dtype=float32)>
```
"""
steps = episode[rlds.STEPS]
if drop_return_horizon:
episode_length = steps.cardinality()
steps = steps.take(episode_length - return_horizon)
# Calculate n-step return:
rewards = steps.map(lambda step: step[rlds.REWARD])
batched_rewards = rlds.transformations.batch(
rewards, size=return_horizon, shift=1, stride=1, drop_remainder=True)
returns = batched_rewards.map(tf.math.reduce_sum)
output = tf.data.Dataset.zip((steps, returns)).map(_append_n_step_return)
# Calculate total episode return for potential filtering, use total # of steps
# to calculate return.
if calculate_episode_return:
dtype = jnp.float64 if jax.config.jax_enable_x64 else jnp.float32
# Need to redefine this here to avoid a tf.data crash.
rewards = steps.map(lambda step: step[rlds.REWARD])
episode_return = rewards.reduce(dtype(0), lambda x, y: x + y)
output = output.map(
functools.partial(
_append_episode_return, episode_return=episode_return))
output = output.map(_expand_scalars)
if flatten_observations:
output = output.map(_flatten_observations)
output = rlds.transformations.batch(
output, size=3, shift=1, drop_remainder=True)
return output | 485 |
def test_async_without_ambi():
"""Equivalent example without ambisync"""
async def testmain():
async def foo():
await asyncio.sleep(1.3)
print('foo')
asyncio.create_task(foo())
test = NoAmbiAsync('noambi test')
await test.my_method('noambi arg')
asyncio.run(testmain()) | 486 |
def parse(authz_file, modules):
"""Parse a Subversion authorization file.
Return a dict of modules, each containing a dict of paths, each containing
a dict mapping users to permissions. Only modules contained in `modules`
are retained.
"""
parser = UnicodeConfigParser(ignorecase_option=False)
parser.read(authz_file)
groups = {}
aliases = {}
sections = {}
for section in parser.sections():
if section == 'groups':
for name, value in parser.items(section):
groups.setdefault(name, set()).update(to_list(value))
elif section == 'aliases':
for name, value in parser.items(section):
aliases[name] = value.strip()
else:
for name, value in parser.items(section):
parts = section.split(':', 1)
module, path = parts[0] if len(parts) > 1 else '', parts[-1]
if module in modules:
sections.setdefault((module, path), []) \
.append((name, value))
def resolve(subject, done):
if subject.startswith('@'):
done.add(subject)
for members in groups[subject[1:]] - done:
for each in resolve(members, done):
yield each
elif subject.startswith('&'):
yield aliases[subject[1:]]
else:
yield subject
authz = {}
    for (module, path), items in sections.items():
section = authz.setdefault(module, {}).setdefault(path, {})
for subject, perms in items:
readable = 'r' in perms
# Ordering isn't significant; any entry could grant permission
section.update((user, readable)
for user in resolve(subject, set())
if not section.get(user))
return authz | 487 |
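A hedged end-to-end sketch of parse(): given an illustrative authz file like the one below, groups and aliases are resolved into the nested module -> path -> user dict described in the docstring.

# authz.ini (illustrative):
#
#   [groups]
#   devs = alice, &bob
#   [aliases]
#   bob = robert
#   [/trunk]
#   @devs = r
#   * =
#
# parse('authz.ini', ['']) would yield roughly:
#   {'': {'/trunk': {'alice': True, 'robert': True, '*': False}}}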
def send_expiry_note(invite, request, user_name):
"""
Send a notification email to the issuer of an invitation when a user
attempts to accept an expired invitation.
:param invite: ProjectInvite object
:param request: HTTP request
:param user_name: User name of invited user
    :return: Number of emails sent (int)
"""
subject = (
SUBJECT_PREFIX
+ ' '
+ SUBJECT_EXPIRY.format(
user_name=user_name, project=invite.project.title
)
)
message = get_email_header(
MESSAGE_HEADER.format(
recipient=invite.issuer.get_full_name(), site_title=SITE_TITLE
)
)
message += MESSAGE_EXPIRY_BODY.format(
role=invite.role.name,
project=invite.project.title,
user_name=user_name,
user_email=invite.email,
date_expire=localtime(invite.date_expire).strftime('%Y-%m-%d %H:%M'),
site_title=SITE_TITLE,
project_label=get_display_name(invite.project.type),
)
if not settings.PROJECTROLES_EMAIL_SENDER_REPLY:
message += NO_REPLY_NOTE
message += get_email_footer()
return send_mail(subject, message, [invite.issuer.email], request) | 488 |
def main():
"""
Main function to run script.
"""
startTime = datetime.now()
logging.info("Running wcry_scanner")
cli_options = gather_cli_options()
rhosts_open_445 = run_masscan(cli_options['subnet'])
rhosts_ms17_vuln = run_nmap(rhosts_open_445)
results_to_file(rhosts_ms17_vuln, cli_options['outfile'])
logging.info("Completion time: {}".format(datetime.now() - startTime)) | 489 |
def level_is_between(level, min_level_value, max_level_value):
"""Returns True if level is between the specified min or max, inclusive."""
level_value = get_level_value(level)
if level_value is None:
# unknown level value
return False
    return min_level_value <= level_value <= max_level_value | 490
def voronoiMatrix(sz=512,percent=0.1,num_classes=27):
"""
Create voronoi polygons.
Parameters
----------
    sz : int
        row and column size of the space in which the polygons are placed
percent : float
Percent of the space to place down centers of the voronoi polygons.
Smaller percent makes the polygons larger
num_classes : int
Number of classes to assign to each of the voronoi polygons
Returns
-------
X : 2D array
Array containing all voronoi polygons
"""
X = np.zeros((sz,sz))
#fill in percentage of the space
locs = np.random.rand(sz,sz)<=percent
vals = np.random.randint(0,num_classes,size=(sz,sz))
X[locs]=vals[locs]
#get all the indices of the matrix
cc,rr = np.meshgrid(np.arange(0,sz),np.arange(0,sz))
f = np.zeros((sz**2,2))
f[:,0]=rr.ravel() #feature1
f[:,1]=cc.ravel() #feature2
t = X.ravel() #target
train_ind = locs.ravel()
f_train = f[train_ind]
t_train = t[train_ind]
clf = neighbors.KNeighborsClassifier(n_neighbors=1)
clf.fit(f_train, t_train)
preds = clf.predict(f)
locs = f.astype(int)
X[locs[:,0],locs[:,1]] = preds
return X | 491 |
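A minimal usage sketch (matplotlib assumed available for inspection):

import matplotlib.pyplot as plt

X = voronoiMatrix(sz=256, percent=0.01, num_classes=10)
plt.imshow(X, cmap='tab10')  # each flat-colored cell is one nearest-center region
plt.show()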
def genVBOF2(virus_record, model, model_name=None):
"""New version of the genVBOF function by Hadrien.
    Builds a Virus Biomass Objective Function (basically a virus biomass
    production reaction, from amino acids and nucleotides) from a genbank
    file.
Params:
- virus_record: genbank record of a virus (output from Bio.SeqIO.parse)
- model: a cobra metabolic model (cobra.core.model.Model)
Returns:
- virus biomass objective function (cobra.core.reaction.Reaction)
"""
met_dict = load_metabolite_id_dict(model, model_name=model_name)
# VIRUS IDENTIFICATION
taxonomy = " ".join([taxon.lower() for taxon in virus_record.annotations["taxonomy"]])
if "betacoronavirus" not in taxonomy:
raise NotImplementedError('Virus family is not supported: Unable to create VBOF. Consult _README')
short_name, full_name = get_virus_names(virus_record)
# AMINOACID COUNT
all_cds = {feature for feature in virus_record.features if feature.type == "CDS"}
# Check that our own virus_composition dict contain exactly the
# proteins defined in the genbank file, no more, no less.
protein_names_in_gb_file = {cds.qualifiers["product"][0] for cds in all_cds}
protein_names_in_our_data = {protein_name for protein_name in virus_composition[short_name]["proteins"]}
assert protein_names_in_gb_file == protein_names_in_our_data
virus_aa_composition = Counter()
    # protein name -> number of ATP consumed by its peptide bond formation
    # (accounting for the number of copies of the protein)
peptide_bond_formation = dict()
for cds in all_cds:
protein_name = cds.qualifiers["product"][0]
aa_sequence = cds.qualifiers["translation"][0]
aa_count = Counter(aa_sequence)
copies_per_virus = virus_composition[short_name]["proteins"][protein_name]
virus_aa_composition += multiply_counter(aa_count, copies_per_virus)
peptide_bond_formation[protein_name] = (len(aa_sequence) * k_atp_protein - k_atp_protein) * copies_per_virus
# [3] Precursor frequency
# Genome [Nucleotides]
Cg = virus_composition[short_name]["Cg"] # number of genome copies per virus
virus_nucl_count = Counter(str(virus_record.seq))
countA = virus_nucl_count["A"]
countC = virus_nucl_count["C"]
countG = virus_nucl_count["G"]
countU = virus_nucl_count["T"] # Base 'T' is pseudo for base 'U'
antiA = countU
antiC = countG
antiG = countC
antiU = countA
# Count summation
totNTPS = (Cg * (countA + countC + countG + countU + antiA + antiC + antiG + antiU))
totAA = sum(count for count in virus_aa_composition.values())
# [4] VBOF Calculations
# Nucleotides
# mol.ntps/mol.virus
V_a = (Cg*(countA + antiA))
V_c = (Cg*(countC + antiC))
V_g = (Cg*(countG + antiG))
V_u = (Cg*(countU + antiU))
# g.ntps/mol.virus
G_a = V_a * ntpsDict["atp"]
G_c = V_c * ntpsDict["ctp"]
G_g = V_g * ntpsDict["gtp"]
G_u = V_u * ntpsDict["ttp"]
# Amino Acids
# g.a/mol.virus
G_aa = {aa: count * aaDict[aa] for aa, count in virus_aa_composition.items()}
# Total genomic and proteomic molar mass
M_v = (G_a + G_c + G_g + G_u) + sum(G_aa.values())
# Stoichiometric coefficients
# Nucleotides [mmol.ntps/g.virus] (for the genome)
S_atp = 1000 * (V_a/M_v)
S_ctp = 1000 * (V_c/M_v)
S_gtp = 1000 * (V_g/M_v)
S_utp = 1000 * (V_u/M_v)
# Amino acids [mmol.aa/g.virus]
S_aa = {aa: 1000 * V_aa / M_v for aa, V_aa in virus_aa_composition.items()}
# Energy requirements
# Genome: Phosphodiester bond formation products [Pyrophosphate]
# SARS Cov 2 is a single stranded RNA virus: it has to first do an
# intermediary reverse copy of itself and then replicate itself from
# that intermediary strand.
genTemp = (((countA + countC + countG + countU) * k_ppi) - k_ppi)
genRep = (((antiA + antiC + antiG + antiU) * k_ppi) - k_ppi)
genTot = genTemp + genRep
V_ppi = genTot
S_ppi = 1000 * (V_ppi / M_v)
# Proteome: Peptide bond formation [ATP + H2O]
    # Note: ATP used in this process is denoted as ATPe/Ae [e = energy version]
V_Ae = sum(peptide_bond_formation.values())
S_Ae = 1000 * (V_Ae / M_v)
# [5] VBOF Reaction formatting and output
# Left-hand terms: Nucleotides
# Note: ATP term is a summation of genome and energy requirements
S_ATP = (S_atp + S_Ae) * -1
S_CTP = S_ctp * -1
S_GTP = S_gtp * -1
S_UTP = S_utp * -1
# Left-hand terms: Amino Acids
S_AAf = {aa: -coef for aa, coef in S_aa.items()}
# Left-hand terms: Energy Requirements
S_H2O = S_Ae * -1
# Right-hand terms: Energy Requirements
S_ADP = S_Ae
S_Pi = S_Ae
S_H = S_Ae
S_PPi = S_ppi
reaction_name = short_name + '_prodrxn_VN'
virus_reaction = Reaction(reaction_name)
virus_reaction.name = full_name + ' production reaction'
virus_reaction.subsystem = 'Virus Production'
virus_reaction.lower_bound = 0
virus_reaction.upper_bound = 1000
virus_reaction.add_metabolites(({
met_dict['atp']: S_ATP,
met_dict['ctp']: S_CTP,
met_dict['gtp']: S_GTP,
met_dict['utp']: S_UTP,
met_dict['A']: S_AAf['A'],
met_dict['R']: S_AAf['R'],
met_dict['N']: S_AAf['N'],
met_dict['D']: S_AAf['D'],
met_dict['C']: S_AAf['C'],
met_dict['Q']: S_AAf['Q'],
met_dict['E']: S_AAf['E'],
met_dict['G']: S_AAf['G'],
met_dict['H']: S_AAf['H'],
met_dict['I']: S_AAf['I'],
met_dict['L']: S_AAf['L'],
met_dict['K']: S_AAf['K'],
met_dict['M']: S_AAf['M'],
met_dict['F']: S_AAf['F'],
met_dict['P']: S_AAf['P'],
met_dict['S']: S_AAf['S'],
met_dict['T']: S_AAf['T'],
met_dict['W']: S_AAf['W'],
met_dict['Y']: S_AAf['Y'],
met_dict['V']: S_AAf['V'],
met_dict['h2o']: S_H2O,
met_dict['adp']: S_ADP,
met_dict['Pi']: S_Pi,
met_dict['h']: S_H,
met_dict['PPi']: S_PPi}))
return virus_reaction | 492 |
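The stoichiometric coefficients above all follow S = 1000 * V / M_v, converting a per-virion mole count V into mmol per gram of virus. A toy check with made-up numbers (illustrative only, not real SARS-CoV-2 values):

V_a = 2 * (9000 + 8000)     # Cg * (countA + antiA): moles of ATP per virion
M_v = 4.0e7                 # total molar mass of one virion, g/mol
S_atp = 1000 * (V_a / M_v)  # mmol ATP per g of virus produced
print(round(S_atp, 4))      # 0.85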
def parse_args(arguments: Sequence, options: List[str] = None) -> Dict:
"""
Parse input arguments.
    Falls back to a simple parser when the awsglue module is not available (e.g. in pyshell jobs).
Parameters
----------
arguments
Sequence of options and values to be parsed. (sys.argv)
options
Options which value is resolved.
Returns
-------
Parsed options and values.
"""
LOGGER.debug("Parsing arguments: %s options: %s", arguments, options)
try:
import awsglue.utils as au
except ImportError:
return parse_args_fallback(arguments, options)
try:
resolved = au.getResolvedOptions(args=arguments, options=options)
LOGGER.debug("awsglue.utils args resolved: %s", resolved)
return resolved
except au.GlueArgumentError:
return parse_args_fallback(arguments, options) | 493 |
def get_current_ms_time() -> int:
"""
:return: the current time in milliseconds
"""
return int(time.time() * 1000) | 494 |
def get_service_info(): # noqa: E501
"""Get information about Workflow Execution Service.
May include information related (but not limited to) the workflow descriptor formats, versions supported, the WES API versions supported, and information about general service availability. # noqa: E501
:rtype: ServiceInfo
"""
return adapter.get_service_info() | 495 |
def str_of_tuple(d, str_format):
"""Convert tuple to str.
It's just str_format.format(*d). Why even write such a function?
(1) To have a consistent interface for key conversions
(2) We want a KeyValidationError to occur here
Args:
        d: tuple of params for str_format
str_format: Auto fields format string. If you have manual fields, consider auto_field_format_str to convert.
Returns:
parametrized string
>>> str_of_tuple(('hello', 'world'), "Well, {} dear {}!")
'Well, hello dear world!'
"""
try:
return str_format.format(*d)
except Exception as e:
raise KeyValidationError(e) | 496 |
def _intersect(bboxes1, bboxes2):
"""
bboxes: t x n x 4
"""
assert bboxes1.shape[0] == bboxes2.shape[0]
t = bboxes1.shape[0]
inters = np.zeros((bboxes1.shape[1], bboxes2.shape[1]), dtype=np.float32)
_min = np.empty((bboxes1.shape[1], bboxes2.shape[1]), dtype=np.float32)
_max = np.empty((bboxes1.shape[1], bboxes2.shape[1]), dtype=np.float32)
w = np.empty((bboxes1.shape[1], bboxes2.shape[1]), dtype=np.float32)
h = np.empty((bboxes1.shape[1], bboxes2.shape[1]), dtype=np.float32)
for i in range(t):
np.maximum.outer(bboxes1[i, :, 0], bboxes2[i, :, 0], out=_min)
np.minimum.outer(bboxes1[i, :, 2], bboxes2[i, :, 2], out=_max)
np.subtract(_max + 1, _min, out=w)
w.clip(min=0, out=w)
np.maximum.outer(bboxes1[i, :, 1], bboxes2[i, :, 1], out=_min)
np.minimum.outer(bboxes1[i, :, 3], bboxes2[i, :, 3], out=_max)
np.subtract(_max + 1, _min, out=h)
h.clip(min=0, out=h)
np.multiply(w, h, out=w)
inters += w
return inters | 497 |
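A sketch exercising _intersect's per-frame accumulation; coordinates follow the inclusive [x1, y1, x2, y2] convention implied by the +1 terms.

import numpy as np

# Two frames (t=2), one box tube on each side.
b1 = np.array([[[0, 0, 9, 9]], [[0, 0, 9, 9]]], dtype=np.float32)  # a 10x10 box
b2 = np.array([[[5, 5, 9, 9]], [[5, 5, 9, 9]]], dtype=np.float32)  # a 5x5 box
print(_intersect(b1, b2))  # [[50.]] -> 25 px overlap per frame, summed over 2 frames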
def test_r4_3_create_product(description, expected):
"""
Testing R4-3: The description of the product can be arbitrary characters,
with a minimum length of 20 characters and a maximum of 2000 characters.
"""
    # Register a test user; if the user already exists, register() just returns False
register('Test0', '[email protected]', 'Password1!')
try:
assert createProduct(productName='p0',
description=description,
price=10.0,
last_modified_date=dt.datetime(2021, 10, 8),
owner_email='[email protected]') is expected
except ValueError:
assert not expected | 498 |
def convert_x_to_bbox(x,score=None):
"""
Takes a bounding box in the centre form [x,y,s,r] and returns it in the form
[x1,y1,x2,y2] where x1,y1 is the top left and x2,y2 is the bottom right
"""
    w = np.sqrt(np.abs(x[2] * x[3]))
    if w <= 0:
        w = 1
    h = x[2] / w
    if score is None:
        return np.array([x[0]-w/2., x[1]-h/2., x[0]+w/2., x[1]+h/2.], np.float32)
    else:
        return np.array([x[0]-w/2., x[1]-h/2., x[0]+w/2., x[1]+h/2., score]).reshape((1,5)) | 499
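A worked check of convert_x_to_bbox (SORT-style state: x, y are the box centre, s the area, r the aspect ratio w/h):

import numpy as np

x = np.array([50., 50., 400., 1.])  # centre (50, 50), area 400, square box
print(convert_x_to_bbox(x))         # [40. 40. 60. 60.] -> a 20x20 box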