content (string, lengths 22 to 815k) | id (int64, 0 to 4.91M)
---|---
def _get_distance(captcha_url):
"""
Get the horizontal offset of the captcha gap.
:param captcha_url: URL of the captcha image
:return: gap offset in on-screen pixels, or None on failure
"""
save_path = os.path.abspath('...') + '\\' + 'images'
if not os.path.exists(save_path):
os.mkdir(save_path)
img_path = _pic_download(captcha_url, 'captcha')
img1 = cv2.imread(img_path, 0)
img2 = cv2.imread(save_path + '\\' + "slider.jpg", 0)
res = cv2.matchTemplate(img1, img2, cv2.TM_CCOEFF_NORMED)
loc = np.where(res >= 0.6)
for pt in zip(*loc[::-1]):
p = pt
try:
cv2.imshow('Detected', img1[p[1]:, p[0]:])
cv2.waitKey(3000)
except Exception as e:
print(e.args)
return None
res = cv2.resize(img1, (255, int(300 * (255 / 600))), interpolation=cv2.INTER_CUBIC)
cv2.imshow("res", res[:, int(p[0] * (255 / 600) + 15):])
# cv2.waitKey(3000)
return int(p[0] * (290 / 600)) | 5,358,800 |
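A minimal sketch of the matching step above, assuming standalone grayscale files "captcha.jpg" and "slider.jpg" (placeholder names): it locates the slider template in the background and rescales the best-match x offset from the raw ~600 px image width to the rendered widget width.

import cv2

background = cv2.imread("captcha.jpg", 0)     # full puzzle image, assumed ~600 px wide
slider = cv2.imread("slider.jpg", 0)          # small template of the slider piece
res = cv2.matchTemplate(background, slider, cv2.TM_CCOEFF_NORMED)
_, max_val, _, max_loc = cv2.minMaxLoc(res)   # best score and its (x, y) location
if max_val >= 0.6:                            # same confidence threshold as above
    print(int(max_loc[0] * (290 / 600)))      # scale to the on-screen track width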
def obj_to_solids(filename: str, material_path: str = "", scale=64):
"""
Converts an .obj file into VMF solids. **BETA**: it's very finicky, and remember to invert normals.
:param filename: The name of the .obj file with path (ex: "test/wall.obj")
:param material_path: The path to the .VMT's using the same names (from the game materials folder, ex: "custom/ramp/")
:param scale: The scale applied to the entire .obj
"""
if material_path and material_path[-1] != "/":  # guard against the empty default path
material_path += "/"
scene = Wavefront(filename, collect_faces=True)
for mesh in scene.mesh_list:
solid = Solid()
for face in mesh.faces[::2]:
side = Side()
for i, vertex in enumerate(face):
vs = str(scene.vertices[vertex])
v = Convert.string_to_vertex(vs)
v.multiply(scale)
side.plane[i] = v
solid.add_sides(side)
solid.editor = Editor()
solid.set_texture(material_path + mesh.materials[0].name[0:-3])
solid.rotate_x(Vertex(), 90)
yield solid | 5,358,801 |
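A hypothetical way to consume the generator above, assuming PyVMF-style helpers (new_vmf, VMF.add_solids, VMF.export); check these names against the actual library in use.

vmf = new_vmf()                 # assumed PyVMF-style constructor
for solid in obj_to_solids("test/wall.obj", "custom/ramp/", scale=64):
    vmf.add_solids(solid)       # assumed method for appending solids
vmf.export("wall.vmf")          # assumed export call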
def adjust_contrast(img, contrast_factor):
"""Adjust contrast of an Image.
Args:
img (PIL Image): PIL Image to be adjusted.
contrast_factor (float): How much to adjust the contrast. Can be any
non negative number. 0 gives a solid gray image, 1 gives the
original image while 2 increases the contrast by a factor of 2.
Returns:
PIL Image: Contrast adjusted image.
"""
if not is_pil_image(img):
raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
enhancer = ImageEnhance.Contrast(img)
img = enhancer.enhance(contrast_factor)
return img | 5,358,802 |
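A hypothetical call, assuming Pillow is installed and "photo.jpg" exists; factors above 1 boost contrast, while 0 yields a solid gray image.

from PIL import Image

img = Image.open("photo.jpg")
adjust_contrast(img, 1.5).save("photo_more_contrast.jpg")  # factor > 1 increases contrast
adjust_contrast(img, 0.0).save("photo_gray.jpg")           # factor 0 gives solid gray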
def write_configurations(root, ra, dec, center, obs_obj, obs_ap, obs_mode, objid=None, path=None,
ndig=None, tight=False, target_file=None, ra_c=None, dec_c=None):
"""
Write a set of configuration files for each FOBOS observation.
Args:
root (:obj:`str`):
The root name for all output files.
ra (`numpy.ndarray`_):
Right ascension coordinates for all considered objects.
dec (`numpy.ndarray`_):
Declination coordinates for all considered objects. Shape must
match ``ra``.
center (:obj:`tuple`):
RA and DEC coordinates for the FOBOS pointing center.
obs_obj (:obj:`list`):
List of `numpy.ndarray`_ objects identifying the indices of the
objects observed from the provided list of coordinates. The number
of items in the list sets the number of revisits to the same
pointing. This is the same as the second object returned by
:func:`~producer.plan.configure_observations`.
obs_ap (:obj:`list`):
List of `numpy.ndarray`_ objects identifying the indices of the
FOBOS apertures used for each object observed. List length must
match ``obs_obj``. This is the same as the fourth object returned
by :func:`~producer.plan.configure_observations`. The aperture
indices must match indices when instantiating a
:class:`~producer.deploy.FOBOSApertures` object in the specified
mode (``obs_mode``).
obs_mode (:obj:`list`):
List of `numpy.ndarray`_ objects identifying the FOBOS mode; see
:class:`~producer.deploy.FOBOSApertures`. List length must match
``obs_obj``. This is the same as the last object returned by
:func:`~producer.plan.configure_observations`.
objid (`numpy.ndarray`_, optional):
An array with identifiers for each object. Each array element must
convert directly to a string. Uniqueness is not checked. Shape
must match ``ra``. If None, just set to the 0-indexed array index.
path (:obj:`str`, optional):
Root path for all output files. If None, either set to the parent
path provided by ``root`` or set to the current directory.
ndig (:obj:`int`, optional):
Number of digits to use for the observation number in the output
file names. If None, this is set by the number of configurations to
write. E.g., 9 observations or less yield ``ndig=1``, 10-99
observations yield ``ndig=2``, etc.
tight (:obj:`bool`, optional):
Output the configuration in "tight" format, where unallocated
apertures are not included.
target_file (:obj:`str`, optional):
Name of the file with the original targets. If provided, will be
included in header of output configuration files.
ra_c (:obj:`int`, optional):
1-indexed column number with the RA coordinates in ``target_file``.
Ignored if ``target_file`` is None.
dec_c (:obj:`int`, optional):
1-indexed column number with the DEC coordinates in ``target_file``.
Ignored if ``target_file`` is None.
"""
# Check the coordinate arrays
if ra.ndim > 1:
raise ValueError('Input coordinate arrays must be 1D.')
if dec.shape != ra.shape:
raise ValueError('Shape of coordinate arrays differ.')
nobj = ra.size
objid_type = 'name'
if objid is None:
objid = numpy.arange(nobj).astype(str)
objid_type = 'index'
if objid.shape != ra.shape:
raise ValueError('Object ID array does not match shape of coordinate arrays.')
objidlen = numpy.amax([len(o) for o in objid])
_center = numpy.atleast_1d(center).ravel()
if _center.size != 2:
raise ValueError('Center coordinates must provide RA and DEC only.')
# Check the observation lists
nobs = len(obs_obj)
if len(obs_ap) != nobs:
raise ValueError(f'Incorrect number of aperture arrays; expected {nobs}, got '
f'{len(obs_ap)}.')
if len(obs_mode) != nobs:
raise ValueError(f'Incorrect number of instrument modes; expected {nobs}, got '
f'{len(obs_mode)}.')
for indx in obs_obj:
if numpy.any(indx >= nobj):
raise ValueError('Object selection indices out of bounds of coordinate arrays.')
# Set the file name number of digits
if ndig is None:
ndig = int(numpy.ceil(numpy.log10(nobs+1)))
# Construct and check the output root
_root = Path(root).resolve()
if _root.is_dir():
raise ValueError('Provided root is a directory. Must provide leading name of files.')
if path is None:
_path = _root.parent
_root = _root.name
else:
_path = Path(path).resolve()
if not _path.is_dir():
warnings.warn(f'Requested path, {path}, currently does not exist and will be created.')
_root = _root.name
for i in range(nobs):
ofile = str(_path / f'{_root}_{i+1:0{ndig}}.db')
ap = FOBOSApertures(mode=obs_mode[i])
if numpy.any(obs_ap[i] >= ap.nap):
raise ValueError(f'Aperture selection indices for observation {i+1} are invalid.')
cfg = ConfigParser()
cfg['FOBOS_MODE'] = {'version': ap.version,
'mode': ','.join(ap.mode.astype(str)),
'baseline': ap.baseline,
'design': ap.config
}
cfg['TARGETS'] = {'source': 'Unknown' if target_file is None else target_file,
'ra_col': 'None' if ra_c is None else ra_c,
'dec_col': 'None' if dec_c is None else dec_c,
'id_type': objid_type,
'allocated': len(obs_obj[i])
}
cfg['POINTING'] = {'ra': f'{_center[0]:12.8f}',
'dec': f'{_center[1]:12.8f}'
}
with io.StringIO() as f:
cfg.write(f)
cfg_str = f.getvalue()
header_text = 'FOBOS configuration file\n\n' \
f'Generated: {time.strftime("%a %d %b %Y %H:%M:%S", time.localtime())}\n\n'
header_text += '-'*70 + '\n\n'
header_text += cfg_str
header_text += '-'*70 + '\n\n'
header_text += 'Columns are:\n' \
' MID - Module ID number\n' \
' SID - Spectrograph ID number\n' \
' BID - Starbug ID number\n' \
' Type - Starbug payload type: (0) single-fiber, (1) 37-fiber \n' \
' IFU, (3) imaging bundle, (4) flux-calibration bundle.\n' \
' OBJID - Object ID \n' \
' RA - Target right ascension (decimal degrees)\n' \
' Dec - Target declination (decimal degrees)\n\n' \
f'{"MID":>3} {"SID":>3} {"BID":>3} {"Type":>4} ' \
f'{"OBJID":>{objidlen}} {"RA":>12} {"DEC":>11}'
tab = empty_configuration_table(nrows=ap.nap, objidlen=objidlen)
# NOTE: The syntax "tab['MID'][:]" is needed here so that the table
# column doesn't lose its other attributes (format, description, etc.)
tab['MID'][:] = ap.mid
tab['SID'][:] = ap.spc
tab['BID'][:] = ap.bid
tab['Type'][:] = ap.payload
tab['OBJID'][:] = 'None'
tab['OBJID'][obs_ap[i]] = objid[obs_obj[i]]
tab['RA'][obs_ap[i]] = ra[obs_obj[i]]
tab['DEC'][obs_ap[i]] = dec[obs_obj[i]]
if tight:
indx = tab['OBJID'] != 'None'
tab = tab[indx]
print(f'Writing: {ofile}')
with open(ofile, 'w') as f:
for l in header_text.split('\n'):
f.write(f'# {l}\n')
tab.write(f, format='ascii.fixed_width_no_header', delimiter=' ') | 5,358,803 |
def matplotlib_view(gviz: Digraph):
"""
Views the diagram using Matplotlib
Parameters
---------------
gviz
Graphviz
"""
return gview.matplotlib_view(gviz) | 5,358,804 |
def shuf_repeat(lst, count):
""" Xiaolong's code expects LMDBs with the train list shuffled and
repeated, so creating that here to avoid multiple steps. """
final_list = []
ordering = list(range(len(lst)))  # np.random.shuffle needs a mutable sequence
for _ in range(count):
np.random.shuffle(ordering)
final_list += [lst[i] for i in ordering]
assert len(final_list) == count * len(lst)
return final_list | 5,358,805 |
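An illustrative call with made-up data: each of the `count` passes appends one full shuffled copy of the input, so every element appears exactly `count` times.

import numpy as np

np.random.seed(0)                  # only to make the shuffles reproducible
tripled = shuf_repeat(['a', 'b', 'c'], 3)
print(len(tripled))                # 9
print(sorted(tripled))             # ['a', 'a', 'a', 'b', 'b', 'b', 'c', 'c', 'c']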
def create_query(table_name, schema_dict):
"""
see datatypes documentation here:
https://www.postgresql.org/docs/11/datatype.html
"""
columns = schema_dict[table_name]  # use the schema passed in rather than a global
return (
f"goodbooks_{table_name}",
[f"{column} {value}" for column, value in columns.items()],
) | 5,358,806 |
def new_document(
source_path: str, settings: Any = None
) -> Tuple[nodes.document, JSONReporter]:
"""Return a new empty document object.
Replicates ``docutils.utils.new_document``, but uses JSONReporter,
which is also returned
Parameters
----------
source_path : str
The path to or description of the source text of the document.
settings : optparse.Values
Runtime settings. If none are provided, a default core set will
be used. If you will use the document object with any Docutils
components, you must provide their default settings as well. For
example, if parsing, at least provide the parser settings,
obtainable as follows::
settings = docutils.frontend.OptionParser(
components=(docutils.parsers.rst.Parser,)
).get_default_values()
"""
# TODO cache creation, as in sphinx.util.docutils.new_document, possibly using a
# 'partial' lru_cache, as in https://stackoverflow.com/a/37611009/5033292
if settings is None:
settings = OptionParser().get_default_values()
# TODO can probably remove decode_path, given python 3 only support
source_path = decode_path(source_path)
reporter = JSONReporter(
source_path,
settings.report_level,
settings.halt_level,
stream=settings.warning_stream,
debug=settings.debug,
encoding=settings.error_encoding,
error_handler=settings.error_encoding_error_handler,
)
document = nodes.document(settings, reporter, source=source_path)
document.note_source(source_path, -1)
return document, reporter | 5,358,807 |
def delete_repleciation(zfssrcfs, repel_uuid):
"""ZFS repleciation action status
accepts: An exsistng ZFS action uuid (id).
returns: the ZFS return status code.
"""
r = requests.delete(
"%s/api/storage/v1/replication/actions/%s"
% (url, repel_uuid), auth=zfsauth, verify=False,
headers=jsonheader,
)
logger.info("Deleting local repleciation for %s(%s)", repel_uuid, zfssrcfs)
return r.status_code | 5,358,808 |
def load_checkpoint(
neox_args, model, optimizer, lr_scheduler, inference=False, iteration=None
):
"""Load a model checkpoint and return the iteration."""
if neox_args.deepspeed:
load_optim_and_scheduler = (
not neox_args.no_load_optim
) # TODO: These should be configured by separate args
if neox_args.finetune:
load_optim_and_scheduler = False
if iteration is not None:
tag = f"global_step{iteration}"
else:
tag = None
checkpoint_name, state_dict = model.load_checkpoint(
neox_args.load,
load_optimizer_states=load_optim_and_scheduler,
load_lr_scheduler_states=load_optim_and_scheduler,
tag=tag,
)
if checkpoint_name is None:
# if an iteration is specified, we want to raise an error here rather than
# continuing silently, since we are trying to load a specific checkpoint
if iteration is not None:
available_checkpoints = sorted(
[
int(i.name.replace("global_step", ""))
for i in Path(neox_args.load).glob("global_step*")
]
)
raise ValueError(
f"Unable to load checkpoint for iteration {iteration}. \nAvailable iterations: {pformat(available_checkpoints)}"
)
if mpu.get_data_parallel_rank() == 0:
print("Unable to load checkpoint.")
return 0 # iteration 0, if not checkpoint loaded
else:
raise ValueError("Must be using deepspeed to use neox")
# Set iteration.
if neox_args.finetune:
iteration = 0
else:
iteration = state_dict.get("iteration") or state_dict.get(
"total_iters"
) # total_iters backward compatible with older checkpoints
if iteration is None:
raise ValueError(
f"Unable to load iteration from checkpoint {checkpoint_name} with keys {state_dict.keys()}, exiting"
)
# Check arguments.
if "args" in state_dict:
checkpoint_args = state_dict["args"]
check_checkpoint_args(neox_args=neox_args, checkpoint_args=checkpoint_args)
print_rank_0(
" > validated currently set args with arguments in the checkpoint ..."
)
else:
print_rank_0(" > could not find arguments in the checkpoint for validation...")
# Check loaded checkpoint with forward pass
if neox_args.checkpoint_validation_with_forward_pass:
if "checkpoint_validation_logits" in state_dict:
check_forward_pass(
neox_args=neox_args,
model=model,
checkpoint_logits=state_dict["checkpoint_validation_logits"],
inference=inference,
)
print_rank_0(" > validated loaded checkpoint with forward pass ...")
else:
if mpu.get_data_parallel_rank() == 0:
print(
" > WARNING: checkpoint_validation_with_forward_pass is configured but no checkpoint validation data available in checkpoint {}".format(
checkpoint_name
)
)
# rng states.
if not neox_args.finetune and not neox_args.no_load_rng:
try:
random.setstate(state_dict["random_rng_state"])
np.random.set_state(state_dict["np_rng_state"])
torch.set_rng_state(state_dict["torch_rng_state"])
torch.cuda.set_rng_state(state_dict["cuda_rng_state"])
mpu.get_cuda_rng_tracker().set_states(state_dict["rng_tracker_states"])
except KeyError:
print_rank_0(
"Unable to load optimizer from checkpoint {}. "
"Specify --no-load-rng or --finetune to prevent "
"attempting to load the optimizer state, "
"exiting ...".format(checkpoint_name)
)
sys.exit()
torch.distributed.barrier()
if mpu.get_data_parallel_rank() == 0:
print(" successfully loaded {}".format(checkpoint_name))
return iteration | 5,358,809 |
def test_move_zeros(source, result):
"""Test that function move_zeros returns a list matching result."""
from move_zeros import move_zeros
assert move_zeros(source) == result | 5,358,810 |
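The test above expects `source` and `result` to be supplied externally (e.g. via fixtures); a minimal parametrized sketch, not necessarily the project's actual setup, could look like this:

import pytest

@pytest.mark.parametrize("source, result", [
    ([0, 1, 0, 3, 12], [1, 3, 12, 0, 0]),
    ([0, 0, 0], [0, 0, 0]),
    ([1, 2, 3], [1, 2, 3]),
])
def test_move_zeros_param(source, result):
    """Same assertion as above, with inline example cases."""
    from move_zeros import move_zeros
    assert move_zeros(source) == result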
def get_logged_in_session(websession: aiohttp.ClientSession) -> RenaultSession:
"""Get initialised RenaultSession."""
return RenaultSession(
websession=websession,
country=TEST_COUNTRY,
locale_details=TEST_LOCALE_DETAILS,
credential_store=get_logged_in_credential_store(),
) | 5,358,811 |
def __getStationName(name, id):
"""Construct a station name."""
name = name.replace("Meetstation", "")
name = name.strip()
name += " (%s)" % id
return name | 5,358,812 |
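A worked example of the formatting above, using a hypothetical KNMI-style input:

print(__getStationName("Meetstation De Bilt", 6260))  # -> "De Bilt (6260)"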
def processVideo(event):
"""Function get called on left click event on the button 'calculateVideo'
Opens a browse dialog to select the video file.
Processes the video for detecting faces and making predictions on them by loading the trained models.
Displays the result in the GUI.
:param event: The function is bounded to the button calculateVideo("Calculate Attendence from Video") and activated by left click event.
"""
status.config(text='Processing Video...')
txt.delete(0.0, END)
txt.insert(END, 'Processing video.\n')
path = browse_file()
result = predictFromVideo(path)
status.config(text='Result computed successfully.')
presentlist = list(result)
txt.delete(0.0, END)
txt.insert(END, 'Students present are:' + '\n')
num = 1
for name in presentlist:
txt.insert(END, str(num) + '. ' + name + '\n')
num += 1
if num == 1:
txt.delete(0.0, END)
txt.insert(END, 'No persons found.' + '\n')
calculateVideo.configure(background=orig_color_calculatevideo) | 5,358,813 |
def load_hist(path):
"""
load spatial histogram
"""
# load all hist properties
logpYX = io.loadmat(os.path.join(path, 'logpYX'))['value']
xlab = io.loadmat(os.path.join(path, 'xlab'))['value']
ylab = io.loadmat(os.path.join(path, 'ylab'))['value']
rg_bin = io.loadmat(os.path.join(path, 'rg_bin'))['value']
prior_count = io.loadmat(os.path.join(path, 'prior_count'))['value']
# fix some of the properties, convert to torch tensors
logpYX = torch.tensor(logpYX, dtype=torch.float)
xlab = torch.tensor(xlab[0], dtype=torch.float)
ylab = torch.tensor(ylab[0], dtype=torch.float)
rg_bin = torch.tensor(rg_bin[0], dtype=torch.float)
prior_count = prior_count.item()
# build the SpatialHist instance
H = SpatialHist()
H.set_properties(logpYX, xlab, ylab, rg_bin, prior_count)
return H | 5,358,814 |
def test_default_reporter():
"""By default, `reader` isn't implemented."""
assert TestReporter().reader() is None
# close does nothing by default.
assert TestReporter().close() is None | 5,358,815 |
def post_url(url):
"""Post url argument type
:param str url: the post url
:rtype: str
:returns: the post url
"""
url = url.strip()
if len(url) == 0:
raise ArgumentTypeError("A url is required")
elif len(url) > Url.URL_LENGTH:
raise ArgumentTypeError("The url length is over the maximum allowed")
return url | 5,358,816 |
def create_planner(request):
"""Create a new planner and redirect to new planner page."""
user = request.user
plan = Plan.objects.create(author=user)
plan.save()
return HttpResponseRedirect(reverse('planner:edit_plan', args=[plan.id], )) | 5,358,817 |
def rule_matching_evaluation(df, model, seed_num, rein_num, eval_num, label_map, refer_label, lime_flag=True, scan_flag=False
, content_direction='forward', xcol_name='text', n_cores=20):
"""A integrated rule extraction, refinement and validation process.
On the dataset, sample based methods are used. Seed rules are extracted and unmatched samples in
reinforcement samples are re-fed into extraction procedure. Validation are conducted in loops until
certain condition is meet.
Args:
df: dataframe to be explained.
model: model that can classify instances.
seed_num: sample size for seed rule generation.
rein_num: sample size for reinforcement procedure.
eval_num: sample size for evaluation procedure.
label_map: label text and value mappings.
refer_label: the reference label for lime.
lime_flag: on-off flag for lime based inference rules.
scan_flag: on-off flag for LCS based scan rules.
content_direction: cut out sequences from 'forward' or 'backward'
xcol_name: column name for content to be explained in df.
n_cores: number of cores to utilize.
Returns:
match_result: match result on evaluation test sets.
rules_tobe_validate: final rules generated.
matched_rules: rules hit by evaluation test samples.
"""
# shuffle dataset
df = df.sample(frac=1, random_state=1)  # assign back; sample() does not shuffle in place
# generate seed rules
df_for_seed = df[df['target'] == label_map['malicious']].sample(seed_num, random_state=2)
rules_seed = get_rules(df_for_seed, model, label_map, 'malicious', lime_flag=lime_flag, scan_flag=scan_flag, content_direction=content_direction, n_cores=n_cores)
print(rules_seed)
# reinforce rules
max_iter_times = 2
df_split = np.array_split(df, max_iter_times)
rules_tobe_validate = rules_seed
for i in range(0, max_iter_times):
print('--------------------------------------------------------------------------------------------------------')
print('--------------------------------------------------------------------------------------------------------')
print('--------------------------------------------------------------------------------------------------------')
print('Reinforce iteration loop %d'% (i+1))
print('Seed rules number: %d' % rules_tobe_validate.shape[0])
df_for_reinforce = df_split[i].sample(rein_num, random_state=3)
match_result, rules_tobe_validate = rule_validation(df_for_reinforce, rules_tobe_validate, n_cores=n_cores)
# # make duplicate removal for each validation
# rules_tobe_validate = rule_deduplicate(rules_tobe_validate)
metrics = get_metrics(match_result)
print(metrics)
if float(metrics['acc']) > 0.98:
print("Validation finished, metrics is fine.")
break
else:
# Reinforcement the unrecognizable malicious flows according to validation results
df_rein = match_result.loc[(match_result.match == 0) & (match_result.target == label_map['malicious'])][['text', 'target']]
df_rein['text'] = df_rein['text'].astype(str)
result_rein = get_rules(df_rein, model, label_map, 'malicious', lime_flag=lime_flag, scan_flag=scan_flag, content_direction=content_direction, n_cores=n_cores)
result_final = pd.concat([rules_tobe_validate, result_rein])
# index start from 1
result_final.index = np.arange(1, len(result_final)+1)
rules_tobe_validate = result_final
print('New rein rules number: %d' % result_rein.shape[0])
print('--------------------------------------------------------------------------------------------------------')
print('--------------------------------------------------------------------------------------------------------')
print('--------------------------------------------------------------------------------------------------------')
df_for_final_eval = df.sample(eval_num, random_state=4)  # use the evaluation sample size
match_result, rules_tobe_validate = rule_validation(df_for_final_eval, rules_tobe_validate, final_flag=True, n_cores=n_cores)
if rules_tobe_validate.shape[0] == 0:
print("Rule extraction failed!!!!!")
return 0, 0, 0
else:
print('The final results are:')
print('@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@')
print('@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@')
print('@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@')
matched_rules = get_final_rules(match_result, rules_tobe_validate)
metrics = get_metrics(match_result)
print(metrics)
print(matched_rules)
print('@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@')
print('@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@')
print('@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@')
print("Final validation finished")
return match_result, rules_tobe_validate, matched_rules | 5,358,818 |
def login():
""" Typical login page """
# if current user is already logged in, then don't log in again
if current_user.is_authenticated:
return redirect(url_for('index'))
form = LoginForm()
if form.validate_on_submit():
user = User.query.filter_by(username=form.username.data).first()
if user is None or not user.check_password(form.password.data):
flash('Invalid username or password')
return redirect(url_for('login'))
# user exists and password is correct
login_user(user, remember=form.remember_me.data)
# if user came from a local page, then return them to that
# page after authentication ... else go to /index
next_page = request.args.get('next')
if not next_page or url_parse(next_page).netloc != '':
next_page = url_for('index')
return redirect(next_page)
# GET just renders the empty login screen
return render_template('login.html', title='Sign In', form=form) | 5,358,819 |
def create_timeperiod_map(
start: spec.Timestamp = None,
end: spec.Timestamp = None,
length: spec.Timelength = None,
) -> spec.TimeperiodMap:
"""create Timeperiod with representation TimeperiodMap
## Inputs
- start: Timestamp
- end: Timestamp
- length: Timelength
## Returns
- TimeperiodMap
"""
start, end = compute_start_end(start=start, end=end, length=length)
return {'start': start, 'end': end} | 5,358,820 |
async def patched_send_async(self, *args, **kwargs):
"""Patched send function that push to queue idx of server to which request is routed."""
buf = args[0]
if buf and len(buf) >= 6:
op_code = int.from_bytes(buf[4:6], byteorder=PROTOCOL_BYTE_ORDER)
# Filter only caches operation.
if 1000 <= op_code < 1100:
requests.append(self.port % 100)
return await old_send_async(self, *args, **kwargs) | 5,358,821 |
def add_event_records(df, event_type, event_date):
"""Add event records for the event type."""
log(f'Adding {DATASET_ID} event records for {event_type}')
this_year = datetime.now().year
df = df.loc[df[event_date].notnull(), :].copy()
df['event_id'] = db.create_ids(df, 'events')
df['dataset_id'] = DATASET_ID
df['year'] = df[event_date].dt.strftime('%Y').astype(int)
df['year'] = df['year'].apply(lambda x: x - 100 if x > this_year else x)
df['day'] = df[event_date].dt.strftime('%j').astype(int)
df['event_type'] = event_type
df['event_json'] = util.json_object(df, EVENT_FIELDS)
df.loc[:, db.EVENT_FIELDS].to_sql(
'events', db.connect(), if_exists='append', index=False)
return df | 5,358,822 |
def _renderPath(path,drawFuncs,countOnly=False,forceClose=False):
"""Helper function for renderers."""
# this could be a method of Path...
points = path.points
i = 0
hadClosePath = 0
hadMoveTo = 0
active = not countOnly
for op in path.operators:
if op == _MOVETO:
if forceClose:
if hadMoveTo and pop!=_CLOSEPATH:
hadClosePath += 1
if active:
drawFuncs[_CLOSEPATH]()
hadMoveTo += 1
nArgs = _PATH_OP_ARG_COUNT[op]
j = i + nArgs
drawFuncs[op](*points[i:j])
i = j
if op == _CLOSEPATH:
hadClosePath += 1
pop = op
if forceClose and hadMoveTo and pop!=_CLOSEPATH:
hadClosePath += 1
if active:
drawFuncs[_CLOSEPATH]()
return hadMoveTo == hadClosePath | 5,358,823 |
def save_depth_image(filename, depth, filetype=None):
"""
Saves a depth image (e.g. generated by render module) to an image file.
We support several file types:
'tum', saving as '.png', depth values are multiplied with 5000 and stored as uint16 (see
https://vision.in.tum.de/data/datasets/rgbd-dataset/file_formats).
'exr', saving as '.exr', depth values are stored as float in a single-channel EXR image.
'exr3', saving as '.exr', depth values are stored as float in a three-channel (RGB) EXR image -- this option adds
overhead and should not be used unless required by dependencies postprocessing the images.
Note that EXR options are only available if `pyexr` can be loaded, which requires OpenEXR to be installed on the
system. See https://stackoverflow.com/a/68102521/1264582 for further information.
:param filename: Name of the file. Need not contain the ending, unless no filetype specified, in which case we try
to guess the filetype based on the ending.
:param depth: (h, w) ndarray with float values in meter.
:param filetype: string, optional, one of 'tum', 'exr', 'exr3'.
"""
# guess filetype from filename if not provided
if filetype is None:
if filename.endswith('.exr'):
filetype = 'exr'
elif filename.endswith('.png'):
filetype = 'tum'
else:
raise ValueError(f'Could not guess filetype from filename {filename}.')
# check if we support the requested filetype
supported_file_types = ['tum']
if pyexr is not None:
supported_file_types.extend(['exr', 'exr3'])
if filetype not in supported_file_types:
raise ValueError(f'given filetype {filetype} not supported. Must be one of {supported_file_types}.')
if filetype == 'tum':
if not filename.endswith('.png'):
filename += '.png'
# use tum file format (which is actually a scaled 16bit png)
# https://vision.in.tum.de/data/datasets/rgbd-dataset/file_formats
imageio.imwrite(filename, (depth * 5000).astype(np.uint16))
elif filetype == 'exr':
if not filename.endswith('.exr'):
filename += '.exr'
pyexr.write(filename, depth, channel_names=['Z'], precision=pyexr.FLOAT)
elif filetype == 'exr3':
if not filename.endswith('.exr'):
filename += '.exr'
# store images to file (extend to three channels and store in exr)
# this is for compatibility with GPNet dataset, although it bloats the file size
img = np.repeat(depth, 3).reshape(depth.shape[0], depth.shape[1], 3)
pyexr.write(filename, img, channel_names=['R', 'G', 'B'], precision=pyexr.FLOAT) | 5,358,824 |
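A hypothetical call saving a synthetic depth map in the TUM format described above (the file name and values are illustrative):

import numpy as np

depth = np.random.uniform(0.5, 3.0, size=(480, 640)).astype(np.float32)  # meters
save_depth_image("frame_0001", depth, filetype="tum")  # writes frame_0001.png, values scaled by 5000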
def _search_focus(s, code=None):
""" Search for a particular module / presentation.
The search should return only a single item. """
if not code:
code = input("Module code (e.g. TM129-17J): ")
results = _search_by_code(s, code)
if not len(results):
print('Nothing found for "{}"'.format(code))
elif len(results) > 1:
print(
"Please be more specific:\n\t{}\n".format(
"\n\t".join([r[0].split(" ")[0] for r in results])
)
)
else:
return results[0]
return (None, None) | 5,358,825 |
def visualize(image, pred, label=None):
"""make visualization"""
n_plot = 2 if label is None else 3
fig = plt.figure()
ax = fig.add_subplot(1, n_plot, 1)
imgplot = plt.imshow(image)
ax.set_title('Image')
ax = fig.add_subplot(1, n_plot, 2)
imgplot = plt.imshow(pred)
ax.set_title('Prediction')
if n_plot > 2:
ax = fig.add_subplot(1, n_plot, 3)
imgplot = plt.imshow(label)
ax.set_title('Ground Truth')
fig.tight_layout()
plt.savefig(f'visualization/{args.model[:-3]}_validation.png')
plt.show() | 5,358,826 |
def parse_line(sample):
"""Parse an ndjson line and return ink (as np array) and classname."""
class_name = sample["word"]
inkarray = sample["drawing"]
stroke_lengths = [len(stroke[0]) for stroke in inkarray]
total_points = sum(stroke_lengths)
np_ink = np.zeros((total_points, 3), dtype=np.float32)
current_t = 0
for stroke in inkarray:
for i in [0, 1]:
np_ink[current_t:(current_t + len(stroke[0])), i] = stroke[i]
current_t += len(stroke[0])
np_ink[current_t - 1, 2] = 1 # stroke_end
# Preprocessing.
# 1. Size normalization.
lower = np.min(np_ink[:, 0:2], axis=0)
upper = np.max(np_ink[:, 0:2], axis=0)
scale = upper - lower
scale[scale == 0] = 1
np_ink[:, 0:2] = (np_ink[:, 0:2] - lower) / scale
# 2. Compute deltas.
np_ink = np_ink[1:, 0:2] - np_ink[0:-1, 0:2]
return np_ink, class_name | 5,358,827 |
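A small worked example (not from the original code) of the two preprocessing steps above, applied to a single three-point stroke:

import numpy as np

np_ink = np.array([[0., 0., 0.],
                   [10., 20., 0.],
                   [20., 40., 1.]], dtype=np.float32)
lower = np.min(np_ink[:, 0:2], axis=0)              # [0, 0]
upper = np.max(np_ink[:, 0:2], axis=0)              # [20, 40]
scale = upper - lower
scale[scale == 0] = 1
np_ink[:, 0:2] = (np_ink[:, 0:2] - lower) / scale   # coordinates now in [0, 1]
deltas = np_ink[1:, 0:2] - np_ink[0:-1, 0:2]        # per-step (dx, dy)
print(deltas)                                       # [[0.5 0.5] [0.5 0.5]]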
def test_missing(metafunc, fixtures: Dict[str, Any]) -> None:
"""Test function not found among fixtures, don't parametrize."""
generate_tests(metafunc, fixtures) # act
metafunc.parametrize.assert_not_called() | 5,358,828 |
def evalRPN(self, tokens):
# ! Evaluates a Reverse Polish Notation expression, mainly using a stack
"""
:type tokens: List[str]
:rtype: int
"""
stack = []
for item in tokens:
# print(stack)
if item.isdigit():
stack.append(int(item))
if item[0] == '-' and len(item) > 1 and item[1:].isdigit():
stack.append(int(item))
if item == '*':
num1 = stack.pop()
num2 = stack.pop()
stack.append(num1 * num2)
if item == '/':
num1 = stack.pop()
num2 = stack.pop()
stack.append(int(num2 / num1))
if item == '+':
num1 = stack.pop()
num2 = stack.pop()
stack.append(num1 + num2)
if item == '-':
num1 = stack.pop()
num2 = stack.pop()
stack.append(num2 - num1)
return stack[0] | 5,358,829 |
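A worked example for the evaluator above, treating it as an unbound function (self is unused, so None is passed purely for illustration):

tokens = ["2", "1", "+", "3", "*"]   # (2 + 1) * 3
print(evalRPN(None, tokens))         # 9; stack: [2] -> [2, 1] -> [3] -> [3, 3] -> [9]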
def _validate_flags():
"""Returns True if flag values are valid or prints error and returns False."""
if FLAGS.list_ports:
print("Input ports: '%s'" % (
"', '".join(midi_hub.get_available_input_ports())))
print("Ouput ports: '%s'" % (
"', '".join(midi_hub.get_available_output_ports())))
return False
if FLAGS.bundle_files is None:
print('--bundle_files must be specified.')
return False
if (len(FLAGS.bundle_files.split(',')) > 1 and
FLAGS.generator_select_control_number is None):
tf.logging.warning(
'You have specified multiple bundle files (generators), without '
'setting `--generator_select_control_number`. You will only be able to '
'use the first generator (%s).',
FLAGS.bundle_files.split(',')[0])  # first bundle name, not the first character
return True | 5,358,830 |
def proto_test(test):
"""
If test is a ProtoTest, I just return it. Otherwise I create a ProtoTest
out of test and return it.
"""
if isinstance(test, ProtoTest):
return test
else:
return ProtoTest(test) | 5,358,831 |
def get_frames():
"""Get frames for an episode
Params:
episode: int
The episode for which the frames shall be returned
Returns:
frames: dict
The frames for an episode per timestep
"""
episode = int(request.args.get('user'))
frames = data_preprocessor.get_frames_for_episode(episode)
return frames, 200, JSON_TYPE | 5,358,832 |
async def _(message: Message):
"""
Reacts to all messages containing
"hi" or "hello" and ignores text case
"""
await message.answer("Hi!") | 5,358,833 |
def remove(s1,s2):
"""
Returns a copy of s1, with all characters in s2 removed.
Examples:
remove('abc','ab') returns 'c'
remove('abc','xy') returns 'abc'
remove('hello world','ol') returns 'he wrd'
Parameter s1: the string to copy
Precondition: s1 is a string
Parameter s2: the characters to remove
Precondition: s2 is a string
"""
assert isinstance(s1, str)
assert isinstance(s2, str)
result = ''
for x in s1:
if not x in s2:
result = result + x
return result | 5,358,834 |
def get_firebase_db_url():
"""Grabs the databaseURL from the Firebase config snippet. Regex looks
scary, but all it is doing is pulling the 'databaseURL' field from the
Firebase javascript snippet"""
regex = re.compile(r'\bdatabaseURL\b.*?["\']([^"\']+)')
cwd = os.path.dirname(__file__)
try:
with open(os.path.join(cwd, 'templates', config.FIREBASE_CONFIG)) as f:
url = next(regex.search(line) for line in f if regex.search(line))
except StopIteration:
raise ValueError(
'Error parsing databaseURL. Please copy Firebase web snippet '
'into templates/{}'.format(config.FIREBASE_CONFIG))
return url.group(1) | 5,358,835 |
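A quick check of the regex used above against a typical snippet line (the URL is a made-up placeholder):

import re

regex = re.compile(r'\bdatabaseURL\b.*?["\']([^"\']+)')
line = '    databaseURL: "https://example-project.firebaseio.com",'
print(regex.search(line).group(1))   # https://example-project.firebaseio.com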
def get_issuer_plan_ids(issuer):
"""Given an issuer id, return all of the plan ids registered to that issuer."""
df = pd.read_csv(PATH_TO_PLANS)
df = df[df.IssuerId.astype(str) == issuer]
return set(df.StandardComponentId.unique()) | 5,358,836 |
def add_args(parser):
"""
parser : argparse.ArgumentParser
returns the parser extended with the arguments required by fit
"""
# Training settings
parser.add_argument('--model', type=str, default='mobilenet', metavar='N',
help='neural network used in training')
parser.add_argument('--dataset', type=str, default='cifar10', metavar='N',
help='dataset used for training')
parser.add_argument('--data_dir', type=str, default='./../../../data/cifar10',
help='data directory')
parser.add_argument('--partition_method', type=str, default='hetero', metavar='N',
help='how to partition the dataset on local workers')
parser.add_argument('--partition_alpha', type=float, default=0.5, metavar='PA',
help='partition alpha (default: 0.5)')
parser.add_argument('--defense_type', type=str, default='weak_dp', metavar='N',
help='the robust aggregation method to use on the server side')
parser.add_argument('--norm_bound', type=str, default=30.0, metavar='N',
help='the norm bound of the weight difference in norm clipping defense.')
parser.add_argument('--stddev', type=str, default=0.025, metavar='N',
help='the standard deviation of the Gaussian noise added in weak DP defense.')
parser.add_argument('--client_num_in_total', type=int, default=1000, metavar='NN',
help='number of workers in a distributed cluster')
parser.add_argument('--client_num_per_round', type=int, default=4, metavar='NN',
help='number of workers')
#parser.add_argument('--attack_method', type=str, default="blackbox",
# help='describe the attack type: blackbox|pgd|graybox|no-attack|')
parser.add_argument('--poison_type', type=str, default='southwest',
help='specify source of data poisoning: |ardis|(for EMNIST), |southwest|howto|(for CIFAR-10)')
# TODO(hwang): we will add PGD attack soon, stay tuned!
#parser.add_argument('--adv_lr', type=float, default=0.02,
# help='learning rate for adv in PGD setting')
parser.add_argument('--attack_freq', type=int, default=10,
help='a single adversary per X federated learning rounds e.g. 10 means there will be an attacker in each 10 FL rounds.')
parser.add_argument('--batch_size', type=int, default=64, metavar='N',
help='input batch size for training (default: 64)')
parser.add_argument('--client_optimizer', type=str, default='adam',
help='SGD with momentum; adam')
parser.add_argument('--lr', type=float, default=0.001, metavar='LR',
help='learning rate (default: 0.001)')
parser.add_argument('--wd', help='weight decay parameter;', type=float, default=0.001)
parser.add_argument('--epochs', type=int, default=5, metavar='EP',
help='how many epochs will be trained locally')
parser.add_argument('--comm_round', type=int, default=10,
help='how many round of communications we shoud use')
parser.add_argument('--is_mobile', type=int, default=0,
help='whether the program is running on the FedML-Mobile server side')
parser.add_argument('--frequency_of_the_test', type=int, default=1,
help='the frequency of the algorithms')
parser.add_argument('--gpu_server_num', type=int, default=1,
help='gpu_server_num')
parser.add_argument('--gpu_num_per_server', type=int, default=4,
help='gpu_num_per_server')
args = parser.parse_args()
return args | 5,358,837 |
def parse_coords(lines):
"""Parse skbio's ordination results file into coords, labels, eigvals,
pct_explained.
Returns:
- list of sample labels in order
- array of coords (rows = samples, cols = axes in descending order)
- list of eigenvalues
- list of percent variance explained
For the file format check
skbio.math.stats.ordination.OrdinationResults.from_file
Strategy: read the file using skbio's parser and return the objects
we want
"""
try:
pcoa_results = OrdinationResults.from_file(lines)
return (pcoa_results.site_ids, pcoa_results.site, pcoa_results.eigvals,
pcoa_results.proportion_explained)
except FileFormatError:
if type(lines) == file:
lines.seek(0)
return qiime_parse_coords(lines) | 5,358,838 |
def run_ann(model, train, test, params_save_path, iteration, optimizer, loss, callbacks=None, valid=None,
shuffle_training=True,
batch_size=16,
num_epochs=30):
"""
Run analog network with cross-validation
:param batch_size: batch size during training
:param model: reference to the tensorflow model
:param train: pair of training data (x_train, y_train)
:param valid: pair of validation data (x_val, y_val)
:param test: pair of testing data (x_test, y_test)
:param params_save_path: output path to save weights of the network
:param iteration: number of the iteration in CV
:param shuffle_training: shuffle samples
:param num_epochs: number of epochs to train for
:return: accuracy, precision, recall, f1 and confusion matrix from the testing data
"""
x_train, y_train = train[0], train[1]
x_test, y_test = test[0], test[1]
if valid is not None:
x_valid, y_valid = valid[0], valid[1]
converter = nengo_dl.Converter(model)
with nengo_dl.Simulator(converter.net, minibatch_size=batch_size) as simulator:
simulator.compile(optimizer=optimizer,
loss=loss,
metrics=['accuracy'])
input_layer = converter.inputs[model.get_layer('input_layer')] # get the input layer reference
output_layer = converter.outputs[model.get_layer('output_layer')] # get the output layer reference
# fit the model with the training data
simulator.fit(
x={input_layer: x_train}, y={output_layer: y_train},
validation_data=(
{input_layer: x_valid}, {output_layer: y_valid}
) if valid is not None else None,
epochs=num_epochs,
shuffle=shuffle_training,
callbacks=callbacks
# early stop to avoid overfitting
)
simulator.save_params(params_save_path) # save weights to the file
# Get the statistics
accuracy, precision, recall, f1, confusion_matrix = get_metrics(simulator, output_layer, x_test, y_test,
batch_size,
f'{iteration}. CNN')
return {
'accuracy': accuracy,
'precision': precision,
'recall': recall,
'f1': f1,
'confusion_matrix': confusion_matrix
} | 5,358,839 |
def get_sector(session, sector_name=None, sector_id=None):
""" Get a sector by it's name or id. """
return get_by_name_or_id(session, Sector, model_id=sector_id, name=sector_name) | 5,358,840 |
def is_valid_pre_6_2_version(xml):
"""Returns whether the given XML object corresponds to an XML output file of Quantum ESPRESSO pw.x pre v6.2
:param xml: a parsed XML output file
:return: boolean, True when the XML was produced by Quantum ESPRESSO with the old XML format
"""
element_header = xml.find('HEADER')
if element_header is None:
return False
element_format = element_header.find('FORMAT')
if element_format is None:
return False
try:
name = element_format.attrib['NAME']
except KeyError:
return False
if name != 'QEXML':
return False
return True | 5,358,841 |
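A hedged example of the check above using a minimal in-memory XML element; real files are produced by pw.x, and this string only mimics the old header layout:

import xml.etree.ElementTree as ET

old_style = ET.fromstring('<Root><HEADER><FORMAT NAME="QEXML"/></HEADER></Root>')
print(is_valid_pre_6_2_version(old_style))                 # True
print(is_valid_pre_6_2_version(ET.fromstring('<Root/>')))  # False: no HEADER element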
def set_log_level(loglevel_str):
""" change log level for all handlers """
if loglevel_str == ps.LOG_DEBUG:
level = logging.DEBUG
elif loglevel_str == ps.LOG_INFO:
level = logging.INFO
elif loglevel_str == ps.LOG_ERROR:
level = logging.ERROR
elif type(loglevel_str) not in (str,):
level = loglevel_str
else:
level = LOG_LEVEL
for handler in logger.handlers:
handler.setLevel(level)
logger.setLevel(level) | 5,358,842 |
def exportdf (df =None, refout:str =None, to:str =None, savepath:str =None,
modname:str ='_wexported_', reset_index:bool =True):
"""
Export dataframe ``df`` to `refout` files. `refout` file can
be Excell sheet file or '.json' file. To get more details about
the `writef` decorator , see :doc:`watex.utils.decorator.writef`.
:param refout:
Output filename. If not given will be created refering to the
exported date.
:param to: Export type; Can be `.xlsx` , `.csv`, `.json` and else.
:param savepath:
Path to save the `refout` filename. If not given
will be created.
:param modname: Folder to hold the `refout` file. Change it accordingly.
:returns:
- `df_`: new dataframe to be exported.
"""
if df is None :
warnings.warn(
'Once ``df`` arguments in decorator :`class:~decorator.writef`'
' is selected. The main type of file ready to be written MUST be '
'a pd.DataFrame format. If not an error raises. Please refer to '
':doc:`~.utils.decorator.writef` for more details.')
raise Wex.WATexError_file_handling(
'No dataframe detected. Please provided your dataFrame.')
df_ =df.copy(deep=True)
if reset_index is True :
df_.reset_index(inplace =True)
if savepath is None :
savepath = savePath(modname)
return df_, to, refout, savepath, reset_index | 5,358,843 |
def invalidate_enrollment_mode_cache(sender, instance, **kwargs): # pylint: disable=unused-argument
"""
Invalidate the cache of CourseEnrollment model.
"""
cache_key = CourseEnrollment.cache_key_name(
instance.user.id,
str(instance.course_id)
)
cache.delete(cache_key) | 5,358,844 |
def clean_bpoa_seniority_list(csv):
"""Clean a digitized BPOA seniority list."""
dirty = pd.read_csv(csv)
clean = pd.DataFrame()
clean["job_title"] = dirty["Rank"]
clean["last_name"] = dirty["Last name"]
clean["first_name"] = dirty["First Name"]
clean = clean.apply(correct_name, axis=1)
clean["star_no"] = dirty["Badge No."]
clean["employment_date"] = dirty["Hire Date"].apply(pd.to_datetime)
return clean | 5,358,845 |
def create_profiling_group(agentOrchestrationConfig=None, clientToken=None, profilingGroupName=None):
"""
Creates a profiling group.
See also: AWS API Documentation
Exceptions
:example: response = client.create_profiling_group(
agentOrchestrationConfig={
'profilingEnabled': True|False
},
clientToken='string',
profilingGroupName='string'
)
:type agentOrchestrationConfig: dict
:param agentOrchestrationConfig: The agent orchestration configuration.\n\nprofilingEnabled (boolean) -- [REQUIRED]\n\n
:type clientToken: string
:param clientToken: [REQUIRED]\nUnique, case-sensitive identifier that you provide to ensure the idempotency of the request.\nThis parameter specifies a unique identifier for the new profiling group that helps ensure idempotency.\nThis field is autopopulated if not provided.\n
:type profilingGroupName: string
:param profilingGroupName: [REQUIRED]\nThe name of the profiling group.\n
:rtype: dict
ReturnsResponse Syntax
{
'profilingGroup': {
'agentOrchestrationConfig': {
'profilingEnabled': True|False
},
'arn': 'string',
'createdAt': datetime(2015, 1, 1),
'name': 'string',
'profilingStatus': {
'latestAgentOrchestratedAt': datetime(2015, 1, 1),
'latestAgentProfileReportedAt': datetime(2015, 1, 1),
'latestAggregatedProfile': {
'period': 'P1D'|'PT1H'|'PT5M',
'start': datetime(2015, 1, 1)
}
},
'updatedAt': datetime(2015, 1, 1)
}
}
Response Structure
(dict) --
The structure representing the createProfilingGroupResponse.
profilingGroup (dict) --
Information about the new profiling group
agentOrchestrationConfig (dict) --
profilingEnabled (boolean) --
arn (string) --
The Amazon Resource Name (ARN) identifying the profiling group.
createdAt (datetime) --
The time, in milliseconds since the epoch, when the profiling group was created.
name (string) --
The name of the profiling group.
profilingStatus (dict) --
The status of the profiling group.
latestAgentOrchestratedAt (datetime) --
The time, in milliseconds since the epoch, when the latest agent was orchestrated.
latestAgentProfileReportedAt (datetime) --
The time, in milliseconds since the epoch, when the latest agent was reported..
latestAggregatedProfile (dict) --
The latest aggregated profile
period (string) --
The time period.
start (datetime) --
The start time.
updatedAt (datetime) --
The time, in milliseconds since the epoch, when the profiling group was last updated.
Exceptions
CodeGuruProfiler.Client.exceptions.ServiceQuotaExceededException
CodeGuruProfiler.Client.exceptions.InternalServerException
CodeGuruProfiler.Client.exceptions.ConflictException
CodeGuruProfiler.Client.exceptions.ValidationException
CodeGuruProfiler.Client.exceptions.ThrottlingException
:return: {
'profilingGroup': {
'agentOrchestrationConfig': {
'profilingEnabled': True|False
},
'arn': 'string',
'createdAt': datetime(2015, 1, 1),
'name': 'string',
'profilingStatus': {
'latestAgentOrchestratedAt': datetime(2015, 1, 1),
'latestAgentProfileReportedAt': datetime(2015, 1, 1),
'latestAggregatedProfile': {
'period': 'P1D'|'PT1H'|'PT5M',
'start': datetime(2015, 1, 1)
}
},
'updatedAt': datetime(2015, 1, 1)
}
}
:returns:
profilingEnabled (boolean) --
"""
pass | 5,358,846 |
def topk(table, metric, dimensions, is_asc, k, **kwargs):
""" This function returns both the results according to the intent
as well as the debiasing suggestions.
Some of the oversights considered in this intent are-
1. Regression to the mean
2. Looking at tails to find causes - TODO
Args:
table: Type-pandas.dataframe
It has the contents of the csv file
metric: Type-string
It is the name of the column according to which we sort,
and in the case when grouping has to be done,
summary operator is applied on metric. Metric could be a column
containing strings, if we are applying count operator on it.
dimensions: Type-list of str
It is the name of column we want.
In query:'top 5 batsman according to runs', dimension is 'batsman'.
When summary_operator is not None, we group by dimensions.
is_asc: Type-Bool
Denotes the sort order, True for ascending, False for Descending
k: Type-int
It is the number of entries to be taken
date_range: Type-tuple
Tuple of start_date and end_date
date_column_name: Type-str
It is the name of column which contains date
date_format: Type-str
It is required by datetime.strp_time to parse the date in the format
Format Codes
https://docs.python.org/3/library/datetime.html#strftime-and-strptime-behavior
slices: Type-List of tuples
Tuple represents the condition to keep the row.
(column_name, filter, value)
column_name - is the value of the column that the
condition is applied upon.
filter - Filters enum members, ex. Filters.IN
summary_operator: Type-summary_operators enum members
It denotes the summary operator, after grouping by dimensions.
ex. SummaryOperators.MAX, SummaryOperators.SUM
Note-summary_operator is always applied on metric column passed,
and only when grouping is done
Returns:
The function will return both suggestions and the results in a tuple.
(results, suggestions)
results: Type - pandas dataframe, The results of the intended top-k
suggestions: Type - List of strings, List of suggestions.
"""
date_column_name = kwargs.get('date_column_name', 'date')
date_range = kwargs.get('date_range', None)
date_format = kwargs.get('date_format', '%Y-%m-%d')
slices = kwargs.get('slices', None)
summary_operator = kwargs.get('summary_operator', None)
result_table = topk_results(table, metric, dimensions, is_asc, k,
date_column_name=date_column_name,
date_range=date_range, date_format=date_format,
slices=slices,
summary_operator=summary_operator)
suggestions = []
duplicates_in_topk_suggestion = duplicates_in_topk(result_table, dimensions)
if duplicates_in_topk_suggestion is not None:
suggestions.append(duplicates_in_topk_suggestion)
else:
# Check for RMT suggestion only when no duplicates present.
rmt_suggestion = regression_to_mean(table, metric, dimensions, is_asc, k,
date_column_name=date_column_name,
date_range=date_range,
date_format=date_format, slices=slices,
summary_operator=summary_operator)
if rmt_suggestion is not None:
suggestions.append(rmt_suggestion)
results_without_k_condition = topk_results(table, metric, dimensions, is_asc, -1,
date_column_name=date_column_name,
date_range=date_range, date_format=date_format,
slices=slices,
summary_operator=summary_operator)
more_than_just_topk_suggestion = more_than_just_topk(results_without_k_condition, k, metric)
if more_than_just_topk_suggestion is not None:
suggestions.append(more_than_just_topk_suggestion)
looking_at_tails_suggestion = looking_at_tails(results_without_k_condition, k, metric)
if looking_at_tails_suggestion is not None:
suggestions.append(looking_at_tails_suggestion)
return (result_table, suggestions) | 5,358,847 |
def addMovieElement(findings, data):
""" Helper Function which handles unavailable information for each movie"""
if len(findings) != 0:
data.append(findings[0])
else:
data.append("")
return data | 5,358,848 |
def get_bst_using_min_and_max_value(preorder: list) -> Node:
"""
time complexity: O(n)
space complexity: O(n)
"""
def construct_tree(min_: int, max_: int) -> Optional[Node]:
nonlocal pre_index
nonlocal l
if pre_index >= l:
return None
value = preorder[pre_index]
if min_ < value < max_:
node = Node(value)
pre_index += 1
node.left = construct_tree(min_, value)
node.right = construct_tree(value, max_)
return node
return None
pre_index: int = 0
l: int = len(preorder)
return construct_tree(-1_000_000, 1_000_000) | 5,358,849 |
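A usage sketch for the construction above, assuming the Node class stores its key as `.value` (adjust to the actual attribute name) and exposes the `.left`/`.right` children set by the function:

root = get_bst_using_min_and_max_value([10, 5, 1, 7, 40, 50])
print(root.value, root.left.value, root.right.value)   # 10 5 40
print(root.left.left.value, root.left.right.value)     # 1 7
print(root.right.right.value)                          # 50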
def qwtStepSize(intervalSize, maxSteps, base):
"""this version often doesn't find the best ticks: f.e for 15: 5, 10"""
minStep = divideInterval(intervalSize, maxSteps, base)
if minStep != 0.0:
# # ticks per interval
numTicks = math.ceil(abs(intervalSize / minStep)) - 1
# Do the minor steps fit into the interval?
if (
qwtFuzzyCompare(
(numTicks + 1) * abs(minStep), abs(intervalSize), intervalSize
)
> 0
):
# The minor steps doesn't fit into the interval
return 0.5 * intervalSize
return minStep | 5,358,850 |
def get_trimmed_glyph_name(gname, num):
"""
Glyph names cannot have more than 31 characters.
See https://docs.microsoft.com/en-us/typography/opentype/spec/...
recom#39post39-table
Trims an input string and appends a number to it.
"""
suffix = '_{}'.format(num)
return gname[:31 - len(suffix)] + suffix | 5,358,851 |
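A worked example of the trimming rule above with a made-up glyph name: the suffix length is subtracted first, so the result never exceeds 31 characters.

long_name = "averyveryveryverylongglyphnamethatkeepsgoing"
trimmed = get_trimmed_glyph_name(long_name, 7)
print(trimmed.endswith("_7"), len(trimmed))   # True 31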
def vmatrix(ddir, file_prefix):
""" generate vmatrix DataFile
"""
name = autofile.name.vmatrix(file_prefix)
writer_ = autofile.write.vmatrix
reader_ = autofile.read.vmatrix
return factory.DataFile(ddir=ddir, name=name,
writer_=writer_, reader_=reader_) | 5,358,852 |
def fetch_precision_overlay(precision):
"""
Returns the overlay for the given precision value as cv2 image.
"""
overlay_folder = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
'../assets/precision_overlays'
)
img_path = os.path.join(
overlay_folder, f'overlay_{str(int(precision*100)).zfill(3)}.png'
)
assert os.path.isfile(img_path), f'overlay does not exist at {img_path}'
return cv2.imread(img_path) | 5,358,853 |
def write_dataframe_to_file(df, outpath, **kwargs):
"""
Write a pandas database as a csv to provided file path
Input:
df: pandas dataframe
outpath: path to write csv
Output: None
Raises:
FileNotFoundError: if not a valid path
"""
try:
df.to_csv(outpath, **kwargs)
except FileNotFoundError:
LOG.error("Cannot write to: {}".format(outpath))
raise | 5,358,854 |
def get_argument_sources(
source: Source,
node: ast.Call,
func: Callable,
vars_only: bool,
pos_only: bool
) -> MutableMapping[str, Union[ast.AST, str]]:
"""Get the sources for argument from an ast.Call node
>>> def func(a, b, c, d=4):
>>> ...
>>> x = y = z = 1
>>> func(y, x, c=z)
>>> # argument_sources = {'a': 'y', 'b', 'x', 'c': 'z'}
>>> func(y, x, c=1)
>>> # argument_sources = {'a': 'y', 'b', 'x', 'c': ast.Num(n=1)}
"""
# <Signature (a, b, c, d=4)>
signature = inspect.signature(func, follow_wrapped=False)
# func(y, x, c=z)
# ['y', 'x'], {'c': 'z'}
arg_sources = [argnode_source(source, argnode, vars_only)
for argnode in node.args]
kwarg_sources = {
argnode.arg: argnode_source(source,
argnode.value,
vars_only)
for argnode in node.keywords
} if not pos_only else {}
bound_args = signature.bind_partial(*arg_sources, **kwarg_sources)
argument_sources = bound_args.arguments
# see if *args and **kwargs have anything assigned
# if not, assign () and {} to them
for parameter in signature.parameters.values():
if parameter.kind == inspect.Parameter.VAR_POSITIONAL:
argument_sources.setdefault(parameter.name, ())
if parameter.kind == inspect.Parameter.VAR_KEYWORD:
argument_sources.setdefault(parameter.name, {})
return argument_sources | 5,358,855 |
def cli(src, dest, dry_run):
"""Move all empty videos to a folder specified by the user.
Args:
src: Path that must already exist with the videos to process
dest: Path, where to dump the files
dry_run: boolean
"""
os.makedirs(dest, exist_ok=True)
for src_file in os.listdir(src):
full_src_file = os.path.abspath(os.path.join(src, src_file))
if not src_file.endswith(".mjpg"):
click.echo(f"Found a non video file named: {src_file}")
continue
click.echo(f"Processing file {src_file} ...")
is_empty = video_process_content(full_src_file)
if is_empty:
click.echo(f"Moving {full_src_file} to {dest}{src_file}")
if not dry_run:
shutil.move(full_src_file, os.path.join(dest, src_file)) | 5,358,856 |
def test_config_valid():
"""
Test invalid values for all relevant fields individually.
This tests values that are supposed to fail.
It does not check whether a valid configuration is wrongly marked
as invalid!
"""
with pytest.raises(Exception):
config_valid(dummy_config)
valid = dummy_config.copy()
valid["prod_type"] = "PathConfigProd5Trans80"
valid["workflow_kind"] = "lstchain"
valid["stages_to_run"] = ["r0_to_dl1"]
valid["stages"] = {"r0_to_dl1": [{"input": None, "output": None}]}
assert config_valid(valid)
invalid_workflow = valid.copy()
invalid_workflow["workflow_kind"] = "MARS"
with pytest.raises(Exception):
config_valid(invalid_workflow)
missing_reference = valid.copy()
missing_reference["stages_to_run"] = ["dl1ab"]
# dl1ab stage is not described
with pytest.raises(KeyError):
config_valid(missing_reference)
missing_reference["stages"] = {"dl1ab": []}
missing_reference["dl1_noise_tune_data_run"] = "file"
with pytest.raises(KeyError):
config_valid(missing_reference)
missing_reference["dl1_noise_tune_mc_run"] = "file"
config_valid(missing_reference) | 5,358,857 |
def set_icons(icons_light, icons_dark, light=False):
""" Set icon theme with plasma-changeicons for light and dark schemes
Args:
icons_light (str): Light mode icon theme
icons_dark (str): Dark mode icon theme
light (bool): whether using light or dark mode
"""
if light and icons_light != None:
icons=icons_light
elif not light and icons_dark != None:
icons=icons_dark
else:
icons=None
if icons!=None:
changeicons_error=subprocess.check_output("/usr/lib/plasma-changeicons "+icons,
shell=True,stderr=subprocess.STDOUT,universal_newlines=True).strip()
logging.info(f'{icons} {changeicons_error}') | 5,358,858 |
def _register_dataset(service, dataset, compression):
"""Registers a dataset with the tf.data service.
This transformation is similar to `register_dataset`, but supports additional
parameters which we do not yet want to add to the public Python API.
Args:
service: A string or a tuple indicating how to connect to the tf.data
service. If it's a string, it should be in the format
`[<protocol>://]<address>`, where `<address>` identifies the dispatcher
address and `<protocol>` can optionally be used to override the default
protocol to use. If it's a tuple, it should be (protocol, address).
dataset: A `tf.data.Dataset` to register with the tf.data service.
compression: How to compress the dataset's elements before transferring them
over the network. "AUTO" leaves the decision of how to compress up to the
tf.data service runtime. `None` indicates not to compress.
Returns:
A scalar int64 tensor of the registered dataset's id.
"""
_validate_compression(compression)
if isinstance(service, tuple):
protocol, address = service
else:
protocol, address = _parse_service(service)
external_state_policy = dataset.options().experimental_external_state_policy
if external_state_policy is None:
external_state_policy = ExternalStatePolicy.WARN
encoded_spec = ""
if context.executing_eagerly():
coder = nested_structure_coder.StructureCoder()
encoded_spec = coder.encode_structure(
dataset.element_spec).SerializeToString()
if compression == COMPRESSION_AUTO:
dataset = dataset.map(
lambda *x: compression_ops.compress(x),
num_parallel_calls=dataset_ops.AUTOTUNE)
dataset = dataset.prefetch(dataset_ops.AUTOTUNE)
dataset = dataset._apply_debug_options() # pylint: disable=protected-access
dataset_id = gen_experimental_dataset_ops.register_dataset(
dataset._variant_tensor, # pylint: disable=protected-access
address=address,
protocol=protocol,
external_state_policy=external_state_policy.value,
element_spec=encoded_spec)
return dataset_id | 5,358,859 |
def rivers_by_station_number(stations,N):
"""function that uses stations_by_rivers to return a dictionary that it then
itterates each river for, summing the number of stations on the river into tuples"""
stationsOfRivers = stations_by_rivers(stations)
listOfNumberStations = []
for river in stationsOfRivers:
listOfNumberStations.append((river, len(stationsOfRivers[river])))
    listofNumberStationsSorted = sorted_by_key(listOfNumberStations, 1, True)
    # extend N to include any rivers tied with the Nth-ranked river
    while (N < len(listofNumberStationsSorted)
           and listofNumberStationsSorted[N - 1][1] == listofNumberStationsSorted[N][1]):
        N += 1
    return listofNumberStationsSorted[:N] | 5,358,860
def generate_itoa_dict(
bucket_values=[-0.33, 0, 0.33], valid_movement_direction=[1, 1, 1, 1]):
"""
    Use a cartesian product to generate the discrete action-combination
    space for the fetch environments.
    bucket_values: discrete values allowed along each movable dimension
    valid_movement_direction: 0/1 mask; dimensions marked 0 are fixed to 0
"""
action_space_extended = [bucket_values if m == 1 else [0]
for m in valid_movement_direction]
return list(itertools.product(*action_space_extended)) | 5,358,861 |
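# Usage sketch (illustrative, not from the original source): lock the fourth
# action dimension (e.g. the gripper) to zero and enumerate the remaining
# combinations produced by the cartesian product above.
example_actions = generate_itoa_dict(
    bucket_values=[-0.33, 0, 0.33],
    valid_movement_direction=[1, 1, 1, 0],
)
assert len(example_actions) == 27            # 3 * 3 * 3 * 1 combinations
assert example_actions[0] == (-0.33, -0.33, -0.33, 0)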
def try_convert(value, datetime_to_ms=False, precise=False):
"""Convert a str into more useful python type (datetime, float, int, bool), if possible
Some precision may be lost (e.g. Decimal converted to a float)
>>> try_convert('false')
False
>>> try_convert('123456789.123456')
123456789.123456
>>> try_convert('1234')
1234
>>> try_convert(1234)
1234
>>> try_convert(['1234'])
['1234']
>>> try_convert('12345678901234567890123456789012345678901234567890', precise=True)
12345678901234567890123456789012345678901234567890L
>>> try_convert('12345678901234567890123456789012345678901234567890.1', precise=True)
Decimal('12345678901234567890123456789012345678901234567890.1')
"""
if not isinstance(value, basestring):
return value
if value in db.YES_VALUES or value in db.TRUE_VALUES:
return True
elif value in db.NO_VALUES or value in db.FALSE_VALUES:
return False
elif value in db.NULL_VALUES:
return None
try:
if not precise:
try:
return int(value)
except:
try:
return float(value)
except:
pass
else:
dec, i, f = None, None, None
try:
dec = Decimal(value)
except:
return try_convert(value, precise=False)
try:
i = int(value)
except:
try:
f = float(value)
except:
pass
if dec is not None:
if dec == i:
return i
elif dec == f:
return f
return dec
except:
pass
try:
dt = dateutil.parse(value)
if dt and isinstance(dt, datetime.datetime) and (3000 >= dt.year >= 1900):
if datetime_to_ms:
return db.datetime_in_milliseconds(dt)
return dt
except:
pass
return value | 5,358,862 |
def signin(request, auth_form=AuthenticationForm,
template_name='accounts/signin_form.html',
redirect_field_name=REDIRECT_FIELD_NAME,
redirect_signin_function=signin_redirect, extra_context=None):
"""
Signin using email or username with password.
Signs a user in by combining email/username with password. If the
combination is correct and the user :func:`is_active` the
:func:`redirect_signin_function` is called with the arguments
    ``REDIRECT_FIELD_NAME`` and an instance of the :class:`User` who is
    trying to log in. The returned value of the function will be the URL that
is redirected to.
    A user can also select to be remembered for ``ACCOUNTS_REMEMBER_ME_DAYS``.
:param auth_form:
Form to use for signing the user in. Defaults to the
:class:`AuthenticationForm` supplied by accounts.
:param template_name:
String defining the name of the template to use. Defaults to
``accounts/signin_form.html``.
:param redirect_field_name:
Form field name which contains the value for a redirect to the
succeeding page. Defaults to ``next`` and is set in
``REDIRECT_FIELD_NAME`` setting.
:param redirect_signin_function:
Function which handles the redirect. This functions gets the value of
``REDIRECT_FIELD_NAME`` and the :class:`User` who has logged in. It
must return a string which specifies the URI to redirect to.
:param extra_context:
A dictionary containing extra variables that should be passed to the
rendered template. The ``form`` key is always the ``auth_form``.
**Context**
``form``
Form used for authentication supplied by ``auth_form``.
"""
form = auth_form()
if request.method == 'POST':
form = auth_form(request.POST, request.FILES)
if form.is_valid():
identification = form.cleaned_data['identification']
password = form.cleaned_data['password']
remember_me = form.cleaned_data['remember_me']
user = authenticate(identification=identification,
password=password)
if user.is_active:
login(request, user)
if remember_me:
request.session.set_expiry(accounts_settings.ACCOUNTS_REMEMBER_ME_DAYS[1] * 86400)
else:
request.session.set_expiry(0)
if accounts_settings.ACCOUNTS_USE_MESSAGES:
messages.success(request, _('You have been signed in.'),
fail_silently=True)
# Whereto now?
redirect_to = redirect_signin_function(
request.GET.get(redirect_field_name), user)
return redirect(redirect_to)
else:
return redirect(reverse('accounts_disabled',
kwargs={'username': user.username}))
if not extra_context:
extra_context = dict()
extra_context.update({
'form': form,
'next': request.GET.get(redirect_field_name),
})
return ExtraContextTemplateView.as_view(template_name=template_name,
extra_context=extra_context)(request) | 5,358,863 |
def run_syncdb(database_info):
"""Make sure that the database tables are created.
database_info -- a dictionary specifying the database info as dictated by Django;
if None then the default database is used
Return the identifier the import process should use.
"""
django.setup()
dataset_identifier = 'default'
if database_info: # create an entry in DATABASES if database_info is present
dataset_identifier = '12345'
while dataset_identifier in settings.DATABASES:
dataset_identifier = str(random.randint(1, 2000000))
settings.DATABASES[dataset_identifier] = database_info
call_command('migrate', database=dataset_identifier)
return dataset_identifier | 5,358,864 |
def autoclean_cv(training_dataframe, testing_dataframe, drop_nans=False, copy=False,
encoder=None, encoder_kwargs=None, ignore_update_check=False):
"""Performs a series of automated data cleaning transformations on the provided training and testing data sets
Unlike `autoclean()`, this function takes cross-validation into account by learning the data transformations
from only the training set, then applying those transformations to both the training and testing set.
By doing so, this function will prevent information leak from the training set into the testing set.
Parameters
----------
training_dataframe: pandas.DataFrame
Training data set
testing_dataframe: pandas.DataFrame
Testing data set
drop_nans: bool
Drop all rows that have a NaN in any column (default: False)
copy: bool
Make a copy of the data set (default: False)
    encoder: category_encoders transformer
        A valid category_encoders transformer used to encode categorical columns. Default (None: LabelEncoder)
    encoder_kwargs: dict
        Keyword arguments passed to the encoder when it is instantiated. Default (None)
ignore_update_check: bool
Do not check for the latest version of datacleaner
Returns
----------
output_training_dataframe: pandas.DataFrame
Cleaned training data set
output_testing_dataframe: pandas.DataFrame
Cleaned testing data set
"""
global update_checked
if ignore_update_check:
update_checked = True
if not update_checked:
update_check('datacleaner', __version__)
update_checked = True
if set(training_dataframe.columns.values) != set(testing_dataframe.columns.values):
raise ValueError('The training and testing DataFrames do not have the same columns. '
'Make sure that you are providing the same columns.')
if copy:
training_dataframe = training_dataframe.copy()
testing_dataframe = testing_dataframe.copy()
if drop_nans:
training_dataframe.dropna(inplace=True)
testing_dataframe.dropna(inplace=True)
if encoder_kwargs is None:
encoder_kwargs = {}
for column in training_dataframe.columns.values:
# Replace NaNs with the median or mode of the column depending on the column type
try:
column_median = training_dataframe[column].median()
training_dataframe[column].fillna(column_median, inplace=True)
testing_dataframe[column].fillna(column_median, inplace=True)
except TypeError:
column_mode = training_dataframe[column].mode()[0]
training_dataframe[column].fillna(column_mode, inplace=True)
testing_dataframe[column].fillna(column_mode, inplace=True)
# Encode all strings with numerical equivalents
if str(training_dataframe[column].values.dtype) == 'object':
if encoder is not None:
column_encoder = encoder(**encoder_kwargs).fit(training_dataframe[column].values)
else:
column_encoder = LabelEncoder().fit(training_dataframe[column].values)
training_dataframe[column] = column_encoder.transform(training_dataframe[column].values)
testing_dataframe[column] = column_encoder.transform(testing_dataframe[column].values)
return training_dataframe, testing_dataframe | 5,358,865 |
def make_coroutine_from_tree(tree, filename="<aexec>", symbol="single",
local={}):
"""Make a coroutine from a tree structure."""
dct = {}
tree.body[0].args.args = list(map(make_arg, local))
exec(compile(tree, filename, symbol), dct)
return asyncio.coroutine(dct[CORO_NAME])(**local) | 5,358,866 |
def update_Sigmai(Yi, Es, Vars):
"""
Return new Sigma_i: shape k
"""
return np.mean((Yi - Es) ** 2, axis=1) + np.mean(Vars, axis=1) | 5,358,867 |
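# Illustrative check of the update above (hypothetical shapes: k components,
# n samples per component); the result is one variance estimate per component.
import numpy as np

_Yi = np.random.randn(3, 5)            # observations
_Es = np.random.randn(3, 5)            # posterior means
_Vars = np.abs(np.random.randn(3, 5))  # posterior variances
assert update_Sigmai(_Yi, _Es, _Vars).shape == (3,)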
def gather_indexes(sequence_tensor, positions):
"""Gathers the vectors at the specific positions over a minibatch."""
sequence_shape = get_shape_list(sequence_tensor, expected_rank=3)
batch_size = sequence_shape[0]
seq_length = sequence_shape[1]
width = sequence_shape[2]
flat_offsets = tf.reshape(
tf.range(0, batch_size, dtype=tf.int32) * seq_length, [-1, 1])
flat_offsets = tf.cast(flat_offsets, tf.int64)
flat_positions = tf.reshape(positions + flat_offsets, [-1])
flat_sequence_tensor = tf.reshape(sequence_tensor,
[batch_size * seq_length, width])
# https://github.com/tensorflow/tensorflow/issues/36236
output_tensor = tf.gather(flat_sequence_tensor*1, flat_positions)
return output_tensor | 5,358,868 |
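# Plain-numpy analogue of the gather above (illustration only, not part of the
# TensorFlow graph): flatten [batch, seq_len, width] to [batch * seq_len, width]
# and index it with offset-adjusted positions.
import numpy as np

_batch, _seq_len, _width = 2, 4, 3
_sequence = np.arange(_batch * _seq_len * _width).reshape(_batch, _seq_len, _width)
_positions = np.array([[0, 2], [1, 3]])                  # positions per batch element
_offsets = (np.arange(_batch) * _seq_len).reshape(-1, 1)
_flat = _sequence.reshape(_batch * _seq_len, _width)
_gathered = _flat[(_positions + _offsets).reshape(-1)]   # shape (4, 3)
assert _gathered.shape == (4, 3)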
def check_sub_schema_dict(sub_schema: typing.Any) -> dict:
"""Check that a sub schema in an allOf is a dict."""
if not isinstance(sub_schema, dict):
raise exceptions.MalformedSchemaError(
"The elements of allOf must be dictionaries."
)
return sub_schema | 5,358,869 |
def get_proto_root(workspace_root):
"""Gets the root protobuf directory.
Args:
workspace_root: context.label.workspace_root
Returns:
The directory relative to which generated include paths should be.
"""
if workspace_root:
return "/{}".format(workspace_root)
else:
return "" | 5,358,870 |
def test_two_models_restored(tmpdir):
"""
Test if one can ``_restore_model`` and use two ``BaseModels``.
This is regression test for issue #83 (One can not create and use more than one instance of ``BaseModel``).
"""
tmpdir2 = tempfile.mkdtemp()
model1 = TrainableModel(dataset=None, log_dir=tmpdir, **_IO, optimizer=_OPTIMIZER)
model2 = TrainableModel(dataset=None, log_dir=tmpdir2, **_IO, optimizer=_OPTIMIZER)
batch = {'input': [[1] * 10], 'target': [[0] * 10]}
for _ in range(1000):
model1.run(batch, train=True)
model1.save('')
model2.save('')
# test if one can ``_restore_model`` two models and use them at the same time
restored_model1 = BaseModel(dataset=None, log_dir='', restore_from=tmpdir, **_IO)
restored_model2 = BaseModel(dataset=None, log_dir='', restore_from=tmpdir2, **_IO)
trained_value = restored_model1.graph.get_tensor_by_name('var:0').eval(session=restored_model1.session)
assert np.allclose([0]*10, trained_value)
default_value = restored_model2.graph.get_tensor_by_name('var:0').eval(session=restored_model2.session)
assert np.allclose([2]*10, default_value)
shutil.rmtree(tmpdir2) | 5,358,871 |
def bwimcp(J, K, x, tr=.2, alpha=.05):
"""
Multiple comparisons for interactions
in a split-plot design.
The analysis is done by taking difference scores
among all pairs of dependent groups and
determining which of
these differences differ across levels of Factor A
using trimmed means. FWE is controlled via Hochberg's
method. For MOM or M-estimators
(possibly not implemented yet), use spmcpi which
uses a bootstrap method
:param J: int
Number of J levels associated with Factor A
:param K: int
Number of K levels associated with Factor B
:param x: Pandas DataFrame
Each column represents a cell in the factorial design. For example,
a 2x3 design would correspond to a DataFrame with 6 columns
(levels of Factor A x levels of Factor B).
Order your columns according to the following pattern
(traversing each row in a matrix):
- the first column contains data for level 1 of Factor A
and level 1 of Factor B
- the second column contains data for level 1 of Factor A
and level 2 of Factor B
- column `K` contains the data for level 1 of Factor A
and level `K` of Factor B
- column `K` + 1 contains the data for level 2 of Factor A
and level 1 of Factor B
- and so on ...
:param tr: float
Proportion to trim (default is .2)
:param alpha: float
Alpha level (default is .05)
:return:
Dictionary of results
con: array
Contrast matrix
output: DataFrame
Difference score, p-value, and critical value for each contrast relating to the interaction
"""
x=pandas_to_arrays(x)
x=remove_nans_based_on_design(x, [J, K], 'between_within')
MJ = (J ** 2 - J) // 2
MK = (K ** 2 - K) // 2
JMK = J * MK
MJMK = MJ * MK
Jm = J - 1
#output = np.zeros([MJMK, 7])
output = np.zeros([MJMK, 4])
_, _, con = con2way(J,K)
m = np.array(np.arange(J*K)).reshape(J,K)
ic=0
test=np.array([])
for j in range(J):
for jj in range(J):
if j < jj:
for k in range(K):
for kk in range(K):
if k<kk:
#output[ic, 0]=j
#output[ic, 1]=jj
#output[ic, 2]=k
output[ic, 0]=ic
x1 = x[m[j, k]] - x[m[j, kk]]
x2 = x[m[jj, k]] - x[m[jj, kk]]
#print(f'X1 comparing cells {j, k} to {j, kk}')
#print(f'X2 comparing cells {jj, k} to {jj, kk}')
temp = yuen(x1, x2)
output[ic, 1] = trim_mean(x1, tr) - trim_mean(x2, tr)
#output[ic, 4] = trim_mean(x1, tr) - trim_mean(x2, tr)
test=np.append(test, temp['p_value'])
output[ic, 2] = test[ic]
#output[ic, 5] = test[ic]
ic+=1
ncon = len(test)
dvec = alpha / np.arange(1, ncon+1)
temp2 = (-test).argsort()
zvec = dvec[0:ncon]
#output[temp2, 6] = zvec
output[temp2, 3] = zvec
#output[:, 6] = output[:, 6]
col_names=["con_num", "psihat", "p_value", "p_crit"]
#col_names=["A_x", "A_y", "B_x", "B_y", "psihat", "p_value", "p_crit"]
    results = {'con': con, 'output': pd.DataFrame(output, columns=col_names)}
return results | 5,358,872 |
def trigger_decoder(mode: str, trigger_path: str=None) -> tuple:
"""Trigger Decoder.
Given a mode of operation (calibration, copy phrase, etc) and
a path to the trigger location (*.txt file), this function
will split into symbols (A, ..., Z), timing info (32.222), and
targetness (target, nontarget). It will also extract any saved
offset information and pass that back.
PARAMETERS
----------
:param: mode: mode of bci operation. Note the mode changes how triggers
are saved.
:param: trigger_path: [Optional] path to triggers.txt file
:return: tuple: symbol_info, trial_target_info, timing_info, offset.
"""
# Load triggers.txt
if not trigger_path:
trigger_path = load_txt_data()
# Get every line of trigger.txt
with open(trigger_path, 'r+') as text_file:
        # most trigger files have three columns:
# SYMBOL, TARGETNESS_INFO[OPTIONAL], TIMING
trigger_txt = [line.split() for line in text_file]
# extract stimuli from the text
stimuli_triggers = [line for line in trigger_txt
if line[1] == 'target' or
line[1] == 'nontarget']
# from the stimuli array, pull our the symbol information
symbol_info = list(map(lambda x: x[0], stimuli_triggers))
# If operating mode is free spell, it only has 2 columns
# otherwise, it has 3
if mode != 'free_spell':
trial_target_info = list(map(lambda x: x[1], stimuli_triggers))
timing_info = list(map(lambda x: eval(x[2]), stimuli_triggers))
else:
trial_target_info = None
timing_info = list(map(lambda x: eval(x[1]), stimuli_triggers))
# Get any offset or calibration triggers
offset_array = [line[2] for line in trigger_txt
if line[0] == 'offset']
calib_trigger_array = [line[2] for line in trigger_txt
if line[0] == 'calibration_trigger']
# If present, calculate the offset between the DAQ and Triggers from display
if len(offset_array) == 1 and len(calib_trigger_array) == 1:
# Extract the offset and calibration trigger time
offset_time = float(offset_array[0])
calib_trigger_time = float(calib_trigger_array[0])
# Calculate the offset (ASSUMES DAQ STARTED FIRST!)
offset = offset_time - calib_trigger_time
# Otherwise, assume no observed offset
else:
offset = 0
return symbol_info, trial_target_info, timing_info, offset | 5,358,873 |
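# Hypothetical round trip: the triggers.txt layout below is inferred from the
# parsing code above (symbol, targetness, timing rows plus optional offset and
# calibration_trigger rows), not taken from the original project's docs.
with open('triggers.txt', 'w') as _f:
    _f.write('calibration_trigger calib 1.0\n'
             'A target 2.5\n'
             'B nontarget 3.1\n'
             'offset offset_correction 1.2\n')
_symbols, _targets, _timings, _offset = trigger_decoder('calibration', 'triggers.txt')
print(_symbols, _targets, _timings)   # ['A', 'B'] ['target', 'nontarget'] [2.5, 3.1]
print(round(_offset, 3))              # 0.2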
def method_comparison(filename=None, extension="png", usetex=False,
passed_ax=None, **kwargs):
"""
Create a plot comparing how estimated redshift changes as a
function of dispersion measure for each DM-z relation.
Parameters
----------
filename: string or None, optional
The filename of the saved figure. Default: *None*
extension: string, optional
The format to save the figure. e.g "png", "pdf", "eps", etc...
Default: "png"
usetex: bool, optional
Use LaTeX for for fonts.
    passed_ax: matplotlib Axes or None, optional
        If given, the comparison is drawn onto this axis and the axis is
        returned instead of a new figure.
Generates
---------
A figure displaying how estimated redshift changes as a function of
dispersion measure for each of the different cosmologies.
"""
set_rc_params(usetex)
if passed_ax:
ax = passed_ax
else:
fig = plt.figure(figsize=(8, 8), constrained_layout=True)
ax = fig.add_subplot(111)
method_list = methods.available_methods()
dm_vals = np.linspace(0, 3000, 1000)
colours = ["#1b9e77", "#d95f02", "#7570b3"]
label = [r"$\rm{Ioka 2003}$", r"$\rm{Inoue 2004}$", r"$\rm{Zhang 2018}$"]
for j, method in enumerate(method_list):
z_vals = np.zeros(len(dm_vals))
if 'cosmology' in kwargs:
cosmology = kwargs['cosmology']
else:
cosmology = 'Planck18'
table_name = "".join(["_".join([method, cosmology]), ".npz"])
lookup_table = table.load(table_name)
for i, dm in enumerate(dm_vals):
z_vals[i] = table.get_z_from_table(dm, lookup_table)
ax.plot(dm_vals, z_vals, colours[j], label=label[j], **kwargs)
if not passed_ax:
ax.set_ylabel(r"$\rm{Redshift}$")
ax.set_xlabel(r"$\rm{DM\ \left[pc \ cm^{-3}\right]}$")
ax.legend(loc='lower right', frameon=False)
if filename is not None:
plt.savefig(".".join([filename, extension]))
if passed_ax:
return ax
else:
return fig | 5,358,874 |
def logCompression(pilImg):
"""Does log compression processing on a photo
Args:
pilImg (PIL Image format image): Image to be processed
"""
npImg = PILtoNumpy(pilImg)
c = 255 / (np.log10(1 + np.amax(npImg)))
for all_pixels in np.nditer(npImg, op_flags=['readwrite']):
all_pixels[...] = c * np.log10(1 + all_pixels)
return NumpytoPIL(npImg) | 5,358,875 |
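# Usage sketch with a hypothetical greyscale image on disk (the input filename
# is an assumption; PILtoNumpy/NumpytoPIL are the module's own helpers):
from PIL import Image

_img = Image.open("input.png").convert("L")   # hypothetical file
logCompression(_img).save("input_log.png")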
def quantile_turnover(quantile_factor, quantile, period=1):
"""
Computes the proportion of names in a factor quantile that were
not in that quantile in the previous period.
Parameters
----------
quantile_factor : pd.Series
        Series with a (date, asset) MultiIndex holding each asset's factor quantile.
quantile : int
Quantile on which to perform turnover analysis.
period: int, optional
Number of days over which to calculate the turnover.
Returns
-------
quant_turnover : pd.Series
Period by period turnover for that quantile.
"""
quant_names = quantile_factor[quantile_factor == quantile]
quant_name_sets = quant_names.groupby(level=['date']).apply(
lambda x: set(x.index.get_level_values('asset')))
name_shifted = quant_name_sets.shift(period)
new_names = (quant_name_sets - name_shifted).dropna()
quant_turnover = new_names.apply(
lambda x: len(x)) / quant_name_sets.apply(lambda x: len(x))
quant_turnover.name = quantile
return quant_turnover | 5,358,876 |
def dmp_div(f, g, u, K):
"""
Polynomial division with remainder in ``K[X]``.
Examples
========
>>> from sympy.polys import ring, ZZ, QQ
>>> R, x,y = ring("x,y", ZZ)
>>> R.dmp_div(x**2 + x*y, 2*x + 2)
(0, x**2 + x*y)
>>> R, x,y = ring("x,y", QQ)
>>> R.dmp_div(x**2 + x*y, 2*x + 2)
(1/2*x + 1/2*y - 1/2, -y + 1)
"""
if K.is_Field:
return dmp_ff_div(f, g, u, K)
else:
return dmp_rr_div(f, g, u, K) | 5,358,877 |
def do_auto_install(config_name: str,
app_name: str,
port: Optional[int],
hostname: Optional[str] = '') -> None:
"""Performs non-interactive IDE install"""
configs = get_run_configs(config_name)
if config_name in configs:
        print(f'Config with name {config_name} already exists. Exiting...')
sys.exit(2)
apps = get_all_apps(pattern=app_name)
if len(apps) == 0:
        print(f'There are no apps with name {app_name} supported by Projector. Exiting...')
sys.exit(2)
if len(apps) > 1:
        print(f'There are too many apps matching the name {app_name}.')
print('Examples:')
for app in apps[:5]:
print(app.name)
print('Please provide more specific name. Exiting...')
sys.exit(2)
app = apps[0]
run_config = get_quick_config(config_name)
run_config.update_channel = RunConfig.NOT_TESTED
if port:
run_config.projector_port = port
if hostname:
run_config.custom_names = hostname
run_config.projector_host = hostname
install_app(run_config, app) | 5,358,878 |
def is_designated_holiday(timestamp):
"""
Returns True if the date is one of Piedmont’s "designated holidays":
- New Years Day (January 1st)
- Memorial Day (last Monday of May)
- Independence Day (July 4th)
- Labor Day (First Monday of September)
- Thanksgiving Day (4th Thursday in November)
- Christmas Day (December 25th)
"""
dow = timestamp.weekday()
day = timestamp.day
month = timestamp.month
if month == JANUARY and timestamp.day == 1:
return True
    elif month == MAY and dow == MONDAY and day >= 25:  # last Monday of May can fall on the 25th
return True
elif month == JULY and day == 4:
return True
elif month == SEPTEMBER and dow == MONDAY and day < 8:
return True
elif month == NOVEMBER and dow == THURSDAY and 21 < day < 29:
return True
elif month == DECEMBER and day == 25:
return True
else:
return False | 5,358,879 |
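# Quick sanity checks (assuming the module-level calendar constants JANUARY,
# MAY, ..., MONDAY, THURSDAY carry their usual values):
from datetime import datetime

assert is_designated_holiday(datetime(2021, 7, 4))     # Independence Day
assert is_designated_holiday(datetime(2021, 5, 31))    # Memorial Day 2021 (last Monday of May)
assert is_designated_holiday(datetime(2020, 5, 25))    # Memorial Day 2020 fell on the 25th
assert is_designated_holiday(datetime(2020, 11, 26))   # Thanksgiving (4th Thursday of November)
assert not is_designated_holiday(datetime(2021, 7, 5))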
def check(val, desc=None, as_warn=False) -> SimpleAssertions:
"""
function based assertion call
:param val: val to check
:param desc: optional, description of val
:param as_warn: if set, convert assertion error to warning message
:return: assertionClass
"""
return SimpleAssertions(as_warn=as_warn).check(val, desc) | 5,358,880 |
def ChangeExtension(filename, newExtension):
"""ChangeExtension(filename, newExtension) -> str
Replaces the extension of the filename with the given one.
If the given filename has no extension, the new extension is
simply appended.
arguments:
filename
string corresponding to the filename whose extension to change.
newExtension
string corresponding to the new extension to append. Do not
prepend with a period ('.').
returns:
string corresponding to the new filename.
"""
try:
# Isolate the filename
slashIndex = filename.rfind('/')
backslashIndex = filename.rfind('\\')
if (backslashIndex > slashIndex):
slashIndex = backslashIndex;
# Look for an existing extension
periodIndex = filename.rfind('.')
if (periodIndex > slashIndex):
return filename[0 : periodIndex] + "." + newExtension
else:
return filename + "." + newExtension
except IndexError, e:
return "" | 5,358,881 |
def file_based_input_fn_builder(input_file,
seq_length,
is_training,
drop_remainder):
"""Creates an `input_fn` closure to be passed to TPUEstimator."""
name_to_features = {
"input_ids": tf.FixedLenFeature([seq_length], tf.int64),
"input_masks": tf.FixedLenFeature([seq_length], tf.int64),
"segment_ids": tf.FixedLenFeature([seq_length], tf.int64),
"sent_label_ids": tf.FixedLenFeature([], tf.int64),
}
def _decode_record(record,
name_to_features):
"""Decodes a record to a TensorFlow example."""
example = tf.parse_single_example(record, name_to_features)
# tf.Example only supports tf.int64, but the TPU only supports tf.int32. So cast all int64 to int32.
for name in list(example.keys()):
t = example[name]
if t.dtype == tf.int64:
t = tf.to_int32(t)
example[name] = t
return example
def input_fn(params):
"""The actual input function."""
batch_size = params["batch_size"]
# For training, we want a lot of parallel reading and shuffling.
# For eval, we want no shuffling and parallel reading doesn't matter.
d = tf.data.TFRecordDataset(input_file)
if is_training:
d = d.repeat()
d = d.shuffle(buffer_size=100, seed=np.random.randint(10000))
d = d.apply(tf.contrib.data.map_and_batch(
lambda record: _decode_record(record, name_to_features),
batch_size=batch_size,
drop_remainder=drop_remainder))
return d
return input_fn | 5,358,882 |
def test_udp_syslog_get(client_sh_udp: object, log_directory: str) -> None:
"""Testing GET resource
Args:
client_sh_udp (fixture): The test client.
log_directory (fixture): The fully qualified path for the log directory.
"""
logfile: str = os.path.join(log_directory, 'syslog_server.log')
key = f'{uuid4()}'
params = {'key': key}
response: Result = client_sh_udp.simulate_get('/middleware', params=params)
assert response.status_code == 200
assert response.text == f'Logged - {key}'
for level in ['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL']:
text = f'SERVER-UDP - {level} - {level} {key}'
assert has_text(logfile, text) is True, f'Failed to find text {text}' | 5,358,883 |
def synchrotron_thin_spectrum(freqs, ne, te, bfield):
"""Optically thin (unobsorbed) synchrotron spectrum.
Units of erg/cm^3/s/Hz
NY95b Eq 3.9
"""
const = 4.43e-30 # erg/cm^3/s/Hz
theta_e = K_BLTZ * te / (MELC * SPLC * SPLC)
v0 = QELC * bfield / (2*np.pi*MELC*SPLC)
xm = 2*freqs/(3*v0*np.square(theta_e))
iprime = _synch_fit_func_iprime(xm)
esyn = const * 4*np.pi*ne*freqs*iprime/sp.special.kn(2, 1/theta_e)
return esyn | 5,358,884 |
def reset_monotonic_time(value=0.0):
"""
Make the monotonic clock return the real time on its next
call.
"""
global _current_time # pylint:disable=global-statement
_current_time = value | 5,358,885 |
def extract_val_setup(timestamp, lat, lon, dataPath = "Data/IceData/"):
""" Extracts a timestamped value from a NSIDC GeoTIFF File
Inputs:
timestamp = datetime struct of sample
lat = sample latitude
lon = sample longitude
dataPath = path to GeoTIFF files
Outputs:
GeoTIFF raw value - please see https://nsidc.org/sites/nsidc.org/files/G02135-V3.0_0.pdf
"""
local_path = os.path.join(os.getcwd(), dataPath)
Path(local_path).mkdir(parents=True, exist_ok=True)
if lat < 0:
filename = generate_NSIDC_filename(timestamp, "S")
area = areas[0]
elif lat >= 0:
filename = generate_NSIDC_filename(timestamp, "N")
area = areas[1]
else:
print("=== Invalid Ice Area? ===")
raise ValueError
local_filename = local_path + filename
dataset = rasterio.open(local_filename)
if DEBUG:
rasterio.plot.show(dataset)
ice_data = dataset.read(1)
rev_xform = ~dataset.transform
outProj = Proj(dataset.crs)
inProj = Proj('epsg:4326')
coordxform = Transformer.from_proj(inProj, outProj)
# print("=== Proj Setup Complete ===")
return [ice_data, coordxform, rev_xform, area] | 5,358,886 |
def create_message(service, to, subject, message_text):
"""Create a message for an email.
Args:
      service: Authorized Gmail API service instance; the sender address is read from its profile.
to: Email address of the receiver.
subject: The subject of the email message.
message_text: The text of the email message.
Returns:
An object containing a base64url encoded email object.
"""
message = MIMEText(message_text)
users = service.users()
myProfile = users.getProfile(userId='me').execute()
message['to'] = to
message['from'] = myProfile['emailAddress']
message['subject'] = subject
return {'raw': base64.urlsafe_b64encode(message.as_bytes()).decode()} | 5,358,887 |
def numpy_ndarray(nb_arr):
"""Return a copy of numba DeviceNDArray data as a numpy.ndarray.
"""
return nb_arr.copy_to_host() | 5,358,888 |
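# Round-trip sketch (requires a CUDA-capable GPU and numba.cuda):
from numba import cuda
import numpy as np

_host = np.arange(10, dtype=np.float32)
_device = cuda.to_device(_host)              # numba DeviceNDArray
assert np.array_equal(numpy_ndarray(_device), _host)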
def convert_to_dict(my_keys, my_values):
"""Merge a given list of keys and a list of values into a dictionary.
Args:
my_keys (list): A list of keys
my_values (list): A list corresponding values
Returns:
Dict: Dictionary of the list of keys mapped to the list of values
"""
return dict(zip(my_keys, my_values)) | 5,358,889 |
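# Example: zip two parallel lists into a dict.
assert convert_to_dict(["a", "b"], [1, 2]) == {"a": 1, "b": 2}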
def add_vit(request):
"""
Add a new vit with API, currently image and video are not supported
"""
user = KeyBackend().authenticate(request)
if request.method == "POST":
if request.user.is_authenticated:
form = VitForm(request.POST)
if form.is_valid():
vit = form.save(commit=False)
vit.user = request.user
vit.save()
return JsonResponse({'status': 'success', 'vit': vit.to_json()}, status=201)
else:
return JsonResponse({'error': 'No vit body provided'}, status=400)
else:
return JsonResponse({'error': 'You must be logged in'}, status=401)
else:
return JsonResponse({'error': 'Invalid request'}, status=400) | 5,358,890 |
def long_control_state_trans(active, long_control_state, v_ego, v_target, v_pid,
output_gb, brake_pressed, cruise_standstill, min_speed_can):
"""Update longitudinal control state machine"""
stopping_target_speed = min_speed_can + STOPPING_TARGET_SPEED_OFFSET
stopping_condition = (v_ego < 2.0 and cruise_standstill) or \
(v_ego < STOPPING_EGO_SPEED and
((v_pid < stopping_target_speed and v_target < stopping_target_speed) or
brake_pressed))
starting_condition = v_target > STARTING_TARGET_SPEED and not cruise_standstill
if not active:
long_control_state = LongCtrlState.off
else:
if long_control_state == LongCtrlState.off:
if active:
long_control_state = LongCtrlState.pid
elif long_control_state == LongCtrlState.pid:
if stopping_condition:
long_control_state = LongCtrlState.stopping
elif long_control_state == LongCtrlState.stopping:
if starting_condition:
long_control_state = LongCtrlState.starting
elif long_control_state == LongCtrlState.starting:
if stopping_condition:
long_control_state = LongCtrlState.stopping
elif output_gb >= -BRAKE_THRESHOLD_TO_PID:
long_control_state = LongCtrlState.pid
return long_control_state | 5,358,891 |
def load_group_to_namedtuple(group: h5py.Group):
"""Returns namedtuple with name of group and key: values of group attrs
e.g. srs1 group which has gpib: 1... will be returned as an srs1 namedtuple with .gpib etc
"""
# Check it was stored as a namedTuple
if group.attrs.get('description', None) != 'NamedTuple':
raise ValueError(
f'Trying to load_group_to_named_tuple which has description: {group.attrs.get("description", None)}')
# Get the name of the NamedTuple either through the stored name or the group name
name = group.attrs.get('NT_name', None)
if name is None:
logger.warning('Did not find "name" attribute for NamedTuple, using folder name instead')
name = group.name.split('/')[-1]
# d = {key: val for key, val in group.attrs.items()}
d = {key: get_attr(group, key) for key in group.attrs.keys()}
# Remove HDF only descriptors
for k in ['description', 'NT_name']:
if k in d.keys():
del d[k]
# Make the NamedTuple
ntuple = namedtuple(name, d.keys())
filled_tuple = ntuple(**d) # Put values into tuple
return filled_tuple | 5,358,892 |
def run_migrations_offline():
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
url = alembic_config.get_main_option("sqlalchemy.url")
context.configure(
url=url,
target_metadata=target_metadata,
literal_binds=True,
dialect_opts={"paramstyle": "named"},
)
with context.begin_transaction():
context.run_migrations() | 5,358,893 |
def send_put(context):
"""
"""
headers = _get_request_headers(context)
params = _get_params(context)
context.response = context.session.put(
context.request_url,
headers=headers,
params=params,
json=context.request_json_payload
) | 5,358,894 |
def cycle(iterable):
"""Returns an infinite iterator by cycling through the iterable.
It's better than the itertools.cycle function because it resets to iterable each time it is
exhausted. This is useful when using cycling through the torch.utils.data.DataLoader object.
See https://stackoverflow.com/a/49987606
"""
while True:
for x in iterable:
yield x | 5,358,895 |
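# Minimal sketch: unlike itertools.cycle, each pass re-iterates the original
# object, so a re-iterable (e.g. a DataLoader) yields fresh batches every epoch.
_it = cycle([1, 2, 3])
assert [next(_it) for _ in range(7)] == [1, 2, 3, 1, 2, 3, 1]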
def export_json(blocks=None, subsections=False):
"""
Returns JSON representation of parsed config structure
:param blocks: List of blocks to export
:param subsections: Export all subblocks
:return: JSON-dumped string
"""
if blocks is not None:
blocks = [_canonicalize_blockid(b) for b in blocks]
if subsections:
blocks = get_subblocks(blocks)
return json.dumps(_config_dict(blocks)) | 5,358,896 |
def get_dummy_message(text):
"""Get a dummy message with a custom text"""
return botogram.objects.messages.Message({
"message_id": 1,
"from": {"id": 123, "first_name": "Nobody"},
"chat": {"id": -123, "type": "chat", "title": "Something"},
"date": 1,
"text": text,
}) | 5,358,897 |
def test_use_callbacks() -> None:
"""Test the use_callbacks decorator."""
obj = Object()
obj.run_successful()
assert obj.result
obj.run_error()
assert isinstance(obj.error, Exception) | 5,358,898 |
def install_package(package_name, package_version, index_url, info, cache_path):
"""
Install packages based on the information we gather from the index_url page
"""
for item in info:
platform = item["platform"]
implementation = item["implementation"]
python_version = item["version"]
abi = item["abi"]
filename = "-".join([package_name, package_version, abi, platform])
# Calculate the path that the package will be installed into
version_path = os.path.join(DIST_CEXT, implementation + python_version)
package_path = get_path_with_arch(
platform, version_path, abi, implementation, python_version
)
print("Installing package {}...".format(filename))
# Install the package using pip with cache_path as the cache directory
install_return = run_pip_install(
package_path,
platform,
python_version,
implementation,
abi,
package_name,
package_version,
index_url,
cache_path,
)
# Ignore Piwheels installation failure because the website is not always stable
if install_return == 1 and index_url == PYPI_DOWNLOAD:
sys.exit("\nInstallation failed for package {}.\n".format(filename))
else:
# Clean up .dist-info folders
dist_info_folders = os.listdir(package_path)
for folder in dist_info_folders:
if folder.endswith(".dist-info"):
shutil.rmtree(os.path.join(package_path, folder)) | 5,358,899 |