content (string, lengths 22–815k) | id (int64, 0–4.91M)
---|---|
def bond(fn: Callable[..., Array],
displacement_or_metric: DisplacementOrMetricFn,
static_bonds: Optional[Array]=None,
static_bond_types: Optional[Array]=None,
ignore_unused_parameters: bool=False,
**kwargs) -> Callable[..., Array]:
"""Promotes a function that acts on a single pair to one on a set of bonds.
TODO(schsam): It seems like bonds might potentially have poor memory access.
Should think about this a bit and potentially optimize.
Args:
fn: A function that takes an ndarray of pairwise distances or displacements
of shape [n, m] or [n, m, d_in] respectively as well as kwargs specifying
parameters for the function. fn returns an ndarray of evaluations of shape
[n, m, d_out].
displacement_or_metric: A function that takes two ndarrays of positions of shape
[spatial_dimension] and [spatial_dimension] respectively and returns
an ndarray of distances or displacements of shape [] or [d_in]
respectively. The metric can optionally take a floating point time as a
third argument.
static_bonds: An ndarray of integer pairs with shape [b, 2] where each pair
specifies a bond. static_bonds are baked into the returned compute
function statically and cannot be changed after the fact.
static_bond_types: An ndarray of integers of shape [b] specifying the type
of each bond. Only specify bond types if you want to specify bond
parameters by type. One can also specify constant or per-bond parameters
(see below).
ignore_unused_parameters: A boolean that denotes whether dynamically
specified keyword arguments passed to the mapped function get ignored
if they were not first specified as keyword arguments when calling
`smap.bond(...)`.
kwargs: Arguments providing parameters to the mapped function. In cases
where no bond type information is provided these should be either 1) a
scalar or 2) an ndarray of shape [b]. If bond type information is
provided then the parameters should be specified as either 1) a scalar or
2) an ndarray of shape [max_bond_type].
Returns:
A function fn_mapped. Note that fn_mapped can take arguments bonds and
bond_types which will be bonds that are specified dynamically. This will
incur a recompilation when the number of bonds changes. Improving this
state of affairs I will leave as a TODO until someone actually uses this
feature and runs into speed issues.
"""
# Each call to vmap adds a single batch dimension. Here, we would like to
# promote the metric function from one that computes the distance /
# displacement between two vectors to one that acts on two lists of vectors.
# Thus, we apply a single application of vmap.
merge_dicts = partial(util.merge_dicts,
ignore_unused_parameters=ignore_unused_parameters)
def compute_fn(R, bonds, bond_types, static_kwargs, dynamic_kwargs):
Ra = R[bonds[:, 0]]
Rb = R[bonds[:, 1]]
_kwargs = merge_dicts(static_kwargs, dynamic_kwargs)
_kwargs = _kwargs_to_bond_parameters(bond_types, _kwargs)
# NOTE(schsam): This pattern is needed due to JAX issue #912.
d = vmap(partial(displacement_or_metric, **dynamic_kwargs), 0, 0)
dr = d(Ra, Rb)
return high_precision_sum(fn(dr, **_kwargs))
def mapped_fn(R: Array,
bonds: Optional[Array]=None,
bond_types: Optional[Array]=None,
**dynamic_kwargs) -> Array:
accum = f32(0)
if bonds is not None:
accum = accum + compute_fn(R, bonds, bond_types, kwargs, dynamic_kwargs)
if static_bonds is not None:
accum = accum + compute_fn(
R, static_bonds, static_bond_types, kwargs, dynamic_kwargs)
return accum
return mapped_fn | 1,600 |
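# Hedged usage sketch, not from the source: this mapper matches the jax_md
# `smap.bond` API, so an assumed harmonic bond energy might be assembled roughly
# as below. `space.free`, `space.canonicalize_displacement_or_metric` and
# `energy.simple_spring` are assumptions about the surrounding library.
import jax.numpy as jnp
from jax_md import space, smap, energy
displacement_fn, _ = space.free()
metric_fn = space.canonicalize_displacement_or_metric(displacement_fn)
static_bonds = jnp.array([[0, 1], [1, 2]], dtype=jnp.int32)
bond_energy_fn = smap.bond(energy.simple_spring, metric_fn, static_bonds, length=1.0)
R = jnp.array([[0.0, 0.0], [1.0, 0.0], [2.5, 0.0]])
total_bond_energy = bond_energy_fn(R)  # scalar: sum of the per-bond energies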
def clean_repeated_symbols(text):
"""
Filters text, replacing any symbol repeated more than twice in a row (not
allowed in most languages) with exactly two consecutive occurrences of the symbol.
:param text: the text to be filtered
:type text: str
:return: the filtered text
:rtype: str
"""
pattern = re.compile(r"(.)\1{2,}", re.DOTALL)
return pattern.sub(r"\1\1", text) | 1,601 |
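# Usage sketch (assumes `re` is imported and clean_repeated_symbols is in scope):
# any run of three or more identical characters collapses to exactly two.
assert clean_repeated_symbols("coooool!!!!") == "cool!!"
assert clean_repeated_symbols("abc") == "abc"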
def sample(x, y, numSamples):
"""
Draws numSamples samples (with replacement) from the discrete distribution
defined by the values x and the (unnormalized) probabilities y.
"""
y /= y.sum()
return np.random.choice(x, size=numSamples, replace=True, p=y) | 1,602 |
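# Usage sketch (assumes numpy is imported as np for the function above): x holds
# the support and y the unnormalized weights; note y is normalized in place.
import numpy as np
x = np.array([0, 1, 2])
y = np.array([1.0, 1.0, 2.0])
draws = sample(x, y, numSamples=1000)  # roughly half of the draws should be 2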
def make_path_strictly_increase(path):
"""
Given a warping path, remove all rows that do not
strictly increase from the row before
"""
toKeep = np.ones(path.shape[0])
i0 = 0
for i in range(1, path.shape[0]):
if np.abs(path[i0, 0] - path[i, 0]) >= 1 and np.abs(path[i0, 1] - path[i, 1]) >= 1:
i0 = i
else:
toKeep[i] = 0
return path[toKeep == 1, :] | 1,603 |
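# Usage sketch: rows that stall in either coordinate are dropped so both
# coordinates strictly increase along the kept path.
import numpy as np
path = np.array([[0, 0], [0, 1], [1, 1], [2, 2]])
strict = make_path_strictly_increase(path)  # keeps [0, 0], [1, 1], [2, 2]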
def generate_frequency_spectrum(samples, wild_threshold):
"""
Generates the site frequency spectrum for a given set of samples
:param samples: List of sample accession codes
:param wild_threshold: The index position of the last wild sample (used for resolving group membership)
:return: the site frequency spectrum table as a tab-delimited string
"""
# open all files for reading
filehandles = [open("vcf/{}.vcf".format(sample), 'r') for sample in samples]
# skip over the block comments (which are variable length)
for fin in filehandles:
while fin.readline().startswith("##"):
pass
# keep count of SNP sites
snpcount = 0
# store the SNPs in a dictionary
variants = defaultdict(OrderedDict)
try:
# get the next line from all the files
for lines in itertools.izip(*filehandles):
try:
# convert each line from a string to a list
lines = [line.split() for line in lines]
# rephase the files, if not all the sequence positions match
if len(set(line[POS] for line in lines)) != 1:
rephase_files(lines, filehandles)
# TODO drop sites with coverage lower than 1st quartile or higher than 3rd quartile
# get the outgroup
outgroup = lines[0]
# get the chromosome number and position
chrm = int(outgroup[CHROM])
pos = int(outgroup[POS])
# skip all sites with indels
if 'INDEL' in outgroup[INFO]:
raise InDelException(chrm, pos, outgroup[INFO])
# get the reference and outgroup alleles
ref_allele = outgroup[REF]
out_allele = outgroup[ALT].replace('.', ref_allele)
# get the genotype of the outgroup
out_genotype = outgroup[GENOTYPE].split(':')[0]
# skip het sites in the outgroup
if out_genotype == '0/1':
raise HeterozygousException(chrm, pos, outgroup[GENOTYPE])
# keep track of all the observed alleles at this site
all_alleles = {ref_allele, out_allele}
# dictionary for counting observations
frequencies = {}
# process all the samples (omitting the outgroup)
for idx, line in enumerate(lines[1:]):
# skip all sites with indels
if 'INDEL' in line[INFO]:
raise InDelException(chrm, pos, line[REF])
# get the alt allele for this sample
alt_allele = line[ALT].replace('.', ref_allele)
# get the genotype of the sample
genotype = line[GENOTYPE].split(':')[0]
# resolve the genotype
if genotype == '0/0':
sample_alleles = [ref_allele, ref_allele] # 0/0 - the sample is homozygous reference
elif genotype == '0/1':
sample_alleles = [ref_allele, alt_allele] # 0/1 - the sample is heterozygous
elif genotype == '1/1':
sample_alleles = [alt_allele, alt_allele] # 1/1 - the sample is homozygous alternate
# add them to the all alleles set
all_alleles |= set(sample_alleles)
# skip sites with more than two alleles observed across all samples
if len(all_alleles) > 2:
raise PolyallelicException(chrm, pos, all_alleles)
# use the index threshold to determine which group this sample belongs to
group = 'wild' if idx < wild_threshold else 'doms'
# count the observations of each allele for each group
for allele in sample_alleles:
# initialise the counter, if necessary
if allele not in frequencies:
frequencies[allele] = {'wild': 0, 'doms': 0}
# increment the counter
frequencies[allele][group] += 1
if len(all_alleles) == 1:
# skip homozygous sites, because there is nothing to coalesce
raise HomozygousException(chrm, pos, all_alleles)
if len(frequencies) == 1:
# deal with fixed allele sites by initialising the missing allele to 0
for allele in all_alleles:
if allele not in frequencies:
frequencies[allele] = {'wild': 0, 'doms': 0}
# add the site to the SNP dictionary (so we can look up the flanking bases when we're done here)
variants[chrm][pos] = dict(ref=ref_allele, out=out_allele, frq=frequencies)
# increment the SNP count
snpcount += 1
except (InDelException, PolyallelicException, HeterozygousException, HomozygousException) as e:
# skip all sites containing indels, polyallelic sites in ingroup samples, heterozygous sites in the
# outgroup, or homozygous sites across all the populations
logging.debug('Skipping site chr{} {} because of a {} - {}'.format(outgroup[CHROM],
outgroup[POS],
type(e).__name__,
e))
except StopIteration as e:
logging.debug('Reached the end of one of the files {}'.format(e))
pass
# close all the open files
for fin in filehandles:
fin.close()
# reopen the outgroup file
fin = open("vcf/{}.vcf".format(samples.iterkeys().next()), 'r')
# skip over the block comments (which are variable length)
while fin.readline().startswith("##"):
pass
# start composing the output file
output = 'Rabbit\tHare\tAllele1\tWLD\tDOM\tAllele2\tWLD\tDOM\tGene\tPosition\n'
for chrm in variants:
for pos in variants[chrm]:
# Ref | Out | Allele1 | WILD | DOMS | Allele2 | WILD | DOMS | Gene | Position
# fetch the flanking bases for the reference and outgroup sequences
(ref_left, ref_right, out_left, out_right) = fetch_flanking_bases(chrm, pos, fin)
# add the output row
output += '{ref_left}{ref}{ref_right}\t{out_left}{out}{out_right}\t'.format(ref_left=ref_left,
ref=variants[chrm][pos]['ref'],
ref_right=ref_right,
out_left=out_left,
out=variants[chrm][pos]['out'],
out_right=out_right)
for allele, count in variants[chrm][pos]['frq'].iteritems():
# output the allele counts
output += '{alle}\t{wild}\t{doms}\t'.format(alle=allele,
wild=count['wild'],
doms=count['doms'])
# add the chromosome name and position
output += 'chr{chrm}\t{pos}\n'.format(chrm=chrm, pos=pos)
fin.close()
logging.debug('Finished! Found {} suitable SNP sites'.format(snpcount))
return output | 1,604 |
def set_group_selector(*args):
"""set_group_selector(sel_t grp, sel_t sel) -> int"""
return _idaapi.set_group_selector(*args) | 1,605 |
def callCisLoops(
predir,
fout,
log,
eps=[2000, 5000],
minPts=[5, 10],
cpu=1,
cut=0,
mcut=-1,
plot=False,
max_cut=False,
hic=False,
filter=False,
ucsc=False,
juicebox=False,
washU=False,
emPair=False,
):
"""
Call intra-chromosomal loops in parallel.
@param predir: str, directory containing the petMeta.json file used for calling loops
@param fout: str, output file prefix
@param log: logger object
@param eps: list of eps values for clustering
@param minPts: list of minPts values for clustering
@param emPair: bool, if True, run eps and minPts as matched pairs rather than all combinations
"""
global logger
logger = log
global DBSCAN
if hic:
from cLoops2.cDBSCAN2 import cDBSCAN as DBSCAN
logger.info("-hic option selected, cDBSCAN2 is used instead of blockDBSCAN.")
else:
from cLoops2.blockDBSCAN import blockDBSCAN as DBSCAN
if emPair and len(eps) != len(minPts):
logger.info("-emPair option selected, number of eps not equal to minPts, return.")
return
##step 0 prepare data and check directories
metaf = predir + "/petMeta.json"
meta = json.loads(open(metaf).read())
tot = meta["Unique PETs"]
if filter:
logger.info(
"-filter option chosen, will filter raw PETs based on called loops; any PET with either end overlapping a loop anchor will be kept. "
)
fdir = fout + "_filtered"
if not os.path.exists(fdir):
os.mkdir(fdir)
elif len(os.listdir(fdir)) > 0:
r = "working directory %s exists and not empty." % fdir
logger.error(r)
return
## step 1 find the candidate loops by running multiple times of clustering
loops = {} #candidate loops
#distance of classified inter-ligation PETs, self-ligation PETs.
dis, dss = [], []
cuts = [
cut,
]
if emPair:
for ep,minPt in zip(eps,minPts):
loops_2, dis_2, dss_2 = parallelRunCisDBSCANLoops(
meta,
ep,
minPt,
cut=cut,
mcut=mcut,
cpu=cpu,
)
if len(dis_2) == 0:
logger.error(
"ERROR: no inter-ligation PETs detected for eps %s minPts %s,can't model the distance cutoff,continue anyway"
% (ep, minPt))
continue
if not (len(dis_2) == 0 or len(dss_2) == 0):
cut_2 = estIntraCut(np.array(dis_2), np.array(dss_2))
if plot:
plotIntraCut(dis_2,
dss_2,
cut_2,
prefix=fout + "_eps%s_minPts%s_disCutoff" %
(ep, minPt))
logger.info(
"Estimated inter-ligation and self-ligation distance cutoff > %s for eps=%s,minPts=%s"
% (cut_2, ep, minPt))
if len(dss_2) == 0:
logger.info(
"No self-ligation PETs found, using cutoff > %s for eps=%s,minPts=%s"
% (cut, ep, minPt))
cut_2 = cut
loops_2 = filterLoopsByDis(loops_2, cut_2)
loops = combineLoops(loops, loops_2)
cuts.append(cut)
cut = cut
else:
for ep in eps:
for minPt in minPts:
loops_2, dis_2, dss_2 = parallelRunCisDBSCANLoops(
meta,
ep,
minPt,
cut=cut,
mcut=mcut,
cpu=cpu,
)
if len(dis_2) == 0:
logger.error(
"ERROR: no inter-ligation PETs detected for eps %s minPts %s,can't model the distance cutoff,continue anyway"
% (ep, minPt))
continue
if not (len(dis_2) == 0 or len(dss_2) == 0):
cut_2 = estIntraCut(np.array(dis_2), np.array(dss_2))
if plot:
plotIntraCut(dis_2,
dss_2,
cut_2,
prefix=fout + "_eps%s_minPts%s_disCutoff" %
(ep, minPt))
logger.info(
"Estimated inter-ligation and self-ligation distance cutoff > %s for eps=%s,minPts=%s"
% (cut_2, ep, minPt))
if len(dss_2) == 0:
logger.info(
"No self-ligation PETs found, using cutoff > %s for eps=%s,minPts=%s"
% (cut, ep, minPt))
cut_2 = cut
loops_2 = filterLoopsByDis(loops_2, cut_2)
loops = combineLoops(loops, loops_2)
cuts.append(cut_2)
cut = cut_2
#distance cutoff for estimation of loop significance
#cuts = [c for c in cuts if c > 0]
ncuts = [c for c in cuts if c > cuts[0]]
ncuts.append( cuts[0] )
cuts = ncuts
if max_cut:
cut = np.max(cuts)
else:
cut = np.min(cuts)
## step 2 determine the statistical significance of candidate loops
logger.info("Estimating loop statistical significance.")
if emPair:
mm = min(minPts)
else:
mm = max(minPts)
ds = Parallel(n_jobs=cpu,backend="multiprocessing")(
delayed(estLoopSig)(
key,
loops[key],
meta["data"]["cis"][key]["ixy"],
tot,
#minPts=max(minPts),
minPts=mm,
#cut= 0, #if using estimated cut, will generate just a little few loops than cut=0, but will increase a lot speed
cut=cut,
mcut=mcut,
hic=hic) for key in loops.keys())
nds = {}
for d in ds:
nds[d[0]] = d[1]
#mark the significant loops
ds = Parallel(n_jobs=cpu,backend="multiprocessing")(delayed(markSigLoops)(key, nds[key], hic=hic)
for key in nds.keys())
nds = {}
for d in ds:
nds[d[0]] = d[1]
## step 3 for the overlapped loops, output the most significant one
logger.info("Selecting the most significant loop among overlapping ones.")
ds = Parallel(n_jobs=cpu,backend="multiprocessing")(delayed(selSigLoops)(key, nds[key])
for key in nds.keys())
nds = {}
for d in ds:
nds[d[0]] = d[1]
ds = Parallel(n_jobs=cpu,backend="multiprocessing")(delayed(selSigLoops)(key, nds[key])
for key in nds.keys())
nds = {}
for d in ds:
nds[d[0]] = d[1]
loops = []
for d in ds:
loops.extend(d[1])
## step 4 output
logger.info("Output %s loops to %s_loops.txt" % (len(loops), fout))
loops2txt(loops, fout + "_loops.txt")
if ucsc:
loops2ucscTxt(loops, fout + "_loops_ucsc.interact")
if juicebox:
loops2juiceTxt(loops, fout + "_loops_juicebox.txt")
if washU:
loops2washuTxt(loops, fout + "_loops_legacyWashU.txt")
loops2NewWashuTxt(loops, fout + "_loops_newWashU.txt")
## step 5 filtering PETs according to called loops
if filter:
Parallel(n_jobs=cpu,backend="multiprocessing")(
delayed(filterPETs)(key,
fdir,
meta["data"]["cis"][key]["ixy"],
nds[key],
margin=max(eps)) for key in nds.keys())
ixyfs = glob(fdir + "/*.ixy")
tot = 0
for f in ixyfs:
key, mat = parseIxy(f)
tot += mat.shape[0]
nmetaf = fdir + "/petMeta.json"
with open(nmetaf, "w") as fo:
json.dump({"Unique PETs": tot}, fo)
updateJson(ixyfs, nmetaf) | 1,606 |
def data_context_topology_context_topologyuuid_nodenode_uuid_node_rule_groupnode_rule_group_uuid_latency_characteristictraffic_property_name_get(uuid, node_uuid, node_rule_group_uuid, traffic_property_name): # noqa: E501
"""data_context_topology_context_topologyuuid_nodenode_uuid_node_rule_groupnode_rule_group_uuid_latency_characteristictraffic_property_name_get
returns tapi.topology.LatencyCharacteristic # noqa: E501
:param uuid: Id of topology
:type uuid: str
:param node_uuid: Id of node
:type node_uuid: str
:param node_rule_group_uuid: Id of node-rule-group
:type node_rule_group_uuid: str
:param traffic_property_name: Id of latency-characteristic
:type traffic_property_name: str
:rtype: TapiTopologyLatencyCharacteristic
"""
return 'do some magic!' | 1,607 |
def upgrade_db():
"""Run any outstanding migration scripts"""
_run_alembic_command(['--raiseerr', 'upgrade', 'head']) | 1,608 |
def assert_is_quantized_sequence(note_sequence):
"""Confirms that the given NoteSequence proto has been quantized.
Args:
note_sequence: A music_pb2.NoteSequence proto.
Raises:
SequenceNotQuantizedException: If the sequence is not quantized.
"""
# If the QuantizationInfo message has a non-zero steps_per_quarter, assume
# that the proto has been quantized.
if not note_sequence.quantization_info.steps_per_quarter > 0:
raise SequenceNotQuantizedException('NoteSequence %s is not quantized.' %
note_sequence.id) | 1,609 |
def getPendingReviewers(db, review):
"""getPendingReviewers(db, review) -> dictionary
Returns a dictionary, like the ones returned by getReviewersAndWatchers(), but
with details about remaining unreviewed changes in the review. Changes not
assigned to a reviewer are handled the same way."""
cursor = db.cursor()
cursor.execute("""SELECT reviewuserfiles.uid, reviewfiles.changeset, reviewfiles.file
FROM reviewfiles
LEFT OUTER JOIN reviewuserfiles ON (reviewuserfiles.file=reviewfiles.id)
WHERE reviewfiles.review=%s
AND reviewfiles.state='pending'""",
(review.id,))
reviewers = {}
for user_id, changeset_id, file_id in cursor.fetchall():
reviewers.setdefault(file_id, {}).setdefault(user_id, set()).add(changeset_id)
return reviewers | 1,610 |
def base64_encode_string(string):
# type: (str or bytes) -> str
"""Base64 encode a string
:param str or bytes string: string to encode
:rtype: str
:return: base64-encoded string
"""
if on_python2():
return base64.b64encode(string)
else:
return str(base64.b64encode(string), 'ascii') | 1,611 |
def get_error_signature(error_type, n_top, **kwargs):
"""Generates a signature for the specified settings of pose error
calculation.
:param error_type: Type of error.
:param n_top: Top N pose estimates (with the highest score) to be evaluated
for each object class in each image.
:return: Generated signature.
"""
error_sign = "error:" + error_type + "_ntop:" + str(n_top)
if error_type == "vsd":
if kwargs["vsd_tau"] == float("inf"):
vsd_tau_str = "inf"
else:
vsd_tau_str = "{:.3f}".format(kwargs["vsd_tau"])
error_sign += "_delta:{:.3f}_tau:{}".format(kwargs["vsd_delta"], vsd_tau_str)
return error_sign | 1,612 |
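# Usage sketch: for the "vsd" error type the delta/tau settings are appended to the signature.
sig = get_error_signature("vsd", 1, vsd_tau=float("inf"), vsd_delta=15.0)
assert sig == "error:vsd_ntop:1_delta:15.000_tau:inf"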
def clean_text_from_multiple_consecutive_whitespaces(text):
"""Cleans the text from multiple consecutive whitespaces, by replacing these with a single whitespace."""
multi_space_regex = re.compile(r"\s+", re.IGNORECASE)
return re.sub(multi_space_regex, ' ', text) | 1,613 |
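# Usage sketch (assumes `re` is imported): tabs, newlines and repeated spaces all collapse to one space.
assert clean_text_from_multiple_consecutive_whitespaces("a \t b\n\nc") == "a b c"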
def run(filename):
"""
MUST HAVE FUNCTION!
Begins the plugin processing
Returns a list of endpoints
"""
run_results = set()
r_rule = re.compile(r"(Route\(\"[^,)]+)", flags=re.IGNORECASE)
for line in filename:
try:
route_match = r_rule.search(line)
if route_match:
run_results.add(route_match.group(1)[7:-1])
except Exception:
# Print the offending line the BurpSuite's extension Output tab
print("Error! Couldn't parse: %s" % line)
return list(run_results) | 1,614 |
async def setup_script(hass, notify_q, notify_q2, now, source, config=None):
"""Initialize and load the given pyscript."""
conf_dir = hass.config.path(FOLDER)
file_contents = {f"{conf_dir}/hello.py": source}
Function.hass = None
mock_open = MockOpen()
for key, value in file_contents.items():
mock_open[key].read_data = value
def isfile_side_effect(arg):
return arg in file_contents
def glob_side_effect(path, recursive=None):
result = []
path_re = path.replace("*", "[^/]*").replace(".", "\\.")
path_re = path_re.replace("[^/]*[^/]*/", ".*")
for this_path in file_contents:
if re.match(path_re, this_path):
result.append(this_path)
return result
if not config:
config = {DOMAIN: {CONF_ALLOW_ALL_IMPORTS: True}}
with patch("custom_components.pyscript.os.path.isdir", return_value=True), patch(
"custom_components.pyscript.glob.iglob"
) as mock_glob, patch("custom_components.pyscript.global_ctx.open", mock_open), patch(
"custom_components.pyscript.trigger.dt_now", return_value=now
), patch(
"custom_components.pyscript.open", mock_open
), patch(
"homeassistant.config.load_yaml_config_file", return_value=config
), patch(
"custom_components.pyscript.install_requirements", return_value=None,
), patch(
"custom_components.pyscript.watchdog_start", return_value=None
), patch(
"custom_components.pyscript.os.path.getmtime", return_value=1000
), patch(
"custom_components.pyscript.global_ctx.os.path.getmtime", return_value=1000
), patch(
"custom_components.pyscript.os.path.isfile"
) as mock_isfile:
mock_isfile.side_effect = isfile_side_effect
mock_glob.side_effect = glob_side_effect
assert await async_setup_component(hass, "pyscript", config)
#
# I'm not sure how to run the mock all the time, so just force the dt_now()
# trigger function to return the given list of times in now.
#
def return_next_time():
nonlocal now
if isinstance(now, list):
if len(now) > 1:
return now.pop(0)
return now[0]
return now
trigger.__dict__["dt_now"] = return_next_time
if notify_q or notify_q2:
async def state_changed(event):
var_name = event.data["entity_id"]
if var_name == "pyscript.done":
value = event.data["new_state"].state
if notify_q:
await notify_q.put(value)
if var_name == "pyscript.done2":
value = event.data["new_state"].state
if notify_q2:
await notify_q2.put(value)
hass.bus.async_listen(EVENT_STATE_CHANGED, state_changed) | 1,615 |
def get_assignment_grade_summaries(course_id):
""" return a list of a course's assignments with a grade summary for each
https://canvas.instructure.com/doc/api/analytics.html#method.analytics_api.course_assignments """
assignments = api.get_list('courses/{}/analytics/assignments'.format(course_id))
return [] if 'errors' in assignments else assignments | 1,616 |
def test_is_square_invalid():
"""Input must be a matrix."""
with np.testing.assert_raises(ValueError):
is_square(np.array([-1, 1])) | 1,617 |
def _list_descriptors():
"""Return a list of all registered XModuleDescriptor classes."""
return sorted(
[
desc for (_, desc) in XModuleDescriptor.load_classes()
] + XBLOCK_CLASSES,
key=str
) | 1,618 |
def e3p0(tof,p1,p2,p3,p4,p5,p6,p7,p8,p9,p10):
"""
Background function for TOF spectra
Parameters
----------
tof : array-like
The time-of-flight spectrum
p1 : float
constant background
p2 : float
multiplier on 1st exponential
p3 : float
multiplier on time-of-flight in 1st exponent
p4 : float
constant added to 1st exponent
p5-p10 : float
(see equation in notes)
Returns
-------
e3p0 : array-like
The function in the length of t (see notes)
Notes
-----
.. math:: f(t) = p1 + p2e^{p3t+p4} + p5e^{p6t+p7} + p8e^{p9t+p10}
"""
return p1 + p2*np.exp(p3*tof+p4) + p5*np.exp(p6*tof+p7) + p8*np.exp(p9*tof+p10) | 1,619 |
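# Worked sketch: with p2 = p5 = p8 = 0 the exponential terms vanish and the
# background reduces to the constant p1, which is a quick sanity check.
import numpy as np
tof = np.linspace(0.0, 10.0, 5)
background = e3p0(tof, 2.0, 0, 1, 0, 0, 1, 0, 0, 1, 0)
assert np.allclose(background, 2.0)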
def read_dictionary():
"""
This function reads file "dictionary.txt" stored in FILE
and appends words in each line into a Python list
"""
global word_dict
with open(FILE, "r") as f:
for line in f:
line = line.strip()
word_dict.append(line) | 1,620 |
def padre_response_and_code_jump(context, response, connection):
"""
I expect a response and to jump to a point in the code in two separate
messages
"""
num_expected_results = len(context.table.rows) + 1
results = read_results(
num_expected_results, context.connections[int(connection)][0]
)
assert_that(
len(results),
equal_to(num_expected_results),
"Got {} responses".format(len(results)),
)
for row in context.table:
check_calls_in(results, row[0], json.loads(row[1]))
check_response_in(results, context.padre.last_request_number, response) | 1,621 |
def dice_coefficient(pred, gt):
"""
Computes dice coefficients between two masks
:param pred: predicted masks - [0 ,1]
:param gt: ground truth masks - [0 ,1]
:return: dice coefficient
"""
d = (2 * np.sum(pred * gt) + 1) / ((np.sum(pred) + np.sum(gt)) + 1)
return d | 1,622 |
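# Worked sketch: identical masks give a coefficient of 1; the +1 terms keep the
# ratio defined (and equal to 1) when both masks are empty.
import numpy as np
mask = np.array([[1, 0], [1, 1]])
assert dice_coefficient(mask, mask) == 1.0
assert dice_coefficient(np.zeros((2, 2)), np.zeros((2, 2))) == 1.0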
def test_generic_data_wrapper_verifier_failure():
"""
Test that a GenericData used in constraints fails
the verifier when constraints are not satisfied.
"""
with pytest.raises(VerifyException) as e:
ListDataWrapper(
[ListData([BoolData(True),
ListData([BoolData(False)])])])
assert e.value.args[
0] == "ListData(data=[BoolData(data=False)]) should be of base attribute bool" | 1,623 |
def get_keep_score(source_counts, prediction_counts, target_counts):
"""Compute the keep score (Equation 5 in the paper)."""
source_and_prediction_counts = source_counts & prediction_counts
source_and_target_counts = source_counts & target_counts
true_positives = sum((source_and_prediction_counts & source_and_target_counts).values())
selected = sum(source_and_prediction_counts.values())
relevant = sum(source_and_target_counts.values())
return _get_fbeta_score(true_positives, selected, relevant) | 1,624 |
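# Sketch of the multiset intersection used above (assumes the counts are
# collections.Counter objects): `&` keeps the minimum count per token, so shared
# n-grams are counted with multiplicity before the F-beta score is computed.
from collections import Counter
source_counts = Counter({"the": 2, "cat": 1})
prediction_counts = Counter({"the": 1, "cat": 1, "sat": 1})
assert (source_counts & prediction_counts) == Counter({"the": 1, "cat": 1})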
def run_decode_generator(gc, env):
"""Run the decode table generator"""
if env == None:
return (1, ['no env!'])
xedsrc = env.escape_string(env['src_dir'])
build_dir = env.escape_string(env['build_dir'])
debug = ""
other_args = " ".join(env['generator_options'])
gen_extra_args = "--gendir %s --xeddir %s %s %s" % (build_dir,
xedsrc, debug,
other_args)
if env['gen_ild_storage']:
gen_extra_args += ' --gen-ild-storage'
if env['compress_operands']:
gen_extra_args += " --compress-operands"
cmd = env.expand_string(gc.decode_command(xedsrc,
gen_extra_args,
env.on_windows()))
if mbuild.verbose(2):
mbuild.msgb("DEC-GEN", cmd)
(retval, output, error_output) = mbuild.run_command(cmd,
separate_stderr=True)
oo = env.build_dir_join('DEC-OUT.txt')
oe = env.build_dir_join('DEC-ERR.txt')
_write_file(oo, output)
_write_file(oe, error_output)
if retval == 0:
list_of_files = read_file_list(gc.dec_output_file)
mbuild.hash_files(list_of_files,
env.build_dir_join(".mbuild.hash.xeddecgen"))
mbuild.msgb("DEC-GEN", "Return code: " + str(retval))
return (retval, error_output ) | 1,625 |
def dict_comparator(first_dict, second_dict):
"""
Checks whether the sets of key-value pairs of the two dictionaries coincide.
Returns True if they match, otherwise False.
"""
if set(first_dict.keys()) != set(second_dict.keys()):
return False
for key, value in first_dict.items():
if value != second_dict[key]:
return False
return True | 1,626 |
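# Usage sketch: for plain dicts this check is equivalent to `first_dict == second_dict`.
assert dict_comparator({"a": 1, "b": 2}, {"b": 2, "a": 1}) is True
assert dict_comparator({"a": 1}, {"a": 2}) is False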
def word_check(seq1, seq2, word):
"""Returns the index in seq2 of the first length-`word` substring of seq1 that occurs in seq2, or -1 if seq2 contains no such substring."""
for i in range(len(seq1)-word+1):
if seq2.find(seq1[i:i+word])>-1: return seq2.find(seq1[i:i+word])
return -1 | 1,627 |
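# Usage sketch: the return value is the position in seq2 of the first shared
# word-length substring of seq1, or -1 when there is no overlap.
assert word_check("ACGT", "TTACG", 3) == 2   # "ACG" found at index 2 of seq2
assert word_check("ACGT", "TTTTT", 3) == -1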
def plasma_parameter(N_particles, N_grid, dx):
"""
Estimates the plasma parameter as the number of particles per step.
Parameters
----------
N_particles : int, float
Number of physical particles
N_grid : int
Number of grid cells
dx : float
grid step size
"""
return (N_particles / N_grid) * dx | 1,628 |
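# Worked sketch: 10,000 particles on 100 cells with dx = 0.5 gives 50 particles per cell-length.
assert plasma_parameter(10_000, 100, 0.5) == 50.0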
def get_ious_and_iou_loss(inputs,
targets,
weight=None,
loss_type="iou",
reduction="none"):
"""
Compute iou loss of type ['iou', 'giou', 'linear_iou']
Args:
inputs (tensor): pred values
targets (tensor): target values
weight (tensor): loss weight
box_mode: fixed to 'lr' (left/right distances); only this mode is currently supported.
loss_type (str): 'giou' or 'iou' or 'linear_iou'
reduction (str): reduction manner
Returns:
loss (tensor): computed iou loss.
"""
# box_mode = "lr"
inputs = torch.cat((-inputs[..., :1], inputs[..., 1:]), dim=-1)
targets = torch.cat((-targets[..., :1], targets[..., 1:]), dim=-1)
eps = torch.finfo(torch.float32).eps
inputs_area = (inputs[..., 1] - inputs[..., 0]).clamp_(min=0)
targets_area = (targets[..., 1] - targets[..., 0]).clamp_(min=0)
w_intersect = (torch.min(inputs[..., 1], targets[..., 1])
- torch.max(inputs[..., 0], targets[..., 0])).clamp_(min=0)
area_intersect = w_intersect
area_union = targets_area + inputs_area - area_intersect
ious = area_intersect / area_union.clamp(min=eps)
if loss_type == "iou":
loss = -ious.clamp(min=eps).log()
elif loss_type == "linear_iou":
loss = 1 - ious
elif loss_type == "giou":
g_w_intersect = torch.max(inputs[..., 1], targets[..., 1]) \
- torch.min(inputs[..., 0], targets[..., 0])
ac_uion = g_w_intersect
gious = ious - (ac_uion - area_union) / ac_uion.clamp(min=eps)
loss = 1 - gious
else:
raise NotImplementedError
if weight is not None:
loss = loss * weight.view(loss.size())
if reduction == "mean":
loss = loss.sum() / max(weight.sum().item(), eps)
else:
if reduction == "mean":
loss = loss.mean()
if reduction == "sum":
loss = loss.sum()
return ious, loss | 1,629 |
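# Usage sketch (assumes torch is imported): in the 'lr' box mode used above the
# two components are left/right distances; identical boxes give IoU = 1 and a
# zero linear-IoU loss.
import torch
preds = torch.tensor([[1.0, 2.0]])
gts = torch.tensor([[1.0, 2.0]])
ious, loss = get_ious_and_iou_loss(preds, gts, loss_type="linear_iou")
assert torch.allclose(ious, torch.ones(1)) and torch.allclose(loss, torch.zeros(1))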
def _worker(exit_e: threading.Event, e_conf: threading.Event,
job_q: queue.Queue, res_q: queue.Queue, max_closed: int) -> None:
"""Worker thread -> consumes jobs that are executed in a Solver thread."""
sol = solver.Solitaire()
while not exit_e.is_set():
try:
seed, draw_count = job_q.get(timeout=0.001)
except queue.Empty:
continue
sol.draw_count = draw_count
sol.shuffle1(seed)
sol.reset_game()
if abs(sol.solve_fast(max_closed).value) == 1:
res_q.put((seed, sol.draw_count, sol.moves_made()))
job_q.task_done()
e_conf.set() | 1,630 |
def load_chembl():
"""Downloads a small subset of the ChEMBL dataset.
Returns
-------
ic50_train: sparse matrix
sparse train matrix
ic50_test: sparse matrix
sparse test matrix
feat: sparse matrix
sparse row features
"""
# load bioactivity and features
ic50 = load_one("chembl-IC50-346targets.mm")
feat = load_one("chembl-IC50-compound-feat.mm")
## creating train and test sets
ic50_train, ic50_test = make_train_test(ic50, 0.2)
return (ic50_train, ic50_test, feat) | 1,631 |
def test_skip(schema, schemas, expected_schema):
"""
GIVEN given schema, schemas and expected schema
WHEN merge is called with the schema and schemas and skip name
THEN the expected schema is returned.
"""
skip_name = "RefSchema"
return_schema = helpers.all_of.merge(
schema=schema, schemas=schemas, skip_name=skip_name
)
assert return_schema == expected_schema | 1,632 |
def validate_name_dynamotable(table_name):
"""Validate if table name matches DynamoDB naming standards."""
if not isinstance(table_name, str):
raise ValueError('Input argument "table_name" must be a string')
if len(table_name) < 3 or len(table_name) > (255 - 5):
# note: deduct 5 chars to allow for a postfix (e.g. '.lock')
return (False, 'TableName should be of length: [3-255]')
if not re.match(r'^[a-zA-Z0-9]', table_name):
return (False, 'TableName should start with a letter or number')
if re.search(r'[-\._]{2}', table_name):
return (False, 'TableName can\'t contain two special characters [-, ., _] in a row')
if not re.match(r'^[-a-zA-Z0-9\._]*$', table_name):
return (False, re.sub(' +', ' ', 'TableName contains invalid character. \
Allowed characters: [a-z, A-Z, 0-9, \'.\', \'-\', \'_\']'))
return (True, 'Success') | 1,633 |
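# Usage sketch (assumes `re` is imported): well-formed names pass, too-short names fail.
assert validate_name_dynamotable("my_table-01") == (True, 'Success')
assert validate_name_dynamotable("ab")[0] is False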
def test_compute():
"""Tests that it works for a simple example."""
activation_layers = [
FullyConnectedLayer(np.eye(2), np.ones(shape=(2,))),
ReluLayer(),
FullyConnectedLayer(2.0 * np.eye(2), np.zeros(shape=(2,))),
ReluLayer(),
]
value_layers = activation_layers[:2] + [
FullyConnectedLayer(3.0 * np.eye(2), np.zeros(shape=(2,))),
ReluLayer(),
]
network = DDNN(activation_layers, value_layers)
assert network.differ_index == 2
output = network.compute([[-2.0, 1.0]])
assert np.allclose(output, [[0.0, 6.0]])
output = network.compute(torch.tensor([[-2.0, 1.0]])).numpy()
assert np.allclose(output, [[0.0, 6.0]])
# Test HardTanh
activation_layers = [
FullyConnectedLayer(np.eye(2), np.ones(shape=(2,))),
HardTanhLayer(),
]
value_layers = [
FullyConnectedLayer(2.0 * np.eye(2), np.zeros(shape=(2,))),
HardTanhLayer(),
]
network = DDNN(activation_layers, value_layers)
output = network.compute([[0.5, -0.9]])
assert np.allclose(output, [[1.0, -1.8]])
# Test MaxPool
width, height, channels = 2, 2, 2
window_data = StridedWindowData((height, width, channels),
(2, 2), (2, 2), (0, 0), channels)
maxpool_layer = MaxPoolLayer(window_data)
activation_layers = [
FullyConnectedLayer(np.eye(8), np.ones(shape=(8,))),
maxpool_layer,
]
value_layers = [
FullyConnectedLayer(-1. * np.eye(8), np.zeros(shape=(8,))),
maxpool_layer,
]
network = DDNN(activation_layers, value_layers)
output = network.compute([[1.0, 2.0, -1.0, -2.5, 0.0, 0.5, 1.5, -3.5]])
# NHWC, so the two channels are: [1, -1, 0, 1.5] and [2, -2.5, 0.5, -3.5]
# So the maxes are 1.5 and 2.0, so the value layer outputs -1.5, -2.0
assert np.allclose(output, [[-1.5, -2.0]]) | 1,634 |
def kill(ctx, *args, **kw_args):
"""Kill existing running servers."""
# Get the config parser object.
config = ctx.obj["config_obj"]
s = Server(config)
print("Killing ...", end="")
s.kill()
print("... Done") | 1,635 |
def delete_item(item_id):
"""
The method deletes item with the provided id.
:param item_id: id of the item to be deleted
:return: http response
"""
try:
if DATA_CONTROLLER.delete_bucketlist_item(item_id):
return make_response("", 200)
else:
return make_response("", 404)
except ValueError as err:
tmp_response = make_response("", 500)
return tmp_response | 1,636 |
def test_child_class():
"""It can use method of child class normally"""
client = app.test_client()
resp = client.post('/child-class/')
eq_(b"POST", resp.data) | 1,637 |
def fix_all_workspace_info(
ws_url, auth_url, token, max_id, outfile=DEFAULT_OUTPUT_FILE
):
"""
Iterates over all workspaces available at the ws_url endpoint, using the given admin token,
and applies _fix_single_workspace_info to each.
ws_url = endpoint for the workspace service to modify
auth_url = endpoint for the auth service that the workspace talks to
token = an active auth token for a workspace administrator
max_id = the max workspace id to fix - should be fairly large, any that are missing up to that point are just ignored.
"""
assert ws_url
assert auth_url
assert token
user_id = _get_user_id(auth_url, token)
ws = Workspace(url=ws_url, token=token)
all_results = {
"multiple": [], # more than 1 narrative, not updated
"update": [], # list of updated narratives
"skip": [], # skipped - likely has 0 narratives
"fail": [], # failed because either that id doesn't exist, or it was deleted. maybe locked.
}
for ws_id in range(1, max_id):
try:
result = _fix_single_workspace_info(ws_id, user_id, ws, verbose=True)
if result.get("multiple") is not None:
all_results["multiple"].append(result["multiple"])
if result.get("update") is not None:
all_results["update"].append(result["update"])
if result.get("skip") is not None:
all_results["skip"].append(result["skip"])
except baseclient.ServerError as e:
if "No workspace with id" in str(e):
print("WS:{} does not exist".format(ws_id))
all_results["fail"].append({"id": ws_id, "error": str(e.message)})
print("Done. Results in update_results.json")
with open("update_results.json", "w") as f:
f.write(json.dumps(all_results, indent=4)) | 1,638 |
def import_from_text_file(filename, defaultExt, readDataFcn, verbose=False):
"""
Opens a given text file and reads data using the specified function
Parameters
----------
filename : str
the path of a file
defaultExt : str
the default extension of the file
readDataFcn : callable
the function to read data from the file. Takes the file as its only parameter.
verbose : bool (optional)
if True prints messages on console (default is False)
Returns
-------
unknown
the output of the readDataFcn
"""
return _open_file(filename, defaultExt, 'r', readDataFcn, verbose) | 1,639 |
def is_template_definition(metric_name):
"""Return if the given metric name is a template definition by
convention."""
fields = metric_name.split('/')
return fields[0].lower() == TEMPLATE_DEFINITION_PREFIX | 1,640 |
def _cm_ramp_points_and_voltages(abf):
"""
Return [points, voltages] if the sweep contains a ramp suitable for
capacitance calculation using a matching downward and upward ramp.
points is a list of 3 numbers depicting index values important to this
ramp. The first number is the index at the start of the downward ramp, the
second is the index of its nadir, and the third is the index where it
returns to the original level.
voltages is a list of 2 numbers: voltage before and during the ramp.
"""
assert isinstance(abf, pyabf.ABF)
if abf.sweepUnitsY != "pA":
raise Exception("must be in voltage clamp configuration")
for i, p1 in enumerate(abf.sweepEpochs.p1s):
if i == 0:
continue
# ensure this sweep and the last are both ramps
if abf.sweepEpochs.types[i] != "Ramp":
continue
if abf.sweepEpochs.types[i-1] != "Ramp":
continue
# ensure the levels are different
if abf.sweepEpochs.levels[i] == abf.sweepEpochs.levels[i-1]:
continue
ptStart = abf.sweepEpochs.p1s[i-1]
ptTransition = abf.sweepEpochs.p1s[i]
ptEnd = abf.sweepEpochs.p2s[i]
points = [ptStart, ptTransition, ptEnd]
voltageBefore = abf.sweepEpochs.levels[i-1]
voltageDuring = abf.sweepEpochs.levels[i]
voltages = [voltageBefore, voltageDuring]
return [points, voltages]
return None | 1,641 |
def single_model_embeddings_specify(single_model_embeddings):
"""Returns an instance of MultiTaskLSTMCRF initialized with the default configuration file,
loaded embeddings and single specified model."""
single_model_embeddings.specify()
return single_model_embeddings | 1,642 |
def load_json(filename):
"""
Load a JSON file that may be .bz2 or .gz compressed
"""
if '.bz2' in filename:
import bz2
with bz2.open(filename, 'rt') as infile:
return json.load(infile)
elif '.gz' in filename:
import gzip
with gzip.open(filename, 'rt') as infile:
return json.load(infile)
else:
with open(filename, 'rt') as infile:
return json.load(infile) | 1,643 |
def get_future_contracts(underlying_symbol, date=None):
"""
Get the list of tradable contract codes for a futures product on the strategy's current date.
:param underlying_symbol: futures product code, e.g. 'AG' (silver)
:param date: the date to query; defaults to the strategy's current date
:return: list of tradable contract codes for the product on that date
"""
assert underlying_symbol, "underlying_symbol is required"
dt = to_date_str(date)
return JQDataClient.instance().get_future_contracts(underlying_symbol=underlying_symbol, dt=dt) | 1,644 |
def rodeo_query(fc, pallet): # 3.5-4 seconds for 150 elem
"""
Get pd DataFrame with info from rodeo about pallet/tote in TS Out.
:param fc: str
:param pallet: Pallet or Tote are accepted.
:return: df or "No data was found" if status_code = 200, "There was an error while connecting to {url}"
otherwise.
"""
url = f"https://rodeo-dub.amazon.com/{fc}/Search?_enabledColumns=on&enabledColumns=ASIN_TITLES&enabledColumns" \
f"=FC_SKU&enabledColumns=OUTER_SCANNABLE_ID&&searchKey={pallet} "
urllib3.disable_warnings() # prevent warnings for unverified request
print(COLOR + "Downloading manifested pallet's content from Rodeo.")
with requests.Session() as req:
resp = req.get(url,
timeout=30,
verify=False,
allow_redirects=True,
auth=HTTPKerberosAuth(mutual_authentication=OPTIONAL))
if resp.status_code == 200:
data = pd.read_html(resp.text, flavor=None, header=0, parse_dates=["Need To Ship By Date"])
if data is not None and len(data[0]) > 0:
df = pd.concat(data, sort=False)
df = df.drop(columns='Unnamed: 0')
return df
else:
return f"No data was found at {url}\nPlease check that {pallet} is correct.\nIf the error persists, " \
f"please check Rodeo status for your FC: {fc}."
else:
# return resp.raise_for_status() # to see error
return f"There was an error while connecting to {url}" | 1,645 |
def plan_to_joint_configuration(robot, qgoal, pname='BiRRT', max_iters=20,
max_ppiters=40, try_swap=False):
"""
Plan a trajectory to the given `qgoal` configuration.
Parameters
----------
robot: orpy.Robot
The OpenRAVE robot
qgoal: array_like
The goal configuration
pname: str
Name of the planning algorithm. Available options are: `BasicRRT`, `BiRRT`
max_iters: float
Maximum iterations for the planning stage
max_ppiters: float
Maximum iterations for the post-processing stage. It will use a parabolic
smoother which short-cuts the trajectory and then smooths it
try_swap: bool
If set, will compute the direct and reversed trajectory. The minimum
duration trajectory is used.
Returns
-------
traj: orpy.Trajectory
Planned trajectory. If plan fails, this function returns `None`.
"""
qstart = robot.GetActiveDOFValues()
env = robot.GetEnv()
planner = orpy.RaveCreatePlanner(env, pname)
params = orpy.Planner.PlannerParameters()
params.SetMaxIterations(max_iters)
if max_ppiters > 0:
params.SetPostProcessing('ParabolicSmoother',
'<_nmaxiterations>{0}</_nmaxiterations>'.format(max_ppiters))
else:
params.SetPostProcessing('', '')
# Plan trajectory
best_traj = None
min_duration = float('inf')
reversed_is_better = False
count = 0
for qa, qb in itertools.permutations([qstart, qgoal], 2):
count += 1
with robot:
robot.SetActiveDOFValues(qa)
params.SetGoalConfig(qb)
params.SetRobotActiveJoints(robot)
initsuccess = planner.InitPlan(robot, params)
if initsuccess:
traj = orpy.RaveCreateTrajectory(env, '')
status = planner.PlanPath(traj) # Plan the trajectory
if status == orpy.PlannerStatus.HasSolution:
duration = traj.GetDuration()
if duration < min_duration:
min_duration = duration
best_traj = orpy.RaveCreateTrajectory(env, traj.GetXMLId())
best_traj.Clone(traj, 0)
if count == 2:
reversed_is_better = True
if not try_swap:
break
# Check if we need to reverse the trajectory
if reversed_is_better:
best_traj = orpy.planningutils.ReverseTrajectory(best_traj)
return best_traj | 1,646 |
def _get_texinfo(data):
"""Return the texture information of a texture data.
Arguments:
* data: the texture data as an array.
Returns:
* texinfo: a dictionary with the information related to the texture data.
"""
assert data.ndim == 3
size = data.shape[:2]
if size[0] == 1:
ndim = 1
elif size[0] > 1:
ndim = 2
ncomponents = data.shape[2]
return dict(size=size, ndim=ndim, ncomponents=ncomponents) | 1,647 |
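# Usage sketch (assumes numpy is imported): an RGBA row texture of width 256.
import numpy as np
row = np.zeros((1, 256, 4))
info = _get_texinfo(row)
assert info == dict(size=(1, 256), ndim=1, ncomponents=4)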
def test_get_idn(switch_driver):
"""
to check if the instrument attributes are set correctly after getting
the IDN
"""
assert switch_driver.IDN() == {
"vendor": "Keysight",
"model": "34980A",
"serial": "1000",
"firmware": "0.1"
} | 1,648 |
def set_featured_notebooks(notebook_ids): # noqa: E501
"""set_featured_notebooks
:param notebook_ids: Array of notebook IDs to be featured.
:type notebook_ids: List[str]
:rtype: None
"""
update_multiple(ApiNotebook, [], "featured", False)
if notebook_ids:
update_multiple(ApiNotebook, notebook_ids, "featured", True)
return None, 200 | 1,649 |
def speed_to_cadences(bicycle, speed, digits=None):
"""
Return cadences in hertz (revolutions per second).
Speed is measured in kilometers per hour.
Assume the following bicycle attributes are non-null and non-empty:
- front_cogs
- rear_cogs
- crank_length
- rear_wheel
Raise a ``ValueError``, if that is not the case.
EXAMPLES::
>>> w = Wheel(diameter=600)
>>> b = Bicycle(front_cogs=[40], rear_cogs=[20, 30], crank_length=100, rear_wheel=w)
>>> speed_to_cadences(b, 18.1, digits=1)
{(40, 30): 2.0, (40, 20): 1.3}
"""
b = bicycle
attrs = ['front_cogs', 'rear_cogs', 'crank_length', 'rear_wheel']
check_attrs(b, *attrs)
check_attrs(b.rear_wheel, 'diameter')
gr = gain_ratios(b)
result = {}
for (k, g) in gr.items():
result[k] = speed/(2*pi*b.crank_length*g*(3600/1e6))
if digits is not None:
result = {k: round(v, digits) for k, v in result.items()}
return result | 1,650 |
def _gen_version(fields):
"""Looks at BotGroupConfig fields and derives a digest that summarizes them.
This digest is going to be sent to the bot in /handshake, and bot would
include it in its state (and thus send it with each /poll). If server detects
that the bot is using older version of the config, it would ask the bot
to restart.
Args:
fields: dict with BotGroupConfig fields (without 'version').
Returns:
A string that going to be used as 'version' field of BotGroupConfig tuple.
"""
# Just hash JSON representation (with sorted keys). Assumes it is stable
# enough. Add a prefix and trim a bit, to clarify that is it not git hash or
# anything like that, but just a dumb hash of the actual config.
digest = hashlib.sha256(utils.encode_to_json(fields)).hexdigest()
return 'hash:' + digest[:14] | 1,651 |
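# Hedged sketch of the same idea without the project's `utils` helper (an
# assumption, not the actual code): hash a sorted-keys JSON dump and keep a
# short, prefixed digest so config changes produce a new version string.
import hashlib, json
fields = {"dimensions": {"pool": ["default"]}, "owners": ["a@example.com"]}
digest = hashlib.sha256(json.dumps(fields, sort_keys=True).encode("utf-8")).hexdigest()
version = "hash:" + digest[:14]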
def getopt(clf, ret_val, isbool=False):
""" Command Line Option input parser"""
found = []
def getCLO(flag):
iindx = sys.argv.index(flag)
sys.argv.pop(iindx)
return sys.argv.pop(iindx)
if isbool: return (clf in sys.argv)
while clf in sys.argv: found.append(getCLO(clf))
if found: ret_val = [found, found[0]][int(len(found) == 1)]
return ret_val | 1,652 |
def pay_and_save_financing(req: request, request_json, account_id):
"""Set up the financing statement, pay if there is an account id, and save the data."""
# Charge a fee.
token: dict = g.jwt_oidc_token_info
statement = FinancingStatement.create_from_json(request_json, account_id, token.get('username', None))
invoice_id = None
registration = statement.registration[0]
pay_trans_type, fee_quantity = resource_utils.get_payment_type_financing(registration)
pay_ref = None
if not is_reg_staff_account(account_id):
pay_account_id: str = account_id if not is_sbc_office_account(account_id) else None
payment = Payment(jwt=jwt.get_token_auth_header(),
account_id=pay_account_id,
details=resource_utils.get_payment_details_financing(registration))
pay_ref = payment.create_payment(pay_trans_type, fee_quantity, None, registration.client_reference_id)
else:
payment_info = resource_utils.build_staff_registration_payment(req, pay_trans_type, fee_quantity)
payment = Payment(jwt=jwt.get_token_auth_header(),
account_id=None,
details=resource_utils.get_payment_details_financing(registration))
pay_ref = payment.create_payment_staff_registration(payment_info, registration.client_reference_id)
invoice_id = pay_ref['invoiceId']
registration.pay_invoice_id = int(invoice_id)
registration.pay_path = pay_ref['receipt']
# Try to save the financing statement: failure throws an exception.
try:
statement.save()
except Exception as db_exception: # noqa: B902; handle all db related errors.
current_app.logger.error(SAVE_ERROR_MESSAGE.format(account_id, 'financing', repr(db_exception)))
if account_id and invoice_id is not None:
current_app.logger.info(PAY_REFUND_MESSAGE.format(account_id, 'financing', invoice_id))
try:
payment.cancel_payment(invoice_id)
except SBCPaymentException as cancel_exception:
current_app.logger.error(PAY_REFUND_ERROR.format(account_id, 'financing', invoice_id,
repr(cancel_exception)))
raise db_exception
return statement | 1,653 |
def resolve_cmds_path(cmds, singlesrv_mode):
"""Resolve the cmds path if in single server mode.
Args:
cmds: A list of sender/receiver commands.
singlesrv_mode: A bool on whether running in single server mode.
Returns:
The commands that path has been resolved if needed
(in single server mode).
"""
if not singlesrv_mode:
return cmds
r_cmds = []
for cmd in cmds:
r_cmds.append(_resolve_binary_path_for_timed_cmd(cmd))
return r_cmds | 1,654 |
def _encode_base64(data: str) -> str:
"""Base 64 encodes a string."""
ebytes = base64.b64encode(data.encode("utf-8"))
estring = str(ebytes, "utf-8")
return estring | 1,655 |
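# Usage sketch: the helper returns text, so a round trip needs an explicit decode.
import base64
assert _encode_base64("hello") == "aGVsbG8="
assert base64.b64decode(_encode_base64("hello")).decode("utf-8") == "hello"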
def downgrade():
"""schema downgrade migrations go here."""
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table("answers")
op.drop_table("questions")
# ### end Alembic commands ### | 1,656 |
def workflow_spec(
dag: DAG,
workflow: Workflow,
) -> Mapping[str, Any]:
"""
Return a minimal representation of a WorkflowSpec for the supplied DAG and metadata.
Spec: https://github.com/argoproj/argo-workflows/blob/v3.0.4/docs/fields.md#workflowspec
Parameters
----------
dag
The DAG to generate the spec for
workflow
The configuration for this workflow
Raises
------
ValueError
If any of the extra_spec_options collides with a property used by the runtime.
"""
validate_parameters(inputs=dag.inputs, params=workflow.params)
spec = {
"entrypoint": BASE_DAG_NAME,
"templates": _templates(
node=dag,
container_image=workflow.container_image,
container_command=workflow.container_entrypoint_to_dag_cli,
params=workflow.params,
),
}
if workflow.params:
spec["arguments"] = _workflow_spec_arguments(workflow.params)
spec = with_extra_spec_options(
original=spec,
extra_options=workflow.extra_spec_options,
context="the Workflow spec",
)
return spec | 1,657 |
def redirect(request):
"""
Handles what happens when the groupcode is submitted by the user, and handles input from users
when they are answering questions.
:param request:
:return: The method returns the student view page (the actual game) if the user entered a correct
groupcode; it also returns messages while users are answering questions in the quiz, telling them
whether their answers are correct or not
"""
"""handling what happens when the groupcode is entered and submitted as well as the question logic"""
global score
global num
map_check = False
# Below is to check if whether the button is for groupcode or answer to question
# process the group code passed from the landing page
if request.method == 'POST' and 'submit-groupcode' in request.POST:
# Get inputted groupcode from the user
groupcode = str(request.POST.get('groupCode'))
# if the group code exists, load the treasure hunt page with the correct questions
if Gamecode.objects.filter(groupcode=groupcode).exists():
#Below is for question loading and getting question information
questionNum = Gamecode.objects.get(groupcode=groupcode)
mapCheck = questionNum.map
routeID = questionNum.routeID_id
num = questionNum.questionNum
score = questionNum.score
# Get question by using the question number the group is currently on
info = Questions.objects.filter(node_num=int(num),routeID=routeID)
# Add group code into user's session
request.session['groupcode'] = groupcode
# Add score into user's session
request.session['score'] = score
# Add routeID into user's session
request.session['routeID'] = routeID
#To show the correct map for the user to go to when they join the game after a question has been answered but the
# map check is not yet done
if num >1:
print(num)
#set map value to the previous question
num -=1
print(num)
latest_question = Questions.objects.get(node_num=num, routeID=routeID)
#Return number to the correct question number
num +=1
else:
latest_question = Questions.objects.get(node_num=num , routeID=routeID)
location = latest_question.location
longtitude = latest_question.longtitude
latitude = latest_question.latitude
place_name = latest_question.answers
return render(request, 'app/studentview.html',{"groupcode":groupcode, "data":info, "id":id, "score":score,"map_check":mapCheck,"location":location,"longtitude": longtitude,
"latitude":latitude,"answer":place_name})
# otherwise show an error message
else:
print("Wrong")
messages.error(request, 'The game code does not exist')
return render(request, 'app/index.html')
# if an answer to question is submitted, check if it is correct
if request.method == 'POST' and 'submit-question' in request.POST:
# Get routeID from user's session
routeID = request.session['routeID']
# Get groupcode from user's session
groupcode = request.session['groupcode']
# Get text from the input answer box
data = str(request.POST.get('answer'))
# Retrieve the current question the group is on from the database
questionNum = Gamecode.objects.get(groupcode=groupcode)
# if answer is correct for the current node, move onto the next question if it exists,
# otherwise show they have finished the quiz
if Questions.objects.filter(answers__icontains=data.strip(), node_num=int(num), routeID=routeID).exists():
latest_question = Questions.objects.get(node_num=num, routeID=routeID)
location = latest_question.location
longtitude = latest_question.longtitude
latitude = latest_question.latitude
place_name = latest_question.answers
map_check = "True"
# Add 1 to the counter so the questions moves on to the next one
num += 1
# Check whether if the user is on the last question
if Questions.objects.filter(node_num=int(num), routeID=routeID).exists():
score += 3
questionNum.map = map_check
questionNum.questionNum = num
questionNum.score = score
questionNum.save()
print(location)
info = Questions.objects.filter(node_num=num, routeID=routeID)
messages.success(request, 'Correct!') #Generate message saying correct
return render(request, 'app/studentview.html',{"groupcode":groupcode,"data":info,"id":id,
"score":score, "map_check":map_check,
"location":location,"longtitude": longtitude,
"latitude":latitude,"answer":place_name})
# Case when the user is on the last question
else:
# To make sure user stays on the last question
num -=1
questionNum.questionNum = num
questionNum.map = map_check
questionNum.save()
info = Questions.objects.filter(node_num=num,routeID=routeID)
# Generate message when user finish the quiz
messages.success(request, 'You have finished the quiz, well done!')
# Return the information back to user's view
return render(request, 'app/studentview.html', {"groupcode":groupcode,"data":info,"id":id,
"score":score, "map_check":map_check,
"location":location,"longtitude": longtitude,
"latitude":latitude,"answer":place_name,"Finished":"True"})
# Case when user gets the answer wrong
else:
info = Questions.objects.filter(node_num=num, routeID=routeID)
# Return incorrect message
messages.error(request, 'That is the wrong answer, please try again')
# Return the information back to user's view
return render(request, 'app/studentview.html', {"groupcode": groupcode, "data": info, "id": id,"score":score})
# Case when user refreshes the page during the game
if 'groupcode' in request.session:
# Retrieve information about the questions
groupcode = request.session['groupcode']
routeID = request.session['routeID']
questionNum = Gamecode.objects.get(groupcode=groupcode)
num = questionNum.questionNum
mapcheck = questionNum.map
# Get question from the database using num counter
info = Questions.objects.filter(node_num=int(num), routeID=routeID)
if num > 1:
print(num)
# set map value to the previous question
num -= 1
print(num)
latest_question = Questions.objects.get(node_num=num, routeID=routeID)
# Return number to the correct question number
num += 1
else:
latest_question = Questions.objects.get(node_num=num, routeID=routeID)
location = latest_question.location
longtitude = latest_question.longtitude
latitude = latest_question.latitude
place_name = latest_question.answers
# Return the information back to user's view
return render(request, 'app/studentview.html',
{"groupcode": groupcode, "data": info, "id": id, "score": score, "map_check": mapcheck,
"location": location, "longtitude": longtitude,
"latitude": latitude, "answer": place_name})
else:
# Redirect user back to start page
return render(request, 'app/index.html') | 1,658 |
def test_plDensity():
"""Test the plDensity function."""
mass, radius = 1, 1
assert isinstance(plDensity(mass, radius), float)
assert round(plDensity(mass, radius), 2) == 1.33
assert plDensity(0, radius) == 0
with pytest.raises(ZeroDivisionError):
plDensity(mass, 0) | 1,659 |
def build_test_fn(policy, optim, log_dir, model_name, train_collector, save_train_buffer, obs_shape, stack_num, env_id,
num_episodes):
""" Build custom test function for maze world environment """
def custom_test_fn(epoch, env_step):
# Save agent
print(f"Epoch = {epoch}")
torch.save({'model': policy.state_dict(), 'optim': optim.state_dict()},
log_dir + model_name + f'_epoch{epoch}.pth')
if save_train_buffer:
train_collector.buffer.save_hdf5(f'{log_dir}/epoch{epoch}_train_buffer.hdf5')
# Record agent`s performance in video
policy.eval()
test_env = envpool.make_gym(env_id, num_envs=1, seed=0, episodic_life=False, reward_clip=True, stack_num=4,
gray_scale=False, img_height=160, img_width=160)
collector = ts.data.Collector(policy, test_env, exploration_noise=True)
record.collect_and_record(collector, n_episode=num_episodes // 2, obs_shape=obs_shape, stack_num=stack_num,
log_dir=log_dir, epoch=epoch, starting_episode=0)
collector = ts.data.Collector(policy, test_env, exploration_noise=False)
record.collect_and_record(collector, n_episode=num_episodes // 2, obs_shape=obs_shape, stack_num=stack_num,
log_dir=log_dir, epoch=epoch, starting_episode=num_episodes // 2)
return custom_test_fn | 1,660 |
def decode_2bit(iterable: Iterable[int], palette: Sequence[Color]) \
-> Iterable[int]:
"""For every two bytes consumed from the given iterable, generates 8 decoded
RGB8 colors based on the palette.
:param iterable: 2-bit grayscale encoded image.
:param palette: List of colors used to decode the iterable.
:returns: An iterable of decoded data.
"""
iterable = iter(iterable)
#print(palette)
try:
while True:
hi = next(iterable)
lo = next(iterable)
for i in range(8):
c = (lo >> (7-i)) & 1
c |= ((hi >> (7-i)) & 1) << 1
color = palette[c]
yield color
except StopIteration:
return  # re-raising StopIteration inside a generator becomes a RuntimeError under PEP 479
def check_score(encoding, min_qual, qual_str):
"""Return True if the average quality score is at least min_qual
"""
qscores = [encoding[q] for q in qual_str]
return sum(qscores) >= min_qual * len(qscores) | 1,662 |
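# Usage sketch with a hypothetical Phred+33-style encoding table (an assumption,
# not the caller's real table): 'I' maps to 40 and '!' to 0.
encoding = {chr(q + 33): q for q in range(42)}
assert check_score(encoding, 30, "IIII") is True
assert check_score(encoding, 30, "I!!!") is False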
def add_unsafe_warning(func, fig):
"""
Generate warning if not supported by Paxplot
"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
if fig._show_unsafe_warning:
warnings.warn(
f'The function you have called ({func.__name__}) is not '
'officially supported by Paxplot, but it may still work. '
'Report issues to '
'https://github.com/kravitsjacob/paxplot/issues',
Warning
)
return func(*args, **kwargs)
return wrapper | 1,663 |
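An illustrative sketch of how this decorator might be applied; the stand-in figure class and function below are assumptions, not the real Paxplot API:

class _FigStub:
    _show_unsafe_warning = True   # mirrors the flag the wrapper checks

def _set_size(width, height):
    """Stand-in for a matplotlib function that Paxplot does not officially support."""
    return (width, height)

fig = _FigStub()
wrapped = add_unsafe_warning(_set_size, fig)
wrapped(4, 3)   # emits the "not officially supported" Warning, then returns (4, 3)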
def update_dask_partitions_shuffle(
ddf: dd.DataFrame,
table: str,
secondary_indices: List[str],
metadata_version: int,
partition_on: List[str],
store_factory: StoreFactoryType,
df_serializer: DataFrameSerializer,
dataset_uuid: str,
num_buckets: int,
sort_partitions_by: Optional[str],
bucket_by: List[str],
) -> da.Array:
"""
Perform a dataset update with dask reshuffling to control partitioning.
The shuffle operation will perform the following steps
1. Pack payload data
Payload data is serialized and compressed into a single byte value using
``distributed.protocol.serialize_bytes``, see also ``pack_payload``.
2. Apply bucketing
Hash the column subset ``bucket_by`` and distribute the hashes in
``num_buckets`` bins/buckets. Internally every bucket is identified by an
integer and we will create one physical file for every bucket ID. The
bucket ID is not exposed to the user and is dropped after the shuffle,
before the store. This is done since we do not want to guarantee at the
moment, that the hash function remains stable.
3. Perform shuffle (dask.DataFrame.groupby.apply)
The groupby key will be the combination of ``partition_on`` fields and the
hash bucket ID. This will create a physical file for every unique tuple
in ``partition_on + bucket_ID``. The function which is applied to the
dataframe will perform all necessary subtask for storage of the dataset
(partition_on, index calc, etc.).
4. Unpack data (within the apply-function)
After the shuffle, the first step is to unpack the payload data since
the follow up tasks will require the full dataframe.
5. Pre storage processing and parquet serialization
We apply important pre storage processing like sorting data, applying
final partitioning (at this time there should be only one group in the
payload data but using the ``MetaPartition.partition_on`` guarantees the
appropriate data structures kartothek expects are created.).
After the preprocessing is done, the data is serialized and stored as
parquet. The applied function will return an (empty) MetaPartition with
indices and metadata which will then be used to commit the dataset.
Returns
-------
A dask.Array holding relevant MetaPartition objects as values
"""
if ddf.npartitions == 0:
return ddf
group_cols = partition_on.copy()
if num_buckets is None:
raise ValueError("``num_buckets`` must not be None when shuffling data.")
meta = ddf._meta
meta[_KTK_HASH_BUCKET] = np.uint64(0)
ddf = ddf.map_partitions(_hash_bucket, bucket_by, num_buckets, meta=meta)
group_cols.append(_KTK_HASH_BUCKET)
packed_meta = ddf._meta[group_cols]
packed_meta[_PAYLOAD_COL] = b""
unpacked_meta = ddf._meta
ddf = pack_payload(ddf, group_key=group_cols)
ddf = ddf.groupby(by=group_cols)
ddf = ddf.apply(
partial(
_store_partition,
secondary_indices=secondary_indices,
sort_partitions_by=sort_partitions_by,
table=table,
dataset_uuid=dataset_uuid,
partition_on=partition_on,
store_factory=store_factory,
df_serializer=df_serializer,
metadata_version=metadata_version,
unpacked_meta=unpacked_meta,
),
meta=("MetaPartition", "object"),
)
return ddf | 1,664 |
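For intuition, a minimal sketch of the bucketing described in step 2 above; this is an assumption about how `_hash_bucket` might assign rows to buckets, not kartothek's actual implementation:

import numpy as np
import pandas as pd

def _hash_bucket_sketch(df: pd.DataFrame, bucket_by, num_buckets: int) -> pd.DataFrame:
    # Hash the subset of columns in `bucket_by` and fold the hashes into
    # `num_buckets` bins; the bucket id becomes an extra (temporary) column.
    hashes = pd.util.hash_pandas_object(df[bucket_by], index=False)
    out = df.copy()
    out["_KTK_HASH_BUCKET"] = (hashes % num_buckets).astype(np.uint64)
    return out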
def edit_paycheck(paycheck_id):
""" Edit a paycheck """
paycheck = Paycheck.query.get(paycheck_id)
form = PaycheckForm(obj=paycheck)
return render_template('pay/edit_paycheck.jinja', form=form, paycheck_id=paycheck_id) | 1,665 |
def flake8(session):
"""Lint code with Flake8."""
session.run("flake8", *SOURCES, external=True) | 1,666 |
def is_meeting_approved(meeting):
"""Returns True if the meeting is approved"""
if meeting.session_set.first().status.slug == 'apprw':
return False
else:
return True | 1,667 |
def get_data_rows_after_start(
worksheet,
start_row,
start_col,
end_col,
page_size=SHEETS_VALUE_REQUEST_PAGE_SIZE,
**kwargs,
):
"""
Yields the data rows of a spreadsheet starting with a given row and spanning a given column range
until empty rows are encountered.
Args:
worksheet (pygsheets.worksheet.Worksheet): Worksheet object
start_row (int): Zero-based index of the first row for which you want data returned
start_col (int): Zero-based index of the start of the column range
end_col (int): Zero-based index of the end of the column range
page_size (int): The number of rows to fetch per individual API request
kwargs (dict): Option params to pass along to pygsheets.worksheet.Worksheet.get_values
Yields:
list of str: List of cell values in a given row
"""
request_count = 0
values = []
while request_count == 0 or (values and len(values) == page_size):
end_row = start_row + page_size - 1
values = worksheet.get_values(
start=(start_row, start_col),
end=(end_row, end_col),
include_tailing_empty=True,
include_tailing_empty_rows=False,
returnas="matrix",
**kwargs,
)
request_count += 1
yield from values
start_row = end_row + 1 | 1,668 |
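A hedged usage sketch; the worksheet object and the `handle_row` callback are assumptions standing in for whatever the caller does with each row:

# worksheet is a pygsheets.worksheet.Worksheet obtained elsewhere
for row in get_data_rows_after_start(worksheet, start_row=2, start_col=1, end_col=5):
    handle_row(row)   # hypothetical per-row processing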
def F(
u,
v,
kappa,
rho,
cp,
convection,
source,
r,
neumann_bcs,
robin_bcs,
my_dx,
my_ds,
stabilization,
):
"""
Compute
.. math::
F(u) =
\\int_\\Omega \\kappa r
\\langle\\nabla u, \\nabla \\frac{v}{\\rho c_p}\\rangle
\\, 2\\pi \\, \\text{d}x
+ \\int_\\Omega \\langle c, \\nabla u\\rangle v
\\, 2\\pi r\\,\\text{d}x
- \\int_\\Omega \\frac{1}{\\rho c_p} f v
\\, 2\\pi r \\,\\text{d}x\\\\
- \\int_\\Gamma r \\kappa \\langle n, \\nabla T\\rangle v
\\frac{1}{\\rho c_p} 2\\pi \\,\\text{d}s
- \\int_\\Gamma r \\kappa \\alpha (u - u_0) v
\\frac{1}{\\rho c_p} \\, 2\\pi \\,\\text{d}s,
used for time-stepping
.. math::
u' = F(u).
"""
rho_cp = rho * cp
F0 = kappa * r * dot(grad(u), grad(v / rho_cp)) * 2 * pi * my_dx
# F -= dot(b, grad(u)) * v * 2*pi*r * dx_workpiece(0)
if convection is not None:
c = as_vector([convection[0], convection[1]])
F0 += dot(c, grad(u)) * v * 2 * pi * r * my_dx
# Joule heat
F0 -= source * v / rho_cp * 2 * pi * r * my_dx
# Neumann boundary conditions
for k, n_grad_T in neumann_bcs.items():
F0 -= r * kappa * n_grad_T * v / rho_cp * 2 * pi * my_ds(k)
# Robin boundary conditions
for k, value in robin_bcs.items():
alpha, u0 = value
F0 -= r * kappa * alpha * (u - u0) * v / rho_cp * 2 * pi * my_ds(k)
if stabilization == "supg":
# Add SUPG stabilization.
assert convection is not None
# TODO u_t?
R = (
-div(kappa * r * grad(u)) / rho_cp * 2 * pi
+ dot(c, grad(u)) * 2 * pi * r
- source / rho_cp * 2 * pi * r
)
mesh = v.function_space().mesh()
element_degree = v.ufl_element().degree()
tau = stab.supg(mesh, convection, kappa, element_degree)
F0 += R * tau * dot(convection, grad(v)) * my_dx
else:
assert stabilization is None
return F0 | 1,669 |
def test_define_with_non_symbol_as_variable():
"""TEST 4.12: Defines require the first argument to be a symbol."""
with assert_raises_regexp(DiyLangError, "not a symbol"):
evaluate(parse("(define #t 42)"), Environment()) | 1,670 |
def check_radarr():
"""
Connects to an instance of Radarr and returns a tuple containing the instances status.
Returns:
(str) an instance of the Status enum value representing the status of the service
(str) a short descriptive string representing the status of the service
"""
try:
req = requests.get('{}/api/system/status?apikey={}'.format(paths['Radarr'], keys['Radarr']), timeout=0.2)
req.raise_for_status()
except (requests.ConnectionError, requests.HTTPError, requests.Timeout):
return Status.ERROR.value, "NoAPI"
try:
data = req.json()
except ValueError:
return Status.ERROR.value, "BadJSON"
if data['version']:
return Status.ACTIVE.value, "Online"
else:
return Status.ERROR.value, "BadAPI" | 1,671 |
def reversebits5(max_bits, num):
""" Like reversebits4, plus optimizations regarding leading zeros in
original value. """
rev_num = 0
shifts = 0
while num != 0 and shifts < max_bits:
rev_num |= num & 1
num >>= 1
rev_num <<= 1
shifts += 1
rev_num >>= 1
rev_num <<= (max_bits - shifts)
return rev_num | 1,672 |
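A few quick checks of the expected behaviour, hand-verified against the loop above:

assert reversebits5(8, 0b00000001) == 0b10000000   # 1   -> 128
assert reversebits5(8, 0b00001101) == 0b10110000   # 13  -> 176
assert reversebits5(8, 0) == 0                     # zero input exits the loop immediately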
def rescale(img, thresholds):
"""
Linear stretch of image between two threshold values.
"""
return img.subtract(thresholds[0]).divide(thresholds[1] - thresholds[0]) | 1,673 |
def X_n120() -> np.ndarray:
"""
Fixture that generates a Numpy array with 120 observations. Each
observation contains two float values.
:return: a Numpy array.
"""
# Generate train/test data
rng = check_random_state(2)
X = 0.3 * rng.randn(120, 2)
return X | 1,674 |
def get_shp(shp_str):
"""
Return a shapely geometry in WGS84 lon/lat
input: shp_str - a string corresponding to an iso-3166-1 or -2 administrative area for admin-level 1 (countries) and -2 (states/provinces) respectively
"""
if len(shp_str.split('-'))>1:
load_fts = json.load(open(os.path.join(os.getcwd(),'data','ne_10m_admin_1_states_provinces.geojson'),'r'))
select_fts = [ft for ft in load_fts['features'] if ft['properties']['iso_3166_2']==shp_str]
else:
load_fts = json.load(open(os.path.join(os.getcwd(),'data','ne_10m_admin_0_countries.geojson'),'r'))
select_fts = [ft for ft in load_fts['features'] if ft['properties']['ISO_A2']==shp_str]
all_shps = [geometry.shape(ft['geometry']) for ft in select_fts]
return reduce2mp(flatten_polys(all_shps)) | 1,675 |
def int_to_bytes(n: uint64, length: uint64) -> bytes:
"""
Return the ``length``-byte serialization of ``n`` in ``ENDIANNESS``-endian.
"""
return n.to_bytes(length, ENDIANNESS) | 1,676 |
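For example, assuming ENDIANNESS is "little" as in the consensus-spec constants this helper comes from:

# int_to_bytes(uint64(1), uint64(8))   == b"\x01\x00\x00\x00\x00\x00\x00\x00"
# int_to_bytes(uint64(258), uint64(2)) == b"\x02\x01"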
def upper_to_title(text, force_title=False):
"""Inconsistently, NiH has fields as all upper case.
Convert to titlecase"""
if text == text.upper() or force_title:
text = string.capwords(text.lower())
return text | 1,677 |
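Example behaviour:

upper_to_title("NATIONAL INSTITUTES OF HEALTH")   # -> 'National Institutes Of Health'
upper_to_title("Mixed Case stays as-is")          # returned unchanged
upper_to_title("force me", force_title=True)      # -> 'Force Me'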
def test_ens_reverse_lookup(ethereum_manager):
"""This test could be flaky because it assumes
that all used ens names exist
"""
reversed_addr_0 = to_checksum_address('0x71C7656EC7ab88b098defB751B7401B5f6d8976F')
reversed_addr_1 = ethereum_manager.ens_lookup('lefteris.eth')
expected = {reversed_addr_0: None, reversed_addr_1: 'lefteris.eth'}
assert ethereum_manager.ens_reverse_lookup([reversed_addr_0, reversed_addr_1]) == expected
reversed_addr_2 = ethereum_manager.ens_lookup('abc.eth')
reversed_addr_3 = ethereum_manager.ens_lookup('rotki.eth')
expected = {reversed_addr_2: 'abc.eth', reversed_addr_3: 'rotki.eth'}
assert ethereum_manager.ens_reverse_lookup([reversed_addr_2, reversed_addr_3]) == expected
with pytest.raises(InputError):
ethereum_manager.ens_reverse_lookup('xyz') | 1,678 |
def pairwise_negative(true, pred):
"""Return p_num, p_den, r_num, r_den over noncoreferent item pairs
As used in calcualting BLANC (see Luo, Pradhan, Recasens and Hovy (2014).
>>> pairwise_negative({1: {'a', 'b', 'c'}, 2: {'d'}},
... {1: {'b', 'c'}, 2: {'d', 'e'}})
(2, 4, 2, 3)
"""
true_pairs = _positive_pairs(values(true))
pred_pairs = _positive_pairs(values(pred))
n_pos_agreements = len(true_pairs & pred_pairs)
true_mapping = sets_to_mapping(true)
pred_mapping = sets_to_mapping(pred)
extra_mentions = keys(true_mapping) ^ keys(pred_mapping)
disagreements = {p for p in true_pairs ^ pred_pairs
if p[0] not in extra_mentions
and p[1] not in extra_mentions}
n_common_mentions = len(keys(true_mapping) & keys(pred_mapping))
n_neg_agreements = (_triangle(n_common_mentions) - n_pos_agreements -
len(disagreements))
# Total number of negatives in each of pred and true:
p_den = _triangle(len(pred_mapping)) - len(pred_pairs)
r_den = _triangle(len(true_mapping)) - len(true_pairs)
return n_neg_agreements, p_den, n_neg_agreements, r_den | 1,679 |
def reptile_resurgence_links(tar_url, max_layer, max_container="", a_elem="a", res_links=None, next_url="", callback=None):
    """
    Hierarchical crawler link mining: follow links from the target URL across multiple layers.
    Parameters: target URL | maximum depth | container selector limiting the crawl scope |
    CSS selector for the <a> tags to crawl | internal use, the accumulated result list |
    internal use, the next target URL | optional callback
    """
    # Use None instead of a mutable default argument: a shared list would keep
    # accumulating results across separate top-level calls.
    if res_links is None:
        res_links = []
    if next_url != "" and next_url.startswith('http'):
        res_links.append(next_url)
    if max_layer <= 0:
        return res_links
    rep = init_reptile(tar_url)
    document = rep['document']
    # Focus the crawl on a specific region of the page; with this approach it is
    # recommended to crawl only a single layer.
    container_tags = document.find(max_container).items()
    for tag1 in container_tags:
        children_tags = tag1.children(a_elem).items()
        for tag2 in children_tags:
            # Passing a callback here can effectively reduce the number of requests
            if callback is not None:
                callback(comp_http_url(tar_url, tag2.attr('href')))
            reptile_resurgence_links(
                tar_url, max_layer - 1,
                max_container=max_container,
                res_links=res_links,
                next_url=comp_http_url(tar_url, tag2.attr('href'))
            )
    # After crawling, every collected link is returned
    return res_links | 1,680 |
def _swap(list_, a, b):
"""
Swap items in positions a and b of list_.
list_ -- a list
a -- an index in list_
b -- an index in list_
"""
list_[a], list_[b] = list_[b], list_[a] | 1,681 |
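Usage example:

items = [10, 20, 30]
_swap(items, 0, 2)   # items is now [30, 20, 10]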
def random_uniform(seed_tensor: Tensor,
shape: Tuple[int, ...],
low: float = 0.0,
high: float = 1.0,
dtype: dtypes.dtype = dtypes.float32):
"""
Randomly sample from a uniform distribution with minimum value `low` and maximum value `high`.
Note: not compatible with `IPUModel`.
Args:
seed_tensor (Tensor):
Used to seed the probability distribution. Must have data type uint32 and shape (2,).
shape (Tuple[int, ...]):
Shape of output tensor
low (float, optional):
Minimum value. Defaults to 0.0.
high (float, optional):
Maximum value. Defaults to 1.0.
dtype (dtypes.dtype, optional):
Data type of output tensor. Defaults to dtypes.float32.
Returns:
Tensor: tensor with elements sampled from a uniform distribution.
"""
ctx = get_current_context()
g = ctx.graph
pb_g = g._pb_graph
check_in_graph(g, seed_tensor)
settings = ctx._get_op_settings('random_uniform')
opid = _ir.OperatorIdentifier("ai.onnx", "RandomUniform", 1,
_ir.NumInputs(1, 1), 1)
op = pb_g.createConnectedOp_RandomUniformOp(
{0: seed_tensor.id},
{0: g._create_tensor_id("random_uniform_out")},
shape_=shape,
low_=low,
high_=high,
dataType_=convert_optional_dtype(dtype),
opid=opid,
settings=settings,
)
return Tensor._from_pb_tensor(op.outTensor(0)) | 1,682 |
def pipelines_as_gdf():
"""
Return pipelines as geodataframes
"""
from shapely import wkt
def wkt_loads(x):
try:
return wkt.loads(x)
except Exception:
return None
df_fossil_pipelines = load_fossil_pipelines().query("route==route")
# Manual transform to line string:
# Input 43.5995, 16.3946: 43.6098, 16.5395:
# Output: LINESTRING (30 10, 10 30, 40 40)
df_fossil_pipelines['route'] = 'LINESTRING (' + df_fossil_pipelines['route'].str.replace(',', '').str.replace(':', ',') + ')'
df_fossil_pipelines['route'] = df_fossil_pipelines['route'].apply(wkt_loads)
return gpd.GeoDataFrame(df_fossil_pipelines, geometry=df_fossil_pipelines['route']) | 1,683 |
def historico(historia="", sintomas="", medicamentos=""):
    """History: record the patient's previous illnesses, including past
    symptoms and the family medical history.

    Symptoms: describe the patient's current symptoms.

    Medications: drugs and dosages used during the patient's overall treatment."""
    historia = str(input("Digite o histórico de vida do paciente: "))
    sintomas = str(input("Digite os sintomas do paciente: "))
    medicamentos = str(input("Digite o medicamento a ser usado e a dosagem: "))
    return historia, sintomas, medicamentos | 1,684 |
def com_ec2_sync_cmdb():
"""数据同步"""
com_ec2_list = get_ec2_list()
with DBContext('w') as session:
        session.query(ComEc2).delete(synchronize_session=False)  # clear all existing records first
for com_ec2 in com_ec2_list:
instance_id = com_ec2.get("InstanceId", "")
ami_id = com_ec2.get("ImageId", "")
instance_type = com_ec2.get("InstanceType", "")
key_name = com_ec2.get("KeyName", "")
launch_time = com_ec2.get("LaunchTime", "")
placement = str(com_ec2.get("Placement", ""))
private_dns_name = com_ec2.get("PrivateDnsName", "")
private_ip_address = com_ec2.get("PrivateIpAddress", "")
public_dns_name = com_ec2.get("PublicDnsName", "")
public_ip_address = com_ec2.get("PublicIpAddress", "")
new_com_ec2 = ComEc2(
instance_id=instance_id, ami_id=ami_id, instance_type=instance_type, key_name=key_name,
launch_time=launch_time, placement=placement, private_dns_name=private_dns_name,
private_ip_address=private_ip_address, public_dns_name=public_dns_name, public_ip_address=public_ip_address
)
session.add(new_com_ec2)
session.commit() | 1,685 |
def plot_beam_ts(obs, title=None, pix_flag_list=[], reg_interest=None,
plot_show=False, plot_save=False, write_header=None,
orientation=ORIENTATION):
"""
plot time series for the pipeline reduction
:param obs: Obs or ObsArray or list or tuple or dict, can be the object
containing the data to plot, or list/tuple of objects, or dict in the
form of {key: obs} or {key: (obs, kwargs)} or {key: (obs, obs_yerr)} or
{key: (obs, obs_yerr, kwargs)} or {key: [obs, kwargs]}, in which case
the dict key will be the label in legend, obs and obs_yerr is Obs or
ObsArray objects, and kwargs is passed to FigArray.scatter() if the dict
        item is a tuple or FigArray.plot() if it's a list, the items in the
tuple/list determined based on type, and if obs_yerr is present,
FigArray.errorbar() will also be called with kwargs
:type obs: Union[Obs, ObsArray, list, tuple, dict]
:param str title: str, title of the figure, will use the first available
obs_id if left None
:param list pix_flag_list: list, [[spat, spec], ...] or [[row, col], ...] of
the flagged pixels, shown in grey shade
:param dict reg_interest: dict, indicating the region of array to plot,
passed to ArrayMap.take_where(); will plot all the input pixels if
left None
:param bool plot_show: bool, flag whether to show the figure with plt.show()
:param bool plot_save: bool, flag whether to save the figure
:param str write_header: str, path to the file header to write the figure to,
the figure will be saved as {write_header}.png, only matters if
plot_save=True; will use the first available obs_id if left None
:param str orientation: str, the orientation of the figure, passed to
FigArray.init_with_array_map
:return: FigArray, object of the figure
:rtype: FigArray
"""
if isinstance(obs, (Obs, ObsArray, np.ndarray)):
obs0 = obs
elif isinstance(obs, dict):
obs0 = list(obs.values())[0]
if isinstance(obs0, (list, tuple)):
obs0 = obs0[0]
else:
obs0 = obs[0]
array_map = ObsArray(obs0).array_map_
if title is None:
title = obs0.obs_id_
if write_header is None:
write_header = obs0.obs_id_
if isinstance(obs0, (Obs, ObsArray)) and (not obs0.ts_.empty_flag_):
obs_t_len = obs0.t_end_time_ - obs0.t_start_time_
x_size = max((obs_t_len / units.hour).to(1).value / 2,
FigArray.x_size_)
else:
x_size = FigArray.x_size_
fig = FigArray.init_by_array_map(array_map if reg_interest is None else
array_map.take_where(**reg_interest),
orientation=orientation, x_size=x_size)
if isinstance(obs, (Obs, ObsArray, np.ndarray)):
fig.scatter(obs)
elif isinstance(obs, dict):
for key in obs:
if isinstance(obs[key], (list, tuple)):
plot_func = fig.scatter if isinstance(obs[key], tuple) else \
fig.plot
if len(obs[key]) > 1:
if isinstance(obs[key][1], (Obs, ObsArray)):
kwargs = obs[key][2] if len(obs[key]) > 2 else {}
plot_func(obs[key][0], **kwargs)
fig.errorbar(obs[key][0], yerr=obs[key][1], label=key,
**kwargs)
else:
plot_func(obs[key][0], label=key, **obs[key][1])
else:
plot_func(obs[key][0], label=key)
else:
fig.scatter(obs[key], label=key)
fig.legend(loc="upper left")
if fig.twin_axs_list_ is not None:
fig.legend(twin_axes=True, loc="lower right")
else:
for obs_i in obs:
fig.scatter(obs_i)
fig.imshow_flag(pix_flag_list=pix_flag_list)
fig.set_labels(obs0, orientation=orientation)
fig.set_title(title)
if plot_save:
fig.savefig("%s.png" % write_header)
if plot_show:
plt.show()
return fig | 1,686 |
def getcallargs(func, *positional, **named):
"""Get the mapping of arguments to values.
A dict is returned, with keys the function argument names (including the
names of the * and ** arguments, if any), and values the respective bound
values from 'positional' and 'named'."""
args, varargs, varkw, defaults = getargspec(func)
f_name = func.__name__
arg2value = {}
# The following closures are basically because of tuple parameter unpacking.
assigned_tuple_params = []
def assign(arg, value):
if isinstance(arg, str):
arg2value[arg] = value
else:
assigned_tuple_params.append(arg)
value = iter(value)
for i, subarg in enumerate(arg):
try:
subvalue = next(value)
except StopIteration:
raise ValueError('need more than %d %s to unpack' %
(i, 'values' if i > 1 else 'value'))
assign(subarg,subvalue)
try:
next(value)
except StopIteration:
pass
else:
raise ValueError('too many values to unpack')
def is_assigned(arg):
if isinstance(arg,str):
return arg in arg2value
return arg in assigned_tuple_params
if ismethod(func) and func.im_self is not None:
# implicit 'self' (or 'cls' for classmethods) argument
positional = (func.im_self,) + positional
num_pos = len(positional)
num_total = num_pos + len(named)
num_args = len(args)
num_defaults = len(defaults) if defaults else 0
for arg, value in zip(args, positional):
assign(arg, value)
if varargs:
if num_pos > num_args:
assign(varargs, positional[-(num_pos-num_args):])
else:
assign(varargs, ())
elif 0 < num_args < num_pos:
raise TypeError('%s() takes %s %d %s (%d given)' % (
f_name, 'at most' if defaults else 'exactly', num_args,
'arguments' if num_args > 1 else 'argument', num_total))
elif num_args == 0 and num_total:
if varkw:
if num_pos:
# XXX: We should use num_pos, but Python also uses num_total:
raise TypeError('%s() takes exactly 0 arguments '
'(%d given)' % (f_name, num_total))
else:
raise TypeError('%s() takes no arguments (%d given)' %
(f_name, num_total))
for arg in args:
if isinstance(arg, str) and arg in named:
if is_assigned(arg):
raise TypeError("%s() got multiple values for keyword "
"argument '%s'" % (f_name, arg))
else:
assign(arg, named.pop(arg))
if defaults: # fill in any missing values with the defaults
for arg, value in zip(args[-num_defaults:], defaults):
if not is_assigned(arg):
assign(arg, value)
if varkw:
assign(varkw, named)
elif named:
unexpected = next(iter(named))
try:
unicode
except NameError:
pass
else:
if isinstance(unexpected, unicode):
unexpected = unexpected.encode(sys.getdefaultencoding(), 'replace')
raise TypeError("%s() got an unexpected keyword argument '%s'" %
(f_name, unexpected))
unassigned = num_args - len([arg for arg in args if is_assigned(arg)])
if unassigned:
num_required = num_args - num_defaults
raise TypeError('%s() takes %s %d %s (%d given)' % (
f_name, 'at least' if defaults else 'exactly', num_required,
'arguments' if num_required > 1 else 'argument', num_total))
return arg2value | 1,687 |
def get_qos():
"""Gets Qos policy stats, CLI view"""
return render_template('qos.html', interfaces=QueryDbFor.query_interfaces(device),
interface_qos=QueryDbFor.query_qos(device)) | 1,688 |
def test_add_columns():
"""Test the behavior of AraDefinition.add_columns"""
empty = empty_defn()
empty.config_uid = uuid4()
# Check the mismatched name error
with pytest.raises(ValueError) as excinfo:
empty.add_columns(
variable=TerminalMaterialInfo(name="foo", headers=["bar"], field="name"),
columns=[IdentityColumn(data_source="bar")]
)
assert "data_source must be" in str(excinfo.value)
# Check desired behavior
with_name_col = empty.add_columns(
variable=TerminalMaterialInfo(name="name", headers=["bar"], field="name"),
columns=[IdentityColumn(data_source="name")]
)
assert with_name_col.variables == [TerminalMaterialInfo(name="name", headers=["bar"], field="name")]
assert with_name_col.columns == [IdentityColumn(data_source="name")]
assert with_name_col.config_uid == empty.config_uid
# Check duplicate variable name error
with pytest.raises(ValueError) as excinfo:
with_name_col.add_columns(
variable=TerminalMaterialInfo(name="name", headers=["bar"], field="name"),
columns=[IdentityColumn(data_source="name")]
)
assert "already used" in str(excinfo.value) | 1,689 |
def create_multiaction(action_name: str, subactions: List[str], description: str = '') -> Callable[[Context, Any], Any]:
"""Creates and registers an action that only executes the subactions in order.
    Dependencies and permission rules are inferred from the subactions.
    Subactions must be defined first, because this function uses their registered definitions!
    Arguments
--------
action_name
Name of the new action that acts as a key
subactions
The subactions in the execution order.
The subactions must be registered before the multiaction.
description
Human readable action description.
Returns
-------
function
The combination of subaction functions.
"""
registerations = [registered_actions[sa] for sa in subactions]
affects_database = any([r.affects_database for r in registerations])
baseactions = {
baseaction for r in registerations for baseaction in r.baseactions}
dependencies = {
dep for r in registerations for dep in r.dependencies} - baseactions
def func(*args, **kwargs):
returns = [r.function(*args, **kwargs) for r in registerations]
return returns
func.__doc__ = description
ActionRegisteration(func, action_name, affects_database,
dependencies, baseactions)
return func | 1,690 |
def phase_amp_seq_to_complex():
"""
This constructs the function to convert from phase/magnitude format data,
assuming that data type is simple with two bands, to complex64 data.
Returns
-------
callable
"""
def converter(data):
if not isinstance(data, numpy.ndarray):
raise TypeError(
_requires_array_text.format(type(data)))
        if len(data.shape) != 3 or data.shape[2] != 2:
raise ValueError(_requires_3darray_text.format(data.shape))
if data.dtype.name not in ['uint8', 'uint16', 'uint32', 'uint64']:
raise ValueError(
'Requires a numpy.ndarray of unsigned integer type.')
bit_depth = data.dtype.itemsize*8
out = numpy.zeros(data.shape[:2] + (1, ), dtype=numpy.complex64)
mag = data[:, :, 0]
theta = data[:, :, 1]*(2*numpy.pi/(1 << bit_depth))
out[:, :, 0].real = mag*numpy.cos(theta)
out[:, :, 0].imag = mag*numpy.sin(theta)
return out
return converter | 1,691 |
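A small hypothetical round-trip check of the converter on uint8 phase/magnitude data:

import numpy as np

conv = phase_amp_seq_to_complex()
data = np.zeros((2, 2, 2), dtype=np.uint8)
data[:, :, 0] = 100            # magnitude band
data[:, :, 1] = 64             # phase band: 64 * 2*pi/256 = pi/2
out = conv(data)               # shape (2, 2, 1), complex64, approximately 0 + 100j everywhere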
def Regress_model(x_train,y_train,x_test=None,y_test=None,degree=2,test_size=0.1):
"""[summary]
DESCRIPTION :-
Regressin Model selection.
This Model will compare all the different Regression models, and will return model with highest Rsq value.
It also shows performance graph comaring the models.
PARAMETERS :-
x_train,x_test,y_train,y_test = are the data after tain test split
test_size = 10 % of original data is used for testing
degree = degree of polinomial regresoin (default = 2)
Returns:
Model with heighest Rsq.
Along with model compaing plot.
"""
print('Regression Model Selection...')
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
from sklearn.svm import SVR
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import r2_score
from sklearn.model_selection import train_test_split
if x_test is None or y_test is None:
x_train,x_test,y_train,y_test = train_test_split(x_train,y_train,random_state=0,test_size=test_size)
print('\nLinear Regression ...')
lr=LinearRegression()
lr.fit(x_train,y_train)
y_pred_lir = lr.predict(x_test)
lr_pred=r2_score(y_test, y_pred_lir)
print('Rsq :',lr_pred )
    print('\nPolynomial Regression ...')
    polr=PolynomialFeatures(degree)
    x_polr=polr.fit_transform(x_train)
    polr.fit(x_polr,y_train)
    lr.fit(x_polr,y_train)
    y_pred_poly=lr.predict(polr.fit_transform(x_test))
    poly_pred=r2_score(y_test,y_pred_poly)  # r2_score expects (y_true, y_pred)
    print('Rsq :',poly_pred)
print('\nSVM Model ...')
regressor = SVR(kernel = 'rbf')
regressor.fit(x_train, y_train)
y_pred=regressor.predict(x_test)
svr_pred=r2_score(y_test,y_pred)
print('Rsq :',svr_pred)
    print('\nDecision Tree ...')
d_tree=DecisionTreeRegressor(random_state=1)
d_tree.fit(x_train,y_train)
y_pred=d_tree.predict(x_test)
d_tree_acc=r2_score(y_test,y_pred)
print('Rsq : ',d_tree_acc)
print('\nRandom Forest ...')
rand = RandomForestRegressor(n_estimators = 100, random_state = 1)
rand.fit(x_train,y_train)
y_pred=rand.predict(x_test)
ran_for_acc=r2_score(y_test,y_pred)
print('Rsq :',ran_for_acc)
l=[lr_pred,poly_pred,svr_pred,d_tree_acc,ran_for_acc]
x_label=['Lin_Reg','Poly_Reg','Svm','Des_Tr','Rand_For']
ma=l.index(max(l))
if ma==0:
model=lr
elif(ma==1):
model=polr
elif(ma==2):
model=regressor
elif(ma==3):
model=d_tree
else:
model=rand
xx=np.arange(0,5)
plt.plot(xx,l)
plt.ylabel('Rsq')
plt.title('Regression Models')
    plt.xticks(xx, x_label)
plt.show()
return model | 1,692 |
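A hypothetical call on a toy dataset (assuming numpy and matplotlib.pyplot are already imported at module level as `np` and `plt`, which the function body requires):

from sklearn.datasets import make_regression

X, y = make_regression(n_samples=200, n_features=4, noise=10.0, random_state=0)
best_model = Regress_model(X, y, test_size=0.2)   # prints each model's R-squared and shows the comparison plot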
def test_store_and_retrieve(sirang_instance):
"""Test store and retrieve functions."""
# Declare database/collection names
db = 'db'
collection = 'store_and_retrieve'
# Store tests
assert sirang_instance.store(db, collection, {'_id': 0}) == '0'
assert sirang_instance.store(db, collection, {'test': 'x', 'unique': 2})
assert sirang_instance.store(db, collection, {'test': 'x', 'unique': 1})
# Retrieve tests
retrieved_docs = sirang_instance.retrieve(
db, collection, filter_criteria={'test': 'x'})
assert isinstance(retrieved_docs, list)
assert len(retrieved_docs) == 2
assert isinstance(retrieved_docs[0], dict)
only_one = sirang_instance.retrieve(
db, collection, filter_criteria={'test': 'x', 'unique': 1})
assert isinstance(only_one, list)
assert len(only_one) == 1
assert only_one[0]['unique'] == 1 | 1,693 |
async def get_group_list_all():
"""
    Get all groups, whether authorized or not; returned as the raw type (a list).
"""
bot = nonebot.get_bot()
self_ids = bot._wsr_api_clients.keys()
for sid in self_ids:
group_list = await bot.get_group_list(self_id=sid)
return group_list | 1,694 |
def experiences_task():
"""Re-schedule self before executing `tasks.update_experiences`."""
schedule.enter(UPDATE_FREQ_EXPERIENCES, 1, experiences_task)
tasks.update_experiences() | 1,695 |
def merge(array, left, right):
"""
    Merge step of merge sort: merges the two sorted halves array[left:mid+1]
    and array[mid+1:right+1] back into array in place.
    Time Complexity: Theta(n) comparisons per merge (the full merge sort is
    Theta(n log n)); note that list.pop(0) makes this implementation shift
    O(n^2) elements in the worst case.
    Auxiliary Space: O(n)
    :param array: list of elements whose two halves are already sorted
    :param left: left limit for the merge
    :param right: right limit for the merge
    :return: nothing; merges the halves in place.
"""
mid = (left + right) // 2
l = array[left:mid + 1]
r = array[mid + 1:right + 1]
k = left
while l and r:
if l[0] < r[0]:
array[k] = l.pop(0)
else:
array[k] = r.pop(0)
k += 1
while l:
array[k] = l.pop(0)
k += 1
while r:
array[k] = r.pop(0)
k += 1 | 1,696 |
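Usage sketch: both halves must already be sorted before calling merge, as in the merge-sort recursion this helper belongs to:

arr = [1, 5, 2, 4]          # left half [1, 5] and right half [2, 4] are each sorted
merge(arr, 0, 3)
assert arr == [1, 2, 4, 5]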
def emcee_schools_model(data, draws, chains):
"""Schools model in emcee."""
import emcee
chains = 10 * chains # emcee is sad with too few walkers
y = data["y"]
sigma = data["sigma"]
J = data["J"] # pylint: disable=invalid-name
ndim = J + 2
pos = np.random.normal(size=(chains, ndim))
pos[:, 1] = np.absolute(pos[:, 1]) # pylint: disable=unsupported-assignment-operation
if emcee_version() < 3:
sampler = emcee.EnsembleSampler(chains, ndim, _emcee_lnprob, args=(y, sigma))
# pylint: enable=unexpected-keyword-arg
sampler.run_mcmc(pos, draws)
else:
here = os.path.dirname(os.path.abspath(__file__))
data_directory = os.path.join(here, "saved_models")
filepath = os.path.join(data_directory, "reader_testfile.h5")
backend = emcee.backends.HDFBackend(filepath) # pylint: disable=no-member
backend.reset(chains, ndim)
# pylint: disable=unexpected-keyword-arg
sampler = emcee.EnsembleSampler(
chains, ndim, _emcee_lnprob, args=(y, sigma), backend=backend
)
# pylint: enable=unexpected-keyword-arg
sampler.run_mcmc(pos, draws, store=True)
return sampler | 1,697 |
def _agefromarr(arr, agelist):
"""Measures the mean age map of a timeslice array.
:param arr: A timeslice instance's data array.
:param agelist: List of age sampling points of array.
:return:
    :agemap: Light- or mass-weighted (depending on weight_type in the timecube()) mean age of the slice_obj at each spaxel, in years.
"""
arr = np.sum(arr, axis=3) # Remove metallicities
arrshape = np.shape(arr)
arw = np.expand_dims(np.log10(agelist), 0)
arw = np.expand_dims(arw, 0)
    arw = np.pad(arw, ((0, arrshape[0]-1), (0, arrshape[1]-1), (0, 0)), 'maximum')  # broadcast the log-age weights across both spatial axes
return 10**(np.sum(arw*arr, axis=2)/np.sum(arr, axis=2)) | 1,698 |
def undo_download_dir_patch():
"""
Provide a way for certain tests to not have tmp download dir.
"""
oridir = os.environ["SUNPY_DOWNLOADDIR"]
del os.environ["SUNPY_DOWNLOADDIR"]
yield
os.environ["SUNPY_DOWNLOADDIR"] = oridir | 1,699 |