content (string, lengths 22–815k) | id (int64, 0–4.91M)
---|---
def get_email(package):
"""
    Return package email as listed in `__email__` in `__init__.py`.
"""
init_py = codecs.open(os.path.abspath(os.path.join(package, '__init__.py')), encoding='utf-8').read()
return re.search("^__email__ = ['\"]([^'\"]+)['\"]", init_py, re.MULTILINE).group(1) | 5,358,600 |
def update_conf_file():
"""Update the logging configuration file with the paths
defined in the CONFIG file
"""
sett = AlgoSettings()
saving_path = pathlib.Path(sett.log_saving_path())
config_file = pathlib.Path(sett.log_configuration())
with open(config_file) as my_file:
        doc = yaml.safe_load(my_file)
doc['handlers']['info_file_handler']['filename'] = \
str(saving_path / 'bsk_info.log')
doc['handlers']['error_file_handler']['filename'] = \
str(saving_path / 'bsk_error.log')
with open(config_file, 'w') as my_file:
yaml.dump(doc, my_file) | 5,358,601 |
def get_class_name(obj, instance=True):
"""
Given a class or instance of a class, returns a string representing the
fully specified path of the class.
Parameters
----------
obj : object
An instance of any object
instance: bool
Indicates whether given object is an instance of the class to be named
"""
typ = type(obj) if instance else obj
return "{}.{}".format(typ.__module__, typ.__name__) | 5,358,602 |
def contour(*data, **kwargs):
""" Contour line plots of scalar data in a roughly Matlab-compatible way.
Data are assumed to be a scalar image. Any additional *kwargs* passed in
are broadcast to all plots.
Example::
xs = linspace(0,10,100)
ys = linspace(0,20,200)
x,y=meshgrid(xs,ys)
z = sin(x)*y
contour(z)
To use previous data, specify names instead of actual data arrays.
"""
cont = _do_plot_boilerplate(kwargs)
plots = plot_maker.do_contour(session.data, session.colormap, cont,
"line", *data, **kwargs)
cont.request_redraw()
return | 5,358,603 |
def pipeline(opts):
"""Construct the pipeline"""
outdir = path.join(opts.outdir, opts.cancer)
pTCGADownload.input = [opts.snpmani]
pTCGADownload.args.nthread = opts.nthread
pTCGADownload.args.token = opts.token
pTCGADownload.config.export_dir = outdir
pTCGADownload.cache = 'export'
pSample2SubmitterID.depends = pTCGADownload
pSample2SubmitterID.input = lambda ch: ch.cbind(opts.snpmeta)
pSample2SubmitterID.args.len = 16
# remove normal-like
pShell.depends = pSample2SubmitterID
pShell.args.cmd = '''
mkdir {{o.outfile}}
for gtfile in {{i.infile}}/*.txt; do
if [[ $(basename $gtfile | cut -c14) == "0" ]]; then
ln -s $gtfile {{o.outfile}}/
fi
done
'''
pGtFiles2Mat.depends = pShell
pGtFiles2Mat.input = lambda ch: [ch.expand(pattern='*.txt').flatten()]
pGtFiles2Mat.config.echo_jobs = [0]
pGtFiles2Mat.config.echo_types = 'all'
pGtFiles2Mat.config.export_dir = outdir
pGtFiles2Mat.output = 'outfile:file:TCGA-%s.gt.txt' % opts.cancer
pDownloadGet.input = [
'https://gdc.xenahubs.net/download/'
'TCGA-%s.htseq_counts.tsv.gz' % opts.cancer,
'https://gdc.xenahubs.net/download/'
'TCGA-%s.GDC_phenotype.tsv.gz' % opts.cancer,
'https://gdc.xenahubs.net/download/'
'TCGA-%s.survival.tsv.gz' % opts.cancer
]
pDownloadGet.config.export_dir = outdir
pDownloadGet.config.export_part = ['*phenotype.tsv.gz',
'*.survival.tsv.gz']
# remove normal-like
pTsvColSelect.depends = pDownloadGet
pTsvColSelect.input = lambda ch: ch.row_at(0)
pTsvColSelect.args.cols = ('lambda cnames: [name for name in cnames '
'if name[-3:-2] == "0" or name[:5] != "TCGA-"]')
# convert ENSG to gene symbols
pGeneNameNorm.depends = pTsvColSelect
pGeneNameNorm.output = 'outfile:file:TCGA-%s.expr.txt' % opts.cancer
pGeneNameNorm.args.inopts.cnames = True
pGeneNameNorm.args.frm = 'ensembl.gene'
pGeneNameNorm.args.notfound = 'skip'
pGeneNameNorm.config.export_dir = outdir
return pTCGADownload, pDownloadGet | 5,358,604 |
def create_tarfile(files, project_name):
"""Create a tar file based on the list of files passed"""
fd, filename = tempfile.mkstemp(prefix="polyaxon_{}".format(project_name), suffix='.tar.gz')
with tarfile.open(filename, "w:gz") as tar:
for f in files:
tar.add(f)
yield filename
# clear
os.close(fd)
os.remove(filename) | 5,358,605 |
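A hedged usage sketch for create_tarfile: because it is a generator that yields the archive path and then removes it, it is typically consumed as an iterator (or wrapped with contextlib.contextmanager in the surrounding project); the file list below is hypothetical.
for tar_path in create_tarfile(["setup.py"], "demo"):
    print("archive written to", tar_path)  # use the archive inside the loop body
# when iteration resumes, the temporary file is closed and deleted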
def mol_to_smiles(molecule, isomeric=True, explicit_hydrogen=True, mapped=True):
"""
Generate canonical SMILES with RDKit
Parameters
----------
molecule: RDKit Chem.rdchem.Mol instance
The molecule to generate SMILES for
isomeric: bool
If True, SMILES will have isomeric information. If molecule already has isomeric information, this will be retained.
If no isomeric information exists, this function will perceive it and assign the CW (clockwise) flag for chiral
centers and the E-isomer for stereo bonds.
explicit_hydrogen: bool
If True, SMILES will have explicit hydrogens
mapped: bool
If True, SMILES will have map indices. (+1 because the map is 1 indexed)
Returns
-------
smiles: str
The canonical SMILES
"""
if mapped and not explicit_hydrogen:
raise Warning("Tagged SMILES must include hydrogens to retain order")
if mapped and not isomeric:
raise Warning("Tagged SMILES must include stereochemistry ")
if explicit_hydrogen:
# Add explicit hydrogens
molecule = Chem.AddHs(molecule)
if not explicit_hydrogen:
molecule = Chem.RemoveHs(molecule)
try:
json_geometry = int(molecule.GetProp("_json_geometry"))
except KeyError:
json_geometry = False
if isomeric and not has_stereo_defined(molecule):
raise ValueError("Some stereochemistry is not defined")
# Get canonical order for map
if mapped:
if json_geometry:
# keep original ordering
#ToDo this looks like a potential bug that only json_geometry gets atom maps
for i in range(molecule.GetNumAtoms()):
molecule.GetAtomWithIdx(i).SetAtomMapNum(i+1)
else:
molecule = canonical_order_atoms(molecule)
smiles = Chem.MolToSmiles(molecule, allHsExplicit=explicit_hydrogen, isomericSmiles=isomeric, canonical=True)
return smiles | 5,358,606 |
def _plot_events_nday(ax, grid, events, scale_factor=1.0):
"""
Plot a map of the total number of days spent in dry spell events.
Parameters
----------
ax : <matplotlib.axes.Axes> instance.
The axes to which the map will be drawn.
grid : <geo_grid.LandGrid> instance
Object describing the spatial grid.
events : list of lists of <event.Event> instances
Catalogue of dry spell events from file_eves.
scale_factor : float, optional
        Totals are multiplied by this number before plotting. Typically
used to convert from total to per year.
Returns
-------
PCM : mappable
        E.g., <matplotlib.collections.QuadMesh>.
"""
nday = []
for eves in events[0]:
nday.append(sum(e.duration for e in eves))
nday = np.ma.masked_less(nday, 1)
nday = grid.expand(nday) * scale_factor
levs = np.linspace(0, 360, 13)
cmap = _get_cmap("cividis", levs, over="orange")
PCM = _plot_map(ax, grid, nday, levs, cmap, ticks=levs[::3])
ax.set_title("Number of days per year spent in dry spells")
ax.add_feature(LAND, facecolor="lightgrey")
return PCM | 5,358,607 |
def rank_transform(arr: np.ndarray, centered=True) -> np.ndarray:
"""
Transform a 1-dim ndarray with arbitrary scalar values to an array with equally spaced rank values.
This is a nonlinear transform.
:param arr: input array
:param centered: if the transform should by centered around zero
:return: transformed array
"""
assert isinstance(arr, np.ndarray)
assert arr.ndim == 1
# Create array to sort in
ranks = np.zeros_like(arr)
# Ascending sort
idcs_sort = np.argsort(arr)
# Rearrange to an equal-step array from -0.5 (or 0) to 0.5 (or 1)
if centered:
ranks[idcs_sort] = np.linspace(-.5, .5, idcs_sort.size, endpoint=True)
else:
ranks[idcs_sort] = np.linspace(0., 1., idcs_sort.size, endpoint=True)
return ranks | 5,358,608 |
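A minimal usage sketch for rank_transform, assuming numpy is imported as np as in the function above:
import numpy as np

arr = np.array([10.0, -3.0, 0.5, 99.0])
print(rank_transform(arr))                  # equally spaced ranks in [-0.5, 0.5]
print(rank_transform(arr, centered=False))  # equally spaced ranks in [0.0, 1.0]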
def edit_post(id, alias):
"""Edit an existing post.
User has to be logged in and be either:
- Author of the post
- Editor (role)
- Administrator (role)
"""
post = Post.query.get_or_404(id)
if current_user != post.author and not (
current_user.has_role('Administrator') or current_user.has_role('Editor')
):
abort(403)
form = PostForm()
if form.validate_on_submit():
upload = Upload.query.filter_by(filename=form.image.data).first()
category = Category.query.filter_by(id=form.category.data).first()
post.title = form.title.data
post.alias = sanitize_alias(form.alias.data)
post.timestamp = form.timestamp.data
post.description = form.description.data
post.body = form.body.data
post.image = upload
post.featured = form.featured.data
post.commenting = form.commenting.data
post.category = category
db.session.add(post)
# update tags
new_tags = sanitize_tags(form.tags.data)
old_tags = sanitize_tags(', '.join([c.title for c in post.tags.all()]))
added_tag_titles, removed_tag_titles = get_added_removed(new_tags, old_tags)
# add new tags
added_tag_aliases = [sanitize_alias(c) for c in added_tag_titles]
for c in zip(added_tag_titles, added_tag_aliases):
tag_alias = c[1]
tag = Tag.query.filter(Tag.alias == tag_alias).first()
# if tag doesn't exist in the db, add it
if not tag:
tag = Tag(title=c[0], alias=c[1])
db.session.add(tag)
# add relation between the Post and the Tag
# flush session to obtain tag.id, if the tag has been added recently
db.session.flush()
cl = Tagification(tag_id=tag.id, post_id=id)
db.session.add(cl)
# remove obsolete tags
removed_tag_aliases = [sanitize_alias(c) for c in removed_tag_titles]
for c in zip(removed_tag_titles, removed_tag_aliases):
tag_alias = c[1]
tag = Tag.query.filter(Tag.alias == tag_alias).first()
# remove relations
old_cl = Tagification.query.filter(
Tagification.tag_id == tag.id, Tagification.post_id == id
).first()
db.session.delete(old_cl)
# remove tag, if it's not used in other posts
other_cl = Tagification.query.filter(
Tagification.tag_id == tag.id, Tagification.post_id != id
).first()
if not other_cl:
db.session.delete(tag)
flash('The post has been updated.', 'success')
return redirect(
url_for(
'main.post', category=post.category.alias, id=post.id, alias=post.alias
)
)
form.title.data = post.title
form.alias.data = post.alias
form.timestamp.data = post.timestamp
form.description.data = post.description
form.body.data = post.body
if post.image:
form.image.data = post.image.filename
form.featured.data = post.featured
form.commenting.data = post.commenting
form.category.data = post.category
form.tags.data = ', '.join([c.title for c in post.tags.all()])
return render_template(
'ctrl/edit_post.html', form=form, datetimepicker=datetime.utcnow()
) | 5,358,609 |
def baseline(parent,params,label='result'):
"""
Add a `_params` result from `biasd.utils.baseline` to HDF5 group
"""
# pi,mu,var,r,baseline,R2,ll,iter
 group = parent.create_group(label)
_addhash(group)
group.attrs['description'] = 'White-noise baseline correction parameters'
group.attrs['pi'] = params.pi
group.attrs['mu'] = params.mu
group.attrs['var'] = params.var
group.create_dataset('r', data = params.r)
group.create_dataset('baseline', data = params.baseline)
group.attrs['R2'] = params.R2
group.attrs['log likelihood'] = params.ll
group.attrs['iterations'] = params.iter | 5,358,610 |
def delete_all_extensions(imagename, keep_exts=None):
"""
Parameters
----------
imagename : str
keep_exts : None, iterable
A list of extensions to keep, example: ['mask', 'psf']
"""
for filen in glob(imagename+'.*'):
if keep_exts is not None and any(filen.endswith(ext) for ext in keep_exts):
continue
try:
log_post(':: Removing {0}'.format(filen))
rmtables(filen)
shutil.rmtree(filen)
log_post('-- Hard Delete!')
except OSError:
pass | 5,358,611 |
def generate_glove(dataset="bigvul", sample=False, cache=True):
"""Generate Glove embeddings for tokenised dataset."""
savedir = svd.get_dir(svd.processed_dir() / dataset / f"glove_{sample}")
if os.path.exists(savedir / "vectors.txt") and cache:
svd.debug("Already trained GloVe.")
return
if dataset == "bigvul":
df = bigvul(sample=sample)
MAX_ITER = 2 if sample else 500
# Only train GloVe embeddings on train samples
samples = df[df.label == "train"].copy()
# Preprocessing
samples.before = svd.dfmp(
samples, svdt.tokenise_lines, "before", cs=200, desc="Get lines: "
)
lines = [i for j in samples.before.to_numpy() for i in j]
# Save corpus
savedir = svd.get_dir(svd.processed_dir() / dataset / f"glove_{sample}")
with open(savedir / "corpus.txt", "w") as f:
f.write("\n".join(lines))
# Train Glove Model
CORPUS = savedir / "corpus.txt"
svdglove.glove(CORPUS, MAX_ITER=MAX_ITER) | 5,358,612 |
def _date_list_to_num(ifg_date_list):
"""Convert list of dates, or list of date pairs, numpy array of floats
for 'days since 1970'
Handles both strings and datetimes
"""
from matplotlib import dates
arr = np.array(ifg_date_list)
if isinstance(arr.ravel()[0], str):
return dates.datestr2num(ifg_date_list)
else:
return dates.date2num(ifg_date_list) | 5,358,613 |
def lief(phenny, input):
"""
    But she is actually quite sweet
"""
asker = input.nick
phenny.say(random.choice(NICE_CHOICES) % asker) | 5,358,614 |
def get_font():
"""
Sets up a font capable of rendering the characters
"""
path = os.path.join(os.getcwd(), 'scripts', 'assets', 'fonts', 'NotoSansCJKjp-Regular.otf')
return font_manager.FontProperties(fname=path) | 5,358,615 |
def maxindices(l):
"""
    Get indices for all occurrences of the maximal element in a list
:param l:
:return:
"""
max_indices = []
    max_value = l[0]  # assumes l is a non-empty sequence, not a one-shot iterator
for i, v in enumerate(l):
if v > max_value:
max_value = v
max_indices = [i]
elif v == max_value:
max_indices.append(i)
return max_indices | 5,358,616 |
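A short usage sketch for maxindices:
print(maxindices([3, 7, 1, 7, 2]))  # -> [1, 3]: both positions of the maximum 7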
def sta2inv(sta_file,out_file="sta.inv",ele_zero=True):
"""
Convert station file into HYPOINVERSE format
"""
    sta_dict = load_sta(sta_file)
    to_inv_sta_file(sta_dict, out_file, ele_zero=ele_zero)
def get_grid_extents(data, edges=True):
"""
Get min and max lat and lon from an input GEOS-Chem xarray dataset or grid dict
Args:
data: xarray Dataset or dict
A GEOS-Chem dataset or a grid dict
edges (optional): bool
Whether grid extents should use cell edges instead of centers
Default value: True
Returns:
minlon: float
Minimum longitude of data grid
maxlon: float
Maximum longitude of data grid
minlat: float
Minimum latitude of data grid
maxlat: float
Maximum latitude of data grid
"""
if isinstance(data, dict):
if "lon_b" in data and edges:
return np.min(
data["lon_b"]), np.max(
data["lon_b"]), np.min(
data["lat_b"]), np.max(
data["lat_b"])
elif not edges:
return np.min(
data["lon"]), np.max(
data["lon"]), np.min(
data["lat"]), np.max(
data["lat"])
else:
return -180, 180, -90, 90
elif "lat" in data.dims and "lon" in data.dims:
lat = data["lat"].values
lon = data["lon"].values
if lat.size / 6 == lon.size:
# No extents for CS plots right now
return -180, 180, -90, 90
else:
lat = np.sort(lat)
minlat = np.min(lat)
if abs(abs(lat[1]) - abs(lat[0])
) != abs(abs(lat[2]) - abs(lat[1])):
#pole is cutoff
minlat = minlat - 1
maxlat = np.max(lat)
if abs(abs(lat[-1]) - abs(lat[-2])
) != abs(abs(lat[-2]) - abs(lat[-3])):
maxlat = maxlat + 1
# add longitude res to max longitude
lon = np.sort(lon)
minlon = np.min(lon)
maxlon = np.max(lon) + abs(abs(lon[-1]) - abs(lon[-2]))
return minlon, maxlon, minlat, maxlat
else:
# GCHP data using MAPL v1.0.0+ has dims time, lev, nf, Ydim, and Xdim
return -180, 180, -90, 90 | 5,358,618 |
def test_interpolation ():
"""Test bernstein interpolation
"""
logger = getLogger("test_interpolation")
from math import sin,pi, sqrt
fun = lambda x : sin(2*pi*x)
bs = ostap.math.bernstein.interpolate ( fun , [0] + [ random.uniform(0.01,0.99) for i in range(25) ] + [1] , 0 , 1 )
from ostap.stats.counters import SE
s = SE()
for i in range(10000) :
x = random.uniform ( 0 , 1 )
vf = fun(x)
vb = bs (x)
s += vf-vb
logger.info ('Interpolation quality %s' % s ) | 5,358,619 |
def millisToNanos(millis):
"""
Converts milliseconds to nanoseconds.
:param millis: (long) - The long milliseconds value to convert.
:return: (long) QueryConstants.NULL_LONG if the input is equal to QueryConstants.NULL_LONG. Throws
DBTimeUtils.DBDateTimeOverflowException if the input is too large for conversion. Otherwise returns a long of
the equivalent number of nanoseconds to the input.
"""
return _java_type_.millisToNanos(millis) | 5,358,620 |
def recursive_seed_part(
graph,
parts,
pop_target,
pop_col,
epsilon,
method=bipartition_tree,
node_repeats=1,
n=None,
ceil=None
):
"""
Returns a partition with ``num_dists`` districts balanced within ``epsilon`` of
``pop_target`` by recursively splitting graph using recursive_seed_part_inner.
:param graph: The graph
    :param parts: Iterable of part labels (like ``[0,1,2]`` or ``range(4)``)
:param pop_target: Target population for each part of the partition
:param pop_col: Node attribute key holding population data
:param epsilon: How far (as a percentage of ``pop_target``) from ``pop_target`` the parts
of the partition can be
:param method: Function used to find balanced partitions at the 2-district level
:param node_repeats: Parameter for :func:`~gerrychain.tree_methods.bipartition_tree` to use.
:param n: Either a positive integer (greater than 1) or None. If n is a positive integer,
this function will recursively create a seed plan by either biting off districts from graph
or dividing graph into n chunks and recursing into each of these. If n is None, this
function prime factors ``num_dists``=n_1*n_2*...*n_k (n_1 > n_2 > ... n_k) and recursively
partitions graph into n_1 chunks.
:param ceil: Either a positive integer (at least 2) or None. Relevant only if n is None. If
``ceil`` is a positive integer then finds the largest factor of ``num_dists`` less than or
equal to ``ceil``, and recursively splits graph into that number of chunks, or bites off a
district if that number is 1.
:return: New assignments for the nodes of ``graph``.
:rtype: dict
"""
flips = {}
assignment = recursive_seed_part_inner(
graph,
len(parts),
pop_target,
pop_col,
epsilon,
        method=method,
node_repeats=node_repeats,
n=n,
ceil=ceil
)
for i in range(len(assignment)):
for node in assignment[i]:
flips[node] = parts[i]
return flips | 5,358,621 |
def codegen_reload_data():
"""Parameters to codegen used to generate the fn_carbon_black_cloud_devices package"""
reload_params = {"package": u"fn_carbon_black_cloud_devices",
"incident_fields": [],
"action_fields": [],
"function_params": [u"carbon_black_device_query_string", u"incident_id"],
"datatables": [],
"message_destinations": [u"carbon_black_cloud"],
"functions": [u"carbon_black_cloud_devices_quarantine"],
"phases": [],
"automatic_tasks": [],
"scripts": [],
"workflows": [u"example_carbon_black_cloud_devices_quarantine"],
"actions": [u"Example: Run Carbon Black Device Quarantine"],
"incident_artifact_types": []
}
return reload_params | 5,358,622 |
def plot_3DW(att, ti, tf, dt):
"""
%run: plot_3DW(att, 0, 365*5, 0.1)
:param att: attitude object
:param ti: initial time [days]
:param tf: final time [days]
:param dt: step time for calculating the data point [days]
:return: plot of the total inertia vector (unitary) of the scanner wrt LMN frame.
"""
if isinstance(att, ggs.Attitude) is False:
raise TypeError('att is not an Attitude object.')
    if type(ti) not in [int, float]:
        raise TypeError('ti must be a non-negative real number.')
    if type(tf) not in [int, float]:
        raise TypeError('tf must be a non-negative real number.')
    if type(dt) not in [int, float]:
        raise TypeError('dt must be a non-negative real number.')
    if ti < 0:
        raise ValueError('ti cannot be negative.')
    if tf < 0:
        raise ValueError('tf cannot be negative.')
    if dt < 0:
        raise ValueError('dt cannot be negative.')
att.reset()
att.create_storage(ti, tf, dt)
w_list = [obj[1] for obj in att.storage]
w_listx = [i[0] for i in w_list]
w_listy = [i[1] for i in w_list]
w_listz = [i[2] for i in w_list]
mpl.rcParams['legend.fontsize'] = 10
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.plot(w_listx, w_listy, w_listz, '--', label='W vector rotation')
ax.legend()
ax.set_xlabel('l')
ax.set_ylabel('m')
ax.set_zlabel('n')
plt.show() | 5,358,623 |
def validate_investment_amount(investment_amount, intent_request):
"""
Validates the investment_amount provided by the user.
"""
# Validate the investment_amount should be equal to or greater than 5000.
if investment_amount is not None:
investment_amount = parse_int(
investment_amount
) # Since parameters are strings it's important to cast values
if investment_amount < 5000:
return build_validation_result(
False,
"investmentAmount",
"The investmentAmount should be greater than or equal to 5000, "
"Please provide a correct investmentAmount in dollars.",
)
    # A True result is returned if the investment amount is valid
return build_validation_result(True, None, None) | 5,358,624 |
def test_create_feature_request_missing_field():
"""
tests creating a feature request
with one or more missing fields
:param app:
:return:
"""
# with one missing field
with pytest.raises(TypeError):
feature.FeatureRequestService.objects_new(
client_id=1,
title='One Piece',
description='Chart a course for All Blue',
product_area_id=2,
priority=2
)
# with 2 missing fields
with pytest.raises(TypeError):
feature.FeatureRequestService.objects_new(
client_id=1,
title='One Piece',
description='Chart a course for All Blue',
product_area_id=2
) | 5,358,625 |
def new_id():
"""
Generates new bson ObjectId
"""
return str(ObjectId()) | 5,358,626 |
def gc_sweep(
session=None,
draw_function=gc2d,
dirpath=CONFIG["workspace"],
overwrite=False,
run=True,
base_fsp_path=str(CONFIG["grating_coupler_2D_base"]),
**kwargs
):
""" grating coupler sweep
grating_coupler_2D_base optimizes Transmission and does not calculate Sparameters
"""
import lumapi
function_name = draw_function.__name__ + "_sweep"
filename = kwargs.pop("name", get_function_name(function_name, **kwargs))
dirpath = pathlib.Path(dirpath) / function_name
dirpath.mkdir(exist_ok=True)
filepath = dirpath / filename
filepath_sim_settings = filepath.with_suffix(".settings.json")
filepath_json = filepath.with_suffix(".json")
filepath_fsp = str(filepath.with_suffix(".fsp"))
if filepath_json.exists() and not overwrite and run:
return json.loads(open(filepath_json).read())
s = session or lumapi.FDTD(hide=False)
simdict = draw_function(session=s, base_fsp_path=base_fsp_path, **kwargs)
s.save(filepath_fsp)
if not run:
return
s.run()
T = s.getresult("fom", "T")
results = dict(wavelength_nm=list(T["lambda"].ravel() * 1e9), T=list(T["T"]))
with open(filepath_json, "w") as f:
json.dump(results, f)
settings = simdict.get("settings")
if settings:
with open(filepath_sim_settings, "w") as f:
json.dump(settings, f)
return results | 5,358,627 |
def stem(word, stemmer=PORTER, **kwargs):
""" Returns the base form of the word when counting words in count().
With stemmer=PORTER, the Porter2 stemming algorithm is used.
With stemmer=LEMMA, either uses Word.lemma or inflect.singularize().
(with optional parameter language="en", pattern.en.inflect is used).
"""
if hasattr(word, "string") and stemmer in (PORTER, None):
word = word.string
if isinstance(word, basestring):
word = decode_utf8(word.lower())
if stemmer is None:
return word.lower()
if stemmer == PORTER:
return _stemmer.stem(word, **kwargs)
if stemmer == LEMMA:
if hasattr(word, "lemma"): # pattern.en.Word
w = word.string.lower()
if word.lemma is not None:
return word.lemma
if word.pos == "NNS":
return singularize(w)
if word.pos.startswith(("VB", "MD")):
return conjugate(w, "infinitive") or w
if word.pos.startswith(("JJ",)):
return predicative(w)
if word.pos.startswith(("DT", "PR", "WP")):
return singularize(w, pos=word.pos)
return w
return singularize(word, pos=kwargs.get("pos", "NN"))
if hasattr(stemmer, "__call__"):
return decode_utf8(stemmer(word))
return word.lower() | 5,358,628 |
def verify_bounce_message(msg):
"""
Verify an SES/SNS bounce notification message.
"""
verifier = BounceMessageVerifier(msg)
return verifier.is_verified() | 5,358,629 |
def multiples(a, b):
"""This function checks if a number is a multiple of another."""
if type(a) != int or type(b) != int:
raise Exception('Values must be integers.')
elif a == 0:
raise Exception('0 is not valid.')
elif a == b:
raise Exception('Numbers should not be the same.')
else:
if b > a:
check = b % a
if not check:
return True
else:
return False
else:
raise Exception("Error! {0} isn't greater than {1}."
.format(b, a)) | 5,358,630 |
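A short usage sketch for multiples; note that the function requires b > a and raises for equal values, zero, or non-integers:
print(multiples(3, 9))   # True:  9 is a multiple of 3
print(multiples(3, 10))  # False: 10 is not a multiple of 3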
def plot_masks(im_h=150, im_w=235):
"""
Plots the preallocated masks of the steerable pyramid
:param im_h:
:param im_w:
:return:
"""
py = Steerable_complex_wavelet_pyramid(im_h=im_h, im_w=im_w)
py.plot_lo_masks()
py.plot_high_masks()
py.plot_angle_masks_b1() | 5,358,631 |
def test_release_version(three_experiments_same_name_with_trials):
"""Test releasing a specific experiment version"""
experiments = get_storage().fetch_experiments({})
storage = get_storage()
assert len(experiments) == 3
assert len(storage._fetch_trials({})) > 0
uid = None
for experiment in experiments:
if experiment["version"] == 2:
uid = experiment["_id"]
assert storage.get_algorithm_lock_info(uid=experiment["_id"]).locked == 0
assert uid is not None
with storage.acquire_algorithm_lock(uid=uid):
assert storage.get_algorithm_lock_info(uid=uid).locked == 1
for experiment in experiments:
if experiment["version"] == 2:
assert (
storage.get_algorithm_lock_info(uid=experiment["_id"]).locked == 1
)
else:
assert (
storage.get_algorithm_lock_info(uid=experiment["_id"]).locked == 0
)
execute("db release -f test_single_exp --version 2")
for experiment in experiments:
assert storage.get_algorithm_lock_info(uid=experiment["_id"]).locked == 0 | 5,358,632 |
def train_gilbo(gan, sess, outdir, checkpoint_path, dataset, options):
"""Build and train GILBO model.
Args:
gan: GAN object.
sess: tf.Session.
outdir: Output directory. A pickle file will be written there.
    checkpoint_path: Path where gan's checkpoints are written. Only used to
ensure that GILBO files are written to a unique
subdirectory of outdir.
dataset: Name of dataset used to train the GAN.
options: Options dictionary.
Returns:
mean_eval_info: Mean GILBO computed over a large number of images generated
by the trained GAN
mean_train_consistency: Mean consistency of the trained GILBO model with
data from the training set.
mean_eval_consistency: Same consistency measure for the trained model with
data from the validation set.
mean_self_consistency: Same consistency measure for the trained model with
data generated by the trained model itself.
See the GILBO paper for an explanation of these metrics.
Raises:
ValueError: If the GAN has uninitialized variables.
"""
uninitialized = sess.run(tf.report_uninitialized_variables())
if uninitialized:
raise ValueError("Model has uninitialized variables!\n%r" % uninitialized)
outdir = os.path.join(outdir, checkpoint_path.replace("/", "_"))
tf.gfile.MakeDirs(outdir)
with tf.variable_scope("gilbo"):
ones = tf.ones((gan.batch_size, gan.z_dim))
# Get a distribution for the prior.
z_dist = ds.Independent(ds.Uniform(-ones, ones), 1)
z_sample = z_dist.sample()
epsneg = np.finfo("float32").epsneg
# Clip samples from the GAN uniform prior because the Beta distribution
# doesn"t include the top endpoint and has issues with the bottom endpoint.
ganz_clip = tf.clip_by_value(gan.z, -(1 - epsneg), 1 - epsneg)
# Get generated images from the model.
fake_images = gan.fake_images
# Build the regressor distribution that encodes images back to predicted
# samples from the prior.
with tf.variable_scope("regressor"):
z_pred_dist = _build_regressor(fake_images, gan.z_dim)
# Capture the parameters of the distributions for later analysis.
dist_p1 = z_pred_dist.distribution.distribution.concentration0
dist_p2 = z_pred_dist.distribution.distribution.concentration1
# info and avg_info compute the GILBO.
info = z_pred_dist.log_prob(ganz_clip) - z_dist.log_prob(ganz_clip)
avg_info = tf.reduce_mean(info)
# Set up training of the GILBO model.
lr = options.get("gilbo_learning_rate", 4e-4)
learning_rate = tf.get_variable(
"learning_rate", initializer=lr, trainable=False)
gilbo_step = tf.get_variable("gilbo_step", dtype=tf.int32, initializer=0,
trainable=False)
opt = tf.train.AdamOptimizer(learning_rate)
regressor_vars = tf.contrib.framework.get_variables("gilbo/regressor")
train_op = opt.minimize(-info, var_list=regressor_vars)
# Initialize the variables we just created.
uninitialized = plist(tf.report_uninitialized_variables().eval())
uninitialized_vars = uninitialized.apply(
tf.contrib.framework.get_variables_by_name)._[0]
tf.variables_initializer(uninitialized_vars).run()
saver = tf.train.Saver(uninitialized_vars, max_to_keep=1)
try:
checkpoint_path = tf.train.latest_checkpoint(outdir)
saver.restore(sess, checkpoint_path)
except ValueError:
    # Failing to restore just indicates that we don't have a valid checkpoint,
# so we will just start training a fresh GILBO model.
pass
_train_gilbo(sess, gan, saver, learning_rate, gilbo_step, z_sample, avg_info,
z_pred_dist, train_op, outdir, options)
mean_eval_info = _eval_gilbo(sess, gan, z_sample, avg_info,
dist_p1, dist_p2, fake_images, outdir, options)
# Collect encoded distributions on the training and eval set in order to do
# kl-nearest-neighbors on generated samples and measure consistency.
dataset = datasets.get_dataset(dataset)
x_train = dataset.load_dataset(split_name="train", num_threads=1)
x_train = x_train.batch(gan.batch_size, drop_remainder=True)
x_train = x_train.make_one_shot_iterator().get_next()[0]
x_train = tf.reshape(x_train, fake_images.shape)
x_eval = dataset.load_dataset(split_name="test", num_threads=1)
x_eval = x_eval.batch(gan.batch_size, drop_remainder=True)
x_eval = x_eval.make_one_shot_iterator().get_next()[0]
x_eval = tf.reshape(x_eval, fake_images.shape)
mean_train_consistency = _run_gilbo_consistency(
x_train, "train", extract_input_images=0,
save_consistency_images=20, num_batches=5, **locals())
mean_eval_consistency = _run_gilbo_consistency(
x_eval, "eval", extract_input_images=0,
save_consistency_images=20, num_batches=5, **locals())
mean_self_consistency = _run_gilbo_consistency(
fake_images, "self", extract_input_images=20,
save_consistency_images=20, num_batches=5, **locals())
return (mean_eval_info, mean_train_consistency, mean_eval_consistency,
mean_self_consistency) | 5,358,633 |
def unquote_keys(data):
"""Restores initial view of 'quoted' keys in dictionary data
:param data: is a dictionary
:return: data with restored keys if they were 'quoted'.
"""
if isinstance(data, dict):
for key, value in data.items():
if isinstance(value, dict):
unquote_keys(value)
if key.startswith('%24'):
k = parse.unquote(key)
data[k] = data.pop(key)
return data | 5,358,634 |
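A minimal usage sketch for unquote_keys, assuming urllib.parse is imported as `parse` as the function expects; the keys below are hypothetical:
data = {"%24filter": {"%24eq": "abc"}, "name": "x"}
print(unquote_keys(data))  # the '%24...' keys become '$filter' and '$eq'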
def retr_amplslen(peri, radistar, masscomp, massstar):
"""
Calculate the self-lensing amplitude.
Arguments
peri: orbital period [days]
radistar: radius of the star [Solar radius]
masscomp: mass of the companion [Solar mass]
massstar: mass of the star [Solar mass]
Returns
amplslen: the fractional amplitude of the self-lensing
"""
amplslen = 7.15e-5 * radistar**(-2.) * peri**(2. / 3.) * masscomp * (masscomp + massstar)**(1. / 3.) * 1e3 # [ppt]
return amplslen | 5,358,635 |
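A worked example with hypothetical values: a Sun-like star (1 solar radius, 1 solar mass) with a 1 solar-mass companion on a 10-day orbit gives 7.15e-5 * 10**(2/3) * 2**(1/3) * 1e3, roughly 0.42 ppt.
print(retr_amplslen(10., 1., 1., 1.))  # ~0.42 [ppt]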
def main_work(indir, outdir, aux=None, hv=None):
"""
:param indir:
:param outdir:
:param aux:
:param hv:
:return:
"""
if not os.path.exists(outdir):
os.makedirs(outdir)
if hv is None:
hv_ = all_hv
else:
hv_ = [hv]
if aux is None:
aux_ = all_aux
else:
aux_ = [aux]
for tile in hv_:
get_extent = GetExtents(int(tile[0]), int(tile[1]))
out_file = None
tarlist = []
for prod in aux_:
print(f"\nWorking on TILE: {tile}\n\t\tAUX: {prod}")
out_file = make_filename(tile, prod, outdir)
src_file = f"{indir}{os.sep}{prod}.tif"
if not os.path.exists(out_file):
run_subset(src_file, out_file, get_extent.TILE_EXTENT)
tarlist.append(out_file)
archive = outdir + os.sep + os.path.basename(out_file)[:35] + ".tar"
with tarfile.open(archive, "w") as tar:
for f in tarlist:
tar.add(f, os.path.basename(f))
os.remove(f)
return None | 5,358,636 |
def calculate_current_teach_week(semester_first_week_date='2021-3-08 08:00:00'):
"""
    Compute the teaching week that the current date falls in. The idea:
    (week-of-year of today) - (week-of-year of the semester's first week).
    ----
    param: semester_first_week_date: date of the semester's first week, e.g. '2021-3-08 08:00:00'
    return: the current teaching week
    """
    # Week-of-year of the given date, returned as a string
    semester_first_week = datetime.strptime(semester_first_week_date, '%Y-%m-%d %H:%M:%S').strftime('%W')
    # Week-of-year of the current date, returned as a string
    current_year_week = datetime.now().strftime('%W')
    # Compute the teaching week the current date belongs to
    # The "- 1" inside the parentheses accounts for the weeks before week one
    # The trailing "+ 1" is needed because week numbering starts at index 00
current_teach_week = int(current_year_week) - (int(semester_first_week) - 1) + 1
return current_teach_week | 5,358,637 |
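A usage sketch for calculate_current_teach_week; the result depends on the date the call is made (week 1 during the week of 2021-03-08, week 2 one week later, and so on):
print(calculate_current_teach_week('2021-3-08 08:00:00'))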
def get_first_model_each_manufacturer(cars=cars):
"""return a list of matching models (original ordering)"""
return [cars[key][0] for key in cars] | 5,358,638 |
def get_sandbox_table_name(dataset_id, rule_name):
"""
A helper function to create a table in the sandbox dataset
:param dataset_id: the dataset_id to which the rule is applied
:param rule_name: the name of the cleaning rule
:return: the concatenated table name
"""
return '{dataset_id}_{rule_name}'.format(dataset_id=dataset_id,
rule_name=re.sub(
r'\W', '_', rule_name)) | 5,358,639 |
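A short usage sketch for get_sandbox_table_name showing how non-word characters in the rule name are replaced with underscores:
print(get_sandbox_table_name('dataset1', 'drop-bad rows'))  # 'dataset1_drop_bad_rows'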
def index():
"""for i in range(0, 30):
data = QuizQuestions("math", None, "en_US", 7, "normal", "This is placeholder question number " + str(i), "c", "Answer A", "Answer B", "Answer C", "Answer D", True)
db.session.add(data)
db.session.commit()"""
return render_template("quiz_index.html") | 5,358,640 |
def _get_invoke_function_name() -> Any:
"""
Get invoke function Name.
Returns
-------
Function Name.
"""
props = get_properties()
functionName = f"orbit-{props['AWS_ORBIT_ENV']}-{props['AWS_ORBIT_TEAM_SPACE']}-container-runner"
return functionName | 5,358,641 |
def metadata_table(samples):
"""Return a pandas dataframe with metadata from `samples`."""
pass | 5,358,642 |
def sigmaG(a, axis=None, overwrite_input=False, keepdims=False):
"""
Compute the rank-based estimate of the standard deviation
Parameters
----------
a : array_like
Array containing numbers whose mean is desired. If `a` is not an
array, a conversion is attempted.
axis : int, optional
Axis along which the means are computed. The default is to compute
the mean of the flattened array.
overwrite_input : bool, optional
If True, then allow use of memory of input array `a` for
calculations. The input array will be modified by the call to
median. This will save memory when you do not need to preserve
the contents of the input array. Treat the input as undefined,
but it will probably be fully or partially sorted.
Default is False. Note that, if `overwrite_input` is True and the
input is not already an array, an error will be raised.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the original `arr`.
Returns
-------
    sigmaG : ndarray, see dtype parameter above
        array containing the robust rank-based estimate of the standard deviation
See Also
--------
median_sigmaG : robust rank-based estimate of mean and standard deviation
Notes
-----
This routine uses a single call to ``np.nanpercentile`` to find the
quartiles along the given axis, and uses these to compute the
sigmaG, a robust estimate of the standard deviation sigma:
sigmaG = 0.7413 * (q75 - q25)
where 0.7413 ~ 1 / (2 sqrt(2) erf^-1(0.5))
"""
q25, q75 = np.nanpercentile(a, [25, 75], axis=axis, overwrite_input=overwrite_input)
sigmaG = sigmaG_factor * (q75 - q25)
if keepdims:
if axis is None:
newshape = a.ndim * (1,)
else:
newshape = np.asarray(a.shape)
newshape[axis] = 1
sigmaG = sigmaG.reshape(newshape)
return sigmaG | 5,358,643 |
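A minimal usage sketch for sigmaG, assuming numpy is imported as np and sigmaG_factor (about 0.7413) is defined in the module as described in the notes; for standard-normal data the estimate should be close to 1:
import numpy as np

x = np.random.normal(0, 1, size=10000)
print(sigmaG(x))  # ~1.0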
def traverse_tagged_databases(
functional_unit, method, label="tag", default_tag="other", secondary_tags=[], fg_databases=None
):
"""Traverse a functional unit throughout its foreground database(s) or the
    listed databases in fg_databases, and group impacts by tag label.
    Contribution analysis works by linking impacts to individual activities.
However, you also might want to group impacts in other ways. For example,
give individual biosphere exchanges their own grouping, or aggregate two
activities together.
Consider this example system, where the letters are the tag labels, and the
numbers are exchange amounts. The functional unit is one unit of the tree
root.
.. image:: images/tagged-traversal.png
:alt: Example tagged supply chain
In this supply chain, tags are applied to activities and biosphere exchanges.
If a biosphere exchange is not tagged, it inherits the tag of its producing
activity. Similarly, links to other databases are assessed with the usual
LCA machinery, and the total LCA score is tagged according to its consuming
activity. If an activity does not have a tag, a default tag is applied.
We can change our visualization to show the use of the default tags:
.. image:: images/tagged-traversal-2.png
:alt: Example tagged supply chain
And then we can manually calculate the tagged impacts. Normally we would
need to know the actual biosphere flows and their respective
characterization factors (CF), but in this example we assume that each
    CF is one. Our result, grouped by tags, would therefore be:
* **A**: :math:`6 + 27 = 33`
* **B**: :math:`30 + 44 = 74`
* **C**: :math:`5 + 16 + 48 = 69`
* **D**: :math:`14`
This function will only traverse the foreground database, i.e. the
database of the functional unit activity. A functional unit can have
multiple starting nodes; in this case, all foreground databases are
traversed.
Input arguments:
* ``functional_unit``: A functional unit dictionary, e.g. ``{("foo", "bar"): 42}``.
* ``method``: A method name, e.g. ``("foo", "bar")``
* ``label``: The label of the tag classifier. Default is ``"tag"``
* ``default_tag``: The tag classifier to use if none was given. Default is ``"other"``
* ``secondary_tags``: List of tuples in the format (secondary_label, secondary_default_tag). Default is empty list.
* ``fg_databases``: a list of foreground databases to be traversed, e.g. ['foreground', 'biomass', 'machinery']
It's not recommended to include all databases of a project in the list to be traversed, especially not ecoinvent itself
Returns:
Aggregated tags dictionary from ``aggregate_tagged_graph``, and tagged supply chain graph from ``recurse_tagged_database``.
"""
lca = LCA(functional_unit, method)
lca.lci()
lca.lcia()
method_dict = {o[0]: o[1] for o in Method(method).load()}
graph = [
recurse_tagged_database(
key, amount, method_dict, lca, label, default_tag, secondary_tags, fg_databases
)
for key, amount in functional_unit.items()
]
return aggregate_tagged_graph(graph), graph | 5,358,644 |
def graft(
repo,
ctx,
base=None,
labels=None,
keepparent=False,
keepconflictparent=False,
wctx=None,
):
"""Do a graft-like merge.
This is a merge where the merge ancestor is chosen such that one
or more changesets are grafted onto the current changeset. In
addition to the merge, this fixes up the dirstate to include only
a single parent (if keepparent is False) and tries to duplicate any
renames/copies appropriately.
ctx - changeset to rebase
base - merge base, or ctx.p1() if not specified
labels - merge labels eg ['local', 'graft']
keepparent - keep second parent if any
keepconflictparent - if unresolved, keep parent used for the merge
"""
# If we're grafting a descendant onto an ancestor, be sure to pass
# mergeancestor=True to update. This does two things: 1) allows the merge if
# the destination is the same as the parent of the ctx (so we can use graft
# to copy commits), and 2) informs update that the incoming changes are
# newer than the destination so it doesn't prompt about "remote changed foo
# which local deleted".
# We also pass mergeancestor=True when base is the same revision as p1. 2)
# doesn't matter as there can't possibly be conflicts, but 1) is necessary.
wctx = wctx or repo[None]
pctx = wctx.p1()
base = base or ctx.p1()
mergeancestor = (
repo.changelog.isancestor(pctx.node(), ctx.node())
or pctx.rev() == base.rev()
)
stats = _update(
repo,
ctx.node(),
True,
True,
base.node(),
mergeancestor=mergeancestor,
labels=labels,
wc=wctx,
)
if keepconflictparent and stats.unresolvedcount:
pother = ctx.node()
else:
pother = nullid
parents = ctx.parents()
if keepparent and len(parents) == 2 and base in parents:
parents.remove(base)
pother = parents[0].node()
# Never set both parents equal to each other
if pother == pctx.node():
pother = nullid
if wctx.isinmemory():
wctx.setparents(pctx.node(), pother)
# fix up dirstate for copies and renames
copies.graftcopies(wctx, ctx, base)
else:
with repo.dirstate.parentchange():
repo.setparents(pctx.node(), pother)
repo.dirstate.write(repo.currenttransaction())
# fix up dirstate for copies and renames
copies.graftcopies(wctx, ctx, base)
return stats | 5,358,645 |
def GetSiteFilters(filename):
""" Reader for a file of reportable sites.
The file contains 2 tokens: the site name and a normalization factor.
The method returns a hash table with the key being site and the value
the normalization factor to use.
"""
try:
#--- process the reportable sites file ---
sites = {}
fd = open(filename)
while 1:
filter = fd.readline()
if filter == "": # EOF
break
filter = filter.strip().strip("\n")
if filter.startswith("#"):
continue
if len(filter) == 0:
continue
site = filter.split()
if sites.has_key(site[0]):
raise Exception("System error: duplicate - site (%s) already set" % site[0])
factor = 0
if len(site) == 1:
raise Exception("System error: No normalization factory was provide for site: %s" % site[0])
elif len(site) > 1:
#-- verify the factor is an integer --
try:
tmp = int(site[1])
factor = float(site[1])/1000
except:
raise Exception("Error in %s file: 2nd token must be an integer (%s" % (filename,filter))
#-- set the factor --
sites[site[0]] = factor
else:
continue
#-- end of while loop --
fd.close()
#-- verify there is at least 1 site --
if len(sites) == 0:
raise Exception("Error in %s file: there are no sites to process" % filename)
return sites
except IOError, (errno,strerror):
raise Exception("IO error(%s): %s (%s)" % (errno,strerror,filename)) | 5,358,646 |
def MiniMobileNetV2(input_shape=None,
alpha=1.0,
expansion_factor=6,
depth_multiplier=1,
dropout=0.,
weight_decay=0.,
include_top=True,
weights=None,
input_tensor=None,
pooling=None,
classes=10):
"""Instantiates the MobileNet architecture.
MobileNet V2 is from the paper:
- [Inverted Residuals and Linear Bottlenecks: Mobile Networks for Classification, Detection and Segmentation](https://arxiv.org/abs/1801.04381)
Note that only TensorFlow is supported for now,
therefore it only works with the data format
`image_data_format='channels_last'` in your Keras config
at `~/.keras/keras.json`.
To load a MobileNet model via `load_model`, import the custom
objects `relu6` and `DepthwiseConv2D` and pass them to the
`custom_objects` parameter.
E.g.
model = load_model('mobilenet.h5', custom_objects={
'relu6': mobilenet.relu6,
'DepthwiseConv2D': mobilenet.DepthwiseConv2D})
# Arguments
input_shape: optional shape tuple, only to be specified
if `include_top` is False (otherwise the input shape
has to be `(224, 224, 3)` (with `channels_last` data format)
or (3, 224, 224) (with `channels_first` data format).
It should have exactly 3 inputs channels,
and width and height should be no smaller than 32.
E.g. `(200, 200, 3)` would be one valid value.
alpha: controls the width of the network.
- If `alpha` < 1.0, proportionally decreases the number
of filters in each layer.
- If `alpha` > 1.0, proportionally increases the number
of filters in each layer.
- If `alpha` = 1, default number of filters from the paper
are used at each layer.
expansion_factor: controls the expansion of the internal bottleneck
blocks. Should be a positive integer >= 1
depth_multiplier: depth multiplier for depthwise convolution
(also called the resolution multiplier)
dropout: dropout rate
weight_decay: Weight decay factor.
include_top: whether to include the fully-connected
layer at the top of the network.
weights: `None` (random initialization) or
`imagenet` (ImageNet weights)
input_tensor: optional Keras tensor (i.e. output of
`layers.Input()`)
to use as image input for the model.
pooling: Optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model
will be the 4D tensor output of the
last convolutional layer.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional layer, and thus
the output of the model will be a
2D tensor.
- `max` means that global max pooling will
be applied.
classes: optional number of classes to classify images
into, only to be specified if `include_top` is True, and
if no `weights` argument is specified.
# Returns
A Keras model instance.
# Raises
ValueError: in case of invalid argument for `weights`,
or invalid input shape.
RuntimeError: If attempting to run this model with a
backend that does not support separable convolutions.
"""
if K.backend() != 'tensorflow':
raise RuntimeError('Only Tensorflow backend is currently supported, '
'as other backends do not support '
'depthwise convolution.')
if weights not in {'imagenet', None}:
raise ValueError('The `weights` argument should be either '
'`None` (random initialization) or `imagenet` '
'(pre-training on ImageNet).')
if weights == 'imagenet' and include_top and classes != 1000:
raise ValueError('If using `weights` as ImageNet with `include_top` '
'as true, `classes` should be 1000')
# Determine proper input shape and default size.
if input_shape is None:
default_size = 224
else:
if K.image_data_format() == 'channels_first':
rows = input_shape[1]
cols = input_shape[2]
else:
rows = input_shape[0]
cols = input_shape[1]
if rows == cols and rows in [96, 128, 160, 192, 224]:
default_size = rows
else:
default_size = 224
input_shape = _obtain_input_shape(input_shape,
default_size=default_size,
min_size=32,
data_format=K.image_data_format(),
require_flatten=include_top or weights)
if K.image_data_format() == 'channels_last':
row_axis, col_axis = (0, 1)
else:
row_axis, col_axis = (1, 2)
rows = input_shape[row_axis]
cols = input_shape[col_axis]
if weights == 'imagenet':
if depth_multiplier != 1:
raise ValueError('If imagenet weights are being loaded, '
'depth multiplier must be 1')
if alpha not in [0.35, 0.50, 0.75, 1.0, 1.3, 1.4]:
raise ValueError('If imagenet weights are being loaded, '
'alpha can be one of'
'`0.35`, `0.50`, `0.75`, `1.0`, `1.3` and `1.4` only.')
if rows != cols or rows not in [96, 128, 160, 192, 224]:
raise ValueError('If imagenet weights are being loaded, '
'input must have a static square shape (one of '
                             '(96, 96), (128, 128), (160, 160), (192, 192), or '
                             '(224, 224)). Input shape provided = %s' % (input_shape,))
if K.image_data_format() != 'channels_last':
warnings.warn('The MobileNet family of models is only available '
'for the input data format "channels_last" '
'(width, height, channels). '
'However your settings specify the default '
'data format "channels_first" (channels, width, height).'
' You should set `image_data_format="channels_last"` '
'in your Keras config located at ~/.keras/keras.json. '
'The model being returned right now will expect inputs '
'to follow the "channels_last" data format.')
K.set_image_data_format('channels_last')
old_data_format = 'channels_first'
else:
old_data_format = None
if input_tensor is None:
img_input = Input(shape=input_shape)
else:
if not K.is_keras_tensor(input_tensor):
img_input = Input(tensor=input_tensor, shape=input_shape)
else:
img_input = input_tensor
x = _conv_block(img_input, 32, alpha, bn_epsilon=1e-3, bn_momentum=0.99, weight_decay=weight_decay)
x = _depthwise_conv_block_v2(x, 16, alpha, 1, depth_multiplier, bn_epsilon=1e-3, bn_momentum=0.99,
weight_decay=weight_decay, block_id=1)
x = _depthwise_conv_block_v2(x, 24, alpha, expansion_factor, depth_multiplier, block_id=2,
bn_epsilon=1e-3, bn_momentum=0.99, weight_decay=weight_decay, strides=(2, 2))
x = _depthwise_conv_block_v2(x, 24, alpha, expansion_factor, depth_multiplier, bn_epsilon=1e-3, bn_momentum=0.99,
weight_decay=weight_decay, block_id=3)
x = _depthwise_conv_block_v2(x, 32, alpha, expansion_factor, depth_multiplier, block_id=4,
bn_epsilon=1e-3, bn_momentum=0.99, weight_decay=weight_decay)
x = _depthwise_conv_block_v2(x, 32, alpha, expansion_factor, depth_multiplier, bn_epsilon=1e-3, bn_momentum=0.99,
weight_decay=weight_decay, block_id=5)
x = _depthwise_conv_block_v2(x, 32, alpha, expansion_factor, depth_multiplier, bn_epsilon=1e-3, bn_momentum=0.99,
weight_decay=weight_decay, block_id=6)
x = _depthwise_conv_block_v2(x, 64, alpha, expansion_factor, depth_multiplier, block_id=7,
bn_epsilon=1e-3, bn_momentum=0.99, weight_decay=weight_decay, strides=(2, 2))
x = _depthwise_conv_block_v2(x, 64, alpha, expansion_factor, depth_multiplier, bn_epsilon=1e-3, bn_momentum=0.99,
weight_decay=weight_decay, block_id=8)
x = _depthwise_conv_block_v2(x, 64, alpha, expansion_factor, depth_multiplier, bn_epsilon=1e-3, bn_momentum=0.99,
weight_decay=weight_decay, block_id=9)
x = _depthwise_conv_block_v2(x, 64, alpha, expansion_factor, depth_multiplier, bn_epsilon=1e-3, bn_momentum=0.99,
weight_decay=weight_decay, block_id=10)
if alpha <= 1.0:
penultimate_filters = 1280
else:
penultimate_filters = int(1280 * alpha)
x = _conv_block(x, penultimate_filters, alpha=1.0, kernel=(1, 1), bn_epsilon=1e-3, bn_momentum=0.99,
block_id=18)
if include_top:
if K.image_data_format() == 'channels_first':
shape = (penultimate_filters, 1, 1)
else:
shape = (1, 1, penultimate_filters)
x = GlobalAveragePooling2D()(x)
x = Reshape(shape, name='reshape_1')(x)
x = Dropout(dropout, name='dropout')(x)
x = Conv2D(classes, (1, 1),
kernel_initializer=initializers.he_normal(),
kernel_regularizer=regularizers.l2(weight_decay),
padding='same', name='conv_preds')(x)
x = Activation('softmax', name='act_softmax')(x)
x = Reshape((classes,), name='reshape_2')(x)
else:
if pooling == 'avg':
x = GlobalAveragePooling2D()(x)
elif pooling == 'max':
x = GlobalMaxPooling2D()(x)
# Ensure that the model takes into account
# any potential predecessors of `input_tensor`.
if input_tensor is not None:
inputs = get_source_inputs(input_tensor)
else:
inputs = img_input
# Create model.
model = Model(inputs, x, name='mobilenetV2_%0.2f_%s' % (alpha, rows))
# load weights
if weights == 'imagenet':
if K.image_data_format() == 'channels_first':
raise ValueError('Weights for "channels_last" format '
'are not available.')
if alpha == 1.0:
alpha_text = '1_0'
elif alpha == 1.3:
alpha_text = '1_3'
elif alpha == 1.4:
alpha_text = '1_4'
elif alpha == 0.75:
alpha_text = '7_5'
elif alpha == 0.50:
alpha_text = '5_0'
else:
alpha_text = '3_5'
if include_top:
model_name = 'mobilenet_v2_%s_%d_tf.h5' % (alpha_text, rows)
weigh_path = BASE_WEIGHT_PATH_V2 + model_name
weights_path = get_file(model_name,
weigh_path,
cache_subdir='models')
else:
model_name = 'mobilenet_v2_%s_%d_tf_no_top.h5' % (alpha_text, rows)
weigh_path = BASE_WEIGHT_PATH_V2 + model_name
weights_path = get_file(model_name,
weigh_path,
cache_subdir='models')
model.load_weights(weights_path)
if old_data_format:
K.set_image_data_format(old_data_format)
return model | 5,358,647 |
def update_forward_cnt(**kwargs):
"""
    Update the forwarded count by incrementing it by 1.
:param kwargs: {'object_id': object_id}
:return:
"""
session = None
try:
session = get_session()
        # increment the forward count by 1
        session.query(SecondHand).filter(SecondHand.OBJECT_ID == kwargs['object_id']).update(
            {SecondHand.FORWARD_CNT: SecondHand.FORWARD_CNT + 1})
        # commit to the database
        session.commit()
        logging.info('OK : second_hand.py--->update_forward_cnt() succeeded')
        return RESULT_OK
    except Exception as e:
        session.rollback()
        logging.critical('Error : second_hand.py--->update_forward_cnt() failed: {}'.format(e))
return []
finally:
session.close() | 5,358,648 |
def _show_list(images: list, width: int=-1, height: int=-1) -> None:
"""Gets list of images and display them all
Args:
images (list): list of numpy arrays
width (int, optional): width of the image grid. Defaults to -1.
height (int, optional): height of the image grid. Defaults to -1.
Returns:
None
"""
im_num = len(images)
mylib_log.info(f"Showing {im_num} images")
in_grid = width * height >= im_num
if not in_grid:
if width != -1 and height != -1:
mylib_log.warn(
f"Cant fit {im_num} images in a {width} by {height} grid")
_show_line(images)
return
elif in_grid and (width == 1 or height == 1):
mylib_log.info("Showing in line")
_show_line(images, width == 1)
return
# showing in grid
_, array = plt.subplots(height, width)
for i in range(im_num):
im = images[i]
x = i % width
y = int(i / width)
array[y, x].imshow(im, "gray") if len(
im.shape) == 2 else array[y, x].imshow(im) | 5,358,649 |
def test_get_geosol_gs(config_geosol_gs_WFS):
"""Testing query for a specific object"""
p = OGRProvider(config_geosol_gs_WFS)
result = p.get('Unesco_point.123')
assert result['id'] == 'Unesco_point.123'
assert 'Centro storico di San Gimignano' in result['properties']['sito'] | 5,358,650 |
def recursive_dict_merge(dict1, dict2):
"""
Merges dictionaries (of dictionaries).
Preference is given to the second dict, i.e. if a key occurs in both dicts, the value from `dict2` is used.
"""
result = copy.deepcopy(dict1)
for key in dict2:
if key in dict1 and isinstance(dict1[key], dict) and isinstance(dict2[key], dict):
result[key] = recursive_dict_merge(dict1[key], dict2[key])
else:
result[key] = dict2[key]
return result | 5,358,651 |
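A short usage sketch for recursive_dict_merge: nested dicts are merged key by key and values from the second dict win on conflicts.
a = {"model": {"lr": 0.1, "layers": 2}, "seed": 1}
b = {"model": {"lr": 0.01}, "device": "cpu"}
print(recursive_dict_merge(a, b))
# -> {'model': {'lr': 0.01, 'layers': 2}, 'seed': 1, 'device': 'cpu'}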
def define_loss(name, device="cuda"):
"""
Defines the loss function associated to the name.
    Supports losses in the LOSSES list, as well as the Lovasz, Softdice and Hausdorff losses.
Args:
name (str): Loss name.
device (str, optional): Device for torch. Defaults to "cuda".
Raises:
NotImplementedError: Specified loss name is not supported.
Returns:
torch loss: Loss function
"""
if name in LOSSES:
loss = getattr(torch.nn, name)(reduction="none")
elif name == "lovasz":
loss = lovasz_loss
else:
raise NotImplementedError
return loss | 5,358,652 |
def get_default_archive_path():
"""
    Make up the default archive path.
    Unify the archive path between the local machine and the cloud.
"""
if not XT_HWC_WORKSPACE:
return os.path.join(os.path.expanduser("~"), DEFAULT_ARCHIVE_DIR)
else:
return os.path.join(XT_HWC_WORKSPACE, DEFAULT_ARCHIVE_DIR) | 5,358,653 |
def python_3000_async_await_keywords(logical_line, tokens):
"""'async' and 'await' are reserved keywords starting at Python 3.7.
W606: async = 42
W606: await = 42
Okay: async def read(db):\n data = await db.fetch('SELECT ...')
"""
# The Python tokenize library before Python 3.5 recognizes
# async/await as a NAME token. Therefore, use a state machine to
# look for the possible async/await constructs as defined by the
# Python grammar:
# https://docs.python.org/3/reference/grammar.html
state = None
for token_type, text, start, end, line in tokens:
error = False
if token_type == tokenize.NL:
continue
if state is None:
if token_type == tokenize.NAME:
if text == 'async':
state = ('async_stmt', start)
elif text == 'await':
state = ('await', start)
elif (token_type == tokenize.NAME and
text in ('def', 'for')):
state = ('define', start)
elif state[0] == 'async_stmt':
if token_type == tokenize.NAME and text in ('def', 'with', 'for'):
# One of funcdef, with_stmt, or for_stmt. Return to
# looking for async/await names.
state = None
else:
error = True
elif state[0] == 'await':
if token_type == tokenize.NAME:
# An await expression. Return to looking for async/await
# names.
state = None
elif token_type == tokenize.OP and text == '(':
state = None
else:
error = True
elif state[0] == 'define':
if token_type == tokenize.NAME and text in ('async', 'await'):
error = True
else:
state = None
if error:
yield (
state[1],
"W606 'async' and 'await' are reserved keywords starting with "
"Python 3.7",
)
state = None
# Last token
if state is not None:
yield (
state[1],
"W606 'async' and 'await' are reserved keywords starting with "
"Python 3.7",
) | 5,358,654 |
def get_hosts_from_file(hostfile):
"""
Return the list of hosts from a given host file.
"""
hosts = []
if os.path.exists(hostfile):
for line in open(hostfile, "r").readlines():
hosts.append(line.split(' ', 1)[0])
return hosts | 5,358,655 |
def process_exists(pid): # type: (int) -> bool
""" Checks if the processed with the given *pid* exists. Returns #True if
that is the case, #False otherwise. """
if pid == 0:
return False
try:
os.kill(pid, 0)
except OSError as exc:
if exc.errno == errno.ESRCH:
return False
return True | 5,358,656 |
def get_application_name():
"""Attempts to find the application name using system arguments."""
if hasattr(sys, 'argv') and sys.argv[0]:
app_name = os.path.basename(sys.argv[0])
else:
app_name = None
return app_name | 5,358,657 |
def test_can_pluck_single_element(base_clumper, elem):
"""
We can pluck single elements by selecting on the character.
"""
collected = base_clumper.keep(lambda d: d["c"] == elem).collect()
assert collected[0]["c"] == elem
assert len(collected) == 1 | 5,358,658 |
def test_repr(redis_dict, str_dict):
"""Test ``__repr__`` method."""
assert str(redis_dict) == '{0}: {1}'.format(type(redis_dict).__name__, str_dict) | 5,358,659 |
def check_clioncode(projroot: Path, full: bool, verbose: bool) -> None:
"""Run clion inspections on all our code."""
import time
cachepath = Path('.cache/check_clioncode')
filenames = get_code_filenames(projroot)
clionroot = Path('/Applications/CLion.app')
# clionbin = Path(clionroot, 'Contents/MacOS/clion')
inspect = Path(clionroot, 'Contents/bin/inspect.sh')
# At the moment offline clion inspections seem a bit flaky.
# They don't seem to run at all if we haven't opened the project
# in the GUI, and it seems recent changes can get ignored for that
# reason too.
# So for now let's try blowing away caches, launching the gui
# temporarily, and then kicking off inspections after that. Sigh.
print('Clearing CLion caches...', flush=True)
caches_root = os.environ['HOME'] + '/Library/Caches/JetBrains'
if not os.path.exists(caches_root):
raise RuntimeError(f'CLion caches root not found: {caches_root}')
subprocess.run('rm -rf ~/Library/Caches/JetBrains/CLion*',
shell=True,
check=True)
# UPDATE: seems this is unnecessary now; should double check.
# Note: I'm assuming this project needs to be open when the GUI
# comes up. Currently just have one project so can rely on auto-open
# but may need to get fancier later if that changes.
if bool(True):
print('Launching GUI CLion to rebuild caches...', flush=True)
# process = subprocess.Popen(str(clionbin))
subprocess.run(
['open', '-a', clionroot,
Path(projroot, 'ballisticacore-cmake')],
check=True)
# Wait a moment and ask it nicely to die.
waittime = 60
while waittime > 0:
print(f'Waiting for {waittime} more seconds.', flush=True)
time.sleep(10)
waittime -= 10
# For some reason this is giving a return-code 1 although
# it appears to be working.
print('Waiting for GUI CLion to quit...', flush=True)
subprocess.run(
[
'osascript', '-e', 'tell application "CLion" to quit\n'
'repeat until application "CLion" is not running\n'
' delay 1\n'
'end repeat'
],
check=False,
)
time.sleep(5)
# process.terminate()
# process.wait(timeout=60)
print('Launching Offline CLion to run inspections...', flush=True)
_run_idea_inspections_cached(cachepath=cachepath,
filenames=filenames,
full=full,
projroot=Path(projroot,
'ballisticacore-cmake'),
inspectdir=Path(projroot, 'src/ballistica'),
displayname='CLion',
inspect=inspect,
verbose=verbose) | 5,358,660 |
def get_atomate_wflows(wf_coll,
states,
seed_regex=None,
project_regex=None) -> pd.DataFrame:
"""Obtain workflow informaton for atomate jobs"""
return get_workflows(wf_coll, ['atomate-relax'],
states,
seed_regex=seed_regex,
project_regex=project_regex) | 5,358,661 |
def the_H_function(sorted_citations_list, n=1):
"""from a list of integers [n1, n2 ..] representing publications citations,
return the max list-position which is >= integer
eg
>>> the_H_function([10, 8, 5, 4, 3]) => 4
>>> the_H_function([25, 8, 5, 3, 3]) => 3
>>> the_H_function([1000, 20]) => 2
"""
if sorted_citations_list and sorted_citations_list[0] >= n:
return the_H_function(sorted_citations_list[1:], n + 1)
else:
return n - 1 | 5,358,662 |
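# Usage sketch, added for illustration: the helper expects the citation counts to be
# sorted in descending order, so unsorted input should be sorted first.
citations = [3, 10, 8, 4, 5]
h_index = the_H_function(sorted(citations, reverse=True))
print(h_index)  # -> 4: four papers each have at least 4 citations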
def test_parse_assignments_expressions(code, target):
"""Test parse_assignments_expressions function."""
res = kale_ast.parse_assignments_expressions(code)
compare(res, target) | 5,358,663 |
def InitGoldData(packet_addresses, gold_data, host):
"""Gold standard is data from first read."""
packet_addresses.append(TEST_PACKET0_ADDRESS % host)
packet_addresses.append(TEST_PACKET1_ADDRESS % host)
gold_data.append(urllib2.urlopen(packet_addresses[0]).read())
gold_data.append(urllib2.urlopen(packet_addresses[1]).read()) | 5,358,664 |
def build_trainer(model: BaseModel,
params: Parameters,
dataset: Dataset,
target_processor: TargetProcessor,
batch_processor: BatchProcessor) \
-> BaseTrainer:
"""
Build a neural network trainer/optimizer based on different backend
:param model: Model (inherited from nnimgproc.model.BaseModel)
:param params: Parameters (from nnimgproc.util.parameters),
training parameter set such as learning rate
:param dataset: Dataset (from nnimgproc.dataset), image provider
:param target_processor: TargetProcessor (from nnimgproc.processor)
:param batch_processor: BatchProcessor (from nnimgproc.processor)
:return: Trainer (from nnimgproc.trainer)
"""
lib = importlib.import_module('nnimgproc.backend.%s' % model.backend)
return lib.Trainer(model, params, dataset,
target_processor, batch_processor) | 5,358,665 |
def get_random_quote(quotes_list):
"""Return a random quote to user."""
upper_limit = len(quotes_list)-1
select = random.randint(0, upper_limit)
selected_quote = quotes_list[select]
soup = BeautifulSoup(selected_quote, 'html.parser')
return soup.text | 5,358,666 |
def record_states(hass):
"""Record some test states.
We inject a bunch of state updates from media player, zone and
thermostat.
"""
mp = "media_player.test"
mp2 = "media_player.test2"
mp3 = "media_player.test3"
therm = "thermostat.test"
therm2 = "thermostat.test2"
zone = "zone.home"
script_c = "script.can_cancel_this_one"
def set_state(entity_id, state, **kwargs):
"""Set the state."""
hass.states.set(entity_id, state, **kwargs)
wait_recording_done(hass)
return hass.states.get(entity_id)
zero = dt_util.utcnow()
one = zero + timedelta(seconds=1)
two = one + timedelta(seconds=1)
three = two + timedelta(seconds=1)
four = three + timedelta(seconds=1)
states = {therm: [], therm2: [], mp: [], mp2: [], mp3: [], script_c: []}
with patch("homeassistant.components.recorder.dt_util.utcnow", return_value=one):
states[mp].append(
set_state(mp, "idle", attributes={"media_title": str(sentinel.mt1)})
)
states[mp].append(
set_state(mp, "YouTube", attributes={"media_title": str(sentinel.mt2)})
)
states[mp2].append(
set_state(mp2, "YouTube", attributes={"media_title": str(sentinel.mt2)})
)
states[mp3].append(
set_state(mp3, "idle", attributes={"media_title": str(sentinel.mt1)})
)
states[therm].append(
set_state(therm, 20, attributes={"current_temperature": 19.5})
)
with patch("homeassistant.components.recorder.dt_util.utcnow", return_value=two):
# This state will be skipped only different in time
set_state(mp, "YouTube", attributes={"media_title": str(sentinel.mt3)})
# This state will be skipped because domain is excluded
set_state(zone, "zoning")
states[script_c].append(
set_state(script_c, "off", attributes={"can_cancel": True})
)
states[therm].append(
set_state(therm, 21, attributes={"current_temperature": 19.8})
)
states[therm2].append(
set_state(therm2, 20, attributes={"current_temperature": 19})
)
with patch("homeassistant.components.recorder.dt_util.utcnow", return_value=three):
states[mp].append(
set_state(mp, "Netflix", attributes={"media_title": str(sentinel.mt4)})
)
states[mp3].append(
set_state(mp3, "Netflix", attributes={"media_title": str(sentinel.mt3)})
)
# Attributes changed even though state is the same
states[therm].append(
set_state(therm, 21, attributes={"current_temperature": 20})
)
return zero, four, states | 5,358,667 |
def generate_char_gap_report(fp, char_gap_accuracies):
"""Generate a accuracy report for the char-gap set. Outputs to file fp."""
fp.write("==== Character gaps ====\n")
all_pc_accs = []
all_ps_accs = []
for i, acc in enumerate(char_gap_accuracies):
pc_acc = acc[0] * 100
ps_acc = acc[1] * 100
all_pc_accs.append(pc_acc)
all_ps_accs.append(ps_acc)
fp.write(
"%d char gaps: accuracy per character %.2f%%, accuracy per sequence %.2f%%\n"
% (i + 1, pc_acc, ps_acc)
)
fp.write(
"Character gaps average per character accuracy: %.2f%%\n"
% (sum(all_pc_accs) / float(len(all_pc_accs)))
)
fp.write(
"Character gaps average per mask accuracy: %.2f%%\n"
% ((sum(all_ps_accs) / float(len(all_ps_accs))))
) | 5,358,668 |
def input_change(attr, old, new):
"""Executes whenever the input form changes.
It is responsible for updating the plot, or anything else you want.
Args:
attr : the name of the attr that changed
old : old value of attr
new : new value of attr
"""
update_data()
plot.title = text.value | 5,358,669 |
def ask_why(doc):
"""
Ask questions of the form “Why is ..x..?” where x is either a
combination of object and adjective or subject and adjective
or “Why ..prep.. the ..noun..”
"""
chunk = find_subj_chunk(doc)
    if chunk is not None and chunk["adjective"] is not None:
subj = chunk["subject"]
adj = chunk["adjective"]
respond = "Why is {} {}?".format(subj, adj)
return respond
chunk = find_obj_chunk(doc)
    if chunk is not None and chunk["adjective"] is not None:
subj = chunk["objective"]
adj = chunk["adjective"]
respond = "Why is {} {}?".format(subj, adj)
return respond
# I had similar experience in high school --> why in high school?
chunk = find_prep_chunk(doc)
    if chunk is not None:
subj = chunk["full_subject"]
prep = chunk["prep"]
respond = "Why {} the {}?".format(prep, subj)
return respond
return None | 5,358,670 |
def get_extension(fname):
"""
Get file extension.
"""
return '.' + fname.split(".")[-1] | 5,358,671 |
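# Usage sketch, added for illustration: only the text after the last dot is returned,
# so multi-part extensions are truncated and dot-less names get a "." prepended.
print(get_extension("report.pdf"))      # -> ".pdf"
print(get_extension("archive.tar.gz"))  # -> ".gz"
print(get_extension("Makefile"))        # -> ".Makefile"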
def large_xyz_to_luv_star(large_xyz, white_xyz):
"""
    # Overview
    Compute L*u*v* (CIELUV) from XYZ.
    # Reference
https://en.wikipedia.org/wiki/CIELUV
"""
large_x, large_y, large_z = np.dsplit(large_xyz, 3)
white_xyz = np.array(white_xyz)
white_xyz = (white_xyz / white_xyz[1]).reshape((1, 1, 3))
x_n, y_n, z_n = np.dsplit(white_xyz, 3)
threshold = (6/29) ** 3
judge = (large_y / y_n)
l_lower = (judge <= threshold) * (((29/3) ** 3) * (large_y / y_n))
l_upper = (judge > threshold) * (116 * ((large_y / y_n) ** (1/3)) - 16)
l_star = l_lower + l_upper
u_dash, v_dash = np.dsplit(large_xyz_to_uv_dash(large_xyz), 2)
u_n_dash, v_n_dash = np.dsplit(large_xyz_to_uv_dash(white_xyz), 2)
u_star = 13 * l_star * (u_dash - u_n_dash)
v_star = 13 * l_star * (v_dash - v_n_dash)
return np.dstack((l_star, u_star, v_star)) | 5,358,672 |
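# Usage sketch, added for illustration (assumptions: numpy is imported as np and
# large_xyz_to_uv_dash is defined in the same module; the white point below is a nominal
# D65 XYZ triple). The input is an (H, W, 3) image-like array; the output stacks
# L*, u*, v* on the last axis.
import numpy as np
d65_white = [0.95047, 1.0, 1.08883]
xyz = np.array([[[0.4, 0.3, 0.2]]])          # a single "pixel", shape (1, 1, 3)
luv = large_xyz_to_luv_star(xyz, d65_white)  # -> array of shape (1, 1, 3)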
def rotation(new_rotation=0):
"""Set the display rotation.
:param new_rotation: Specify the rotation in degrees: 0, 90, 180 or 270
"""
global _rotation
if new_rotation in [0, 90, 180, 270]:
_rotation = new_rotation
return True
else:
raise ValueError("Rotation: 0, 90, 180 or 270 degrees only") | 5,358,673 |
def parse_url (url:str) -> str:
"""
    Normalize a URL path.
    e.g. hello/world -> /hello/world
"""
if url == "": url = "/"
    if not url.startswith('/'): url = "/" + url  # add a leading slash
    # if not url.endswith("/"): url += "/"  # add a trailing slash
return url | 5,358,674 |
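# Usage sketch, added for illustration: the helper only guarantees a leading slash.
print(parse_url(""))             # -> "/"
print(parse_url("hello/world"))  # -> "/hello/world"
print(parse_url("/already/ok"))  # -> "/already/ok"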
def bulk_edit(modeladmin, request, queryset):
""" Bulk edit selected items. """
form = None
if 'apply' in request.POST:
form = BulkEditForm(request.POST)
if form.is_valid():
property = form.cleaned_data['property']
cf_value = form.cleaned_data['cf_value']
ff_value = form.cleaned_data['ff_value']
inline_notes = form.cleaned_data['inline_notes']
footnotes = form.cleaned_data['footnotes']
overwrite = form.cleaned_data['overwrite']
delete_only = form.cleaned_data['delete_only']
if queryset.model is Subject:
entity_type = 'SO'
elif queryset.model is Location:
entity_type = 'SL'
elif queryset.model is Media:
entity_type = 'MP'
elif queryset.model is File:
entity_type = 'MF'
else:
entity_type = 'PO'
if not delete_only and (property.control_field and not cf_value):
                modeladmin.message_user(request, 'UPDATE FAILED: If you would like to update a Controlled Property, you must select a Controlled Term', level=messages.ERROR)
return HttpResponseRedirect(request.get_full_path())
elif not delete_only and (not property.control_field and (not ff_value or ff_value == '')):
modeladmin.message_user(request, 'UPDATE FAILED: If you would like to update a Free-Form Property, you must provide a Free-Form value', level=messages.ERROR)
return HttpResponseRedirect(request.get_full_path())
if property.primary_type != 'AL' and property.primary_type != entity_type:
modeladmin.message_user(request, 'UPDATE FAILED: You selected a property which is not available for this Entity. If you would like to make it available, go to the Descriptive Property table and change Primary Type to All', level=messages.ERROR)
return HttpResponseRedirect(request.get_full_path())
if cf_value and cf_value.type != property:
modeladmin.message_user(request, 'UPDATE FAILED: You selected a Controlled Term that is not a value for the selected Property', level=messages.ERROR)
return HttpResponseRedirect(request.get_full_path())
for item in queryset:
if queryset.model is Subject:
if property.control_field:
if overwrite or delete_only:
control_props = SubjectControlProperty.objects.filter(subject = item, control_property = property)
for cp in control_props:
cp.delete()
if not delete_only:
new_cp = SubjectControlProperty(subject = item, control_property = property, control_property_value = cf_value, notes = footnotes, inline_notes = inline_notes, last_mod_by = request.user)
new_cp.save()
else:
if overwrite or delete_only:
ff_props = SubjectProperty.objects.filter(subject = item, property = property)
for fp in ff_props:
fp.delete()
if not delete_only:
new_fp = SubjectProperty(subject = item, property = property, property_value = ff_value, notes = footnotes, inline_notes = inline_notes, last_mod_by = request.user)
new_fp.save()
elif queryset.model is Location:
if property.control_field:
if overwrite or delete_only:
control_props = LocationControlProperty.objects.filter(location = item, control_property = property)
for cp in control_props:
cp.delete()
if not delete_only:
new_cp = LocationControlProperty(location = item, control_property = property, control_property_value = cf_value, notes = footnotes, inline_notes = inline_notes, last_mod_by = request.user)
new_cp.save()
else:
if overwrite or delete_only:
ff_props = LocationProperty.objects.filter(location = item, property = property)
for fp in ff_props:
fp.delete()
if not delete_only:
new_fp = LocationProperty(location = item, property = property, property_value = ff_value, notes = footnotes, inline_notes = inline_notes, last_mod_by = request.user)
new_fp.save()
elif queryset.model is Media:
if property.control_field:
if overwrite or delete_only:
control_props = MediaControlProperty.objects.filter(media = item, control_property = property)
for cp in control_props:
cp.delete()
if not delete_only:
new_cp = MediaControlProperty(media = item, control_property = property, control_property_value = cf_value, notes = footnotes, inline_notes = inline_notes, last_mod_by = request.user)
new_cp.save()
else:
if overwrite or delete_only:
ff_props = MediaProperty.objects.filter(media = item, property = property)
for fp in ff_props:
fp.delete()
if not delete_only:
new_fp = MediaProperty(media = item, property = property, property_value = ff_value, notes = footnotes, inline_notes = inline_notes, last_mod_by = request.user)
new_fp.save()
elif queryset.model is PersonOrg:
if property.control_field:
if overwrite or delete_only:
control_props = PersonOrgControlProperty.objects.filter(person_org = item, control_property = property)
for cp in control_props:
cp.delete()
if not delete_only:
new_cp = PersonOrgControlProperty(person_org = item, control_property = property, control_property_value = cf_value, notes = footnotes, inline_notes = inline_notes, last_mod_by = request.user)
new_cp.save()
else:
if overwrite or delete_only:
ff_props = PersonOrgProperty.objects.filter(person_org = item, property = property)
for fp in ff_props:
fp.delete()
if not delete_only:
new_fp = PersonOrgProperty(person_org = item, property = property, property_value = ff_value, notes = footnotes, inline_notes = inline_notes, last_mod_by = request.user)
new_fp.save()
elif queryset.model is File:
if property.control_field:
if overwrite or delete_only:
control_props = FileControlProperty.objects.filter(file = item, control_property = property)
for cp in control_props:
cp.delete()
if not delete_only:
new_cp = FileControlProperty(file = item, control_property = property, control_property_value = cf_value, notes = footnotes, inline_notes = inline_notes, last_mod_by = request.user)
new_cp.save()
else:
if overwrite or delete_only:
ff_props = FileProperty.objects.filter(file = item, property = property)
for fp in ff_props:
fp.delete()
if not delete_only:
new_fp = FileProperty(file = item, property = property, property_value = ff_value, notes = footnotes, inline_notes = inline_notes, last_mod_by = request.user)
new_fp.save()
modeladmin.message_user(request, _("%s %s." % ('Selected property edited: ', property)))
return HttpResponseRedirect(request.get_full_path())
if not form:
form = BulkEditForm(initial={'_selected_action': request.POST.getlist(admin.ACTION_CHECKBOX_NAME)})
opts = queryset.model._meta
app_label = opts.app_label
return render_to_response(
'admin/bulk_edit.html',
{'items': queryset, 'bulk_edit_form': form, "opts": opts, "app_label": app_label},
context_instance = RequestContext(request)
) | 5,358,675 |
def smooth_GF(C,S,avg_rad, start_deg):
"""from Wahr et al: Time-variable gravity recovery from space eq. 34.
This is Jekeli's [1981] smoothing method."""
C_smooth = C
S_smooth = S
Re = 6378.1363; # Radius of Earth in km
b = np.log(2) / (1 - np.cos(avg_rad / Re))
W=[]
W.append(1 / (2 * np.pi))
W.append(1 / (2 * np.pi) * ((1 + np.exp(-2 * b)) / (1 - np.exp(-2 * b)) - 1 / b))
for j in range(start_deg,C.shape[0]):
w = (-(2*(j-1)+1)/b*W[j-1]) + W[j-2]
W.append(w)
if W[j] < 0.: W[j] = 0.
if W[j-1] == 0.: W[j] = 0.
for i in range(start_deg-1,C.shape[0]):
C_smooth[i,:]=C[i,:]*W[i]*2.*np.pi
S_smooth[i,:] = S[i,:]*W[i]*2.*np.pi
return C_smooth, S_smooth | 5,358,676 |
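# Reference note, added for clarity (not part of the original corpus): with
# b = ln(2) / (1 - cos(avg_rad / Re)), the Gaussian averaging weights used above follow
# Jekeli's recursion
#   W_0 = 1 / (2*pi)
#   W_1 = (1 / (2*pi)) * ((1 + exp(-2b)) / (1 - exp(-2b)) - 1/b)
#   W_(l+1) = -(2l + 1) / b * W_l + W_(l-1)
# and each degree-l row of C and S is then damped by the factor 2*pi*W_l, which is
# exactly what the final loop applies.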
def get_vmstat():
"""
Get and format the content of /proc/vmstat
"""
buf = open("/proc/vmstat").read()
buf = [v.replace(' ', ":") for v in buf.split("\n")]
buf = ";".join(buf)
return buf | 5,358,677 |
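# Usage sketch, added for illustration (Linux only): the result is a single string of
# ';'-separated 'key:value' pairs, which can be turned back into a dict like this.
stats = get_vmstat()
pairs = dict(item.split(":", 1) for item in stats.split(";") if ":" in item)
print(pairs.get("nr_free_pages"))  # e.g. "123456"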
def upcomingIPOs(
symbol="",
exactDate="",
token="",
version="stable",
filter="",
format="json",
):
"""This will return all upcoming estimates, dividends, splits for a given symbol or the market. If market is passed for the symbol, IPOs will also be included.
https://iexcloud.io/docs/api/#upcoming-events
Args:
symbol (str): Symbol to look up
exactDate (str): exactDate Optional. Exact date for which to get data
token (str): Access token
version (str): API version
filter (str): filters: https://iexcloud.io/docs/api/#filter-results
format (str): return format, defaults to json
Returns:
dict or DataFrame: result
"""
_raiseIfNotStr(symbol)
symbol = _quoteSymbols(symbol)
if symbol:
url = "stock/{}/upcoming-ipos".format(symbol)
else:
url = "stock/market/upcoming-ipos"
if exactDate:
url += "?exactDate={}".format(exactDate)
return _get(url, token, version, filter) | 5,358,678 |
def sort_tokens(tokens: Iterable[Cwf]) -> List[Cwf]:
"""Sort tokens by natural order (sent, offset)"""
return sorted(tokens, key=lambda t: (t.get_sent(), int(t.get_offset()))) | 5,358,679 |
def is_zero(evm: Evm) -> None:
"""
Checks if the top element is equal to 0. Pushes the result back on the
stack.
Parameters
----------
evm :
The current EVM frame.
Raises
------
ethereum.frontier.vm.error.StackUnderflowError
If `len(stack)` is less than `1`.
ethereum.frontier.vm.error.OutOfGasError
If `evm.gas_left` is less than `GAS_VERY_LOW`.
"""
evm.gas_left = subtract_gas(evm.gas_left, GAS_VERY_LOW)
x = pop(evm.stack)
result = U256(x == 0)
push(evm.stack, result)
evm.pc += 1 | 5,358,680 |
def deep_seq_map(xss, fun, keys=None, fun_name=None, expand=False):
"""Applies fun to list of or dict of lists; adds the results in-place.
Usage: Transform a corpus iteratively by applying functions like
`tokenize`, `lower`, or vocabulary functions (word -> embedding id) to it.
from jtr.sisyphos.vocab import Vocab
vocab = Vocab()
keys = ['question', 'support']
corpus = deep_map(corpus, lambda x: x.lower(), keys)
corpus = deep_map(corpus, tokenize, keys)
corpus = deep_map(corpus, vocab, keys)
corpus = deep_map(corpus, vocab._normalize, keys=keys)
-> through tokenize we go from a dict of sentences to
a dict of words (list of lists), thus we now apply deep_seq_map for
processing to add start of and end of sentence tags:
corpus = deep_seq_map(corpus, lambda xs: ["<SOS>"] + xs +
["<EOS>"],
['question', 'support'])
-> From here we can create batches from the corpus and feed it into a model.
In case `expand==False` each top-level entry of `xs` to be transformed
replaces the original entry.
`deep_map` supports `xs` to be a dictionary or a list/tuple:
- In case `xs` is a dictionary, its transformed value is also a dictionary, and `keys` contains the keys of the
values to be transformed.
- In case `xs` is a list/tuple, `keys` contains the indices of the entries to be transformed
The function `deep_map` is recursively applied to the values of `xs`;
the function `fun` takes a sequence as input, and is applied at the one but deepest level,
where the entries are sequences of objects (no longer sequences of sequences).
This is the only difference with `deep_map`
Args:
`xs`: a sequence (list/tuple) of objects or sequences of objects.
`fun`: a function to transform sequences
`keys`: seq with keys if `xs` is dict; seq with integer indices if `xs` is seq.
For entries not in `keys`, the original `xs` value is retained.
`fun_name`: default value 'trf'; string with function tag (e.g. 'lengths'),
used if '''expand==True''' and '''isinstance(xs,dict)'''
Say for example fun_name='count', and `keys` contains 'sentence', then the transformed dict would look like
'''{'sentence':[sentences], 'sentence_lengths':[fun(sentences)] ...}'''
Returns:
Transformed sequence or dictionary.
Example:
>>> dave = [
... "All work and no play makes Jack a dull boy",
... "All work and no play makes Jack a dull boy.",
... "All work and no play makes Jack a very dull boy!"]
>>> jack = [
... "I'm sorry Dave, I'm afraid I can't do that!",
... "I'm sorry Dave, I'm afraid I can't do that",
... "I'm sorry Dave, I'm afraid I cannot do that"]
>>> support = [
... ["Play makes really dull", "really dull"],
... ["Dave is human"],
... ["All work", "all dull", "dull"]]
>>> data2 = {'dave': dave, 'jack': jack, 'support': support}
>>> vocab2 = Vocab()
>>> data2_processed = deep_map(data2, lambda x: tokenize(x.lower()))
>>> data2_ids = deep_map(data2_processed, vocab2)
>>> data2_ids_with_lengths = deep_seq_map(data2_ids, lambda xs: len(xs), keys=['dave','jack','support'],
... fun_name='lengths', expand=True)
>>> pprint.pprint(data2_ids_with_lengths)
{'dave': [[1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11],
[1, 2, 3, 4, 5, 6, 7, 8, 12, 9, 10, 13]],
'dave_lengths': [10, 11, 12],
'jack': [[14, 15, 16, 17, 18, 19, 14, 15, 16, 20, 14, 21, 15, 22, 23, 24, 13],
[14, 15, 16, 17, 18, 19, 14, 15, 16, 20, 14, 21, 15, 22, 23, 24],
[14, 15, 16, 17, 18, 19, 14, 15, 16, 20, 14, 25, 23, 24]],
'jack_lengths': [17, 16, 14],
'support': [[[5, 6, 26, 9], [26, 9]], [[18, 27, 28]], [[1, 2], [1, 9], [9]]],
'support_lengths': [[4, 2], [3], [2, 2, 1]]}
"""
if isinstance(xss, list) and all([not isinstance(xs, list) for xs in xss]):
return fun(xss)
else:
if isinstance(xss, dict):
xss_mapped = {}
for k, xs in xss.items():
if keys is None or k in keys:
if expand:
xss_mapped[k] = xs
k = '%s_%s' % (str(k), str(fun_name) if fun_name is not None else 'trf')
if isinstance(xs, list) and all([not isinstance(x, list) for x in xs]):
xss_mapped[k] = fun(xs)
else:
xss_mapped[k] = deep_seq_map(xs, fun) # fun_name not needed, because expand==False
else:
xss_mapped[k] = xs
else:
xss_mapped = []
for k, xs in enumerate(xss):
if keys is None or k in keys:
if expand:
xss_mapped.append(xs)
if isinstance(xs, list) and all([not isinstance(x, list) for x in xs]):
xss_mapped.append(fun(xs))
else:
xss_mapped.append(deep_seq_map(xs, fun))
else:
xss_mapped.append(xs)
return xss_mapped | 5,358,681 |
def create_user(steamid, admin):
"""Create a user"""
steamid = string_to_steamid(steamid)
if not steamid.is_valid() or not steamid.type == EType.Individual:
echo('Invalid steam ID')
return 1
user = User(steamid64=steamid.as_64, admin=admin)
user.refresh_name()
if user.name is not None:
db.session.add(user)
db.session.commit()
echo('added ' + user.name)
else:
echo('No such steam user')
return 1 | 5,358,682 |
def get_vocabularies():
"""
Return the currently used ontology
:return:
"""
vocabs = vocs.get_vocabularies()
vocabs = [(x, url_for('get_vocabulary', vid=x, _external=True)) for x in vocabs]
response = make_response(json.dumps(dict(vocabs)))
response.headers['Content-Type'] = 'application/json'
return response | 5,358,683 |
def test_handle_transient_files(transient_files, transient_files_contents, transient_files_cids, expected_output):
"""Check the parsing of transient files
Given:
- Files names (as a string)
- Files contents (as a string)
- Files cids (as a string)
When:
- Parsing the data for transient files creation
Then:
- Create the dictionary for files creation
"""
assert handle_transient_files(transient_files, transient_files_contents, transient_files_cids) == expected_output | 5,358,684 |
def test_port_stripper_invalid_protocol():
"""Test the port stripper for using invalid protocol"""
_, _, valid = _port_stripper("127.0.0.1:8080", protocol='IPv9')
assert valid is False | 5,358,685 |
def plot_feature_importance(sorted_series_features, title_str):
""" Plot feature importance from tree models """
sns.set()
plt.figure()
sorted_series_features.plot(kind = 'barh', color = 'blue')
plt.title(title_str)
plt.xlabel('Importance')
plt.ylabel('Feature')
plt.tight_layout(pad=2.0, w_pad=5.0, h_pad=1.0)
plt.show() | 5,358,686 |
def do3byte(*args):
"""do3byte(ea_t ea, asize_t length) -> bool"""
return _idaapi.do3byte(*args) | 5,358,687 |
def test_simple_seed_only(driver, function_store):
"""
Simple integration test w/ a seed dataset only. This is the most simple way to create a cube.
"""
df = pd.DataFrame({"x": [0, 1, 2, 3], "p": [0, 0, 1, 1], "v": [10, 11, 12, 13]})
cube = Cube(dimension_columns=["x"], partition_columns=["p"], uuid_prefix="cube")
result = driver(data=df, cube=cube, store=function_store)
assert set(result.keys()) == {cube.seed_dataset}
ds = list(result.values())[0]
ds = ds.load_all_indices(function_store())
assert ds.uuid == cube.ktk_dataset_uuid(cube.seed_dataset)
assert len(ds.partitions) == 2
assert set(ds.indices.keys()) == {"p", "x"}
assert isinstance(ds.indices["p"], PartitionIndex)
assert isinstance(ds.indices["x"], ExplicitSecondaryIndex)
assert ds.table_name == SINGLE_TABLE | 5,358,688 |
def compute_sap(ground_truth_data,
representation_function,
random_state,
num_train=gin.REQUIRED,
num_test=gin.REQUIRED,
batch_size=16,
continuous_factors=gin.REQUIRED):
"""Computes the SAP score.
Args:
ground_truth_data: GroundTruthData to be sampled from.
representation_function: Function that takes observations as input and
outputs a dim_representation sized representation for each observation.
random_state: Numpy random state used for randomness.
num_train: Number of points used for training.
num_test: Number of points used for testing discrete variables.
batch_size: Batch size for sampling.
continuous_factors: Factors are continuous variable (True) or not (False).
Returns:
Dictionary with SAP score.
"""
logging.info("Generating training set.")
mus, ys = utils.generate_batch_factor_code(
ground_truth_data, representation_function, num_train,
random_state, batch_size)
mus_test, ys_test = utils.generate_batch_factor_code(
ground_truth_data, representation_function, num_test,
random_state, batch_size)
logging.info("Computing score matrix.")
score_matrix = _compute_score_matrix(mus, ys, mus_test,
ys_test, continuous_factors)
# Score matrix should have shape [num_latents, num_factors].
assert score_matrix.shape[0] == mus.shape[0]
assert score_matrix.shape[1] == ys.shape[0]
scores_dict = {}
scores_dict["SAP_score"] = _compute_avg_diff_top_two(score_matrix)
logging.info("SAP score: %.2g", scores_dict["SAP_score"])
return scores_dict | 5,358,689 |
def process_request(ctx: click.Context, results: Sequence[Any], **kwargs: Any) -> None:
"""
All click commands finished, start any jobs necessary
"""
options: Options = ctx.obj
if options.exit:
return
config = options.config
contexts = resolve_contexts(config)
if options.python:
context_versions = [context.python_version for context in contexts]
matched_versions = version_match(context_versions, options.python)
contexts = [
context
for context in contexts
if context.python_version in matched_versions
]
print(f"runtimes: {[str(ctx.python_version) for ctx in contexts]}")
job_names = options.jobs
if not job_names:
if config.default:
job_names.extend(config.default)
else:
ctx.invoke(list_commands)
ctx.exit(1)
print(f"will run: {job_names!r}")
jobs = resolve_jobs(job_names, config)
run(jobs, contexts, config)
if options.benchmark:
click.echo("\nbenchmark timings:\n------------------")
for timing in get_timings():
click.echo(f" {timing}") | 5,358,690 |
def clean_cmd(cmd):
"""Removes multiple spaces and whitespace at beginning or end of command.
Args:
cmd (str): A string containing the command to clean.
Returns:
A cleaned command string.
"""
    return re.sub(r'\s{2,}', ' ', cmd).strip(' \t\n\r') | 5,358,691 |
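# Usage sketch, added for illustration of the corrected pattern: interior whitespace runs
# collapse to a single space and the surrounding whitespace is stripped.
print(clean_cmd("  ls   -la   /tmp \n"))  # -> "ls -la /tmp"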
def multiply_scalar(mat, value):
""" Multiplies every element in the matrix by the given scalar value.
Args:
mat (Matrix): The input matrix.
value (int or float): The number that mat will be multipled by.
Returns:
Matrix: The resulting matrix from the multiplication of mat and value.
"""
mat_1d = flatten(mat.data)
result = [x * value for x in mat_1d]
return reshape(Matrix(result), mat.shape()) | 5,358,692 |
def bin4D(data4D, bin_factor):
"""
Bin 4D data in spectral dimensions
Parameters
----------
data4D: ndarray of shape (4,4)
the first two dimensions are Fourier
space, while the next two dimensions
are real space
bin_factor: int
Value by which to bin data
Returns
-------
binned_data: ndarray of shape (4,4)
Data binned in the spectral dimensions
Notes
-----
The data is binned in the first two dimensions - which are
the Fourier dimensions using the internal numba functions
`resizer2D_numbaopt` and `resizer1D_numbaopt`
See Also
--------
resizer1D_numbaopt
resizer2D_numbaopt
"""
data4D_flat = np.reshape(
data4D, (data4D.shape[0], data4D.shape[1], data4D.shape[2] * data4D.shape[3])
)
datashape = np.asarray(data4D_flat.shape)
res_shape = np.copy(datashape)
res_shape[0:2] = np.round(datashape[0:2] / bin_factor)
data4D_res = np.zeros(res_shape.astype(int), dtype=data4D_flat.dtype)
resampled_x = np.zeros((datashape[0], res_shape[1]), data4D_flat.dtype)
resampled_f = np.zeros(res_shape[0:2], dtype=data4D_flat.dtype)
for zz in range(data4D_flat.shape[-1]):
data4D_res[:, :, zz] = resizer2D_numbaopt(
data4D_flat[:, :, zz], resampled_x, resampled_f, bin_factor
)
binned_data = np.reshape(
data4D_res,
(resampled_f.shape[0], resampled_f.shape[1], data4D.shape[2], data4D.shape[3]),
)
return binned_data | 5,358,693 |
def test_life_cycle(err_workbench):
"""Test basic behavior of ErrorsPlugin.
"""
plugin = err_workbench.get_plugin(ERRORS_ID)
assert len(plugin.errors) == 4
plugin._errors_handlers.contributions = {}
assert len(plugin.errors) == 0
plugin.stop()
assert not len(plugin.errors) | 5,358,694 |
def lorentz(x, a, mu, ga):
""" Input: x - value and a=I, mu=x_0, ga - lorentz f. coeffitients (float)
Return: value of function with desired parameters in x (float)
Descr.: Calculate L-type function for given x and parameters"""
return (a * ga ** 2) / ((x - mu) ** 2 + ga ** 2) | 5,358,695 |
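# Usage sketch, added for illustration: at the centre x = mu the Lorentzian evaluates to
# exactly the amplitude a, independent of the half-width ga.
peak = lorentz(2.0, a=1.5, mu=2.0, ga=0.3)
print(peak)  # -> 1.5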
def main(arguments):
"""
    Create and return the Thunder object for the given arguments.
    :param arguments: a Thunder object or a dictionary used to initialise the Thunder object
    :return: the initialised Thunder object
"""
thunder = Thunder(deepcopy(arguments)) # load object
return thunder | 5,358,696 |
def scriptSaveAs():
"""scriptSaveAs(filename=None, overwrite=-1) -> None
Saves the current script with the given file name if supplied, or (in GUI mode) asks the user for one using the file chooser. If Nuke is not running in GUI mode, you must supply a filename.
@param filename: Saves the current script with the given file name if supplied, or (in GUI mode) asks the user for one using the file chooser.
@param overwrite: If 1 (true) always overwrite; if 0 (false) never overwrite; otherwise, in GUI mode ask the user, in terminal do same as False. Default is -1, meaning 'ask the user'."""
pass | 5,358,697 |
def remove_existing_furigana(tree: ET.ElementTree, parent_map: dict):
"""
Replace all existing ruby elements by their text, e.g., <ruby>X<rt>Y</rt></ruby> becomes X.
"""
elems = tree.findall(f'.//{NAMESPACE}ruby')
for elem in elems:
        # Remove all the <rt> children (the readings) but keep the text from the other children
childs_text = []
for child in list(elem):
if not child.tag.endswith("rt") and not child.tag.endswith("rp"):
text = (child.text or "") + (child.tail or "")
else:
text = child.tail or ""
childs_text.append(text)
elem.remove(child)
        # Replace the node with its own text, its children's text, and its tail
new_text = (elem.text or "") + "".join(childs_text) + (elem.tail or "")
parent_elem = parent_map[elem]
# Find the previous child to append our new text to it
idx = list(parent_elem).index(elem)
if idx == 0:
# If our element was the first child, append to parent node's text
parent_elem.text = (parent_elem.text or "") + new_text
else:
            # Otherwise, append to the tail of the previous child
previous_elem = parent_elem[idx - 1]
previous_elem.tail = (previous_elem.tail or "") + new_text
# Finally, remove our ruby element from its parent
parent_elem.remove(elem) | 5,358,698 |
def make_train_func(
model,
loss_func,
optimizer,
dtype=None,
device=None,
call_model=None,
get_train_loss=None,
):
"""Create a train func for ``ignite``.
This function assumes that each batch is of the form ``(x, y)`` with no assumptions placed on ``x`` or ``y``.
Each batch will be transformed into a ``torch.Tensor``.
:param model:
the model to optimize, it will be called with the features
:param loss_func:
the loss function to optimize, it will be called with the
model output and the targets. The return value of the loss
function must be compatible with ``get_train_loss``.
:param optimizer:
the optimizer to use
:param dtype:
the dtype of the batch, can be a structured object, e.g., a tuple of dtypes
:param device:
the device to assign to the batch, can be a structured object, e.g.,
a tuple of devices
:param call_model:
instead of calling the model directly, ``call_model(model, x)`` it will be used.
If not given a default implementation is used, that passes ``x`` as varargs if it is a tuple,
as keyword args if it is a dict and directly otherwise.
:param get_train_loss:
The output of ``loss_func`` will be passed through ``get_train_loss`` before calling backward.
If not given a default implementation is used that takes the first item of the loss if it is a tuple
and the loss directly otherwise.
"""
def default_get_train_loss(loss):
return loss[0] if isinstance(loss, tuple) else loss
if call_model is None:
call_model = default_call_model
if get_train_loss is None:
get_train_loss = default_get_train_loss
def train_func(engine, batch):
x, y = n2t(batch, dtype=dtype, device=device)
optimizer.zero_grad()
pred = call_model(model, x)
loss = loss_func(pred, y)
train_loss = get_train_loss(loss)
train_loss.backward()
optimizer.step()
return t2n(loss)
return train_func | 5,358,699 |
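# Hedged usage sketch, added for illustration (assumptions: torch and pytorch-ignite are
# installed and the data loader yields (features, targets) numpy batches, as the docstring
# above describes).
import torch
from ignite.engine import Engine

model = torch.nn.Linear(4, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
train_func = make_train_func(model, torch.nn.functional.mse_loss, optimizer,
                             dtype=(torch.float32, torch.float32))
trainer = Engine(train_func)
# trainer.run(data_loader, max_epochs=5)  # data_loader is a hypothetical iterable of (x, y) pairs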