def is_modified(filename: str) -> bool:
    """
    Given a filename, return whether it has been modified
    (i.e. its stored hash differs from the freshly computed one).
    """
    global new_hashes
    global old_hashes
    if filename in old_hashes:
        # Unchanged only if the stored hash matches the newly computed one.
        if old_hashes[filename] == new_hashes[filename]:
            return False
    return True | 2,200 |
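A minimal usage sketch; the module-level hash tables below are hypothetical stand-ins for the ones the real code populates elsewhere:

old_hashes = {"a.txt": "d41d8", "b.txt": "aaaa1"}
new_hashes = {"a.txt": "d41d8", "b.txt": "bbbb2"}
assert is_modified("a.txt") is False   # stored hash matches the new one
assert is_modified("b.txt") is True    # hash changed
assert is_modified("c.txt") is True    # unknown files count as modified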
def _is_url_without_path_query_or_fragment(url_parts):
"""
    Determines if a URL has a blank path (or a bare '/search' path) and no query string or fragment.
:param url_parts: A URL.
:type url_parts: :class:`urlparse.ParseResult`
"""
return url_parts.path.strip('/') in ['', 'search'] and url_parts.query == '' \
and url_parts.fragment == '' | 2,201 |
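A quick sanity check using the standard-library parser (urlparse.urlparse on Python 2, urllib.parse.urlparse on Python 3); the URLs are made up for illustration:

from urllib.parse import urlparse

assert _is_url_without_path_query_or_fragment(urlparse("http://example.com/"))
assert _is_url_without_path_query_or_fragment(urlparse("http://example.com/search"))
assert not _is_url_without_path_query_or_fragment(urlparse("http://example.com/a?q=1"))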
def delay_waterfall(uvp, blpairs, spw, pol, component='abs-real',
average_blpairs=False, fold=False, delay=True,
deltasq=False, log=True, lst_in_hrs=True,
vmin=None, vmax=None, cmap='YlGnBu', axes=None,
figsize=(14, 6), force_plot=False, times=None,
title_type='blpair', colorbar=True, **kwargs):
"""
Plot a 1D delay spectrum waterfall (or spectra) for a group of baselines.
Parameters
----------
uvp : UVPspec
UVPSpec object, containing delay spectra for a set of baseline-pairs,
times, polarizations, and spectral windows.
blpairs : list of tuples or lists of tuples
List of baseline-pair tuples, or groups of baseline-pair tuples.
spw, pol : int or str
Which spectral window and polarization to plot.
component : str
Component of complex spectra to plot, options=['abs', 'real', 'imag', 'abs-real', 'abs-imag'].
abs-real is abs(real(data)), whereas 'real' is real(data)
Default: 'abs-real'.
average_blpairs : bool, optional
If True, average over the baseline pairs within each group.
fold : bool, optional
Whether to fold the power spectrum in :math:`|k_\parallel|`.
Default: False.
delay : bool, optional
Whether to plot the power spectrum in delay units (ns) or cosmological
units (h/Mpc). Default: True.
deltasq : bool, optional
If True, plot dimensionless power spectra, Delta^2. This is ignored if
delay=True. Default: False.
log : bool, optional
Whether to plot the log10 of the data. Default: True.
lst_in_hrs : bool, optional
        If True, LST is plotted in hours; otherwise it is plotted in radians.
vmin, vmax : float, optional
Clip the color scale of the delay spectrum to these min./max. values.
If None, use the natural range of the data. Default: None.
cmap : str, optional
Matplotlib colormap to use. Default: 'YlGnBu'.
axes : array of matplotlib.axes, optional
Use this to pass in an existing Axes object or array of axes, which
the power spectra will be added to. (Warning: Labels and legends will
not be altered in this case, even if the existing plot has completely different axis
labels etc.) If None, a new Axes object will be created. Default: None.
figsize : tuple
len-2 integer tuple specifying figure size if axes is None
force_plot : bool
        If plotting a large number of blpairs (> 20), this routine will raise
        an error unless force_plot is True.
times : array_like, optional
Float ndarray containing elements from time_avg_array to plot.
title_type : str, optional
Type of title to put above plot(s). Options = ['blpair', 'blvec']
blpair : "bls: {bl1} x {bl2}"
blvec : "bl len {len} m & ang {ang} deg"
colorbar : bool, optional
Whether to make a colorbar. Default: True
kwargs : keyword arguments
Additional kwargs to pass to ax.matshow()
Returns
-------
fig : matplotlib.pyplot.Figure
Matplotlib Figure instance if input ax is None.
"""
import matplotlib
import matplotlib.pyplot as plt
# assert component
assert component in ['real', 'abs', 'imag', 'abs-real', 'abs-imag'], "Can't parse specified component {}".format(component)
fix_negval = component in ['real', 'imag'] and log
# Add ungrouped baseline-pairs into a group of their own (expected by the
# averaging routines)
blpairs_in = blpairs
blpairs = [] # Must be a list, not an array
for i, blpgrp in enumerate(blpairs_in):
if not isinstance(blpgrp, list):
blpairs.append([blpairs_in[i],])
else:
blpairs.append(blpairs_in[i])
# iterate through and make sure they are blpair integers
_blpairs = []
for blpgrp in blpairs:
_blpgrp = []
for blp in blpgrp:
if isinstance(blp, tuple):
blp_int = uvp.antnums_to_blpair(blp)
else:
blp_int = blp
_blpgrp.append(blp_int)
_blpairs.append(_blpgrp)
blpairs = _blpairs
# Select times if requested
if times is not None:
uvp = uvp.select(times=times, inplace=False)
# Average over blpairs or times if requested
blpairs_in = copy.deepcopy(blpairs) # Save input blpair list
if average_blpairs:
uvp_plt = uvp.average_spectra(blpair_groups=blpairs,
time_avg=False, inplace=False)
else:
uvp_plt = copy.deepcopy(uvp)
# Fold the power spectra if requested
if fold:
uvp_plt.fold_spectra()
# Convert to Delta^2 units if requested
if deltasq and not delay:
uvp_plt.convert_to_deltasq()
# Get x-axis units (delays in ns, or k_parallel in Mpc^-1 or h Mpc^-1)
if delay:
dlys = uvp_plt.get_dlys(spw) * 1e9 # ns
x = dlys
else:
k_para = uvp_plt.get_kparas(spw)
x = k_para
# Extract power spectra into array
waterfall = odict()
for blgrp in blpairs:
# Loop over blpairs in group and plot power spectrum for each one
for blp in blgrp:
# make key
key = (spw, blp, pol)
# get power data
power = uvp_plt.get_data(key, omit_flags=False)
# set flagged power data to nan
flags = np.isclose(uvp_plt.get_integrations(key), 0.0)
power[flags, :] = np.nan
# get component
if component == 'abs':
power = np.abs(power)
elif component == 'real':
power = np.real(power)
elif component == 'abs-real':
power = np.abs(np.real(power))
elif component == 'imag':
power = np.imag(power)
            elif component == 'abs-imag':
                power = np.abs(np.imag(power))
# if real or imag and log is True, set negative values to near zero
# this is done so that one can use cmap.set_under() and cmap.set_bad() separately
if fix_negval:
power[power < 0] = np.abs(power).min() * 1e-6 + 1e-10
# assign to waterfall
waterfall[key] = power
# If blpairs were averaged, only the first blpair in the group
# exists any more (so skip the rest)
if average_blpairs: break
# check for reasonable number of blpairs to plot...
Nkeys = len(waterfall)
if Nkeys > 20 and force_plot == False:
raise ValueError("Nblps > 20 and force_plot == False, quitting...")
# Take logarithm of data if requested
if log:
for k in waterfall:
waterfall[k] = np.log10(np.abs(waterfall[k]))
logunits = "\log_{10}"
else:
logunits = ""
# Create new Axes if none specified
new_plot = False
if axes is None:
new_plot = True
# figure out how many subplots to make
Nkeys = len(waterfall)
Nside = int(np.ceil(np.sqrt(Nkeys)))
fig, axes = plt.subplots(Nside, Nside, figsize=figsize)
# Ensure axes is an ndarray
    if isinstance(axes, matplotlib.axes.Axes):
axes = np.array([[axes]])
if isinstance(axes, list):
axes = np.array(axes)
# Ensure its 2D and get side lengths
if axes.ndim == 1:
axes = axes[:, None]
assert axes.ndim == 2, "input axes must have ndim == 2"
Nvert, Nhorz = axes.shape
# Get LST range: setting y-ticks is tricky due to LST wrapping...
y = uvp_plt.lst_avg_array[
uvp_plt.key_to_indices(list(waterfall.keys())[0])[1] ]
y = np.unwrap(y)
if y[0] > np.pi:
        # if the start is closer to 2*pi than to 0, shift the axis down by one full cycle
y -= 2 * np.pi
if lst_in_hrs:
lst_units = "Hr"
y *= 24 / (2 * np.pi)
else:
lst_units = "rad"
# get baseline vectors
blvecs = dict(zip([uvp_plt.bl_to_antnums(bl) for bl in uvp_plt.bl_array], uvp_plt.get_ENU_bl_vecs()))
# Sanitize power spectrum units
psunits = uvp_plt.units
if "h^-1" in psunits: psunits = psunits.replace("h^-1", "h^{-1}")
if "h^-3" in psunits: psunits = psunits.replace("h^-3", "h^{-3}")
if "Hz" in psunits: psunits = psunits.replace("Hz", r"{\rm Hz}")
if "str" in psunits: psunits = psunits.replace("str", r"\,{\rm str}")
if "Mpc" in psunits and "\\rm" not in psunits:
psunits = psunits.replace("Mpc", r"{\rm Mpc}")
if "pi" in psunits and "\\pi" not in psunits:
psunits = psunits.replace("pi", r"\pi")
if "beam normalization not specified" in psunits:
psunits = psunits.replace("beam normalization not specified",
r"{\rm unnormed}")
# Iterate over waterfall keys
keys = list(waterfall.keys())
k = 0
for i in range(Nvert):
for j in range(Nhorz):
# set ax
ax = axes[i, j]
# turn off subplot if no more plots to make
if k >= Nkeys:
ax.axis('off')
continue
# get blpair key for this subplot
key = keys[k]
blp = uvp_plt.blpair_to_antnums(key[1])
# plot waterfall
cax = ax.matshow(waterfall[key], cmap=cmap, aspect='auto',
vmin=vmin, vmax=vmax,
extent=[x[0], x[-1], y[-1], y[0]], **kwargs)
# ax config
ax.xaxis.set_ticks_position('bottom')
ax.tick_params(labelsize=12)
if ax.get_title() == '':
if title_type == 'blpair':
ax.set_title("bls: {} x {}".format(*blp), y=1)
elif title_type == 'blvec':
blv = 0.5 * (blvecs[blp[0]] + blvecs[blp[1]])
lens, angs = utils.get_bl_lens_angs([blv], bl_error_tol=1.0)
ax.set_title("bl len {len:0.2f} m & {ang:0.0f} deg".format(len=lens[0], ang=angs[0]), y=1)
# set colorbar
if colorbar:
if fix_negval:
cb_extend = 'min'
else:
cb_extend = 'neither'
cbar = ax.get_figure().colorbar(cax, ax=ax, extend=cb_extend)
cbar.ax.tick_params(labelsize=14)
if fix_negval:
cbar.ax.set_title("$< 0$",y=-0.05, fontsize=16)
# configure left-column plots
if j == 0:
# set yticks
ax.set_ylabel(r"LST [{}]".format(lst_units), fontsize=16)
else:
ax.set_yticklabels([])
# configure bottom-row plots
if k + Nhorz >= Nkeys:
if ax.get_xlabel() == "":
if delay:
ax.set_xlabel(r"$\tau$ $[{\rm ns}]$", fontsize=16)
else:
ax.set_xlabel("$k_{\parallel}\ h\ Mpc^{-1}$", fontsize=16)
else:
ax.set_xticklabels([])
k += 1
# make suptitle
if axes[0][0].get_figure()._suptitle is None:
        if deltasq:
            units = r"$%s\Delta^2$ $[%s]$" % (logunits, psunits)
        else:
            units = r"$%sP(k_\parallel)$ $[%s]$" % (logunits, psunits)
spwrange = np.around(np.array(uvp_plt.get_spw_ranges()[spw][:2]) / 1e6, 2)
axes[0][0].get_figure().suptitle("{}\n{} polarization | {} -- {} MHz".format(units, pol, *spwrange),
y=1.03, fontsize=14)
# Return Axes
if new_plot:
return fig | 2,202 |
def delete_kind_cluster(name):
"""Delete a kind cluster from config."""
config = get_config()
config.remove_section("kind.{}".format(name))
if config.get("kind", "current-cluster", fallback=None):
config.set("kind", "current-cluster", "")
write_config(config) | 2,203 |
def wgs84_distance(lat1, lon1, lat2, lon2):
"""Distance (in meters) between two points in WGS84 coord system."""
dLat = math.radians(lat2 - lat1)
dLon = math.radians(lon2 - lon1)
a = (math.sin(dLat / 2) * math.sin(dLat / 2) +
math.cos(math.radians(lat1)) * math.cos(math.radians(lat2)) *
math.sin(dLon / 2) * math.sin(dLon / 2))
c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
d = EARTH_RADIUS * c
return d | 2,204 |
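A rough usage sketch of the haversine formula above; EARTH_RADIUS is defined elsewhere in the module, so the value below is only an assumption:

import math

EARTH_RADIUS = 6371000  # metres (assumed mean Earth radius)

# Helsinki to Tallinn city centres, roughly 80 km apart.
d = wgs84_distance(60.1699, 24.9384, 59.4370, 24.7536)
print(round(d / 1000), "km")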
def init(vagrant=False):
"""Prepare a local machine for development."""
install_requirements()
local('createdb %(project_name)s' % env) # create postgres database
manage('migrate') | 2,205 |
def optimize_acq_func(acq_func: AcquisitionFunction, bounds=None, options=None):
"""Optimizes the acquisition function"""
# optimize
candidates, _ = optimize_acqf(
acq_function=acq_func,
bounds=bounds,
q=1,
num_restarts=20,
raw_samples=512,
options=options,
)
new_x = candidates.detach()
return new_x | 2,206 |
def _recover_distributor(lb_id):
"""Get cached Distributor object or generate from ovs external_ids
{
'dist-lb-id': lb_id,
'dist-vip': vip,
'dist-size': size,
'dist-status': status,
'dist-mac': mac,
'dist-hash-fields': field-list,
'dist-ofport': ofport, # of external iface
'slot-100': 'amphora_id,mac',
'slot-101': 'amphora_id,mac',
'slot-...': 'amphora_id,mac',
}
"""
if _provision_state.state == DISTRIBUTOR_BOOTING:
msg = _('Error while recovering loadbalancer %(lb)s.'
' Server status is %(status)s'
) % dict(lb=lb_id, status=_provision_state.state)
LOG.error(msg)
raise DistributorUsageError(msg)
if lb_id in _distributors:
return _distributors[lb_id]
ret, out, err = _run_vsctl(
VSCTL_FIND_EXTERNAL_ID.format(key='dist-lb-id',
value=lb_id),
extra_args=[VSCTL_JSON_FORMAT])
if ret != 0:
msg = _('Error while recovering loadbalancer %(lb)s.'
' Find failed with exit_status=%(ret)d'
                '\nstderr=%(err)s'
) % dict(lb=lb_id, ret=ret, err=err)
LOG.error(msg)
_provision_state.go_error(msg)
raise DistributorFatalError(msg)
    # ovs json is a nested [type, value] list
# br_list = {'data': [[br_name,
# ['map',
# [['dist-lb-id', lb_id],
# ['dist-vip', vip],
# ['dist-size', size],
# ['dist-status', status],
# ['dist-mac', mac],
# ['dist-hash-fields', field-list],
# ['dist-ofport', ofport],
# ['slot-100', amphora_id,mac],
# ['slot-101', amphora_id,mac],
# ['slot-...', amphora_id,mac]]]]]
# 'headings': ['name', 'external_ids']}
try:
br_list = json.loads(out)
br_name = br_list['data'][0][0]
br_properties = dict(br_list['data'][0][1][1])
except (ValueError, KeyError, IndexError, TypeError):
msg = _('Error while recovering loadbalancer %(lb)s.'
' Could not parse find results %(out)s.'
) % dict(lb=lb_id, out=out)
LOG.error(msg)
_provision_state.go_error(msg)
raise DistributorFatalError(msg)
found_id = br_properties.pop('dist-lb-id', None)
if lb_id != found_id or len(br_list['data']) != 1:
msg = _('Error while recovering loadbalancer %(lb)s. None or'
' duplicate bridge found. out=%(out)s'
) % dict(lb=lb_id, out=br_list)
LOG.error(msg)
return None
# one error type for all property parsing issues, catch all
# expected errors
try:
vip = netaddr.IPAddress(br_properties.pop('dist-vip'))
size = int(br_properties.pop('dist-size'))
status = br_properties.pop('dist-status')
assert status in (ONLINE, DEGRADED, ERROR, NO_MONITOR)
mac = netaddr.EUI(br_properties.pop('dist-mac'),
dialect=netaddr.mac_unix)
iface = _interface_by_mac(mac)
hash_selection_fields = br_properties.pop(
'dist-hash-fields').split(',')
ofport = int(br_properties.pop('dist-ofport'))
except (AssertionError, KeyError, ValueError, UnicodeDecodeError,
AddrFormatError, TypeError, IndexError,
NotImplementedError, AddrConversionError, StopIteration):
# we have a bridge name so we should try to delete it
ret, out, err = _run_vsctl(VSCTL_DEL_BR.format(br_name))
killed = 'killed' if ret == 0 else 'kill failed: stderr=%s' % err
msg = _('Error while recovering loadbalancer %(lb)s.'
' bad bridge properties %(props)s.'
' Killing bridge %(kill_msg)s'
) % dict(lb=lb_id, props=br_properties, kill_msg=killed)
LOG.error(msg)
raise DistributorInstanceError(msg)
distributor = _Distributor(name=br_name, lb_id=lb_id, vip=vip,
mac=mac, iface=iface, size=size)
for slot in range(DST_GROUPS_OFFSET, DST_GROUPS_OFFSET + size):
slot_key = SLOT_KEY_FORMAT.format(slot)
if slot_key in br_properties:
amphora_id, amphora_mac = br_properties[slot_key].split(',')
# mac = netaddr.EUI(amphora_mac, dialect=netaddr.mac_unix)
distributor.destinations[amphora_id] = slot, amphora_mac
else:
distributor.free_slots.add(slot)
distributor.hash_selection_fields = hash_selection_fields
distributor.fail = (ERROR == status)
distributor.ofport = ofport
_distributors[lb_id] = distributor
return distributor | 2,207 |
def rprecision_score(
y_true, y_pred, ratio: float = 1.0, negative_class=-1,
zero_division: Literal["warn", 0, 1] = "warn"
):
"""Calculate r-precision score for multiclass classification.
The variables y_true and y_pred are the true and predicted labels
respectively. The variable ratio defines the expected number of
samples in the negative class relative to the foreground class.
See the paper:
T. Wang, "High Precision Open-World Website Fingerprinting," in
2020 IEEE Symposium on Security and Privacy (SP), Los Alamitos,
CA, USA, 2020, pp. 231–246, doi: 10.1109/SP.2020.00015.
for more information.
"""
# pylint: disable=too-many-locals
logger = logging.getLogger(__name__)
pos_labels = (y_true != negative_class)
pos_predictions = (y_pred != negative_class)
n_true_positive = np.sum(pos_labels & (y_true == y_pred))
logger.debug("n_true_positive: %d", n_true_positive)
# Positive predictions which were not correct for positive classes
n_wrong_positive = np.sum(pos_labels & pos_predictions & (y_true != y_pred))
n_false_positive = np.sum(~pos_labels & pos_predictions)
logger.debug("n_wrong_positive: %d, n_false_positive: %d",
n_wrong_positive, n_false_positive)
n_positive = np.sum(pos_labels)
n_negative = len(y_true) - n_positive
logger.debug("n_positive: %d, n_negative: %d", n_positive, n_negative)
true_positive_rate = n_true_positive / n_positive
wrong_positive_rate = n_wrong_positive / n_positive
false_positive_rate = n_false_positive / n_negative
if n_true_positive == n_wrong_positive == n_false_positive == 0:
if zero_division == "warn":
warnings.warn("Attempted division by zero in rprecision. "
"Returning 0 instead.", RuntimeWarning)
zero_division = 0
return zero_division
logger.debug("r_%d-precision = %.3g / (%.3g + %.3g + %d * %.3g)",
ratio, true_positive_rate, true_positive_rate,
wrong_positive_rate, ratio, false_positive_rate)
return true_positive_rate / (
true_positive_rate + wrong_positive_rate + ratio * false_positive_rate) | 2,208 |
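A small worked example with made-up open-world labels, where -1 marks the negative (unmonitored) class:

import numpy as np

y_true = np.array([0, 1, 2, -1, -1, -1])
y_pred = np.array([0, 2, 2, -1, 0, -1])
# true-positive rate = 2/3, wrong-positive rate = 1/3, false-positive rate = 1/3,
# so with ratio=1: (2/3) / (2/3 + 1/3 + 1 * 1/3) = 0.5
print(rprecision_score(y_true, y_pred, ratio=1.0))  # 0.5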
def export_gpkg(dataframes, gpkg_path):
"""Receives a dictionary of pandas dataframes and exports them as geopackage layers."""
# Create gpkg from template if it doesn't already exist.
if not os.path.exists(gpkg_path):
copy(os.path.abspath("../data/empty.gpkg"), gpkg_path)
# Export target dataframes to GeoPackage layers.
try:
for name, gdf in dataframes.items():
logger.info("Writing to GeoPackage {}, layer={}.".format(gpkg_path, name))
# Spatial data.
if "geometry" in dir(gdf):
# Open GeoPackage.
with fiona.open(gpkg_path, "w", layer=name, driver="GPKG", crs=gdf.crs,
schema=gpd.io.file.infer_schema(gdf)) as gpkg:
# Write to GeoPackage.
gpkg.writerecords(gdf.iterfeatures())
# Tabular data.
else:
# Create sqlite connection.
con = sqlite3.connect(gpkg_path)
# Write to GeoPackage.
gdf.to_sql(name, con)
# Insert record into gpkg_contents metadata table.
con.cursor().execute("insert into 'gpkg_contents' ('table_name', 'data_type') values "
"('{}', 'attributes');".format(name))
# Commit and close db connection.
con.commit()
con.close()
logger.info("Successfully exported layer.")
except (ValueError, fiona.errors.FionaValueError):
logger.exception("ValueError raised when writing GeoPackage layer.")
sys.exit(1) | 2,209 |
def setup_dispatcher(dp):
"""
Adding handlers for events from Telegram
"""
# commands
dp.add_handler(CommandHandler("start", commands.command_start))
dp.add_handler(CommandHandler("help", commands.command_help))
# admin & mod commands
dp.add_handler(CommandHandler("admin", admin.admin_command))
dp.add_handler(CommandHandler("bot_stats", admin.bot_user_stats))
dp.add_handler(CommandHandler(f"{broadcast_command[1:]}", broadcast_command_with_message))
dp.add_handler(CommandHandler('add_mod', admin.add_moderator))
dp.add_handler(CommandHandler('remove_mod', admin.remove_moderator))
# conversations
pass
# callback queries
dp.add_handler(CallbackQueryHandler(broadcast_decision_handler, pattern=f"^{CONFIRM_DECLINE_BROADCAST}"))
return dp | 2,210 |
def get_version():
"""Gets the current version"""
_version_re = re.compile(r"__VERSION__\s+=\s+(.*)")
with open("leaked/__init__.py", "rb") as init_file:
version = str(ast.literal_eval(_version_re.search(
init_file.read().decode("utf-8")).group(1)))
return version | 2,211 |
def dir_keys(path):
    """Take a path and return a list of all the numbers in the path, as floats.
    This is mainly used for sorting paths by the parameters they contain."""
    regex = r'[-+]?[0-9]+(?:\.[0-9]+)?(?:[eE][-+]?[0-9]+)?'  # matches any floating-point number
    m = re.findall(regex, path)
    if not m:
        raise ValueError('Your path does not contain any numbers')
    val = list(map(float, m))
    return val | 2,212 |
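For example, sorting hypothetical run directories by the parameter values embedded in their names:

paths = ['run_T0.5_N100', 'run_T1.0_N50', 'run_T0.25_N100']
print(sorted(paths, key=dir_keys))
# ['run_T0.25_N100', 'run_T0.5_N100', 'run_T1.0_N50']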
def generate_data(n=5, T=1000, random_state=None, initial_data=None):
"""
Parameter
---------
n : int
number of variables
T : int
number of samples
random_state : int
seed for np.random.seed
    initial_data : dict of np.ndarray
        dictionary of initial matrices: 'B0', 'B1' and 'causal_order'
"""
T_spurious = 20
expon = 1.5
if initial_data is None:
permutation = np.random.permutation(n)
value = np.random.uniform(low=0.05, high=0.5, size=(n, n))
sign = np.random.choice([-1, 1], size=(n, n))
B0 = np.multiply(value, sign)
B0 = np.multiply(B0, np.random.binomial(1, 0.4, size=(n, n)))
B0 = np.tril(B0, k=-1)
B0 = B0[permutation][:, permutation]
value = np.random.uniform(low=0.05, high=0.5, size=(n, n))
sign = np.random.choice([-1, 1], size=(n, n))
B1 = np.multiply(value, sign)
B1 = np.multiply(B1, np.random.binomial(1, 0.4, size=(n, n)))
causal_order = np.empty(len(permutation))
causal_order[permutation] = np.arange(len(permutation))
causal_order = causal_order.astype(int)
else:
B0 = initial_data['B0']
B1 = initial_data['B1']
        causal_order = initial_data['causal_order']
    M1 = np.dot(np.linalg.inv(np.eye(n) - B0), B1)
    ee = np.empty((n, T + T_spurious))
    for i in range(n):
        ee[i, :] = np.random.normal(size=(1, T + T_spurious))
        ee[i, :] = np.multiply(np.sign(ee[i, :]), abs(ee[i, :]) ** expon)
        ee[i, :] = ee[i, :] - np.mean(ee[i, :])
        ee[i, :] = ee[i, :] / np.std(ee[i, :])
    std_e = np.random.uniform(size=(n,)) + 0.5
    nn = np.dot(np.dot(np.linalg.inv(np.eye(n) - B0), np.diag(std_e)), ee)
    xx = np.zeros((n, T + T_spurious))
    xx[:, 0] = np.random.normal(size=(n, ))
    for t in range(1, T + T_spurious):
        xx[:, t] = np.dot(M1, xx[:, t - 1]) + nn[:, t]
    data = xx[:, T_spurious + 1 : T_spurious + T]
return data.T, B0, B1, causal_order | 2,213 |
def _get_paragraphs(paragraphs: List[str]) -> List[str]:
"""
Returns the paragraphs of an article's body, annotated with HTML tags.
Args:
paragraphs (:obj:`List[str]`):
List of strings denoting paragraphs.
Returns:
:obj:`List[str]`:
List of paragraphs annotated with HTML tags.
"""
paragraphs = [_add_html_tag(paragraph, 'p') for paragraph in paragraphs if not re.findall('trends.embed.renderExploreWidget', paragraph)]
return paragraphs | 2,214 |
def calculate_kde(
ascending: bool = True,
evaluate: bool = False,
input_ts="-",
columns=None,
start_date=None,
end_date=None,
clean=False,
skiprows=None,
index_type="datetime",
source_units=None,
target_units=None,
names=None,
):
"""Return the kernel density estimation (KDE) curve."""
tsd = tsutils.common_kwds(
input_ts,
skiprows=skiprows,
names=names,
index_type=index_type,
start_date=start_date,
end_date=end_date,
pick=columns,
source_units=source_units,
target_units=target_units,
clean=clean,
)
if len(tsd.columns) > 1:
raise ValueError(
tsutils.error_wrapper(
"""
Right now "calculate_kde" only support one time-series at a time.
You gave {}.
""".format(
tsd.columns
)
)
)
from scipy.stats import gaussian_kde
tmptsd = tsd.dropna()
ndf = tmptsd.sort_values(tmptsd.columns[0], ascending=ascending)
gkde = gaussian_kde(ndf.iloc[:, 0])
if evaluate is True:
y = gkde.evaluate(tmptsd.iloc[:, 0])
ndf = pd.DataFrame(y, index=tmptsd.index)
else:
y = gkde.evaluate(ndf.iloc[:, 0])
ndf = pd.DataFrame(y)
return ndf | 2,215 |
def EPmulk(a, da, k):
"""
C = A * k
"""
return a * k, np.absolute(da * k) | 2,216 |
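A one-line usage sketch of the error propagation above: scaling a measurement of 3.0 ± 0.2 by k = -2 gives -6.0 ± 0.4.

value, err = EPmulk(3.0, 0.2, -2.0)   # (-6.0, 0.4)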
def addDeterminants(iterative_interactions, version, options=None):
"""
The iterative pKa scheme. Later it is all added in 'calculateTotalPKA'
"""
# --- setup ---
iteratives = []
done_residue = []
#debug.printIterativeDeterminants(iterative_interactions)
# creating iterative objects with references to their real residue counterparts
for interaction in iterative_interactions:
pair = interaction[0]
for residue in pair:
if residue in done_residue:
#print "done already"
""" do nothing - already have an iterative object for this residue """
else:
newIterative = Iterative(residue)
iteratives.append(newIterative)
done_residue.append(residue)
# Initialize iterative scheme
if options.print_iterations == True:
pka_print("\n --- pKa iterations (%d residues, %d interactions) ---" % ( len(iteratives), len(iterative_interactions) ))
converged = False
iteration = 0
for itres in iteratives:
itres.pKa_iter.append(itres.pKa_NonIterative)
# --- starting pKa iterations ---
while converged == False:
# initialize pKa_new
iteration += 1
for itres in iteratives:
itres.determinants = [[], [], []]
itres.pKa_new = itres.pKa_NonIterative
# Adding interactions to temporary determinant container
for interaction in iterative_interactions:
pair = interaction[0]
values = interaction[1]
annihilation = interaction[2]
#print "len(interaction) = %d" % (len(interaction))
object1, object2 = findIterative(pair, iteratives)
Q1 = object1.Q
Q2 = object2.Q
if Q1 < 0.0 and Q2 < 0.0:
""" both are acids """
addIterativeAcidPair(object1, object2, interaction)
elif Q1 > 0.0 and Q2 > 0.0:
""" both are bases """
addIterativeBasePair(object1, object2, interaction)
else:
""" one of each """
addIterativeIonPair(object1, object2, interaction, version)
# Calculating pKa_new values
for itres in iteratives:
for type in range(0,3):
for determinant in itres.determinants[type]:
itres.pKa_new += determinant[1]
# Check convergence
converged = True
for itres in iteratives:
if itres.pKa_new == itres.pKa_old:
itres.converged = True
else:
itres.converged = False
converged = False
# reset pKa_old & storing pKa_new in pKa_iter
for itres in iteratives:
itres.pKa_old = itres.pKa_new
itres.pKa_iter.append(itres.pKa_new)
if iteration == 10:
pka_print("did not converge in %d iterations" % (iteration))
break
# --- Iterations finished ---
# printing pKa iterations
if options.print_iterations == True:
str = "%12s" % (" ")
for index in range(0, iteration+1 ):
str += "%8d" % (index)
pka_print(str)
for itres in iteratives:
str = "%s " % (itres.label)
for pKa in itres.pKa_iter:
str += "%8.2lf" % (pKa)
if itres.converged == False:
str += " *"
pka_print(str)
# creating real determinants and adding them to residue object
for itres in iteratives:
for type in range(0,3):
for interaction in itres.determinants[type]:
value = interaction[1]
if value > 0.005 or value < -0.005:
label = interaction[0]
newDeterminant = Determinant(label, value)
itres.residue.determinants[type].append(newDeterminant) | 2,217 |
def configuration(parent_package='', top_path=None):
"""
A utility function from numpy.distutils.misc_util to compile Fortran and C
codes. This function will be passed to numpy.distutil.core.setup().
"""
config = Configuration(None, parent_package, top_path)
# Define extern directory where external libraries source codes are.
package_name = 'special_functions'
extern_dir_name = '_extern'
extern_dir = os.path.join('.', package_name, extern_dir_name)
macros = []
if sys.platform == 'win32':
macros.append(('_USE_MATH_DEFINES', None))
# amos (fortran library)
config.add_library(
'amos',
sources=[
os.path.join(extern_dir, 'amos', 'mach', '*.f'),
os.path.join(extern_dir, 'amos', 'double_precision', '*.f'),
os.path.join(extern_dir, 'amos', 'single_precision', '*.f')
],
macros=macros)
# cephes (c library)
config.add_library(
'cephes',
sources=[
os.path.join(extern_dir, 'cephes', 'bessel', '*.c'),
os.path.join(extern_dir, 'cephes', 'cprob', '*.c'),
os.path.join(extern_dir, 'cephes', 'eval', '*.c'),
os.path.join(extern_dir, 'cephes', 'cmath', '*.c')
],
include_dirs=[
os.path.join(extern_dir, 'cephes', 'eval')
],
macros=macros)
    # If the environment variable "CYTHON_BUILD_IN_SOURCE" exists, Cython creates
    # the *.c files in the source tree, otherwise in /build.
cython_build_in_source = os.environ.get('CYTHON_BUILD_IN_SOURCE', None)
if bool(cython_build_in_source):
cython_build_dir = None # builds *.c in source alongside *.pyx files
else:
cython_build_dir = 'build'
    # Cythonize *.pyx files to generate *.c files.
extensions = cythonize(
os.path.join('.', package_name, '*.pyx'),
build_dir=cython_build_dir,
include_path=[os.path.join('.', package_name)],
language_level="3",
compiler_directives={
'boundscheck': False,
'cdivision': True,
'wraparound': False,
'nonecheck': False,
'embedsignature': True,
'linetrace': True
})
# Add extensions to config per each *.c file
for extension in extensions:
config.add_extension(
extension.name,
sources=extension.sources,
include_dirs=extension.include_dirs,
libraries=['amos', 'cephes'],
language=extension.language,
define_macros=macros)
# Additional files, particularly, the API files to (c)import (*.pxd, *.py)
config.add_data_files(os.path.join(package_name, '*.pxd')) # cython API
config.add_data_files(os.path.join(package_name, '*.py')) # python API
config.add_data_files((package_name, 'LICENSE.txt'))
config.add_data_files((package_name, 'AUTHORS.txt'))
config.add_data_files((package_name, 'README.rst'))
config.add_data_files((package_name, 'CHANGELOG.rst'))
return config | 2,218 |
def main(args):
"""Main function for adding diffusion error to
a KDE of buoyant density values.
Parameters:
args : dict
See ``diffusion`` subcommand
"""
kde2d = Utils.load_kde(args['<fragment_kde>'])
    # creating a diffusion index of Gaussian distributions
start,stop,step = parse_bin_range(args['--BD_range'])
BD_bins = np.arange(start, stop, step)
start,stop,step = parse_bin_range(args['--len_range'])
len_bins = np.arange(start, stop, step)
diff_index = create_diff_index(BD_bins, len_bins,
method=args['-m'],
B=float(args['-B']),
D=float(args['-D']),
w=float(args['-w']),
r_min=float(args['--r_min']),
r_max=float(args['--r_max']),
t=int(args['-t']),
T=float(args['-T']),
G=float(args['-G']),
M=float(args['-M']))
diff_index_file = args['--index_out']
with open(diff_index_file, 'wb') as outFH:
dill.dump(diff_index, outFH)
    # diffusion calc in parallel
pfunc = partial(make_kde,
diff_index=diff_index_file,
BD_bins=BD_bins,
len_bins=len_bins,
n = int(args['-n']),
bw_method=args['--bw'])
## pool
pool = ProcessingPool(nodes=int(args['--np']))
if args['--debug']:
KDE_BD = map(pfunc, kde2d.items())
else:
KDE_BD = pool.map(pfunc, kde2d.items())
# pickling output
dill.dump({taxon:KDE for taxon,KDE in KDE_BD}, sys.stdout) | 2,219 |
def load_plugins():
""" Helper function that attempts to load all the plugins """
# provide some info about the env in use
import platform
log.debug("Python %s %s on %s %s (%s)" % (platform.python_version(), platform.architecture()[0],
platform.uname()[0], platform.uname()[2], platform.uname()[4]))
import numpy
log.debug("numpy %s" % numpy.__version__)
log.debug("matplotlib %s" % matplotlib.__version__)
log.debug("wxPython %s" % wx.__version__)
from hdf_compass import compass_model
try:
from hdf_compass import filesystem_model
except ImportError:
log.warning("Filesystem plugin: NOT loaded")
try:
from hdf_compass import array_model
except ImportError:
log.warning("Array plugin: NOT loaded")
try:
from hdf_compass import hdf5_model
import h5py
log.debug("h5py %s" % h5py.__version__)
except ImportError:
log.warning("HDF5 plugin: NOT loaded")
try:
from hdf_compass import bag_model
from hydroffice import bag
from lxml import etree
log.debug("hydroffice.bag %s" % bag.__version__)
log.debug("lxml %s (libxml %s, libxslt %s)"
% (etree.__version__, ".".join(str(i) for i in etree.LIBXML_VERSION),
".".join(str(i) for i in etree.LIBXSLT_VERSION)))
except (ImportError, OSError):
log.warning("BAG plugin: NOT loaded")
try:
from hdf_compass import asc_model
except ImportError:
log.warning("Ascii grid plugin: NOT loaded")
try:
from hdf_compass import opendap_model
from pydap import lib
log.debug("pydap %s (protocol %s)"
% (".".join(str(i) for i in lib.__version__), ".".join(str(i) for i in lib.__dap__)))
except ImportError:
log.warning("Opendap plugin: NOT loaded")
try:
from hdf_compass import hdf5rest_model
except ImportError:
log.warning("HDF5 REST plugin: NOT loaded")
try:
from hdf_compass import adios_model
import adios
log.debug("ADIOS %s" % adios.__version__)
except ImportError:
log.warning("ADIOS plugin: NOT loaded") | 2,220 |
def prepare_recent_years():
"""
Splits PUMS data dictionaries for recent years, and creates Values
dictionary JSON files and types json file.
"""
dictionaries = set([])
for year in recent_years:
if year > 2017:
dictionaries.add(year)
elif year > 2012:
dictionaries.add(2013)
for dictionary in dictionaries:
split_original_dictionary(dictionary)
create_values_json(dictionary)
define_data_types() | 2,221 |
def show_plot(pyplt=plt, prompt=''):
"""
Close and display the current plot. Matplotlib wrapper.
This function allows a caller to finish and display a plot
without needing to import the matplotlib library separately.
:Parameters:
pyplt: matplotlib pyplot object, optional
A top level matplotlib pyplot object.
Defaults to the pyplot object imported by the miriplot module.
prompt: str, optional
An optional string, which may be printed when the plot
is displayed. (Program execution may halt until the plot
window is closed.)
"""
if pyplt is not None:
if prompt:
print( prompt )
# Display the current plot and then clear and close the current figure.
pyplt.show()
pyplt.clf()
pyplt.close()
else:
logger.warning("matplotlib.pyplot not available") | 2,222 |
def decrypt_location(location):
"""Decrypts the `location` field in Xiami responses to URL."""
if not location:
return None
rows, url = int(location[:1]), location[1:]
urllen = len(url)
cols_base = urllen // rows # basic column count
rows_ex = urllen % rows # count of rows that have 1 more column
matrix = []
for r in range(rows):
length = cols_base + 1 if r < rows_ex else cols_base
matrix.append(url[:length])
url = url[length:]
url = ''
for i in range(urllen):
url += matrix[i % rows][i // rows]
return parse.unquote(url).replace('^', '0') | 2,223 |
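A toy round-trip (not a real Xiami response): the leading '2' means two rows, and 'acebdf' read back column-wise restores 'abcdef'.

assert decrypt_location('2acebdf') == 'abcdef'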
def select_regularization_parameter(n_samples: int = 50, n_evaluations: int = 500):
"""
Using sklearn's diabetes dataset use cross-validation to select the best fitting regularization parameter
values for Ridge and Lasso regressions
Parameters
----------
n_samples: int, default=50
Number of samples to generate
n_evaluations: int, default = 500
Number of regularization parameter values to evaluate for each of the algorithms
"""
# Question 6 - Load diabetes dataset and split into training and testing portions
X, y = datasets.load_diabetes(return_X_y=True)
X_train, y_train, X_test, y_test = X[:n_samples], y[:n_samples], X[n_samples:], y[n_samples:]
# Question 7 - Perform CV for different values of the regularization parameter for Ridge and Lasso regressions
fig = go.Figure()
rig_test_errors = np.zeros(n_evaluations)
rig_train_errors = np.zeros(n_evaluations)
las_test_errors = np.zeros(n_evaluations)
las_train_errors = np.zeros(n_evaluations)
for i in range(n_evaluations):
reg_param = 2*i / n_evaluations
rig_train_errors[i], rig_test_errors[i] = cross_validate(RidgeRegression(reg_param), X_train, y_train, mean_square_error, 5)
las_train_errors[i], las_test_errors[i] = cross_validate(Lasso(reg_param), X_train, y_train, mean_square_error, 5)
x_axis = np.linspace(0, 2, n_evaluations)
fig.add_trace(go.Scatter(x=x_axis, y=rig_train_errors,
mode="lines",name="Ridge Train Error",
line=dict(color="blue", width=2)))
fig.add_trace(go.Scatter(x=x_axis, y=rig_test_errors,
mode="lines",name="Ridge Test Error",
line=dict(color="red", width=2)))
fig.add_trace(go.Scatter(x=x_axis, y=las_train_errors,
mode="lines",name="Lasso Train Error",
line=dict(color="green", width=2)))
fig.add_trace(go.Scatter(x=x_axis, y=las_test_errors,
mode="lines",name="Lasso Test Error",
line=dict(color="orange", width=2)))
fig.update_layout(title="Fitting Ridge and Lasso Regressions",
margin=dict(t=100))
fig.show()
# Question 8 - Compare best Ridge model, best Lasso model and Least Squares model
reg_param_func = lambda x: 2*x / n_evaluations
best_reg_param = reg_param_func(np.argmin(rig_test_errors))
best_lasso_param = reg_param_func(np.argmin(las_test_errors))
reg = RidgeRegression(float(best_reg_param)).fit(X_train, y_train)
lasso = Lasso(best_lasso_param).fit(X_train, y_train)
LS = LinearRegression().fit(X_train, y_train)
print(f"Ridge Regression: {mean_square_error(y_test, reg.predict(X_test))}")
print(f"Lasso Regression: {mean_square_error(y_test, lasso.predict(X_test))}")
print(f"Least Squares: {mean_square_error(y_test, LS.predict(X_test))}") | 2,224 |
def upgrade(db_url: str = DEFAULT_DB, revision='head', cmd_opts=None):
"""Upgrade the given database to revision.
db_url: str [default: 'sqlite:////tmp/ngshare.db']
The SQLAlchemy database url, e.g. `sqlite:///ngshare.db`.
revision: str [default: head]
The alembic revision to upgrade to.
"""
alembic.command.upgrade(get_alembic_config(db_url, cmd_opts), revision) | 2,225 |
def start(sleep: float = 0) -> None:
"""Run MocaVirtualDM in background."""
mzk.sleep(sleep)
mzk.call(
f'nohup {mzk.executable} "{core.TOP_DIR.joinpath("moca.py")}" run &> /dev/null &',
shell=True
) | 2,226 |
def test_should_parse_word2vec_with_single_entry(load_embedding_func, tmp_path):
"""Loading a Word2Vec Embedding should pass for single word"""
# GIVEN
word2vec_path = create_tmp_word_embedding(
tmp_path,
"""
1 2
word 1.0 2.0
""",
)
# WHEN
embedding = load_embedding_func(word2vec_path)
# THEN
assert embedding.get_words() == ["word"]
assert np.array_equal(embedding.get_word_vector("word"), np.array([1.0, 2.0])) | 2,227 |
def sqd_yinfast(samples):
""" compute approximate sum of squared difference
Using complex convolution (fast, cost o(n*log(n)) )"""
# yin_t(tau) = (r_t(0) + r_(t+tau)(0)) - 2r_t(tau)
B = len(samples)
W = B//2
yin = np.zeros(W)
sqdiff = np.zeros(W)
kernel = np.zeros(B)
# compute r_(t+tau)(0)
squares = samples**2
for tau in range(W):
sqdiff[tau] = squares[tau:tau+W].sum()
# add r_t(0)
sqdiff += sqdiff[0]
# compute r_t(tau) using kernel convolution in complex domain
samples_fft = np.fft.fft(samples)
kernel[1:W+1] = samples[W-1::-1] # first half, reversed
kernel_fft = np.fft.fft(kernel)
r_t_tau = np.fft.ifft(samples_fft * kernel_fft).real[W:]
# compute yin_t(tau)
yin = sqdiff - 2 * r_t_tau
return yin | 2,228 |
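A quick self-check on synthetic input: a 100 Hz sine sampled at 8 kHz should make the difference function dip to ~0 at a lag of 80 samples.

import numpy as np

sr, f0 = 8000, 100.0
samples = np.sin(2 * np.pi * f0 * np.arange(2048) / sr)
yin = sqd_yinfast(samples)
lag = np.argmin(yin[1:120]) + 1   # search below 120 samples to skip the trivial minimum at lag 0
print(lag, sr / lag)              # ~80 samples -> ~100 Hz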
def get_colours_extend(graph_size, start_set, end_set, source, target, reachable=None):
"""
Get colours for nodes including source and target nodes.
Blue nodes are those in the source set.
Orange nodes are those in the start set, not in the source set.
Green nodes are those reachable from the source that are in target.
Red nodes are those in target that are not reachable from the source.
All other nodes are grey.
"""
# Setup the colours
c = []
if reachable is None:
reachable = end_set
for acc_val in range(graph_size):
if acc_val in start_set:
if acc_val in source:
c.append("dodgerblue")
else:
c.append("darkorange")
elif acc_val in target:
if acc_val in reachable:
c.append("g")
else:
c.append("r")
else:
c.append("gray")
return c | 2,229 |
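An illustrative call on a hypothetical six-node graph:

colours = get_colours_extend(
    graph_size=6, start_set={0, 1}, end_set={4, 5},
    source={0}, target={4, 5}, reachable={4},
)
# ['dodgerblue', 'darkorange', 'gray', 'gray', 'g', 'r']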
def init_signals(sig_handler):
"""Set exit handler."""
signal.signal(signal.SIGTERM, sig_handler)
signal.signal(signal.SIGINT, sig_handler) | 2,230 |
def test_re_dg7_re_dg7_v(mode, save_output, output_format):
"""
TEST :branch : base='gMonth', pattern='[123456789]|(10|11|12)',
value='9', type='valid', RULE=''
"""
assert_bindings(
schema="msData/regex/reDG7.xsd",
instance="msData/regex/reDG7.xml",
class_name="Doc",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
) | 2,231 |
def suspend_circuit():
"""
Suspends the circuits for some seconds, allowing the user to exit the house without playing the song.
"""
circuit.suspend()
return render_template("suspend.html", seconds=EXIT_HOUSE_TIMER, name=get_guest_name()) | 2,232 |
def get_scalar_data_from_path(udatapath, name='pressure', x0=0, x1=None, y0=0, y1=None, z0=0, z1=None,
t0=0, t1=None, inc=1, frame=None, return_xy=False, verbose=True,
slicez=None, crop=None, mode='r',
reverse_x=False, reverse_y=False, reverse_z=False):
"""
    Returns scalar data read from a udata path
    ... Scalar fields such as temperature or pressure may also be stored in udata.h5;
    ... this function serves as a reader for such quantities
    If return_xy is True, it also returns xx (2D grid) and yy (2D grid), and zz when the data are 3D
Parameters
----------
udatapath: str, a path to udata
name: str, name of the dataset in the udata h5
x0: int
x1: int
y0: int
y1: int
t0: int
t1: int
inc: int
time increment of data to load from udatapath, default: 1
frame: array-like or int, default: None
If an integer is given, it returns a velocity field at that instant of time
If an array or a list is given, it returns a velocity field at the given time specified by the array/list.
By default, it loads data by a specified increment "inc".
If "frame" is given, it is prioritized over the incremental loading.
    return_xy: bool, default: False
verbose: bool
If True, return the time it took to load udata to memory
Returns
-------
    pdata (and optionally xx, yy, and zz if the data are 3D)
"""
f = h5py.File(udatapath, 'r')
keys = list(f.keys())
f.close()
###
    if name not in keys:
raise ValueError('%s does not exist in the given path' % name)
else:
if verbose:
tau0 = time_mod.time()
print('... reading %s from the path' % name)
if crop is not None and [x0, x1, y0, y1, z0, z1] == [0, None, 0, None, 0, None]:
x0, x1, y0, y1, z0, z1 = crop, -crop, crop, -crop, crop, -crop
if mode == 'w' or mode == 'wb':
raise ValueError('... w was passed to h5Py.File(...) which would delete the file if it exists. \n'
'Probably, this is not what you want. Pass r for read-only')
with h5py.File(udatapath, 'r') as f:
if 'z' in f.keys():
dim = 3
else:
dim = 2
if dim == 2:
if frame is None:
pdata = f[name][y0:y1, x0:x1, t0:t1:inc]
else:
frame = np.asarray(frame)
pdata = f[name][y0:y1, x0:x1, frame]
if return_xy:
xx, yy = f['x'][y0:y1, x0:x1], f['y'][y0:y1, x0:x1]
elif dim == 3:
if frame is None and slicez is None:
pdata = f[name][y0:y1, x0:x1, z0:z1, t0:t1:inc]
elif frame is None and slicez is not None:
pdata = f[name][y0:y1, x0:x1, slicez, t0:t1:inc]
elif frame is not None and slicez is not None:
frame = np.asarray(frame)
pdata = f[name][y0:y1, x0:x1, slicez, frame]
else:
frame = np.asarray(frame)
pdata = f[name][y0:y1, x0:x1, z0:z1, frame]
if return_xy:
if slicez is None:
xx, yy, zz = f['x'][y0:y1, x0:x1, z0:z1], f['y'][y0:y1, x0:x1, z0:z1], f['z'][y0:y1, x0:x1,
z0:z1]
else:
xx, yy, zz = f['x'][y0:y1, x0:x1, slicez], f['y'][y0:y1, x0:x1, slicez], f['z'][0, 0, slicez]
tau1 = time_mod.time()
if verbose:
print('... time took to load udata in sec: ', tau1 - tau0)
if return_xy:
if dim == 2:
if reverse_x:
pdata[...] = pdata[:, ::-1, :]
xx[...] = xx[:, ::-1]
yy[...] = yy[:, ::-1]
if reverse_y:
                pdata[...] = pdata[::-1, :, :]
xx[...] = xx[::-1, :]
yy[...] = yy[::-1, :]
return pdata, xx, yy
elif dim == 3:
if reverse_x:
pdata[...] = pdata[:, ::-1, :, :]
xx[...] = xx[:, ::-1, :]
yy[...] = yy[:, ::-1, :]
zz[...] = zz[:, ::-1, :]
if reverse_y:
pdata[...] = pdata[::-1, :, :, :]
xx[...] = xx[::-1, :, :]
yy[...] = yy[::-1, :, :]
zz[...] = zz[::-1, :, :]
if reverse_z:
pdata[...] = pdata[:, :, ::-1, :]
xx[...] = xx[:, :, ::-1]
yy[...] = yy[:, :, ::-1]
zz[...] = zz[:, :, ::-1]
return pdata, xx, yy, zz
else:
return pdata | 2,233 |
def create_transformed_df(old_df, elem_list, features_list):
"""elem_list should be in type list"""
from statistics import mean
new_dict = {}
for index, elems in zip(old_df.index, old_df[elem_list]):
for elem in elems:
if elem in new_dict.keys():
for j, feature in enumerate(features_list):
new_dict[elem][j].append(float(old_df.loc[index, feature]))
else:
new_dict[elem] = [[] for i in range(len(features_list))]
for j, feature in enumerate(features_list):
new_dict[elem][j].append(float(old_df.loc[index, feature]))
headers = [elem_list]
for i in features_list:
headers.append(f'avg_movie_{i}')
    headers.append('number_of_movies')  # number of movies contributing to each element's averages
new_df = pd.DataFrame(columns=headers)
for key in new_dict:
row = []
row.append(key)
for i, col in enumerate(headers[1:-1]):
mean_val = mean(new_dict[key][i])
row.append(mean_val)
num = len(new_dict[key][0])
row.append(num)
length = len(new_df)
new_df.loc[length] = row
return new_df | 2,234 |
def sem_id_semester_get(semester, obs_id):
"""
retrieves all the sem_id associated with an observer for the semester.
:param semester: semester id
:type semester: str
:param obs_id: observer id
:type obs_id: int
:rtype: List[str]
"""
semester_list = []
sem_ids = utils.get_proposal_ids(obs_id)
for semid in sem_ids:
if semester in semid:
semester_list.append(semid)
return semester_list | 2,235 |
def getLabels (dataMatrix, classOfInterest):
"""
Gets labels on a per class basis that will inputted to the randomForest function
Parameters
----------
dataMatrix : anndata object
The data file of interest
classOfInterest : str
The class you will split the data by in the set of dataMatrix.obs
Returns
-------
labelsDict : dict
Dictionary with labels for each class
"""
dataMatrix = filterNormalize (dataMatrix, classOfInterest)
labelsDict = {}
for label in np.unique(dataMatrix.obs[classOfInterest]):
lists = []
for obs in dataMatrix.obs[classOfInterest]:
if obs == label:
lists.append('A')
else:
lists.append('B')
        labelsDict[label] = lists  # binary labels: 'A' for this class, 'B' for all others
return labelsDict | 2,236 |
def publish(path: Path = Path(config.PACKAGE_CONFIG)) -> None:
"""Upload a pacakge to the package index"""
if not path.is_file():
typer.echo(f"{config.PACKAGE_CONFIG} not found")
raise typer.Abort()
contents = json.loads(path.read_text())
try:
contents = validate_database_json(contents)
package_version_source_path = contents["source"][0]
package_version = contents["version"]
package_handle, _, partial_package_name = contents["name"].partition("/")
assert Path(package_version_source_path).is_file()
except Exception as exc:
typer.echo(f"Error while validating {config.PACKAGE_CONFIG}")
typer.echo(exc)
raise typer.Abort()
email_address = prompt.email_address()
password = prompt.password()
anon_client: Client = create_client(config.get_url(), config.get_anon_key())
try:
session = anon_client.auth.sign_in(email=email_address, password=password)
except APIError as exc:
typer.echo(exc.msg)
raise typer.Abort()
# Using access token as apiKey because of the (incorrect) way headers are set for storage client
storage_client = (
create_client(config.get_url(), session.access_token)
.storage()
.StorageFileAPI(id_=config.PACKAGE_VERSION_BUCKET)
)
storage_object_name = storage_package_version_key(
package_handle=package_handle,
partial_name=partial_package_name,
version=package_version,
)
storage_resp = storage_client.upload(
path=storage_object_name,
file=package_version_source_path,
)
if storage_resp.status_code != 200:
typer.echo(storage_resp.json()["message"])
raise typer.Abort()
headers = {
"authorization": f"Bearer {session.access_token}",
"apiKey": config.get_anon_key(),
}
base_url = config.get_url() + "/rest/v1/"
resp = httpx.post(
base_url + "rpc/publish_package_version",
headers=headers,
json={"body": contents, "object_name": storage_object_name},
)
    if resp.status_code != 200:
        typer.echo(resp.json()["message"])
        raise typer.Abort()
    typer.echo(f"Successfully published package {contents['name']}") | 2,237 |
def load_file(file_location):
"""
Opens a given file and returns its contents.
:param str file_location: The absolute path to the file
:rtype: str
:return: The contents of the file
"""
with open(file_location, 'r') as file_contents:
contents = file_contents.read()
return contents | 2,238 |
def create_bam(data, args):
"""
aligner and conversion to BAM file
"""
workdir = safe_makedir("align")
sample = data['name']
# workdir = op.join("align", sample)
data['final_bam'] = _align(data['trimmed'], sample, op.abspath(workdir),
args.index, args.is_directional, args.bowtie2,
args.reference, data['config'])
data['order_bam'] = data['final_bam']
return data | 2,239 |
def calculateStorageLocationsDistance(D_loc: pd.DataFrame, input_loccodex: float,
input_loccodey: float, output_loccodex: float,
output_loccodey: float) -> pd.DataFrame:
"""
calculate the sum of the rectangular distances from
Input point -> physical location -> Output point
Args:
D_loc (pd.DataFrame): Input location DataFrame.
input_loccodex (float): Input X coordinate.
input_loccodey (float): Input Y coordinate.
output_loccodex (float): Output X coordinate.
output_loccodey (float): Output Y coordinate.
Returns:
        D_loc (pd.DataFrame): the input DataFrame with INPUT_DISTANCE and OUTPUT_DISTANCE columns added.
"""
D_loc = D_loc.dropna(subset=['LOCCODEX', 'LOCCODEY'])
D_loc['INPUT_DISTANCE'] = np.abs(input_loccodex - D_loc['LOCCODEX']) + np.abs(input_loccodey - D_loc['LOCCODEY'])
D_loc['OUTPUT_DISTANCE'] = np.abs(output_loccodex - D_loc['LOCCODEX']) + np.abs(output_loccodey - D_loc['LOCCODEY'])
return D_loc | 2,240 |
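A toy illustration (pandas/numpy imported as in the module): two locations, with the input point at (0, 0) and the output point at (10, 0).

D_loc = pd.DataFrame({"LOCCODEX": [2.0, 5.0], "LOCCODEY": [3.0, 1.0]})
out = calculateStorageLocationsDistance(D_loc, 0, 0, 10, 0)
# INPUT_DISTANCE  -> [5.0, 6.0]; OUTPUT_DISTANCE -> [11.0, 6.0]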
def join(words, sep = ' '):
"""join(list [,sep]) -> string
Return a string composed of the words in list, with
intervening occurrences of sep. The default separator is a
single space.
(joinfields and join are synonymous)
"""
return sep.join(words) | 2,241 |
def lstsq_with_smoothness_prior(data:ArrayLike) -> np.ndarray:
""" not finished,
Parameters:
-----------
Returns:
--------
Reference:
----------
[1]. Sameni, Reza. "Online Filtering Using Piecewise Smoothness Priors: Application to Normal and Abnormal Electrocardiogram Denoising." Signal Processing 133.C (2017): 52-63. Web.
"""
raise NotImplementedError | 2,242 |
def pickle(obj):
""" Creates a serialization of the provided object
Serialization is done by :mod:`pickle` module. If :mod:`cPickle` package is
available, that package will be used instead, yielding a gain in speed.
Parameters
----------
obj: :obj:`obj`
Object to be serialized.
Returns
-------
pickle: :obj:`pickle.pickle`
Serialized version of the provided object. """
return codecs.encode(pkl.dumps(obj), "base64").decode() | 2,243 |
def calc_E_E_AP_d_t(n_p):
"""1 時間当たりの家電の消費電力量
Args:
n_p(float): 仮想居住人数 仮想居住人数
Returns:
ndarray: 1 時間当たりの家電の消費電力量
"""
schedule = load_schedule()
schedule_app = get_schedule_app(schedule)
if 1 <= n_p and n_p <= 2:
E_E_AP_1_d_t = get_E_E_AP_p_d_t(1, schedule_app)
E_E_AP_2_d_t = get_E_E_AP_p_d_t(2, schedule_app)
return E_E_AP_1_d_t * (2 - n_p) / (2 - 1) + E_E_AP_2_d_t * (n_p - 1) / (2 - 1)
elif 2 <= n_p and n_p <= 3:
E_E_AP_2_d_t = get_E_E_AP_p_d_t(2, schedule_app)
E_E_AP_3_d_t = get_E_E_AP_p_d_t(3, schedule_app)
return E_E_AP_2_d_t * (3 - n_p) / (3 - 2) + E_E_AP_3_d_t * (n_p - 2) / (3 - 2)
elif 3 <= n_p and n_p <= 4:
E_E_AP_3_d_t = get_E_E_AP_p_d_t(3, schedule_app)
E_E_AP_4_d_t = get_E_E_AP_p_d_t(4, schedule_app)
return E_E_AP_3_d_t * (4 - n_p) / (4 - 3) + E_E_AP_4_d_t * (n_p - 3) / (4 - 3)
else:
raise ValueError(n_p) | 2,244 |
def Squeeze_forward(op: Operation, values: List[torch.Tensor], ctx: TorchBackendContext = None, **kwargs) -> torch.Tensor:
"""
Remove single-dimensional entries from the shape of a tensor.
Takes an input axes with a list of axes to squeeze.
If axes is not provided, all the single dimensions will be removed from the shape.
If an axis is selected with shape entry not equal to one, an error is raised.
Inputs (1 - 2)
data (differentiable) : T
Tensors with at least max(dims) dimensions.
axes (optional, non-differentiable) : tensor(int64)
List of integers indicating the dimensions to squeeze.
Negative value means counting dimensions from the back. Accepted range is [-r, r-1] where r = rank(data).
Outputs
squeezed (differentiable) : T
Reshaped tensor with same data as input.
Args:
op (Operation): [description]
input_values (List[torch.Tensor]): [description]
Returns:
torch.Tensor: [description]
"""
ASSERT_ALL_TENSORS_AT_SAME_DEVICE(op=op, values=values)
ASSERT_NUM_OF_INPUT(op=op, values=values, min_num_of_input=1, max_num_of_input=2)
[squeezing_tensor], axes = values, GET_ATTRIBUTE_FROM_OPERATION(op=op, attribute='axes', compulsive=True)
if isinstance(axes, list):
for squeezing_dim in sorted(axes, reverse=True):
squeezing_tensor = torch.squeeze(squeezing_tensor, squeezing_dim)
elif isinstance(axes, int):
squeezing_tensor = torch.squeeze(squeezing_tensor, axes)
else: raise TypeError(f'Parameter axes of operation {op.name} misunderstood, '
f'expect int value of list of int, while {type(axes)} was given.')
return squeezing_tensor | 2,245 |
def rm_empty_dir(path):
"""
Remove the directory `path` if it is a directory and empty.
If the directory does not exist or is not empty, do nothing.
"""
try:
os.rmdir(path)
except OSError: # directory might not exist or not be empty
pass | 2,246 |
async def test_validation_event(
loop, bus: lightbus.path.BusPath, dummy_api, mocker, worker: Worker
):
"""Check validation happens when firing an event"""
bus.client.register_api(dummy_api)
config = Config.load_dict({"apis": {"default": {"validate": True, "strict_validation": True}}})
bus.client.config = config
mocker.patch("jsonschema.validate", autospec=True)
async def co_listener(*a, **kw):
pass
await bus.client.schema.add_api(dummy_api)
await bus.client.schema.save_to_bus()
await bus.client.schema.load_from_bus()
bus.client.listen_for_event("my.dummy", "my_event", co_listener, listener_name="test")
async with worker(bus):
await asyncio.sleep(0.1)
await bus.my.dummy.my_event.fire_async(field="Hello")
await asyncio.sleep(0.001)
# Validate gets called
jsonschema.validate.assert_called_with(
{"field": "Hello"},
{
"$schema": "http://json-schema.org/draft-07/schema#",
"type": "object",
"additionalProperties": False,
"properties": {"field": {"type": "string"}},
"required": ["field"],
"title": "Event my.dummy.my_event parameters",
},
) | 2,247 |
def format_test_output(test_name, test_res, H0_unit_root=True):
"""
Helper function to format output. Return a dictionary with specific keys. Will be used to
construct the summary data frame for all unit root tests.
TODO: Add functionality of choosing based on the max lag order specified by user.
:param test_name: name of the test
:param test_res: object that contains corresponding test information. Can be None if test failed.
:param H0_unit_root: does the null hypothesis of the test assume a unit root process? Some tests do (ADF),
some don't (KPSS).
:return: dictionary of summary table for all tests and final decision on stationary vs non-stationary.
If test failed (test_res is None), return empty dictionary.
"""
# Check if the test failed by trying to extract the test statistic
if test_name in ('ADF', 'KPSS'):
try:
test_res['statistic']
except BaseException:
test_res = None
else:
try:
test_res.stat
except BaseException:
test_res = None
if test_res is None:
return {}
# extract necessary information
if test_name in ('ADF', 'KPSS'):
statistic = test_res['statistic']
crit_val = test_res['critical']['5%']
p_val = test_res['pval']
lags = test_res['resstore'].usedlag if test_name == 'ADF' else test_res['lags']
else:
statistic = test_res.stat
crit_val = test_res.critical_values['5%']
p_val = test_res.pvalue
lags = test_res.lags
if H0_unit_root:
H0 = 'The process is non-stationary'
stationary = "yes" if p_val < 0.05 else "not"
else:
H0 = 'The process is stationary'
stationary = "yes" if p_val > 0.05 else "not"
out = {
'test_name': test_name,
'statistic': statistic,
'crit_val': crit_val,
'p_val': p_val,
'lags': int(lags),
'stationary': stationary,
'Null Hypothesis': H0
}
return out | 2,248 |
def build_dataset(instruction_dicts,
dataset_from_file_fn,
shuffle_files=False,
parallel_reads=64):
"""Constructs a `tf.data.Dataset` from TFRecord files.
Args:
instruction_dicts: `list` of {'filepath':, 'mask':, 'offset_mask':}
containing the information about which files and which examples to use.
The boolean mask will be repeated and zipped with the examples from
filepath.
dataset_from_file_fn: function returning a `tf.data.Dataset` given a
filename.
shuffle_files: `bool`, Whether to shuffle the input filenames.
parallel_reads: `int`, how many files to read in parallel.
Returns:
`tf.data.Dataset`
"""
# First case: All examples are taken (No value skipped)
if _no_examples_skipped(instruction_dicts):
# Only use the filenames as instruction
instruction_ds = tf.data.Dataset.from_tensor_slices([
d["filepath"] for d in instruction_dicts
])
build_ds_from_instruction = dataset_from_file_fn
# Second case: Use the instructions to read the examples
else:
instruction_ds = _build_instruction_ds(instruction_dicts)
build_ds_from_instruction = functools.partial(
_build_ds_from_instruction,
ds_from_file_fn=dataset_from_file_fn,
)
# If shuffle is True, we shuffle the instructions/shards
if shuffle_files:
instruction_ds = instruction_ds.shuffle(len(instruction_dicts))
# Use interleave to parallel read files and decode records
ds = instruction_ds.interleave(
build_ds_from_instruction,
cycle_length=parallel_reads,
num_parallel_calls=tf.data.experimental.AUTOTUNE)
return ds | 2,249 |
def downloader(url: str, local_path: str, tracker: ProgressTracker, chunk_size: int):
"""
Download the file pointed at by the URL to the local path.
:param url: The URL of the file to be downloaded.
:param local_path: The local name of the file to be downloaded
:param tracker: Tracks information about the progress of the download.
:param chunk_size: The size of downloaded data to copy to memory before saving to disk.
:return:
"""
try:
with requests.get(url, stream=True) as resp:
resp.raise_for_status()
with open(local_path, "wb") as fp:
for chunk in resp.iter_content(chunk_size=chunk_size):
if tracker.stop_downloader.is_set():
logger.info("Download ended early!")
return
elif chunk:
fp.write(chunk)
chunk_size = len(chunk)
tracker.update(chunk_size)
logger.debug(f"chunk size: {chunk_size}")
except (requests.HTTPError, OSError) as ex:
tracker.error = ex
logger.exception(f"Download Failed for {url}")
finally:
tracker.stop_updater.set() | 2,250 |
def _SetRunOptionInRequest(run_option, run_schedule, request, messages):
"""Returns request with the run option set."""
if run_option == 'manual':
arg_utils.SetFieldInMessage(
request,
'googleCloudDatacatalogV1alpha3Crawler.config.adHocRun',
messages.GoogleCloudDatacatalogV1alpha3AdhocRun())
elif run_option == 'scheduled':
scheduled_run_option = arg_utils.ChoiceToEnum(
run_schedule,
(messages.GoogleCloudDatacatalogV1alpha3ScheduledRun
.ScheduledRunOptionValueValuesEnum))
arg_utils.SetFieldInMessage(
request,
'googleCloudDatacatalogV1alpha3Crawler.config.scheduledRun.scheduledRunOption',
scheduled_run_option)
return request | 2,251 |
def test_rank_closest():
"""test if phoneme-inventory is ranked correctly
    according to feature vector distance to a given phoneme"""
# set up custom class, create instance of it
class EtymMonkeyrank_closest:
def __init__(self):
self.phoneme_inventory, self.dm_called_with = None, []
self.dm_return = iter([1, 0, 2])
def distance_measure(self, *args):
arglist = [*args]
self.dm_called_with.append(arglist)
return next(self.dm_return)
mocketym = EtymMonkeyrank_closest()
# assert exception and exception message
with raises(InventoryMissingError) as inventorymissingerror_mock:
Etym.rank_closest(
self=mocketym,
ph="d",
howmany=float("inf"),
inv=None)
assert str(inventorymissingerror_mock.value
) == "define phoneme inventory or forms.csv"
# set up2: mock pick_minmax
with patch("loanpy.helpers.pick_minmax") as pick_minmax_mock:
pick_minmax_mock.return_value = ["b", "a", "c"]
# assert
assert Etym.rank_closest(
self=mocketym, ph="d", inv=[
"a", "b", "c"]) == "b, a, c"
# assert calls
assert mocketym.dm_called_with == [['d', 'a'], ['d', 'b'], ['d', 'c']]
pick_minmax_mock.assert_called_with(
[('a', 1), ('b', 0), ('c', 2)], float("inf"))
# set up3: overwrite mock class instance, mock pick_minmax anew
mocketym = EtymMonkeyrank_closest()
with patch("loanpy.helpers.pick_minmax") as pick_minmax_mock:
pick_minmax_mock.return_value = ["b", "a"]
# assert pick_minmax picks mins correctly again
assert Etym.rank_closest(
self=mocketym, ph="d", inv=[
"a", "b", "c"], howmany=2) == "b, a"
# assert calls
assert mocketym.dm_called_with == [['d', 'a'], ['d', 'b'], ['d', 'c']]
pick_minmax_mock.assert_called_with([('a', 1), ('b', 0), ('c', 2)], 2)
# set up4: check if phoneme inventory can be accessed through self
mocketym = EtymMonkeyrank_closest()
mocketym.phoneme_inventory = ["a", "b", "c"]
with patch("loanpy.helpers.pick_minmax") as pick_minmax_mock:
pick_minmax_mock.return_value = "b"
# assert pick_minmax picks mins correctly again
assert Etym.rank_closest(
self=mocketym,
ph="d",
inv=None,
howmany=1) == "b"
# assert calls
assert mocketym.dm_called_with == [['d', 'a'], ['d', 'b'], ['d', 'c']]
pick_minmax_mock.assert_called_with([('a', 1), ('b', 0), ('c', 2)], 1)
# tear down
del mocketym, EtymMonkeyrank_closest | 2,252 |
def create_virtual_machine(module, azure):
"""
Create new virtual machine
module : AnsibleModule object
azure: authenticated azure ServiceManagementService object
Returns:
True if a new virtual machine was created, false otherwise
"""
name = module.params.get('name')
hostname = module.params.get('hostname') or name + ".cloudapp.net"
endpoints = module.params.get('endpoints').split(',')
ssh_cert_path = module.params.get('ssh_cert_path')
user = module.params.get('user')
password = module.params.get('password')
location = module.params.get('location')
role_size = module.params.get('role_size')
storage_account = module.params.get('storage_account')
image = module.params.get('image')
virtual_network_name = module.params.get('virtual_network_name')
wait = module.params.get('wait')
wait_timeout = int(module.params.get('wait_timeout'))
# Check if a deployment with the same name already exists
cloud_service_name_available = azure.check_hosted_service_name_availability(name)
if not cloud_service_name_available.result:
changed = False
else:
changed = True
# Create cloud service if necessary
try:
result = azure.create_hosted_service(service_name=name, label=name, location=location)
_wait_for_completion(azure, result, wait_timeout, "create_hosted_service")
except WindowsAzureError as e:
module.fail_json(msg="failed to create the new service name, it already exists: %s" % str(e))
# Create linux configuration
disable_ssh_password_authentication = not password
linux_config = LinuxConfigurationSet(hostname, user, password, disable_ssh_password_authentication)
# Add ssh certificates if specified
if ssh_cert_path:
fingerprint, pkcs12_base64 = get_ssh_certificate_tokens(module, ssh_cert_path)
# Add certificate to cloud service
result = azure.add_service_certificate(name, pkcs12_base64, 'pfx', '')
_wait_for_completion(azure, result, wait_timeout, "add_service_certificate")
# Create ssh config
ssh_config = SSH()
ssh_config.public_keys = PublicKeys()
authorized_keys_path = u'/home/%s/.ssh/authorized_keys' % user
ssh_config.public_keys.public_keys.append(PublicKey(path=authorized_keys_path, fingerprint=fingerprint))
# Append ssh config to linux machine config
linux_config.ssh = ssh_config
# Create network configuration
network_config = ConfigurationSetInputEndpoints()
network_config.configuration_set_type = 'NetworkConfiguration'
network_config.subnet_names = []
network_config.public_ips = None
for port in endpoints:
network_config.input_endpoints.append(ConfigurationSetInputEndpoint(name='TCP-%s' % port,
protocol='TCP',
port=port,
local_port=port))
# First determine where to store disk
today = datetime.date.today().strftime('%Y-%m-%d')
disk_prefix = u'%s-%s' % (name, name)
media_link = u'http://%s.blob.core.windows.net/vhds/%s-%s.vhd' % (storage_account, disk_prefix, today)
# Create system hard disk
os_hd = OSVirtualHardDisk(image, media_link)
# Spin up virtual machine
try:
result = azure.create_virtual_machine_deployment(service_name=name,
deployment_name=name,
deployment_slot='production',
label=name,
role_name=name,
system_config=linux_config,
network_config=network_config,
os_virtual_hard_disk=os_hd,
role_size=role_size,
role_type='PersistentVMRole',
virtual_network_name=virtual_network_name)
_wait_for_completion(azure, result, wait_timeout, "create_virtual_machine_deployment")
except WindowsAzureError as e:
module.fail_json(msg="failed to create the new virtual machine, error was: %s" % str(e))
try:
deployment = azure.get_deployment_by_name(service_name=name, deployment_name=name)
return (changed, urlparse(deployment.url).hostname, deployment)
except WindowsAzureError as e:
module.fail_json(msg="failed to lookup the deployment information for %s, error was: %s" % (name, str(e))) | 2,253 |
def calcCumulOverlap(modes1, modes2, array=False):
"""Returns cumulative overlap of modes in *modes2* with those in *modes1*.
    Returns a number if *modes1* contains a single :class:`.Mode` or a
:class:`.Vector` instance. If *modes1* contains multiple modes, returns an
array. Elements of the array correspond to cumulative overlaps for modes
in *modes1* with those in *modes2*. If *array* is **True**, returns an array
of cumulative overlaps. Returned array has the shape ``(len(modes1),
len(modes2))``. Each row corresponds to cumulative overlaps calculated for
modes in *modes1* with those in *modes2*. Each value in a row corresponds
to cumulative overlap calculated using upto that many number of modes from
*modes2*."""
overlap = calcOverlap(modes1, modes2)
    if not array:
        return np.sqrt(np.power(overlap, 2).sum(axis=overlap.ndim-1))
    else:
return np.sqrt(np.power(overlap, 2).cumsum(axis=overlap.ndim-1)) | 2,254 |
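# Small numerical illustration (made-up overlap values) of the cumulative-overlap
# formula used above: CO(k) = sqrt(sum_{i<=k} overlap_i**2). The running array is
# what array=True returns; its last entry equals the single array=False value.
import numpy as np

overlap = np.array([0.6, 0.5, 0.3])
running = np.sqrt(np.power(overlap, 2).cumsum())  # [0.6, 0.781..., 0.836...]
final = np.sqrt(np.power(overlap, 2).sum())       # 0.836...
assert np.isclose(running[-1], final)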
def apply_ntimes(func, n, args, verbose=True, timeout=None):
"""
Applies `n` times the function `func` on `args` (useful if, eg, `func` is partly random).
Parameters
----------
func : function
func must be pickable, see https://docs.python.org/2/library/pickle.html#what-can-be-pickled-and-unpickled .
n : int
args : any
timeout : int or float
If given, the computation is cancelled if it hasn't returned a result before `timeout` seconds.
Returns
-------
    list
        Results of the `n` computations of ``func(*args)``.
"""
pool = multiprocessing.Pool()
multiple_results = [pool.apply_async(func, args) for _ in range(n)]
pool.close()
return [res.get(timeout) for res in tqdm(multiple_results, desc='# castor.parallel.apply_ntimes', disable = True)] | 2,255 |
def travel_time_without_Rebalancing(tnet, i, j, exo=0):
"""
    evaluate the travel time function for edge i->j
Parameters
----------
tnet: transportation network object
i: starting node of edge
j: ending node of edge
Returns
-------
float
"""
return sum(
[tnet.fcoeffs[n] * ((tnet.G_supergraph[i][j]['flowNoRebalancing'] +exo )/ tnet.G_supergraph[i][j]['capacity']) ** n for n in range(len(tnet.fcoeffs))]) | 2,256 |
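# Worked arithmetic for the polynomial travel-time formula above, using made-up
# BPR-style coefficients fcoeffs = [1, 0, 0, 0, 0.15] and a flow/capacity ratio of 0.8.
fcoeffs = [1, 0, 0, 0, 0.15]
x = 0.8  # (flowNoRebalancing + exo) / capacity
travel_time = sum(c * x ** n for n, c in enumerate(fcoeffs))
print(travel_time)  # 1 + 0.15 * 0.8**4 = 1.06144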
def CleanDatanode(vm):
"""Delete Hadoop data from 'vm'."""
vm.RemoteCommand('rm -rf {0}'.format(
posixpath.join(vm.GetScratchDir(), 'hadoop'))) | 2,257 |
def crawl_mean_temp_for_dates():
"""Get mean temperature for dates."""
# TODO | 2,258 |
def twistless(*args):
"""
Wraps the entry point function, this function should setup and run a
twisted reactor.
    A twisted task will be created to constantly schedule other stackless
    tasklets, as often as the timesched argument specifies.
"""
def _twistless(func):
"""
Wrap the given function
"""
@wraps(func)
def wrapped(*args, **kwargs):
"""
Calls the wrapped function in a stackless tasklet and sets up a
            looping twisted task to pump the scheduler.
"""
@wraps(func)
def execute():
"""
Execute the entry point and create a looping call.
"""
from .utils import REACTASK as reactor_tasklet
reactor_tasklet = sl.getcurrent()
task.LoopingCall(sl.schedule).start(timesched)
func(*args, **kwargs)
sl.tasklet(execute)()
sl.run()
return wrapped
    # Add the timesched arg if it is not given.
if len(args) == 1 and callable(args[0]):
timesched = DEFAULT_TIMESCHED
return _twistless(args[0])
else:
timesched = args[0] if len(args) >= 1 else DEFAULT_TIMESCHED
return _twistless | 2,259 |
def enhance_with_function(images, labels, ratio, enhance_func):
"""
:param images:
:param labels:
    :param ratio: the ratio relative to the largest class. For example, if the highest sample
        count is 1000 and ratio is 3, the result will contain roughly 1000 * 3 * number_of_classes samples
    :param enhance_func: the function used for augmentation, called as f(image, how_many_to_generate)
    :return: newly generated features and labels
"""
inputs_per_class = numpy.bincount(labels)
max_inputs = numpy.max(inputs_per_class)
# One Class
for i in range(len(inputs_per_class)):
input_ratio = math.ceil((max_inputs * ratio - inputs_per_class[i]) / inputs_per_class[i])
print("generating class:{} with ratio:{}, max input:{}, current:{}".format(
i, input_ratio, max_inputs, inputs_per_class[i]))
if input_ratio <= 1:
continue
new_features = []
new_labels = []
mask = numpy.where(labels == i)
for feature in images[mask]:
generated_images = enhance_func(feature, input_ratio)
for generated_image in generated_images:
new_features.append(generated_image)
new_labels.append(i)
images = numpy.append(images, new_features, axis=0)
labels = numpy.append(labels, new_labels, axis=0)
return images, labels | 2,260 |
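# Worked example (made-up labels) of the per-class generation ratio computed above:
# input_ratio = ceil((max_inputs * ratio - count_i) / count_i); classes whose ratio
# is <= 1 are skipped, so the majority class is left untouched.
import math
import numpy

labels = numpy.array([0, 0, 0, 0, 1, 2, 2])
inputs_per_class = numpy.bincount(labels)   # [4, 1, 2]
max_inputs = numpy.max(inputs_per_class)    # 4
ratio = 2
for i, count in enumerate(inputs_per_class):
    print(i, math.ceil((max_inputs * ratio - count) / count))
# class 0 -> 1 (skipped), class 1 -> 7, class 2 -> 3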
async def port_create(
request: Request,
server_id: int,
port: PortCreate,
db=Depends(get_db),
user=Depends(get_current_active_admin),
):
"""
Create a new port on server
"""
db_port = create_port(db, server_id, port)
trigger_tc(db_port)
return db_port | 2,261 |
def main():
"""First function to be called"""
# Clear the screen using module function.
clear_screen_module.clear_screen()
print("This script prints absolute paths of all files in current directory.\n")
current_dir = os.getcwd()
print(f"Current directory: {current_dir}\n")
print("Files in current dir are as below with their absolute paths,\n")
# Call function to list absolute paths of files.
list_abs_path_of_files(current_dir)
print("\nAll files are listed above.\n")
return None | 2,262 |
def test_CursorDB_str(data) -> None:
"""Testing CursorDB ``__str__`` datamethod."""
db = CursorDB(data)
assert (
db.__str__()
== "CursorDB(aa=[], bb=['cc'], cc=['bb'], ddddd=['ffffff'], ffffff=['ddddd'])"
) | 2,263 |
def rectangle_area(base, height):
"""Returns the area of a rectangle"""
base = float(base)
height = float(height)
if (base < 0.0 or height < 0.0):
raise ValueError('Negative numbers are not allowed')
return base * height | 2,264 |
def create_update_stack_set(stack_set_name, stack_set_accounts):
"""
Creates stack set with the specified accounts
:param stack_set_name: Name of CloudFormation StackSet to create/update
    :param stack_set_accounts: Accounts where StackSet instances should be created
:return:
"""
try:
logger.info("Retrieving ServiceNow API credentials from Secret Manager")
creds = json.loads(get_secret(servicenow_creds))
except Exception as e:
logger.error(f"Error retrieving servicenow credentials from secret manager")
raise e
resp = None
stack_set_exists = True
## Check if stack set exists
try:
logger.info(f"Checking if a Stack Set exists with name: {stack_set_name}")
resp = cloudformation.describe_stack_set(
StackSetName=stack_set_name
)
logger.debug(f"Describe stack set resp: {json.dumps(resp)}")
except cloudformation.exceptions.StackSetNotFoundException as e:
logger.info(f"Can't find StackSet with name {stack_set_name}")
stack_set_exists = False
except Exception as e:
logger.error(f"Error while looking up StackSet with name {stack_set_name}, Error: {str(e)}")
raise e
if not stack_set_exists:
client = boto3.client("sts")
master_account_id = client.get_caller_identity()["Account"]
stack_set_params = [
{
'ParameterKey': 'pMasterAccountId',
'ParameterValue': master_account_id
},
{
'ParameterKey': 'pExternalId',
'ParameterValue': servicenow_role_external_id
}
]
if enable_servicenow_cloudwatch_intg:
optional_params = [
{
'ParameterKey': 'pEnableCloudWatchAlarmIntegration',
'ParameterValue': 'yes'
},
{
'ParameterKey': 'pServiceNowUrl',
'ParameterValue': parse_url(servicenow_url).hostname
},
{
'ParameterKey': 'pServiceNowEventUserName',
'ParameterValue': creds['username']
},
{
'ParameterKey': 'pServiceNowEventUserPassword',
'ParameterValue': creds['password']
}
]
stack_set_params.extend(optional_params)
logger.info(f"Creating Stack Set {stack_set_name} ...")
template_url = "https://{}.s3.amazonaws.com/{}".format(stack_template_bucket, stack_template_file)
logger.info(template_url)
try:
resp = cloudformation.create_stack_set(
StackSetName=stack_set_name,
Description=stack_set_description,
TemplateURL=template_url,
Parameters=stack_set_params,
AdministrationRoleARN="arn:aws:iam::{}:role/service-role/{}".format(master_account_id, STACK_SET_ADMIN_ROLE),
ExecutionRoleName=STACK_SET_EXECUTION_ROLE,
Capabilities=[
"CAPABILITY_NAMED_IAM"
]
)
except cloudformation.exceptions.NameAlreadyExistsException as e:
logger.info(f"StackSet creation failed as another StackSet with that name already exits")
raise e
except cloudformation.exceptions.LimitExceededException as e:
logger.info(f"StackSet creation failed due to execeding cloudformation API limit")
raise e
except Exception as e:
raise e
logger.info(f"StackSet creation was successfull")
logger.info(f"Creating Stack Instances for Accounts {stack_set_accounts}")
if stack_set_accounts:
try:
create_stack_instances(stack_set_name, stack_set_accounts, stack_set_regions)
except Exception as e:
logger.error(f"Error creating stack instance, Error {str(e)}")
return | 2,265 |
def a_star(G: PCFG):
"""
A generator that enumerates all programs using A*.
Assumes that the PCFG only generates programs of bounded depth.
"""
frontier = []
initial_non_terminals = deque()
initial_non_terminals.append(G.start)
heappush(
frontier,
(
-G.max_probability[G.start].probability[(G.__hash__(), G.start)],
(None, initial_non_terminals, 1),
),
)
# A frontier is a heap of pairs (-max_probability, (partial_program, non_terminals, probability))
# describing a partial program:
# max_probability is the most likely program completing the partial program
# partial_program is the list of primitives and variables describing the leftmost derivation,
# non_terminals is the queue of non-terminals appearing from left to right, and
# probability is the probability of the partial program
while len(frontier) != 0:
max_probability, (partial_program, non_terminals, probability) = heappop(
frontier
)
if len(non_terminals) == 0:
yield partial_program
else:
S = non_terminals.pop()
for P in G.rules[S]:
args_P, w = G.rules[S][P]
new_partial_program = (P, partial_program)
new_non_terminals = non_terminals.copy()
new_probability = probability * w
new_max_probability = new_probability
for arg in args_P:
new_non_terminals.append(arg)
new_max_probability *= G.max_probability[arg].probability[
(G.__hash__(), arg)
]
heappush(
frontier,
(
-new_max_probability,
(new_partial_program, new_non_terminals, new_probability),
),
) | 2,266 |
def pipe(*args, **kwargs):
"""A processor that replaces the text of a field of an item.
Args:
item (dict): The entry to process
kwargs (dict): The keyword arguments passed to the wrapper
Kwargs:
conf (dict): The pipe configuration. Must contain the key 'rule'.
rule (dict): can be either a dict or list of dicts. Must contain
the keys 'find' and 'replace'. May contain the key 'param'.
find (str): The string to find.
replace (str): The string replacement.
param (str): The type of replacement. Must be one of: 'first',
'last', or 'every' (default: 'every').
assign (str): Attribute to assign parsed content (default: strreplace)
field (str): Item attribute to operate on (default: 'content')
Yields:
dict: an item with replaced content
Examples:
>>> conf = {'rule': {'find': 'hello', 'replace': 'bye'}}
>>> item = {'content': 'hello world'}
>>> next(pipe(item, conf=conf))['strreplace'] == 'bye world'
True
>>> rules = [
... {'find': 'Gr', 'replace': 'M'},
... {'find': 'e', 'replace': 'a', 'param': 'last'}]
>>> conf = {'rule': rules}
>>> kwargs = {'conf': conf, 'field': 'title', 'assign': 'result'}
>>> item = {'title': 'Greetings'}
>>> next(pipe(item, **kwargs))['result'] == 'Meatings'
True
"""
return parser(*args, **kwargs) | 2,267 |
def move_piece(x, y, new_x, new_y, board, board_turtles, SYMBOL_DICT, BOARD_DIMENSION):
"""
    This function only moves pieces and does not validate whether the move is legal;
    it simply replaces whatever occupies the destination tile
"""
print("Moving ", x, y, "to", new_x, new_y)
# replace piece on the board
board[new_y][new_x] = board[y][x]
# get piece symbol from the dictionary (based upon board int)
symbol = SYMBOL_DICT[board[y][x]]
# call delete piece
delete_piece(x, y, board, board_turtles)
# Get the turtle stored for the new block
new_turtle = board_turtles[new_y][new_x]
# clear the turtle (in case there is a written piece there) at the desired position
new_turtle.clear()
# write out the piece symbol centered in the block in ariel font with a size of the block height/width
new_turtle.write(symbol, False, align="center", font=("Ariel", int(BOARD_DIMENSION/5))) | 2,268 |
def adjoint(g):
"""Return the adjoint of a rigid body transformation g."""
adg = np.zeros((6, 6))
R_part, p = g[:3, :3], g[:3, 3]
pR = skew(p) @ R_part
adg[:3, :3] = R_part
adg[-3:, -3:] = R_part
adg[:3, -3:] = pR
return adg | 2,269 |
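# Sketch of the adjoint block structure built above, with a hypothetical `skew`
# helper (the real one is not shown in this snippet): Ad(g) = [[R, skew(p) @ R], [0, R]].
import numpy as np

def skew(p):
    # standard hat map: 3-vector -> 3x3 skew-symmetric matrix (assumed convention)
    return np.array([[0.0, -p[2], p[1]],
                     [p[2], 0.0, -p[0]],
                     [-p[1], p[0], 0.0]])

g = np.eye(4)
g[:3, 3] = [1.0, 2.0, 3.0]           # pure translation
R_part, p = g[:3, :3], g[:3, 3]
adg = np.block([[R_part, skew(p) @ R_part],
                [np.zeros((3, 3)), R_part]])
print(adg.shape)  # (6, 6)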
def dmp_rr_yun0_sqf_list(f, u, K):
"""Compute square-free decomposition of ``f`` in zero-characteristic ring ``K``.
References
==========
* :cite:`LeeM2013factor`, page 8
"""
if dmp_ground_p(f, None, u):
return []
result, count = [], 1
qs = [dmp_diff_in(f, 1, i, u, K) for i in range(u + 1)]
g = f
for q in qs:
g = dmp_gcd(g, q, u, K)
while not dmp_one_p(f, u, K):
for i in range(u + 1):
qs[i] = dmp_quo(qs[i], g, u, K)
f = dmp_quo(f, g, u, K)
for i in range(u + 1):
qs[i] = dmp_sub(qs[i], dmp_diff_in(f, 1, i, u, K), u, K)
g = f
for q in qs:
g = dmp_gcd(g, q, u, K)
if not dmp_one_p(g, u, K):
result.append((g, count))
count += 1
return result | 2,270 |
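# Illustration of what a square-free decomposition produces, using SymPy's public
# sqf_list instead of the low-level dmp_* routine above (an analogous, not identical, API).
from sympy import sqf_list, symbols

x, y = symbols('x y')
print(sqf_list(x**5 + 2*x**4 + x**3))      # x with multiplicity 3, (x + 1) with multiplicity 2
print(sqf_list(x**2*y + 2*x*y**2 + y**3))  # y with multiplicity 1, (x + y) with multiplicity 2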
def parse_group(rule):
"""
Parse the group line
"""
parser = argparse.ArgumentParser()
rules = shlex.split(rule)
rules.pop(0)
parser.add_argument("--name", dest="name", action="store")
parser.add_argument("--gid", dest="gid", action="store")
args = clean_args(vars(parser.parse_args(rules)))
parser = None
return args | 2,271 |
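# Usage sketch on a kickstart-style group line (assumption: `clean_args` simply drops
# None-valued entries, so only the argparse step is reproduced here).
import argparse
import shlex

rule = "group --name=webadmin --gid=1200"
rules = shlex.split(rule)
rules.pop(0)                      # drop the leading "group" keyword
parser = argparse.ArgumentParser()
parser.add_argument("--name", dest="name", action="store")
parser.add_argument("--gid", dest="gid", action="store")
print(vars(parser.parse_args(rules)))   # {'name': 'webadmin', 'gid': '1200'}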
def register_module():
"""Registers this module for use."""
def on_module_disable():
tags.Registry.remove_tag_binding(TextFileUploadTag.binding_name)
tags.EditorBlacklists.unregister(
TextFileUploadTag.binding_name,
tags.EditorBlacklists.COURSE_SCOPE)
tags.EditorBlacklists.unregister(
TextFileUploadTag.binding_name,
tags.EditorBlacklists.DESCRIPTIVE_SCOPE)
def on_module_enable():
tags.Registry.add_tag_binding(
TextFileUploadTag.binding_name, TextFileUploadTag)
tags.EditorBlacklists.register(
TextFileUploadTag.binding_name,
tags.EditorBlacklists.COURSE_SCOPE)
tags.EditorBlacklists.register(
TextFileUploadTag.binding_name,
tags.EditorBlacklists.DESCRIPTIVE_SCOPE)
global_routes = [
(os.path.join(_RESOURCES_PATH, '.*'), tags.ResourcesHandler),
]
namespaced_routes = [
(_POST_ACTION_SUFFIX, TextFileUploadHandler),
]
global custom_module
custom_module = custom_modules.Module(
'Student Text File Submission Upload',
'Adds a custom tag for students to upload text files <= 1MB in size.',
global_routes, namespaced_routes,
notify_module_disabled=on_module_disable,
notify_module_enabled=on_module_enable,
)
return custom_module | 2,272 |
def angle2trig(theta):
"""Convert angle to a reportlab ready tuple.
Arguments:
- theta - Angle in degrees, counter clockwise from horizontal
Returns a representation of the passed angle in a format suitable
for ReportLab rotations (i.e. cos(theta), sin(theta), -sin(theta),
cos(theta) tuple)
"""
c = cos(theta * pi / 180)
s = sin(theta * pi / 180)
return (c, s, -s, c) | 2,273 |
def get_state_z0_pure_state_vector() -> np.ndarray:
"""Returns the pure state vector for :math:`|0\\rangle`.
Returns
-------
np.ndarray
the pure state vector.
"""
vec = np.array([1, 0], dtype=np.complex128)
return vec | 2,274 |
def HSV_to_CMYKratio(hsv):
"""Converts HSV color space to CMYK (ratio representation)"""
rgb = HSV_to_RGB(hsv)
return RGB_to_CMYKratio(rgb) | 2,275 |
def delete_single_culture(user_id, culture_id):
"""Delete a culture."""
try:
culture = Culture.query.filter_by(user_id=user_id).filter_by(culture_id=culture_id).first()
if not culture:
response_object = {
'status': 'fail',
'message': f'{culture_id} does not exist.'
}
return jsonify(response_object), 404
else:
db.session.delete(culture)
db.session.commit()
response_object = {
'status': 'success',
'message': f'{culture_id} was deleted.'
}
return jsonify(response_object), 200
except exc.IntegrityError as e:
db.session.rollback()
response_object = {
'status': 'fail',
'message': 'Invalid payload.'
}
return jsonify(response_object), 400 | 2,276 |
def create_atomic_chunk(im, chunk_coord, aff_dtype=np.float32, verbose=True):
""" Creates single atomic chunk
:param im: IngestionManager
:param chunk_coord: np.ndarray
array of three ints
:param aff_dtype: np.dtype
np.float64 or np.float32
:param verbose: bool
:return:
"""
    chunk_coord = np.array(list(chunk_coord), dtype=int)
edge_dict = collect_edge_data(im, chunk_coord, aff_dtype=aff_dtype)
mapping = collect_agglomeration_data(im, chunk_coord)
active_edge_dict, isolated_ids = define_active_edges(edge_dict, mapping)
edge_ids = {}
edge_affs = {}
edge_areas = {}
for k in edge_dict.keys():
if k == "cross":
edge_ids[k] = np.concatenate([edge_dict[k]["sv1"][:, None],
edge_dict[k]["sv2"][:, None]],
axis=1)
continue
sv1_conn = edge_dict[k]["sv1"][active_edge_dict[k]]
sv2_conn = edge_dict[k]["sv2"][active_edge_dict[k]]
aff_conn = edge_dict[k]["aff"][active_edge_dict[k]]
area_conn = edge_dict[k]["area"][active_edge_dict[k]]
edge_ids[f"{k}_connected"] = np.concatenate([sv1_conn[:, None],
sv2_conn[:, None]],
axis=1)
edge_affs[f"{k}_connected"] = aff_conn.astype(np.float32)
edge_areas[f"{k}_connected"] = area_conn
sv1_disconn = edge_dict[k]["sv1"][~active_edge_dict[k]]
sv2_disconn = edge_dict[k]["sv2"][~active_edge_dict[k]]
aff_disconn = edge_dict[k]["aff"][~active_edge_dict[k]]
area_disconn = edge_dict[k]["area"][~active_edge_dict[k]]
edge_ids[f"{k}_disconnected"] = np.concatenate([sv1_disconn[:, None],
sv2_disconn[:, None]],
axis=1)
edge_affs[f"{k}_disconnected"] = aff_disconn.astype(np.float32)
edge_areas[f"{k}_disconnected"] = area_disconn
im.cg.add_atomic_edges_in_chunks(edge_ids, edge_affs, edge_areas,
isolated_node_ids=isolated_ids)
return edge_ids, edge_affs, edge_areas | 2,277 |
def add_adult(request):
"""
Add a new adult record
:param request:
:return:
"""
args = dict()
app = AppUtil.get_by_user(user=request.user)
if request.method == 'POST':
form = AddAdultForm(request.POST)
if form.is_valid():
adult = form.save(commit=False)
adult.application = app[0]
adult.save()
return redirect('adult_salary', adult_id=adult.id)
else:
form = AddAdultForm()
args['form'] = form
args['nav'] = AppUtil.get_nav(nav=nav, url='adults', app=app[0])
args['progress'] = AppUtil.get_app_progress(app=app[0])
return render(request, "eat/user/application/adult/add_edit.html", args) | 2,278 |
def maybe_download_and_extract(url, dst_dir):
"""Download and extract model tar file.
If the pretrained model we're using doesn't already exist, this function
downloads it from the TensorFlow.org website and unpacks it into a directory.
Args:
url: Web location of the tar file containing the pretrained model.
dst_dir: Destination directory to save downloaded and extracted file.
Returns:
None.
"""
import tarfile
    filepath = maybe_download(url, dst_dir)
tarfile.open(filepath, 'r:gz').extractall(dst_dir) | 2,279 |
def test_grad_hermite_multidimensional_vs_finite_differences(tol, renorm):
"""Tests the gradients of hermite polynomials. The gradients of parameters are tested by finite differences."""
d = 4
R = np.random.rand(d, d) + 1j * np.random.rand(d, d)
R += R.T
y = np.random.rand(d) + 1j * np.random.rand(d)
C = 0.5
cutoff = [3, 3, 3, 3]
gate = hermite_multidimensional(R, cutoff, y, C, renorm=renorm, modified=True)
grad_C, grad_R, grad_y = grad_hermite_multidimensional(
gate, R, y, C, renorm=renorm, dtype=np.complex128
)
delta = 0.000001 + 1j * 0.000001
expected_grad_C = (
hermite_multidimensional(R, cutoff, y, C + delta, renorm=renorm, modified=True)
- hermite_multidimensional(R, cutoff, y, C - delta, renorm=renorm, modified=True)
) / (2 * delta)
assert np.allclose(grad_C, expected_grad_C, atol=tol, rtol=0)
for i in range(y.shape[0]):
y[i] += delta
plus = hermite_multidimensional(R, cutoff, y, C, renorm=renorm, modified=True)
y[i] -= 2 * delta
minus = hermite_multidimensional(R, cutoff, y, C, renorm=renorm, modified=True)
expected_grad_y = (plus - minus) / (2 * delta)
y[i] += delta
assert np.allclose(grad_y[..., i], expected_grad_y, atol=tol, rtol=0)
for i in range(R.shape[0]):
for j in range(R.shape[1]):
R[i, j] += delta
plus = hermite_multidimensional(R, cutoff, y, C, renorm=renorm, modified=True)
R[i, j] -= 2 * delta
minus = hermite_multidimensional(R, cutoff, y, C, renorm=renorm, modified=True)
expected_grad_R = (plus - minus) / (2 * delta)
R[i, j] += delta
assert np.allclose(grad_R[..., i, j], expected_grad_R, atol=tol, rtol=0) | 2,280 |
def replace(target_obj):
"""A decorator to replace the specified obj.
`target_obj` can be a class or a function.
Example:
```python
class A:
def f(self):
print('class A')
@replace(A)
class B:
def f(self):
print('class B')
```
Args:
target_obj (class/func/method): a class, method, or function to be
replaced.
Returns:
A decorator function to replace the input object.
"""
def decorator(new_obj):
if target_obj in OPTIMIZED_CLASSES:
logging.warning("{} has been optimized again.".format(target_obj))
setattr(new_obj, "__replaced_class__", target_obj)
OPTIMIZED_CLASSES[target_obj] = new_obj
for k, v in list(sys.modules.items()):
if target_obj.__name__ in v.__dict__ and v.__dict__[target_obj.__name__] is target_obj:
delattr(sys.modules[k], target_obj.__name__)
setattr(sys.modules[k], target_obj.__name__, new_obj)
logging.debug("In module {}, {} is replaced by {}".format(k, target_obj, new_obj))
# replace target_obj if it is used as the base classes.
for key in list(v.__dict__.keys()):
if (
inspect.isclass(v.__dict__[key])
and v.__dict__[key] != new_obj
and target_obj in v.__dict__[key].__bases__
):
idx = v.__dict__[key].__bases__.index(target_obj)
bases = list(v.__dict__[key].__bases__)
bases[idx] = new_obj
v.__dict__[key].__bases__ = tuple(bases)
logging.debug(
"In module {}, the base class of {} is replaced by {}".format(k, v.__dict__[key], new_obj)
)
return new_obj
return decorator | 2,281 |
def get_layers(model, filter_regexp):
"""
Filters out the layers according to a regexp. Note that
we omit biases.
Args:
- model: a nn.Module
- filter_regexp: a regexp to filter the layers to keep
according to their name in model.named_parameters().
For instance, the regexp:
down_layers\\.[123456]\\.(conv[12]|identity\\.conv))
is keeping blocks down_layers from 1 to 6, and inside
each block is keeping conv1, conv2 and identity.conv.
Remarks:
- We add (module\\.)? at the beginning of the regexp to
account for the possible use of nn.parallel.DataParallel
"""
# get all parameter names
all_layers = map(itemgetter(0), model.named_parameters())
# remove biases
all_layers = filter(lambda x: "bias" not in x, all_layers)
# remove .weight in all other names (or .weight_orig is spectral norm)
all_layers = map(lambda x: x.replace(".weight_orig", ""), all_layers)
all_layers = map(lambda x: x.replace(".weight", ""), all_layers)
# return filtered layers
filter_regexp = "(module\\.)?" + "(" + filter_regexp + ")"
r = re.compile(filter_regexp)
return list(filter(r.match, all_layers)) | 2,282 |
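# Illustration of the filtering pipeline above on hypothetical parameter names
# (no model needed): biases are dropped, .weight/.weight_orig suffixes stripped,
# and the regexp is matched with an optional "module." prefix.
import re
from operator import itemgetter

named_parameters = [
    ("module.down_layers.1.conv1.weight", None),
    ("module.down_layers.1.conv1.bias", None),
    ("module.down_layers.1.identity.conv.weight_orig", None),
    ("module.up_layers.3.conv2.weight", None),
]
layers = map(itemgetter(0), named_parameters)
layers = filter(lambda x: "bias" not in x, layers)
layers = map(lambda x: x.replace(".weight_orig", "").replace(".weight", ""), layers)
pattern = re.compile(r"(module\.)?(down_layers\.[123456]\.(conv[12]|identity\.conv))")
print(list(filter(pattern.match, layers)))
# ['module.down_layers.1.conv1', 'module.down_layers.1.identity.conv']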
def decrypt(data: bytes,
password: Union[str, bytes]) -> bytes:
"""
decrypt data
:param data: encrypted data
:param password: password
:return: plain data
"""
__data = gzip_decompress(data[4:]) if data.startswith(b'moca') else data
iv, cipher = __data[:AES.block_size], __data[AES.block_size:]
return __create_aes(password, iv).decrypt(cipher) | 2,283 |
def stream_from_url(*args, **kwargs):
"""
Save the resource as a file on disk iteratively by first asking
for the 'content-length' header entry and downloading in chunks.
By default we will retry if an HTTP error arises.
By default we will uncompress a downloaded file if it is zipped.
"""
# Just redirect to download_from_url #
    kwargs.update({'stream': True})
return download_from_url(*args, **kwargs) | 2,284 |
def modulo_3(lhs, ctx):
"""Element ǒ
(num) -> a % 3
(str) -> a split into chunks of size 2
"""
return {
(NUMBER_TYPE): lambda: lhs % 3,
(str): lambda: [lhs[i : i + 2] for i in range(0, len(lhs), 2)],
}.get(vy_type(lhs), lambda: vectorise(modulo_3, lhs, ctx=ctx))() | 2,285 |
def gradstep(P,dP,drate,mP,mrate,grad,nesterov=False):
"""
Performs a gradient update step on parameters P,
using gradient dP with learning rate (drate), and
momentum vector mP with momentum rate (mrate).
grad() must be a function that computes:
dP[:] = gradient at current P
where 'grad' is assumed to have references to
P and to dP.
If nesterov is False, the computation is:
grad()
mP[:] = drate*dP + mrate*mP
P[:] = P + mP
If nesterov is True, the computation is:
P[:] = P + mrate*mP
grad()
mP[:] = drate*dP + mrate*mP
P[:] = P + drate*dP
"""
assert callable(grad)
if nesterov:
# P[:] += mrate*mP
ext_dll().api_gradstep_nesterov1(P._ptr,mP._ptr,mrate._ptr)
# dP[:] = gradient at P + mrate*mP
grad()
# mP[:] = drate*dP + mrate*mP
# P[:] += drate*dP
ext_dll().api_gradstep_nesterov2(P._ptr,dP._ptr,drate._ptr,mP._ptr,mrate._ptr)
else:
# dP[:] = gradient at P
grad()
# mP[:] = drate*dP + mrate*mP
# P[:] = P + mP
ext_dll().api_gradstep(P._ptr,dP._ptr,drate._ptr,mP._ptr,mrate._ptr)
return | 2,286 |
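# Pure-NumPy sketch of the two update rules documented above (no ext_dll calls);
# the toy objective and negative drate below are illustrative assumptions, not
# taken from the original library.
import numpy as np

def gradstep_numpy(P, dP, drate, mP, mrate, grad, nesterov=False):
    if nesterov:
        P += mrate * mP
        grad()                         # dP[:] = gradient at the look-ahead point
        mP[:] = drate * dP + mrate * mP
        P += drate * dP
    else:
        grad()                         # dP[:] = gradient at P
        mP[:] = drate * dP + mrate * mP
        P += mP

P, dP, mP = np.array([1.0]), np.zeros(1), np.zeros(1)

def grad():
    dP[:] = P                          # gradient of f(P) = 0.5 * ||P||**2

for _ in range(200):
    gradstep_numpy(P, dP, drate=-0.1, mP=mP, mrate=0.9, grad=grad)
print(P)                               # converges toward 0 (note drate < 0 for descent)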
def _load_score_submission(submission_path, metric, step, data_label=None):
"""Load the score for a single submission."""
if data_label is None:
training_output_path = os.path.join(
submission_path, 'training_output')
else:
training_output_path = os.path.join(
submission_path, 'training_output', data_label)
if not os.path.isdir(training_output_path):
return None
folds_path = [
os.path.join(training_output_path, fold_name)
for fold_name in os.listdir(training_output_path)
if (os.path.isdir(os.path.join(training_output_path, fold_name)) and
'fold_' in fold_name)
]
data = {}
for fold_id, path in enumerate(folds_path):
score_path = os.path.join(path, 'scores.csv')
if not os.path.exists(score_path):
return
scores = pd.read_csv(score_path, index_col=0)
scores.columns.name = 'score'
data[fold_id] = scores
df = pd.concat(data, names=['fold'])
metric = metric if metric else slice(None)
step = step if step else slice(None)
return df.loc[(slice(None), step), metric] | 2,287 |
def main(args):
"""
Returns
-------
"""
# Create training dataset
if args.in_fns is not None:
if args.path_PERC is not None:
logging.info('Preprocess training dataset including output quantiles')
logging.info(f'with real_geography flag set to {args.real_geography}')
preprocess(args.in_dir, args.in_fns, args.out_dir, args.out_fn, args.vars,
path_PERC = args.path_PERC, real_geography = args.real_geography)
else:
logging.info(f'Preprocess training dataset with real_geography flag set to {args.real_geography}')
preprocess(args.in_dir, args.in_fns, args.out_dir, args.out_fn, args.vars,
real_geography = args.real_geography)
else:
if args.list==True:
preprocess_list(args.in_dir, args.out_dir, args.out_fn, args.vars,
#list_xr = args.list_xr,
list_xr1 = args.list_xr1, list_xr2 = args.list_xr2,
lev_range=(0, 30), path_PERC=args.path_PERC)
# Shuffle training dataset
if args.shuffle:
logging.info('Shuffle training dataset')
shuffle(args.out_dir, args.out_fn, args.chunk_size)
    # Optionally preprocess the validation dataset
if args.val_in_fns is not None:
if args.path_PERC_val is not None:
logging.info('Preprocess validation dataset including output quantiles')
preprocess(args.in_dir, args.val_in_fns, args.out_dir, args.val_out_fn,
args.vars, path_PERC = args.path_PERC_val, real_geography = args.real_geography)
else:
logging.info('Preprocess validation dataset')
preprocess(args.in_dir, args.val_in_fns, args.out_dir, args.val_out_fn, args.vars,
real_geography = args.real_geography)
if args.norm_fn is not None:
logging.info(f'Compute normalization file from {args.norm_train_or_valid}')
normalize(
args.out_dir,
args.out_fn if args.norm_train_or_valid == 'train' else args.val_out_fn,
args.norm_fn
)
logging.info('Finish entire preprocessing script.') | 2,288 |
def ingest_(droplet_db_file, master_cur, master_con):
"""
INGESTS DATA FROM THE DROPLET DB TO MASTER DB
"""
query = "select indeed_id, city_name, country_code from indeed_resumes;"
con = sql.connect(droplet_dbs_folder+droplet_db_file, timeout=10)
cur = con.cursor()
cur.execute(query)
for indeed_id, city_name, country_code in cur:
master_cur.execute("INSERT OR REPLACE INTO indeed_resumes (indeed_id, city_name, country_code) VALUES (?, ?, ?);", (indeed_id, city_name, country_code))
con.close()
master_con.commit()
return | 2,289 |
def geolocalizarCiudades(lista_ciudades: list):
"""Para una lista con nombres de ciudades devuelve una fila de DataFrame.
Parámetros
----------
lista_ciudades : list
Lista de nombres de ciudades.
Devuelve
-------
df_Fila: pandas.DataFrame
Fila de un DataFrame que incluye el nombre de la ciudad, el par de coordenadas, la dirección completa de la ciudad y una instancia de la clase Ciudad.
"""
rows = []
for i in lista_ciudades:
coord, direccion = geolocalizar(i)
rows.append([i, coord, direccion, Ciudad(*coord, i)])
df_Fila = pd.DataFrame(
rows,
columns=[
"Ciudad",
"Coordenadas",
"Direccion",
"ObjetoCiudad"])
return df_Fila | 2,290 |
def HandleConvPaddingModes(x, padding, kernel_shape, strides):
"""Returns an updated tensor and padding type for REFLECT and SYMMETRIC.
Args:
x: A 4D tensor with shape [batch_size, height, width, depth].
padding: Padding mode (SAME, VALID, REFLECT, or SYMMETRIC).
kernel_shape: Shape of convolution kernel that will be applied.
strides: Convolution stride that will be used.
Returns:
x and padding after adjustments for REFLECT and SYMMETRIC.
"""
# For 1x1 convolution, all padding modes are the same.
if np.all(kernel_shape[:2] == 1):
return x, 'VALID'
if padding == 'REFLECT' or padding == 'SYMMETRIC':
# We manually compute the number of paddings as if 'SAME'.
# From Tensorflow kernel, the formulas are as follows.
# output_shape = ceil(input_shape / strides)
# paddings = (output_shape - 1) * strides + filter_size - input_shape
# Let x, y, s be a shorthand notations for input_shape, output_shape, and
# strides, respectively. Let (x - 1) = sn + r where 0 <= r < s. Note that
# y - 1 = ceil(x / s) - 1 = floor((x - 1) / s) = n
# provided that x > 0. Therefore
# paddings = n * s + filter_size - (sn + r + 1)
# = filter_size - r - 1.
input_shape = x.get_shape() # shape at graph construction time
img_shape = tf.shape(x)[1:3] # image shape (no batch) at run time
remainder = tf.mod(img_shape - 1, strides[1:3])
pad_sizes = kernel_shape[:2] - remainder - 1
pad_rows = pad_sizes[0]
pad_cols = pad_sizes[1]
pad = tf.stack([[0, 0], tf.stack([pad_rows // 2, (pad_rows + 1) // 2]),
tf.stack([pad_cols // 2, (pad_cols + 1) // 2]), [0, 0]])
# Manually pad the input and switch the padding mode to 'VALID'.
x = tf.pad(x, pad, mode=padding)
x.set_shape([input_shape[0], x.get_shape()[1],
x.get_shape()[2], input_shape[3]])
padding = 'VALID'
return x, padding | 2,291 |
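# Worked example (made-up sizes) of the 'SAME'-style padding arithmetic derived in
# the comment above: with input width x, stride s and filter size f,
# r = (x - 1) % s and the total padding is f - r - 1, split before/after.
x, s, f = 10, 3, 5
r = (x - 1) % s                    # 0
pad = f - r - 1                    # 4
out = -(-x // s)                   # ceil(10 / 3) = 4
assert (out - 1) * s + f - x == pad
print(pad // 2, (pad + 1) // 2)    # 2 before, 2 after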
def uuid1_():
"""用于生成GUID"""
return str(uuid.uuid1()) | 2,292 |
def default_if_none(default):
"""Implements the rule: default if v is None else v"""
return default_if_true(lambda v: v is None, default) | 2,293 |
def test_main():
""" Spawns a io.StringIO daemon in a temporary venv and asserts that it behaves exactly like a local instance """
# --create temporary new python environment
python_exe = _create_temporary_venv('tmp', ".".join(["%s" % s for s in sys.version_info[0:2]]))
TEST_STR = 'str\nhello'
# try locally to be sure our test is correct
print('local test')
o_l = StringIO(TEST_STR)
perform_test_actions(o_l, TEST_STR)
# create daemon object
print('daemon test')
o_r = run_object(InstanceDefinition(StringIO.__module__, 'StringIO', TEST_STR), python_exe=python_exe)
try:
perform_test_actions(o_r, TEST_STR)
finally:
o_r.terminate_daemon() | 2,294 |
def finalize_queues(coord, threads):
""" Finalized the queues used to enqueue examples """
# When done, ask the threads to stop.
coord.request_stop()
# And wait for them to actually do it.
coord.join(threads) | 2,295 |
def fftshift(input, bitmask, b=None):
"""
Apply fftshift along dimensions selected by the {bitmask}.
:param bitmask long:
:param input array:
:param b bool: apply ifftshift
"""
usage_string = "fftshift [-b] bitmask input output"
cmd_str = f'{BART_PATH} '
cmd_str += 'fftshift '
flag_str = ''
opt_args = f''
multituples = []
if b is not None:
flag_str += f'-b '
cmd_str += flag_str + opt_args + ' '
cmd_str += f"{' '.join([' '.join([str(x) for x in arg]) for arg in zip(*multituples)]).strip()} {bitmask} {NAME}input {NAME}output "
cfl.writecfl(NAME + 'input', input)
if DEBUG:
print(cmd_str)
os.system(cmd_str)
outputs = cfl.readcfl(NAME + 'output')
return outputs | 2,296 |
def test_invertibility(txtfile):
"""
roughly, assert txtfile == image_to_txt(txt_to_image(txtfile))
ignoring whitespace before and after txt
"""
pngfile = txtfile.replace('.txt', '.png')
txt_to_image(txtfile, pngfile)
new_txtfile = txtfile.replace('.', '_new.')
image_to_txt(pngfile, new_txtfile)
txt1 = open(txtfile).read().strip()
txt2 = open(new_txtfile).read().strip()
assert txt1 == txt2, show_html_diff((txt1, 'OG'), (txt2, 'NEW')) | 2,297 |
async def async_unload_entry(hass: HomeAssistantType, entry: ConfigEntry) -> bool:
"""Unload Unifi Protect config entry."""
unload_ok = all(
await asyncio.gather(
*[
hass.config_entries.async_forward_entry_unload(entry, component)
for component in METEOBRIDGE_PLATFORMS
]
)
)
if unload_ok:
hass.data[DOMAIN].pop(entry.entry_id)
return unload_ok | 2,298 |
def _new_correlation_matrix_inverse(new_data, old_corr_mat_inv):
"""
If old_corr_mat_inv is an approximation for the correlation
matrix inverse of a dataset (p1, ..., pn), then the function
returns an approximatrion for the correlation matrix inverse
of dataset (p1, ..., pn, new_data)
TODO : add forgetting parameter lbda
"""
P = old_corr_mat_inv
x = new_data
# TODO : numerical instabilities if xTP is not computed first
# (order of multiplications)
xTP = x.T @ P
P = P - (P @ x @ xTP)/(1. + np.dot(xTP, x))
return P | 2,299 |
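# Numerical check (made-up data) of the rank-one Sherman-Morrison-style update above,
# assuming new_data is a column vector, as the x.T / matrix-product usage implies.
import numpy as np

rng = np.random.default_rng(0)
D = rng.normal(size=(20, 5))
A = D.T @ D + np.eye(5)            # stand-in for the accumulated correlation matrix
P = np.linalg.inv(A)
x = rng.normal(size=(5, 1))        # new data point
xTP = x.T @ P
P_new = P - (P @ x @ xTP) / (1.0 + xTP @ x)
assert np.allclose(P_new, np.linalg.inv(A + x @ x.T))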