content (string, lengths 22–815k) | id (int64, 0–4.91M) |
---|---|
def db_listen(q):
"""
Open a db connection and add notifications to *q*.
"""
cnn = psycopg2.connect(dsn)
cnn.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
cur = cnn.cursor()
cur.execute("LISTEN \"SIMPLE_NOTIFY_CHANNEL\";")
while True:
trampoline(cnn, read=True)
cnn.poll()
while cnn.notifies:
notify = cnn.notifies.pop()
json_notification = json.loads(notify.payload)
json_notification["backend_receiving"] = str(datetime.now())
# Normalize the ISO 8601 "db_creation" timestamp (which carries a time zone) to a plain display format
time_with_timezone = iso8601.parse_date(json_notification["db_creation"])
json_notification["db_creation"] = time_with_timezone.strftime("%Y.%m.%d %H:%M:%S.%f")
q.put(json_notification) | 1,900 |
def detail_url(reteta_id):
""""Return reteta detail URL"""
return reverse('reteta:reteta-detail', args=[reteta_id]) | 1,901 |
def load_metadata_txt(file_path):
"""
Load distortion coefficients from a text file.
Parameters
----------
file_path : str
Path to a file.
Returns
-------
tuple of floats and list
Tuple of (xcenter, ycenter, list_fact).
"""
if ("\\" in file_path):
raise ValueError(
"Please use a file path following the Unix convention")
with open(file_path, 'r') as f:
x = f.read().splitlines()
list_data = []
for i in x:
list_data.append(float(i.split()[-1]))
xcenter = list_data[0]
ycenter = list_data[1]
list_fact = list_data[2:]
return xcenter, ycenter, list_fact | 1,902 |
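A minimal usage sketch for the loader above (the file name and key labels are hypothetical; only the last whitespace-separated token of each line is parsed as a float):
with open("/tmp/coef.txt", "w") as f:
    f.write("xcenter : 1024.0\n")
    f.write("ycenter : 768.0\n")
    f.write("factor0 : 1.0\n")
    f.write("factor1 : -2.3e-05\n")
xcenter, ycenter, list_fact = load_metadata_txt("/tmp/coef.txt")
print(xcenter, ycenter, list_fact)  # 1024.0 768.0 [1.0, -2.3e-05]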
def _output_gradient(f, loss_function, dataset, labels, out0, batch_indices, chunk):
"""
Internal helper: evaluate *f* on a batch in chunks and return the concatenated outputs, the accumulated gradient, and the loss value.
"""
x = _getitems(dataset, batch_indices)
y = _getitems(labels, batch_indices)
if out0 is not None:
out0 = out0[batch_indices]
out = []
grad = 0
loss_value = 0
for i in [slice(i, i + chunk) for i in range(0, len(x), chunk)]:
o = f(x[i])
if out0 is not None:
o = o - out0[i]
l = loss_function(o, y[i])
assert l.shape == (len(o),)
l = l.sum() / len(x)
grad += gradient(l, f.parameters())
out.append(o)
loss_value += l.item()
return torch.cat(out), grad, loss_value | 1,903 |
def test75():
"""
Check that a None in a list raises a gripe
"""
assert isinstance(p, Pod)
copy: Pod = p.dup()
copy.spec.containers.append(None)
try:
o = copy.object_at_path(["spec", "containers", 2])
assert False, "should have gotten a RuntimeError"
except RuntimeError:
pass | 1,904 |
def test_xml_xhtml():
"""XHTML responses are handled by the XML formatter."""
file = XML_FILES_PATH / 'xhtml' / 'xhtml_raw.xml'
xml_data = file.read_text(encoding=UTF8)
# Python < 3.8 was sorting attributes (https://bugs.python.org/issue34160)
# so we have 2 different output expected given the Python version.
expected_file_name = (
'xhtml_formatted_python_less_than_3.8.xml'
if sys.version_info < (3, 8)
else 'xhtml_formatted.xml'
)
expected_xml_file = file.with_name(expected_file_name)
expected_xml_output = expected_xml_file.read_text(encoding=UTF8)
responses.add(responses.GET, URL_EXAMPLE, body=xml_data,
content_type='application/xhtml+xml')
r = http(URL_EXAMPLE)
assert expected_xml_output in r | 1,905 |
def ensure_hash_valid(h):
"""This does not guarantee only hashes get through, it's best-effort."""
passes = True
if not isinstance(h, str):
passes = False
elif [x for x in h if x not in '0123456789abcdef-u']:
passes = False
if not passes:
raise ValueError('Invalid hash: %s' % repr(h)) | 1,906 |
def __material_desc_dict(m, d):
""" Unpack positions 18-34 into material specific dict. """
return dict(zip(MD_FIELDS[m],
{"BK": __material_bk, "CF": __material_cf,
"MP": __material_mp, "MU": __material_mu,
"CR": __material_cr, "VM": __material_vm,
"MX": __material_mx}[m](d))) | 1,907 |
def test_get_valid_filter(mockclient_cl1):
"""Each comliance level has a set of allowed filters.
Test 's' which is not allowed with cl0 but ok for cl1
"""
r = mockclient_cl1.get(TEST_URL + "?p=1")
assert r.status_code == 200 | 1,908 |
def writePendingTaskToDatabase(task):
"""pendingTransfers tracks files prepped for transfer, by dataset:
{
"dataset": {
"files": {
"filename1___extension": {
"name": "filename1",
"md": {},
"md_name": "name_of_metadata_file"
"md_path": "folder_containing_metadata_file"},
"filename2___extension": {...},
...
},
"md": {},
"md_path": "folder_containing_metadata.json"
},
"dataset2": {...},
...}"""
jbody = json.dumps(task)
# Insert the task contents as a JSON string (parameterized query avoids quoting/injection issues)
q_insert = "INSERT INTO pending_tasks (contents) VALUES (%s)"
curs = psql_conn.cursor()
#logger.debug("Writing task %s to PostgreSQL..." % gid)
curs.execute(q_insert, (jbody,))
psql_conn.commit()
curs.close() | 1,909 |
def toint(x):
"""Try to convert x to an integer number without raising an exception."""
try: return int(x)
except (ValueError, TypeError): return x
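Behavior sketch for toint: numeric strings are converted, anything else is returned unchanged rather than raising:
print(toint("42"))    # 42
print(toint("12px"))  # '12px' returned as-is
print(toint(None))    # None returned as-is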
def obtain_time_image(x, y, centroid_x, centroid_y, psi, time_gradient, time_intercept):
"""Create a pulse time image for a toymodel shower. Assumes the time development
occurs only along the longitudinal (major) axis of the shower, and scales
linearly with distance along the axis.
Parameters
----------
x : u.Quantity[length]
X camera coordinate to evaluate the time at.
Usually the array of pixel X positions
y : u.Quantity[length]
Y camera coordinate to evaluate the time at.
Usually the array of pixel Y positions
centroid_x : u.Quantity[length]
X camera coordinate for the centroid of the shower
centroid_y : u.Quantity[length]
Y camera coordinate for the centroid of the shower
psi : convertible to `astropy.coordinates.Angle`
rotation angle about the centroid (0=x-axis)
time_gradient : u.Quantity[time/length]
Rate at which the time changes with distance along the shower axis
time_intercept : u.Quantity[time]
Pulse time at the shower centroid
Returns
-------
float or ndarray
Pulse time in nanoseconds at (x, y)
"""
longitudinal, _ = camera_to_shower_coordinates(x, y, centroid_x, centroid_y, psi)
longitudinal_m = longitudinal.to_value(u.m)
time_gradient_ns_m = time_gradient.to_value(u.ns / u.m)
time_intercept_ns = time_intercept.to_value(u.ns)
return longitudinal_m * time_gradient_ns_m + time_intercept_ns | 1,911 |
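A hedged usage sketch for obtain_time_image; it assumes astropy and ctapipe's camera_to_shower_coordinates are importable in this module's scope, and the pixel/shower values below are made up:
import numpy as np
import astropy.units as u

pix_x = np.linspace(-1, 1, 5) * u.m   # hypothetical pixel positions
pix_y = np.zeros(5) * u.m
times = obtain_time_image(
    pix_x, pix_y,
    centroid_x=0 * u.m, centroid_y=0 * u.m,
    psi=0 * u.deg,
    time_gradient=10 * u.ns / u.m,
    time_intercept=30 * u.ns,
)
print(times)  # pulse times in ns, increasing along the shower axis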
def remove_comment(to_remove, infile):
"""Removes trailing block comments from the end of a string.
Parameters:
to_remove: The string to remove the comment from.
infile: The file being read from.
Returns:
The parameter string with the block comment removed (if comment was
present in string).
"""
start_comment = re.search(r'\s*(/\*|//)', to_remove)
# Remove comments if they are in the matched group.
if start_comment:
end_comment = re.search(r'.*\*/', to_remove)
if end_comment or ('//' in to_remove and '/*' not in to_remove):
removed = to_remove[:start_comment.start(0)] + '\n'
return removed
while not end_comment:
to_remove = next(infile)
end_comment = re.search(r'.*\*/', to_remove)
return ''
else:
removed = to_remove
return removed | 1,912 |
def generate_notification_header(obj):
"""
Generates notification header information based upon the object -- this is
used to preface the notification's context.
Could possibly be used for "Favorites" descriptions as well.
:param obj: The top-level object instantiated class.
:type obj: class which inherits from
:class:`crits.core.crits_mongoengine.CritsBaseAttributes`.
:returns: str with a human readable identification of the object
"""
generate_notification_header_handler = NotificationHeaderManager.get_header_handler(obj._meta['crits_type'])
if generate_notification_header_handler is not None:
return generate_notification_header_handler(obj)
else:
return "%s: %s" % (obj._meta['crits_type'], str(obj.id)) | 1,913 |
def promptyn(msg, default=None):
""" Display a blocking prompt until the user confirms """
while True:
yes = "Y" if default else "y"
if default or default is None:
no = "n"
else:
no = "N"
confirm = raw_input("%s [%s/%s]" % (msg, yes, no))
confirm = confirm.lower().strip()
if confirm == "y" or confirm == "yes":
return True
elif confirm == "n" or confirm == "no":
return False
elif len(confirm) == 0 and default is not None:
return default | 1,914 |
def FdGars_main(support: list,
features: tf.SparseTensor,
label: tf.Tensor, masks: list,
args: argparse.Namespace) -> None:
"""
Main function to train, val and test the model
:param support: a list of the sparse adjacency matrices
:param features: node feature tuple for all nodes {coords, values, shape}
:param label: the label tensor for all nodes
:param masks: a list of mask tensors to obtain the train, val, test data
:param args: additional parameters
"""
model = FdGars(args.input_dim, args.nhid, args.output_dim, args)
optimizer = optimizers.Adam(lr=args.lr)
# train
for epoch in tqdm(range(args.epochs)):
with tf.GradientTape() as tape:
train_loss, train_acc = model([support, features, label, masks[0]])
grads = tape.gradient(train_loss, model.trainable_variables)
optimizer.apply_gradients(zip(grads, model.trainable_variables))
val_loss, val_acc = model([support, features, label, masks[1]],
training=False)
if epoch % 10 == 0:
print(
f"train_loss: {train_loss:.4f}, "
f"train_acc: {train_acc:.4f},"
f"val_loss: {val_loss:.4f},"
f"val_acc: {val_acc:.4f}")
# test
_, test_acc = model([support, features, label, masks[2]], training=False)
print(f"Test acc: {test_acc:.4f}") | 1,915 |
def plan_launch_spec(state):
""" Read current job params, and prescribe the next training job to launch
"""
last_run_spec = state['run_spec']
last_warmup_rate = last_run_spec['warmup_learning_rate']
add_batch_norm = last_run_spec['add_batch_norm']
learning_rate = last_run_spec['learning_rate']
if last_warmup_rate / 5 >= 1e-3:
logger.info('Halving warmup learning rate')
state['history']['num_warmup_adjustments'] += 1
state['run_spec']['warmup_learning_rate'] = last_warmup_rate * 0.5
state['next_action'] = 'launch_new'
elif add_batch_norm == 0:
logger.info('Adding batch normalization layer')
state['history']['num_batch_layer_adjustments'] += 1
state['run_spec']['add_batch_norm'] = 1 # we are only changing the model by adding batch layers
# prior to ELU. But can make more tweaks here.
state['next_action'] = 'launch_new'
elif learning_rate * 0.9 > 0.001:
state['run_spec']['learning_rate'] = learning_rate * 0.9
state['history']['num_learning_rate_adjustments'] += 1
state['next_action'] = 'launch_new'
else:
state['next_action'] = 'end'
return state | 1,916 |
def test_infinite_fifo_queue():
"""
Validate basic properties and push/pop to the infinite FIFO queue.
"""
queue: Queue[int] = InfiniteFifoQueue()
assert queue.capacity == np.inf
assert queue.size == 0
assert queue.empty
assert not queue.full
# Add some elements:
queue.push(1)
queue.push(2)
assert queue.size == 2
assert not queue.full
assert not queue.empty
assert str(queue) == "(InfiniteFifoQueue: q=[1, 2], size=2)"
# Pop:
assert queue.pop() == 1
assert queue.pop() == 2
assert queue.pop() is None
assert queue.empty
assert str(queue) == "(InfiniteFifoQueue: q=[], size=0)"
# Push many elements:
num_elements = 1000
for i in range(num_elements):
item = (i + 42) * 10
queue.push(item)
assert queue.size == num_elements
assert not queue.full
assert queue.pop() == 420 | 1,917 |
def ssgenTxOut0():
"""
ssgenTxOut0 is the 0th position output in a valid SSGen tx used to test out the
IsSSGen function
"""
# fmt: off
return msgtx.TxOut(
value=0x00000000, # 0
version=0x0000,
pkScript=ByteArray(
[
0x6a, # OP_RETURN
0x24, # 36 bytes to be pushed
0x94, 0x8c, 0x76, 0x5a, # 32 byte hash
0x69, 0x14, 0xd4, 0x3f,
0x2a, 0x7a, 0xc1, 0x77,
0xda, 0x2c, 0x2f, 0x6b,
0x52, 0xde, 0x3d, 0x7c,
0xda, 0x2c, 0x2f, 0x6b,
0x52, 0xde, 0x3d, 0x7c,
0x52, 0xde, 0x3d, 0x7c,
0x00, 0xe3, 0x23, 0x21, # 4 byte height
]
),
)
# fmt: on | 1,918 |
def sexag_to_dec(sexag_unit):
""" Converts Latitude and Longitude Coordinates from the Sexagesimal Notation
to the Decimal/Degree Notation"""
add_to_degree = (sexag_unit[1] + (sexag_unit[2]/60))/60
return sexag_unit[0]+add_to_degree | 1,919 |
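Worked example: 48° 51′ 24″ converts to 48 + (51 + 24/60)/60 = 48 + 51.4/60 ≈ 48.8567 decimal degrees:
print(sexag_to_dec((48.0, 51.0, 24.0)))  # 48.856666...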
def is_immutable_type(value: Any) -> bool:
"""
Get a boolean value whether specified value is immutable
type or not.
Notes
-----
apysc's value types, such as the `Int`, are checked
as immutable since these js types are immutable.
Parameters
----------
value : Any
Target value to check.
Returns
-------
result : bool
If a specified value is immutable, then True
will be set.
"""
import apysc as ap
immutable_types: Tuple = (
int, float, bool, str, complex, tuple, range, bytes,
ap.Int, ap.Number, ap.String, ap.Boolean,
)
if isinstance(value, immutable_types):
return True
return False | 1,920 |
def add_column_node_type(df: pd.DataFrame) -> pd.DataFrame:
"""Add column `node_type` indicating whether a post is a parent or a leaf node
Args:
df: The posts DataFrame with the columns `id_post` and `id_parent_post`.
Returns:
df: A copy of df, extended by `node_type`.
"""
if "node_type" not in df.columns:
# `x == x` is False for NaN, so this query keeps only posts that actually have a parent
df_parent_posts = pd.DataFrame({"id_post": df.query("id_parent_post == id_parent_post").id_parent_post.unique()})
df_parent_posts["node_type"] = "parent"
return df.merge(df_parent_posts, how="left", on="id_post").replace({"node_type": np.nan}, "leaf")
else:
return df.copy() | 1,921 |
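Illustrative input/output for add_column_node_type (column names from the docstring, values hypothetical): post 1 is referenced as the parent of posts 2 and 3, so it is tagged "parent" and the others "leaf":
import numpy as np
import pandas as pd

df = pd.DataFrame({"id_post": [1, 2, 3], "id_parent_post": [np.nan, 1, 1]})
out = add_column_node_type(df)
print(out[["id_post", "node_type"]])
# id_post 1 -> "parent"; id_post 2 and 3 -> "leaf"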
def compute_occurrences(ibs, config=None):
"""
Clusters ungrouped images into imagesets representing occurrences
CommandLine:
python -m ibeis.control.IBEISControl --test-compute_occurrences
Example:
>>> # ENABLE_DOCTEST
>>> from ibeis.control.IBEISControl import * # NOQA
>>> import ibeis # NOQA
>>> ibs = ibeis.opendb('testdb1')
>>> ibs.compute_occurrences(config={'use_gps': False, 'seconds_thresh': 600})
>>> ibs.update_special_imagesets()
>>> # Remove some images from a non-special imageset
>>> nonspecial_imgsetids = [i for i in ibs.get_valid_imgsetids() if i not in ibs.get_special_imgsetids()]
>>> images_to_remove = ibs.get_imageset_gids(nonspecial_imgsetids[0:1])[0][0:1]
>>> ibs.unrelate_images_and_imagesets(images_to_remove,nonspecial_imgsetids[0:1] * len(images_to_remove))
>>> ibs.update_special_imagesets()
>>> ungr_imgsetid = ibs.get_imageset_imgsetids_from_text(const.UNGROUPED_IMAGES_IMAGESETTEXT)
>>> ungr_gids = ibs.get_imageset_gids([ungr_imgsetid])[0]
>>> #Now let's make sure that when we recompute imagesets, our non-special imgsetid remains the same
>>> print('PRE COMPUTE: ImageSets are %r' % ibs.get_valid_imgsetids())
>>> print('Containing: %r' % ibs.get_imageset_gids(ibs.get_valid_imgsetids()))
>>> ibs.compute_occurrences(config={'use_gps': False, 'seconds_thresh': 600})
>>> print('COMPUTE: New imagesets are %r' % ibs.get_valid_imgsetids())
>>> print('Containing: %r' % ibs.get_imageset_gids(ibs.get_valid_imgsetids()))
>>> ibs.update_special_imagesets()
>>> print('UPDATE SPECIAL: New imagesets are %r' % ibs.get_valid_imgsetids())
>>> print('Containing: %r' % ibs.get_imageset_gids(ibs.get_valid_imgsetids()))
>>> assert(images_to_remove[0] not in ibs.get_imageset_gids(nonspecial_imgsetids[0:1])[0])
"""
from ibeis.algo.preproc import preproc_occurrence
print('[ibs] Computing and adding imagesets.')
# Only ungrouped images are clustered
gid_list = ibs.get_ungrouped_gids()
#gid_list = ibs.get_valid_gids(require_unixtime=False, reviewed=False)
with ut.Timer('computing imagesets'):
flat_imgsetids, flat_gids = preproc_occurrence.ibeis_compute_occurrences(
ibs, gid_list, config=config)
sortx = ut.argsort(flat_imgsetids)
flat_imgsetids = ut.take(flat_imgsetids, sortx)
flat_gids = ut.take(flat_gids, sortx)
valid_imgsetids = ibs.get_valid_imgsetids()
imgsetid_offset = 0 if len(valid_imgsetids) == 0 else max(valid_imgsetids)
# This way we can make sure that manually separated imagesets
# remain untouched, and ensure that new imagesets are created
flat_imgsetids_offset = [imgsetid + imgsetid_offset
for imgsetid in flat_imgsetids]
imagesettext_list = ['Occurrence ' + str(imgsetid)
for imgsetid in flat_imgsetids_offset]
print('[ibs] Finished computing, about to add imageset.')
ibs.set_image_imagesettext(flat_gids, imagesettext_list)
# HACK TO UPDATE IMAGESET POSIX TIMES
# CAREFUL THIS BLOWS AWAY SMART DATA
ibs.update_imageset_info(ibs.get_valid_imgsetids())
print('[ibs] Finished computing and adding imagesets.') | 1,922 |
def read_motifs(fmotif):
"""
create a random pool of motifs to choose from for the monte-carlo simulations
"""
motif_pool = []
for line in open(fmotif):
if not line.strip(): continue
if line[0] == "#": continue
motif, count = line.rstrip().split()
motif_pool.extend([motif] * int(count))  # add `count` copies of the motif string
random.shuffle(motif_pool)
return motif_pool | 1,923 |
def createCone( axis=1, basePos=-1, tipPos=1, radius=1, colour=(0.6,0.6,0.6), moiScale = 1, withMesh = True, **kwargs ):
"""
Create a rigid body for a cone with the specified attributes (axis is 0:x, 1:y, 2:z). Other rigid body parameters can be specified with keyword arguments, look at
App.Proxys.RigidBody for more details on available arguments. The following arguments will not be used:
meshes, moi, cdps.
If a negative mass parameter is specified, it will be scaled by the volume (computed as pi*r^2*h) and made positive.
"""
_fixMass( kwargs, math.pi*radius*radius*math.fabs(tipPos-basePos) )
from App import Proxys
proxy = Proxys.RigidBody( **kwargs )
return _createCone( proxy, axis, basePos, tipPos, radius, colour, moiScale, withMesh ) | 1,924 |
def test_ap_wpa2_tdls(dev, apdev):
"""WPA2-PSK AP and two stations using TDLS"""
hapd = start_ap_wpa2_psk(apdev[0])
wlantest_setup(hapd)
connect_2sta_wpa2_psk(dev, hapd)
setup_tdls(dev[0], dev[1], hapd)
teardown_tdls(dev[0], dev[1], hapd)
setup_tdls(dev[1], dev[0], hapd)
#teardown_tdls(dev[0], dev[1], hapd) | 1,925 |
async def test_setup_via_discovery_cannot_connect(hass):
"""Test setting up via discovery and we fail to connect to the discovered device."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
await hass.async_block_till_done()
assert result["type"] == "form"
assert result["step_id"] == "user"
assert not result["errors"]
with _patch_discovery():
result2 = await hass.config_entries.flow.async_configure(result["flow_id"], {})
await hass.async_block_till_done()
assert result2["type"] == "form"
assert result2["step_id"] == "pick_device"
assert not result2["errors"]
with patch(
"homeassistant.components.wiz.wizlight.getBulbConfig",
side_effect=WizLightTimeOutError,
), _patch_discovery():
result3 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{CONF_DEVICE: FAKE_MAC},
)
await hass.async_block_till_done()
assert result3["type"] == "abort"
assert result3["reason"] == "cannot_connect" | 1,926 |
def test_ap_wpa2_ptk_rekey(dev, apdev):
"""WPA2-PSK AP and PTK rekey enforced by station"""
ssid = "test-wpa2-psk"
passphrase = 'qwertyuiop'
params = hostapd.wpa2_params(ssid=ssid, passphrase=passphrase)
hapd = hostapd.add_ap(apdev[0], params)
dev[0].connect(ssid, psk=passphrase, wpa_ptk_rekey="1", scan_freq="2412")
ev = dev[0].wait_event(["WPA: Key negotiation completed"])
if ev is None:
raise Exception("PTK rekey timed out")
hwsim_utils.test_connectivity(dev[0], hapd) | 1,927 |
def createInputFiles():
"""Create input files from the command line: from stdin, from --inputfiles,
and also create an empty scratch file
Store them in the global dictionary *ifilesd* with standard names as keys: names = ("$file1", "$file2", etc.)
Also give them names: <stdin>, the filename from the command line, or "scratch"
"""
global ifilesd
# Create InputFile from stdin
msg(oginfo,"Creating input files from stdin")
x = createInputFile(sys.stdin,InputFile)
filenum = 1
addfilename(x,filebase,filenum)
msg(oginfo,"Names = {}".format(x.names))
# Create any files from input argument "-i" or "--inputfiles"
if parserargs.inputfiles:
msg(oginfo,"Creating input files from -i or --inputfiles")
for myfile in parserargs.inputfiles:
msg(ogdebug,"file = {}".format(myfile))
filenum += 1
x = createInputFile(myfile,InputFile)
addfilename(x,filebase,filenum)
msg(oginfo,"Names = {}".format(x.names))
# Create outgrab program files from input argument "-p" or "--program"
if parserargs.program:
msg(oginfo,"Creating program files from -p or --program")
filenum = 0
x = createInputFile(parserargs.program,ProgramFile)
addfilename(x,filebase,filenum)
addfilename(x,"program")
msg(oginfo,"Names = {}".format(x.names))
# now create one empty file named "scratch" for a scratch space
# ignore the original name created
x = createScratchFile("Scratch")
addfilename(x,"scratch")
msg(oginfo,"Creating scratch file with names: \"{}\"".format(x.names))
msg(ogdebug,"The ifilesd dictionary at end of createInputFiles:")
for key,value in ifilesd.items():
msg(ogdebug,"name= {} : object = {}".format(key,value)) | 1,928 |
def ef(candles: np.ndarray, lp_per: int = 10, hp_per: int = 30, f_type: str = "Ehlers", normalize: bool = False, source_type: str = "close", sequential: bool = False) -> Union[
float, np.ndarray]:
# added to definition : use_comp: bool = False, comp_intensity: float = 90.0,
"""
https://www.tradingview.com/script/kPe86Nbc-Roofing-Filter-DW/
compression function not working
"""
candles = slice_candles(candles, sequential)
source = get_candle_source(candles, source_type=source_type)
if f_type == "Ehlers":
roof = erf( source, hp_per, lp_per)
elif f_type == "Gaussian":
roof = grf( source, hp_per, lp_per)
elif f_type == "Butterworth":
roof = brf( source, hp_per, lp_per)
rms = RMS(source, roof, np.round((hp_per + lp_per)/2))
if roof[-1] > 0:
norm_roof = roof/rms
elif roof[-1] < 0:
norm_roof = -np.abs(roof)/rms
else:
norm_roof = 0
if normalize:
filt = norm_roof
else:
filt = roof
if sequential:
return filt
else:
return filt[-1] | 1,929 |
def stl_plot(
ts: "TSDataset",
period: int,
segments: Optional[List[str]] = None,
columns_num: int = 2,
figsize: Tuple[int, int] = (10, 10),
plot_kwargs: Optional[Dict[str, Any]] = None,
stl_kwargs: Optional[Dict[str, Any]] = None,
):
"""Plot STL decomposition for segments.
Parameters
----------
ts:
dataset with timeseries data
period:
length of seasonality
segments:
segments to plot
columns_num:
number of columns in subplots
figsize:
size of the figure per subplot with one segment in inches
plot_kwargs:
dictionary with parameters for plotting, :py:meth:`matplotlib.axes.Axes.plot` is used
stl_kwargs:
dictionary with parameters for STL decomposition, :py:class:`statsmodels.tsa.seasonal.STL` is used
"""
if plot_kwargs is None:
plot_kwargs = {}
if stl_kwargs is None:
stl_kwargs = {}
if segments is None:
segments = sorted(ts.segments)
in_column = "target"
segments_number = len(segments)
columns_num = min(columns_num, len(segments))
rows_num = math.ceil(segments_number / columns_num)
figsize = (figsize[0] * columns_num, figsize[1] * rows_num)
fig = plt.figure(figsize=figsize, constrained_layout=True)
subfigs = fig.subfigures(rows_num, columns_num, squeeze=False)
df = ts.to_pandas()
for i, segment in enumerate(segments):
segment_df = df.loc[:, pd.IndexSlice[segment, :]][segment]
segment_df = segment_df[segment_df.first_valid_index() : segment_df.last_valid_index()]
decompose_result = STL(endog=segment_df[in_column], period=period, **stl_kwargs).fit()
# start plotting
subfigs.flat[i].suptitle(segment)
axs = subfigs.flat[i].subplots(4, 1, sharex=True)
# plot observed
axs.flat[0].plot(segment_df.index, decompose_result.observed, **plot_kwargs)
axs.flat[0].set_ylabel("Observed")
# plot trend
axs.flat[1].plot(segment_df.index, decompose_result.trend, **plot_kwargs)
axs.flat[1].set_ylabel("Trend")
# plot seasonal
axs.flat[2].plot(segment_df.index, decompose_result.seasonal, **plot_kwargs)
axs.flat[2].set_ylabel("Seasonal")
# plot residuals
axs.flat[3].plot(segment_df.index, decompose_result.resid, **plot_kwargs)
axs.flat[3].set_ylabel("Residual")
axs.flat[3].tick_params("x", rotation=45) | 1,930 |
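A hedged call sketch for stl_plot, assuming an etna-style TSDataset `ts` with daily data is already built; the segment name is hypothetical and stl_kwargs are forwarded to statsmodels' STL:
stl_plot(
    ts,
    period=7,
    segments=["segment_a"],
    columns_num=1,
    plot_kwargs={"color": "tab:blue"},
    stl_kwargs={"robust": True},
)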
def parse_displays(config: Dict) -> Dict[str, QueryDisplay]:
"""Parse display options from configuration."""
display_configs = config.get("displays")
if not display_configs:
return {}
displays = {}
for name, display_config in display_configs.items():
displays[name] = QueryDisplay(
name=name,
nrql=display_config.get("nrql"),
visualization=WidgetVisualization.from_str(display_config["visualization"]),
)
return displays | 1,931 |
async def update_result(user: dict, form: dict) -> str:
"""Extract form data and update one result and corresponding start event."""
informasjon = await create_finish_time_events(user, "finish_bib", form) # type: ignore
return informasjon | 1,932 |
def rasterize_polygons_within_box(
polygons: List[np.ndarray], box: np.ndarray, mask_size: int
) -> torch.Tensor:
"""
Rasterize the polygons into a mask image and
crop the mask content in the given box.
The cropped mask is resized to (mask_size, mask_size).
This function is used when generating training targets for mask head in Mask R-CNN.
Given original ground-truth masks for an image, new ground-truth mask
training targets in the size of `mask_size x mask_size`
must be provided for each predicted box. This function will be called to
produce such targets.
Args:
polygons (list[ndarray[float]]): a list of polygons, which represents an instance.
box: 4-element numpy array
mask_size (int):
Returns:
Tensor: BoolTensor of shape (mask_size, mask_size)
"""
# 1. Shift the polygons w.r.t the boxes
w, h = box[2] - box[0], box[3] - box[1]
polygons = copy.deepcopy(polygons)
for p in polygons:
p[0::2] = p[0::2] - box[0]
p[1::2] = p[1::2] - box[1]
# 2. Rescale the polygons to the new box size
ratio_h = mask_size / max(h, 0.1)
ratio_w = mask_size / max(w, 0.1)
if ratio_h == ratio_w:
for p in polygons:
p *= ratio_h
else:
for p in polygons:
p[0::2] *= ratio_w
p[1::2] *= ratio_h
# 3. Rasterize the polygons with coco api
mask = polygons_to_bitmask(polygons, mask_size, mask_size)
mask = torch.from_numpy(mask)
return mask | 1,933 |
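A minimal sketch of rasterize_polygons_within_box; it assumes the surrounding module provides polygons_to_bitmask (as in detectron2), and uses a made-up square instance whose box tightly encloses it:
import numpy as np

square = np.array([10.0, 10.0, 50.0, 10.0, 50.0, 50.0, 10.0, 50.0])  # flattened x,y pairs
box = np.array([10.0, 10.0, 50.0, 50.0])                             # x0, y0, x1, y1
mask = rasterize_polygons_within_box([square], box, mask_size=28)
print(mask.shape, mask.dtype)  # torch.Size([28, 28]) torch.bool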
def has_ao_num(trexio_file) -> bool:
"""Check that ao_num variable exists in the TREXIO file.
Parameter is a ~TREXIO File~ object that has been created by a call to ~open~ function.
Returns:
True if the variable exists, False otherwise
Raises:
- Exception from trexio.Error class if TREXIO return code ~rc~ is TREXIO_FAILURE and prints the error message using string_of_error.
- Exception from some other error (e.g. RuntimeError).
"""
try:
rc = pytr.trexio_has_ao_num(trexio_file.pytrexio_s)
if rc == TREXIO_FAILURE:
raise Error(rc)
except:
raise
if rc == TREXIO_SUCCESS:
return True
else:
return False | 1,934 |
def teapot(size=1.0):
"""
Z-axis aligned Utah teapot
Parameters
----------
size : float
Relative size of the teapot.
"""
vertices, indices = data.get("teapot.obj")
xmin = vertices["position"][:,0].min()
xmax = vertices["position"][:,0].max()
ymin = vertices["position"][:,1].min()
ymax = vertices["position"][:,1].max()
zmin = vertices["position"][:,2].min()
zmax = vertices["position"][:,2].max()
# Centering
vertices["position"][:,0] -= xmin + (xmax-xmin)/2
vertices["position"][:,1] -= ymin + (ymax-ymin)/2
vertices["position"][:,2] -= zmin + (zmax-zmin)/2
# Rotation to align on Z-axis
X = vertices["position"][:,0].copy()
Y = vertices["position"][:,1].copy()
Z = vertices["position"][:,2].copy()
NX = vertices["normal"][:,0].copy()
NY = vertices["normal"][:,1].copy()
NZ = vertices["normal"][:,2].copy()
vertices["position"][:,0] = X
vertices["position"][:,1] = Z
vertices["position"][:,2] = Y
vertices["normal"][:,0] = NX
vertices["normal"][:,1] = NZ
vertices["normal"][:,2] = NY
# Scaling according to height
vertices["position"] *= 2.0*size/(zmax-zmin)
return vertices, indices | 1,935 |
def f30(x, rotations=None, shifts=None, shuffles=None):
"""
Composition Function 10 (N=3)
Args:
x (array): Input vector of dimension 2, 10, 20, 30, 50 or 100.
rotations (matrix): Optional rotation matrices (NxDxD). If None
(default), the official matrices from the benchmark suite will be
used.
shifts (array): Optional shift vectors (NxD). If None (default), the
official vectors from the benchmark suite will be used.
shuffles (array): Optional shuffle vectors (NxD). If None (default), the
official permutation vectors from the benchmark suite will be used.
"""
nx = len(x)
if rotations is None:
rotations = transforms.rotations_cf[nx][9]
if shifts is None:
shifts = transforms.shifts_cf[9]
if shuffles is None:
shuffles = transforms.shuffles_cf[nx][1]
N = 3
funcs = [hybrid.f15, hybrid.f18, hybrid.f19]
sigmas = np.array([10.0, 30.0, 50.0])
biases = np.array([0.0, 100.0, 200.0])
offsets = np.array(
[1500, 1800, 1900]
) # subtract F* added at the end of the functions
vals = np.zeros(N)
w = np.zeros(N)
w_sm = 0.0
for i in range(0, N):
x_shifted = x - shifts[i][:nx]
vals[i] = funcs[i](
x, rotation=rotations[i], shift=shifts[i][:nx], shuffle=shuffles[i]
)
vals[i] -= offsets[i]
w[i] = _calc_w(x_shifted, sigmas[i])
w_sm += w[i]
if w_sm != 0.0:
w /= w_sm
else:
w = np.full(N, 1 / N)
return np.sum(w * (vals + biases)) + 3000 | 1,936 |
def loadNode( collada, node, localscope ):
"""Generic scene node loading from a xml `node` and a `collada` object.
Knowing the supported nodes, create the appropriate class for the given node
and return it.
"""
if node.tag == tag('node'): return Node.load(collada, node, localscope)
elif node.tag == tag('translate'): return TranslateTransform.load(collada, node)
elif node.tag == tag('rotate'): return RotateTransform.load(collada, node)
elif node.tag == tag('scale'): return ScaleTransform.load(collada, node)
elif node.tag == tag('matrix'): return MatrixTransform.load(collada, node)
elif node.tag == tag('lookat'): return LookAtTransform.load(collada, node)
elif node.tag == tag('instance_geometry'): return GeometryNode.load(collada, node)
elif node.tag == tag('instance_camera'): return CameraNode.load(collada, node)
elif node.tag == tag('instance_light'): return LightNode.load(collada, node)
elif node.tag == tag('instance_controller'): return ControllerNode.load(collada, node)
elif node.tag == tag('instance_node'): return NodeNode.load(collada, node, localscope)
elif node.tag == tag('extra'):
return ExtraNode.load(collada, node)
elif node.tag == tag('asset'):
return None
else: raise DaeUnsupportedError('Unknown scene node %s' % str(node.tag)) | 1,937 |
def create_xla_tff_computation(xla_computation, type_spec):
"""Creates an XLA TFF computation.
Args:
xla_computation: An instance of `xla_client.XlaComputation`.
type_spec: The TFF type of the computation to be constructed.
Returns:
An instance of `pb.Computation`.
"""
py_typecheck.check_type(xla_computation, xla_client.XlaComputation)
py_typecheck.check_type(type_spec, computation_types.FunctionType)
return pb.Computation(
type=type_serialization.serialize_type(type_spec),
xla=pb.Xla(
hlo_module=pack_xla_computation(xla_computation),
parameter=_make_xla_binding_for_type(type_spec.parameter),
result=_make_xla_binding_for_type(type_spec.result))) | 1,938 |
def render_contact_form(context):
"""
Renders the contact form which must be in the template context.
The most common use case for this template tag is to call it in the
template rendered by :class:`~envelope.views.ContactView`. The template
tag will then render a sub-template ``envelope/contact_form.html``.
.. versionadded:: 0.7.0
"""
try:
form = context['form']
except KeyError:
raise template.TemplateSyntaxError("There is no 'form' variable in the template context.")
return {
'form': form,
} | 1,939 |
def get_basic_project(reviews: int = 0) -> List[Dict]:
"""Get basic project config with reviews."""
reviews = max(reviews, MIN_REVIEW)
reviews = min(reviews, MAX_REVIEW)
middle_stages, entry_point = _get_middle_stages(reviews, OUTPUT_NAME)
input_stage = {
"brickName": "labelset-input",
"routing": {
"nextStageName": entry_point,
},
"stageName": "Input",
"stageConfig": {},
}
output_stage = {
"brickName": "labelset-output",
"stageName": OUTPUT_NAME,
"routing": {
"nextStageName": "END",
},
"stageConfig": {},
}
temp = [input_stage] + middle_stages + [output_stage]
return temp | 1,940 |
def choose_quality(link, name=None, selected_link=None):
"""
choose quality for scraping
Keyword Arguments:
link -- Jenitem link with sublinks
name -- Name to display in dialog (default None)
"""
import re
if name is None:
name = xbmc.getInfoLabel('listitem.label')
if link.startswith("http") or link.startswith("plugin"):
sublinks = [link]
else:
jen_link = JenItem(link)
sublinks = jen_link.getAll("sublink")
if not sublinks:
sublinks = [jen_link]
links = []
message = get_link_message()
if selected_link is None:
default_link = ADDON.getSetting("default_link")
else:
default_link = selected_link
link_dialog = ADDON.getSetting("use_link_dialog") == "true"
direct_links = False
for sublink in sublinks:
if link_dialog and "search" in sublink:
continue
if "searchsd" in sublink:
if default_link == "SD":
return sublink
label = 'SD'
if message['SD'] != '':
label += ' (%s)' % message['SD']
new_item = (label, sublink)
elif "search" in sublink:
if default_link == "HD":
return sublink
label = 'HD'
if message['HD'] != '':
label += ' (%s)' % message['HD']
new_item = (label, sublink)
else:
direct_links = True
match = re.findall("(.*?)\((.*?)\)", sublink)
if match:
new_item = ('%s' % match[0][1], match[0][0])
else:
new_item = ('Link %s' % (int(sublinks.index(sublink)) + 1),
sublink)
links.append(new_item)
if link_dialog and (not direct_links or len(sublinks) > 1):
links.append(("Search", "search"))
if len(links) == 1:
url = links[0][1]
return url
select = xbmcgui.Dialog().select(name, [i[0] for i in links])
if select == -1:
return False
else:
url = links[select][1]
return url | 1,941 |
def picp_loss(target, predictions, total = True):
"""
Calculate 1 - PICP (see eval_metrics.picp for more details)
Parameters
----------
target : torch.Tensor
The true values of the target variable
predictions : list
- predictions[0] = y_pred_upper, predicted upper limit of the target variable (torch.Tensor)
- predictions[1] = y_pred_lower, predicted lower limit of the target variable (torch.Tensor)
total : bool, default = True
- When total is set to True, return a scalar value for 1- PICP
- When total is set to False, return 1-PICP along the horizon
Returns
-------
torch.Tensor
Returns 1-PICP, either as a scalar or over the horizon
"""
return 1-picp(target, predictions, total) | 1,942 |
def find_files_to_upload(upload_dir):
"""
Find the files which are named correctly and have a .asc file
"""
files = []
for name in os.listdir(upload_dir):
asc_file = os.path.join(upload_dir, "{}.asc".format(name))
if valid_format(name) and os.path.isfile(asc_file):
files.extend([name, "{}.asc".format(name)])
return files | 1,943 |
def index_wrap(data, index):
"""
Description: Select an index from an array data
:param data: array data
:param index: index (e.g. 1,2,3, account_data,..)
:return: Data inside the position index
"""
return data[index] | 1,944 |
def image_api_request(identifier, **kwargs):
"""
TODO: A IIIF Image API request; redirect to Wikimedia image URI
""" | 1,945 |
def import_operand_definition(
defdict, yaml, key, base_module,
regs, force=False
):
"""
:param defdict:
:param yaml:
:param key:
:param base_module:
:param regs:
"""
try:
entry = defdict[key]
except KeyError:
raise MicroprobeArchitectureDefinitionError(
"'%s' key in %s "
"file missing or not defined "
"correctly." % (key, yaml)
)
filenames = [yaml] + entry["YAML"]
cache_filename = cache_file("%s.Operand" % (yaml))
result = update_cache_needed(filenames, cachefile=cache_filename)
result = result or force
entry["YAML_inherits"] = entry.get("YAML_inherits", [])
if not result:
LOG.debug("Reading cache contents for Operand")
try:
return read_cache_data(cache_filename), result
except ImportError:
LOG.exception("Unable to read cache contents for Operand")
except MicroprobeCacheError:
LOG.debug("Cache error when reading cache contents for Operand")
try:
data = base_module.import_definition(
entry["YAML"], entry["YAML_inherits"], regs
)
except KeyError:
raise MicroprobeArchitectureDefinitionError(
"'%s' key in %s "
"file missing or not defined "
"correctly." % (key, yaml)
)
try:
write_cache_data(cache_filename, data)
except MicroprobeCacheError:
LOG.debug("Cache error when writing cache contents for Operand")
return data, result | 1,946 |
def mzml_to_pandas_df(filename):
"""
Reads mzML file and returns a pandas.DataFrame.
"""
cols = ["retentionTime", "m/z array", "intensity array"]
slices = []
file = mzml.MzML(filename)
while True:
try:
data = next(file)
data["retentionTime"] = data["scanList"]["scan"][0]["scan time"] / 60
del data["scanList"]
slices.append(pd.DataFrame(data))
except Exception:  # end of iteration, or a scan lacking the expected fields
break
df = pd.concat(slices)[cols]
df_to_numeric(df)
return df | 1,947 |
def metadata_volumes(response: Response,
request: Request=Query(None, title=opasConfig.TITLE_REQUEST, description=opasConfig.DESCRIPTION_REQUEST),
sourcetype: str=Query(None, title=opasConfig.TITLE_SOURCETYPE, description=opasConfig.DESCRIPTION_PARAM_SOURCETYPE),
sourcecode: str=Query(None, title=opasConfig.TITLE_SOURCECODE, description=opasConfig.DESCRIPTION_SOURCECODE),
limit: int=Query(200, title=opasConfig.TITLE_LIMIT, description=opasConfig.DESCRIPTION_LIMIT),
offset: int=Query(0, title=opasConfig.TITLE_OFFSET, description=opasConfig.DESCRIPTION_OFFSET),
):
"""
## Function
<b>Return a list of volumes for a SourceCode (aka, PEPCode (e.g., IJP)) per the limit and offset parameters</b>
## Return Type
models.JournalInfoList
## Status
This endpoint is working.
## Sample Call
http://localhost:9100/v1/Metadata/Volumes/CPS/
## Notes
## Potential Errors
"""
ocd, session_info = opasAPISupportLib.get_session_info(request, response)
# Solr is case sensitive, make sure arg is upper
try:
source_code = sourcecode.upper()
except AttributeError:  # sourcecode was not provided
source_code = None
src_exists = ocd.get_sources(source_code=source_code)
if not src_exists[0] and source_code != "*" and source_code != "ZBK" and source_code is not None: # ZBK not in productbase table without booknum
response.status_code = httpCodes.HTTP_400_BAD_REQUEST
status_message = f"Failure: Bad SourceCode {source_code}"
ocd.record_session_endpoint(api_endpoint_id=opasCentralDBLib.API_METADATA_VOLUME_INDEX,
session_info=session_info,
params=request.url._url,
item_of_interest=f"{source_code}",
return_status_code=response.status_code,
status_message=status_message
)
raise HTTPException(
status_code=response.status_code,
detail=status_message
)
else:
try:
ret_val = opasAPISupportLib.metadata_get_volumes(source_code, source_type=sourcetype, req_url=request.url, limit=limit, offset=offset)
except Exception as e:
response.status_code = httpCodes.HTTP_400_BAD_REQUEST
status_message = "Error: {}".format(e)
ocd.record_session_endpoint(api_endpoint_id=opasCentralDBLib.API_METADATA_VOLUME_INDEX,
session_info=session_info,
params=request.url._url,
item_of_interest=f"{source_code}",
return_status_code=response.status_code,
status_message=status_message
)
raise HTTPException(
status_code=response.status_code,
detail=status_message
)
else:
response.status_code = httpCodes.HTTP_200_OK
status_message = opasCentralDBLib.API_STATUS_SUCCESS
# 2020-07-23 No need to log success for these, can be excessive.
#ocd.record_session_endpoint(api_endpoint_id=opasCentralDBLib.API_METADATA_VOLUME_INDEX,
#session_info=session_info,
#params=request.url._url,
#item_of_interest=f"{source_code}",
#return_status_code=response.status_code,
#status_message=status_message
#)
return ret_val | 1,948 |
def do_step_right(pos: int, step: int, width: int) -> int:
"""Takes current position and do 3 steps to the
right. Be aware of overflow as the board limit
on the right is reached."""
new_pos = (pos + step) % width
return new_pos | 1,949 |
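Worked example of the wrap-around: on a width-8 board, moving 3 cells right from position 6 lands on (6 + 3) % 8 == 1:
print(do_step_right(6, 3, 8))  # 1
print(do_step_right(2, 3, 8))  # 5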
def unzipConfigFile():
"""
Unzip configuration export using ARCHIVE_SECRET generated above.
Parameters:
None - Local directory is scanned to find downloaded configuration archive
Returns:
None - Files are unzipped in local directory to a "configfiles" directory
"""
with console.status("Unzipping config files..."):
directory = os.listdir()
for file in directory:
if "Export_Configs" in file:
target_file = file
with pyzipper.AESZipFile(target_file, "r") as zip:
try:
zip.extractall(path="configfiles", pwd=bytes(ARCHIVE_SECRET, "utf-8"))
except RuntimeError:
console.print(
"[red][bold]Failed to unzip files. Please make sure any old config archives are deleted prior to running this script"
)
sys.exit(1)
console.print("[green][bold]Unzip Complete!") | 1,950 |
def _initialise_template(workspace: Workspace, path: Path, name: str, type: str = None, template: str = None):
"""Initialise the project directory."""
if template:
workspace.templates.create(
template, path=path, name=name
)
else:
if not type:
raise WorkspaceCLIError("<e>Must specify at least one of <b>--type</b> and <b>--template</b> options.</e>")
adapter = get_adapter(type)
try:
adapter.new(path)
except NotImplementedError:
raise WorkspaceCLIError(
f"""<e>Type <b>{type}</b> does not have a default template.</e>
Please provide one, e.g.
<a>workspace new {path} --type {type} --template TEMPLATE</a>
"""
) | 1,951 |
def log_agm(x, prec):
"""
Fixed-point computation of -log(x) = log(1/x), suitable
for large precision. It is required that 0 < x < 1. The
algorithm used is the Sasaki-Kanada formula
-log(x) = pi/agm(theta2(x)^2,theta3(x)^2). [1]
For faster convergence in the theta functions, x should
be chosen closer to 0.
Guard bits must be added by the caller.
HYPOTHESIS: if x = 2^(-n), n bits need to be added to
account for the truncation to a fixed-point number,
and this is the only significant cancellation error.
The number of bits lost to roundoff is small and can be
considered constant.
[1] Richard P. Brent, "Fast Algorithms for High-Precision
Computation of Elementary Functions (extended abstract)",
http://wwwmaths.anu.edu.au/~brent/pd/RNC7-Brent.pdf
"""
x2 = (x*x) >> prec
# Compute jtheta2(x)**2
s = a = b = x2
while a:
b = (b*x2) >> prec
a = (a*b) >> prec
s += a
s += (MPZ_ONE<<prec)
s = (s*s)>>(prec-2)
s = (s*isqrt_fast(x<<prec))>>prec
# Compute jtheta3(x)**2
t = a = b = x
while a:
b = (b*x2) >> prec
a = (a*b) >> prec
t += a
t = (MPZ_ONE<<prec) + (t<<1)
t = (t*t)>>prec
# Final formula
p = agm_fixed(s, t, prec)
return (pi_fixed(prec) << prec) // p | 1,952 |
def score_model(model):
"""
Fits a model using the training set, predicts using the test set, and then calculates
and reports goodness of fit metrics and alpha.
"""
model.fit(Xtrain, ytrain)
yhat = model.predict(Xtest)
r2 = r2_score(ytest, yhat)
me = mse(ytest, yhat)
ae = mae(ytest, yhat)
mede = medae(ytest, yhat)
ev = evs(ytest, yhat)
print("Results from {}: \nr2={:0.3f} \nMSE={:0.3f} \nMAE={:0.3f} \nMEDAE={:0.3f} \nEVS={:0.3f} \nalpha={:0.3f}".format(model, r2, me, ae, mede, ev, model.alpha_)) | 1,953 |
def addDataset( parent, dataset ):
"""
Convert HDF5 dataset into text
:param parent: xml element corresponding to the dataset
:param dataset: HDF5 dataset to be converted
"""
if str(dataset.dtype).startswith("|S"):
parent.text = dataset[()].decode("utf8")
else:
parent.text = ' '.join( map( PQU.toShortestString, dataset[()].flatten() ) ) | 1,954 |
def simplify_name(name):
"""Converts the `name` to lower-case ASCII for fuzzy comparisons."""
return unicodedata.normalize('NFKD',
name.lower()).encode('ascii', 'ignore') | 1,955 |
async def parse_regex(opsdroid, skills, message):
"""Parse a message against all regex skills."""
matched_skills = []
for skill in skills:
for matcher in skill.matchers:
if "regex" in matcher:
opts = matcher["regex"]
matched_regex = await match_regex(message.text, opts)
if matched_regex:
message.regex = matched_regex
for regroup, value in matched_regex.groupdict().items():
message.update_entity(regroup, value, None)
matched_skills.append(
{
"score": await calculate_score(
opts["expression"], opts["score_factor"]
),
"skill": skill,
"config": skill.config,
"message": message,
}
)
return matched_skills | 1,956 |
def interpolate_minusones(y):
"""
Replace -1 entries in each column with the previous valid (non -1) value; entries before the first valid value become 0
y is a [t] x [n] array
"""
x = np.arange(y.shape[0])
ynew = np.zeros(y.shape)
for ni in range(y.shape[1]):
idx = np.where(y[:,ni] != -1)[0]
if len(idx)>1:
last_value = y[idx[-1],ni]
interp = interp1d(x[idx],y[idx,ni], kind='previous',fill_value=(0,last_value),bounds_error = False)
ynew[:,ni] = interp(x)
elif len(idx) == 1:
last_value = y[idx[-1],ni]
ynew[:,ni] = last_value
return ynew | 1,957 |
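Small example for interpolate_minusones (values made up): each -1 is replaced by the previous valid value in its column, and -1s before the first valid value become 0:
import numpy as np

y = np.array([[-1.0, 5.0],
              [ 2.0, -1.0],
              [-1.0, -1.0],
              [ 4.0, 7.0]])
print(interpolate_minusones(y))
# column 0 -> [0, 2, 2, 4], column 1 -> [5, 5, 5, 7]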
def write_read(writer, reader, input_mesh, atol):
"""Write and read a file, and make sure the data is the same as before.
"""
with tempfile.TemporaryDirectory() as temp_dir:
filepath = os.path.join(temp_dir, "test.dat")
writer(filepath, input_mesh)
mesh = reader(filepath)
# Make sure the output is writeable
assert mesh.points.flags["WRITEABLE"]
for cell_type, data in input_mesh.cells.items():
assert mesh.cells[cell_type].flags["WRITEABLE"]
# Numpy's array_equal is too strict here, cf.
# <https://mail.scipy.org/pipermail/numpy-discussion/2015-December/074410.html>.
# Use allclose.
# We cannot compare the exact rows here since the order of the points might
# have changes. Just compare the sums
assert numpy.allclose(input_mesh.points, mesh.points, atol=atol, rtol=0.0)
for cell_type, data in input_mesh.cells.items():
assert numpy.allclose(data, mesh.cells[cell_type])
for key in input_mesh.point_data.keys():
assert numpy.allclose(
input_mesh.point_data[key], mesh.point_data[key], atol=atol, rtol=0.0
)
for cell_type, cell_type_data in input_mesh.cell_data.items():
for key, data in cell_type_data.items():
assert numpy.allclose(
data, mesh.cell_data[cell_type][key], atol=atol, rtol=0.0
)
for name, data in input_mesh.field_data.items():
assert numpy.allclose(data, mesh.field_data[name], atol=atol, rtol=0.0)
return | 1,958 |
def logged_batches(specs: Iterable[ht.JobSpec],
limit: int) -> Iterable[Iterable[ht.JobSpec]]:
"""Accepts an iterable of specs and a 'chunk limit'; returns an iterable of
iterable of JobSpec, each of which is guaranteed to contain at most 'chunk
limit' items.
The subsequences don't pull contiguous chunks off of the original input
sequence, but the set of the union of the subsequences is the set of all
original items.
As you realize the generator you'll trigger:
- a logging side-effect at the beginning of each batch
- a logging effect between each item in each batch
These logging effects will track the index of each batch and each item within
the batch.
"""
# Realize the input generator to get a count for logging.
spec_list = list(specs)
total_specs = len(spec_list)
# Build N chunks such that each chunk contains <= items than the supplied
# limit.
chunked_seq = u.chunks_below_limit(spec_list, limit=limit)
total_chunks = len(chunked_seq)
# Go the extra mile.
plural_batch = "batch" if total_chunks == 1 else "batches"
plural_job = "job" if total_specs == 1 else "jobs"
logging.info("Generating {} {} for {} {}.".format(total_chunks, plural_batch,
total_specs, plural_job))
for i, chunk in enumerate(chunked_seq, 1):
logging.info("Batch {} of {}:".format(i, total_chunks))
yield logged_specs(chunk) | 1,959 |
def precision(y, yhat, positive=True):
"""Returns the precision (higher is better).
:param y: true function values
:param yhat: predicted function values
:param positive: the positive label
:returns: number of true positive predictions / number of positive predictions
"""
table = contingency_table(y, yhat, positive)
return _precision(table) | 1,960 |
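A worked example of the quantity precision computes (the contingency_table/_precision helpers come from the surrounding library and are assumed importable): with the labels below there is 1 true positive out of 2 positive predictions, so the result should be 0.5:
y    = [1, 1, 0, 0]
yhat = [1, 0, 1, 0]
print(precision(y, yhat, positive=1))  # 0.5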
def add_posibility_for_red_cross(svg):
"""add a symbol which represents a red cross in a white circle
Arguments:
svg {Svg} -- root element
"""
symbol = Svg(etree.SubElement(svg.root,
'symbol',
{'id': 'red_cross',
'view_box': '0 0 20 20'
}))
symbol.create_circle(
[10, 10],
9,
"red_cross_circle",
fill_colour="white",
additional_arguments={'stroke': 'black'}
)
symbol.create_rectangle(
[4, 8], [12, 4], "red_cross_rect_1", fill_colour="red")
symbol.create_rectangle(
[8, 4], [4, 12], "red_cross_rect_2", fill_colour="red")
return symbol | 1,961 |
def main():
"""
Main entry point for script.
"""
if len(argv) > 1 and argv[1].lstrip("-").startswith("c"):
print(_get_script(), end="")
return
if not _is_clean():
print("Uncommitted changes in working directory. Stopping.")
exit(1)
if len(argv) > 1 and argv[1].lstrip("-").startswith("t"):
site_url = SCRIPT_TEST
else:
site_url = SCRIPT_SITE
print("Uploading script to [[{page}]] on {site}...".format(
page=SCRIPT_PAGE, site=site_url))
script = _get_script()
site = _get_site(site_url)
page = site.get_page(SCRIPT_PAGE)
summary = EDIT_SUMMARY.format(version=_get_version())
page.edit(script, summary, minor=False, bot=False)
params = {
"title": page.title.replace(" ", "_"),
"oldid": "prev",
"diff": "cur"
}
print("Done!")
print(site.url + "/w/index.php?" + urlencode(params)) | 1,962 |
def circular_abstractions(graph, settings):
"""doc goes here"""
for pattern, count in abstractions.circular_abstractions(graph, settings):
print "<LIBRARY CIRCLE:> chain:", pattern, count | 1,963 |
def exceptions2exit(exception_list):
"""
Decorator to convert given exceptions to exit messages
This avoids displaying nasty stack traces to end-users
:param exception_list: list of exceptions to convert
"""
def exceptions2exit_decorator(func):
@functools.wraps(func)
def func_wrapper(*args, **kwargs):
try:
func(*args, **kwargs)
except tuple(exception_list) as e:
print("ERROR: {}".format(e))
sys.exit(1)
return func_wrapper
return exceptions2exit_decorator | 1,964 |
def getTestSuite(select="unit"):
"""
Get test suite
select is one of the following:
"unit" return suite of unit tests only
"component" return suite of unit and component tests
"all" return suite of unit, component and integration tests
"pending" return suite of pending tests
name a single named test to be run
"""
testdict = {
"unit":
[ "testDummy"
],
"zzcomponent":
[ "testDummy"
],
"integration":
[ "testLoad"
, "testStartStop"
, "testVolume"
],
"pending":
[ "testDummy"
]
}
return TestUtils.getTestSuite(testITunes, testdict, select=select) | 1,965 |
def dasopw(fname):
"""
Open a DAS file for writing.
https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/dasopw_c.html
:param fname: Name of a DAS file to be opened.
:type fname: str
:return: Handle assigned to the opened DAS file.
"""
fname = stypes.stringToCharP(fname)
handle = ctypes.c_int(0)
libspice.dasopw_c(fname, ctypes.byref(handle))
return handle.value | 1,966 |
def is_ncname(value):
"""
BNode identifiers must be valid NCNames.
From the `W3C RDF Syntax doc <http://www.w3.org/TR/REC-rdf-syntax/#section-blank-nodeid-event>`_
"The value is a function of the value of the ``identifier`` accessor.
The string value begins with "_:" and the entire value MUST match
the `N-Triples nodeID <http://www.w3.org/TR/2004/REC-rdf-testcases-20040210/#nodeID>`_ production".
The nodeID production is specified to be a `name <http://www.w3.org/TR/2004/REC-rdf-testcases-20040210/#name>`_
name ::= [A-Za-z][A-Za-z0-9]*
>>> assert is_ncname('') == False
>>> assert is_ncname('999') == False
>>> assert is_ncname('x') == True
>>> assert is_ncname(u'x') == True
>>> assert is_ncname(u'Michèle') == True
However, vanilla uuid4s are not necessarily NCNames:
>>> assert is_ncname('6fa459ea-ee8a-3ca4-894e-db77e160355e') == False
So this has to be finessed with an appropriate prefix ...
>>> assert is_ncname("urn:uuid:"+str(uuid4())) == True
>>> from rdflib import BNode
>>> assert is_ncname(BNode(_sn_gen=bnode_uuid, _prefix="urn:uuid:")) == True
"""
ncnameexp = re.compile("[A-Za-z][A-Za-z0-9]*")
if ncnameexp.match(value):
return True
else:
return False | 1,967 |
def test_metric_type_completeness():
"""
Test that the value map is complete above 243, our arbitrary cutoff from Apr. 14th.
"""
vals = metric_types_map.values()
complete_min = 243
complete_max = max(vals)
complete_range = range(complete_min, complete_max + 1)
# All values in the complete range are in vals.
assert set(complete_range) - set(vals) == set() | 1,968 |
def health_func() -> str:
"""Give the user the API health."""
return "ok" | 1,969 |
def queue_worker(decoy: Decoy) -> QueueWorker:
"""Get a mock QueueWorker."""
return decoy.mock(cls=QueueWorker) | 1,970 |
def temp_hdf_handler():
"""
Generate temporary empty hdf file and return its handler. Delete file after test is done.
Returns
-------
Hdf5io
Handler bound to a temporary, empty HDF5 file; the file is removed after the test.
"""
import tempfile
import os
import time
path = tempfile.mkdtemp()
fp = os.path.join(path,f'{time.time()}.hdf5')
with Hdf5io(fp,lockfile_path=tempfile.gettempdir()) as handler:
yield handler
os.remove(fp)
if not os.listdir(path):
os.rmdir(path) | 1,971 |
def imports_of_package(
package, module_dotpath_filt=None, imported_module_dotpath_filt=None, depth=None
):
"""Generates (module_dotpath, imported_module_dotpaths) pairs from a package, recursively.
:param package: Module, file, folder, or dotpath of package to root the generation from
:param module_dotpath_filt: Filter function for module dotpaths
:param imported_module_dotpath_filt: Filter function for imported module dotpaths
:param depth: How deep the recursion should be
:return: A generator of (module_dotpath, imported_module_dotpaths) pairs
>>> import unbox
>>> for module_dotpath, imported_module_dotpaths in imports_of_package(
... unbox,
... module_dotpath_filt = lambda x: '__init__' not in x,
... depth=1):
... print(f"{module_dotpath}: {sorted(imported_module_dotpaths)[:3]}"
... ) # doctest: +SKIP
_acquire_builtin_names: ['bs4', 'contextlib', 'dol.filesys']
missing_install_names: ['collections', 'config2py', 'json']
recipes: ['dol', 'dol.filesys', 'importlib']
base: ['builtins', 'collections', 'contextlib']
"""
if isinstance(package, str):
if os.path.exists(package):
rootdir = package
else:
package = importlib.import_module(package)
rootdir = os.path.dirname(package.__file__)
elif isinstance(package, ModuleType):
rootdir = os.path.dirname(package.__file__)
else:
raise TypeError(f"Couldn't resolve {package}")
s = ModuleStrings(rootdir, max_levels=depth)
for module_dotpath, imported_module_dotpaths in s.items():
if (module_dotpath_filt or (lambda x: True))(module_dotpath):
imported_module_dotpaths = sorted(
filter(
imported_module_dotpath_filt,
set(imports_for(imported_module_dotpaths)),
)
)
yield module_dotpath, imported_module_dotpaths | 1,972 |
def astra_fp_3d(volume, proj_geom):
"""
:param proj_geom:
:param volume:
:return:3D sinogram
"""
detector_size = volume.shape[1]
slices_number = volume.shape[0]
rec_size = detector_size
vol_geom = build_volume_geometry_3d(rec_size, slices_number)
sinogram_id = astra.data3d.create('-sino', proj_geom)
# Create a data object for the reconstruction
rec_id = astra.data3d.create('-vol', vol_geom, data=volume)
# Set up the parameters for a reconstruction algorithm using the GPU
cfg = astra.astra_dict('FP3D_CUDA')
cfg['VolumeDataId'] = rec_id
cfg['ProjectionDataId'] = sinogram_id
cfg['option'] = {}
alg_id = astra.algorithm.create(cfg)
astra.algorithm.run(alg_id, 1)
res_sino = astra.data3d.get(sinogram_id)
# Clean up. Note that GPU memory is tied up in the algorithm object,
# and main RAM in the data objects.
astra.algorithm.delete(alg_id)
astra.data3d.delete(rec_id)
astra.data3d.delete(sinogram_id)
astra.clear()
return res_sino | 1,973 |
def handle_article_file(file_path, storage_config):
""" Processed a file by extracting a ticker(s) and then moving it to an appropriate dir """
base_name = os.path.basename(file_path)
with open(file_path, 'r') as f:
j = json.loads(f.read())
if "an error has occured" in j['title'].lower():
copyfile(file_path, os.path.join(storage_config.bad_path, base_name))
return
j['tickers'] = find_stock_tickers(j['text'])
path = storage_config.other_path if len(j['tickers']) == 0 else storage_config.stock_path
with open(os.path.join(path, base_name), 'w') as fn:
fn.write(json.dumps(j)) | 1,974 |
def get_wf_neb_from_images(
parent,
images,
user_incar_settings,
additional_spec=None,
user_kpoints_settings=None,
additional_cust_args=None,
):
"""
Get a CI-NEB workflow from given images.
Workflow: NEB_1 -- NEB_2 - ... - NEB_n
Args:
parent (Structure): parent structure.
images ([Structure]): All images and two endpoints.
user_incar_settings([dict]): Additional user_incar_settings. Note that the order of the
list is set as: "parent", "ep_relax", "neb1", "neb2" etc., which contains
at least three elements. The first dict is for parent structure relaxation,
the second dict is for endpoints relaxation, and the rest are for NEB
calculations. For example, [{}, {}, {"IOPT": 7}, {"IOPT": 1}]. Besides,
user_incar_settings is used to determine how many NEB rounds will be. Default
is [{}, {}, {}].
additional_spec (dict): User spec settings to overwrite default_spec.
user_kpoints_settings ([dict]): Additional user_kpoints_settings, which contains at at
least three elements, which is similar to user_incar_settings. For example,
[{}, {}, {"grid_density": 100}] for the workflow from the parent structure
relaxation, then the endpoint relaxation followed by one-round NEB simulation.
Default values depend on the selected VaspInputSet.
additional_cust_args ([dict]): Optional parameters for RunVaspCustodian, same structure
with user_incar_settings and user_kpoints_settings.
Returns:
Workflow
"""
spec = _update_spec(additional_spec)
spec["parent"] = parent.as_dict()
assert isinstance(images, list) and len(images) >= 3
spec["neb"] = [[s.as_dict() for s in images]]
spec["_queueadapter"] = {
"nnodes": str(len(images) - 2),
"nodes": str(len(images) - 2),
}
if spec["neb_walltime"] is not None:
spec["_queueadapter"].update({"walltime": spec.get("neb_walltime")})
wf_name = spec["wf_name"]
# Assume one round NEB if user_incar_settings not provided.
user_incar_settings = user_incar_settings or [{}, {}, {}]
neb_round = len(user_incar_settings[2:])
user_kpoints_settings = user_kpoints_settings or [{"grid_density": 1000}] * (
neb_round + 2
)
additional_cust_args = additional_cust_args or [{}] * (neb_round + 2)
fws = []
# Get neb fireworks.
for n in range(neb_round):
fw = NEBFW(
spec=spec,
neb_label=str(n + 1),
from_images=True,
user_incar_settings=user_incar_settings[n + 2],
user_kpoints_settings=user_kpoints_settings[n + 2],
additional_cust_args=additional_cust_args[n + 2],
)
fws.append(fw)
# Build fireworks link
links_dict = {}
if neb_round >= 2:
for i in range(neb_round - 1):
links_dict[fws[i]] = [fws[i + 1]]
workflow = Workflow(fws, name=wf_name, links_dict=links_dict)
return workflow | 1,975 |
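A sketch of one way to call this, under the assumption that parent_structure and the two endpoint structures ep0/ep1 are relaxed pymatgen Structure objects; the INCAR overrides and image count below are placeholders, not recommended settings.

# Structure.interpolate typically returns nimages + 1 structures including both endpoints,
# which satisfies the len(images) >= 3 assertion above.
images = ep0.interpolate(ep1, nimages=5, interpolate_lattices=True)

wf = get_wf_neb_from_images(
    parent=parent_structure,                                  # relaxed parent structure
    images=images,
    user_incar_settings=[{}, {}, {"IOPT": 7}, {"IOPT": 1}],   # parent, endpoints, NEB round 1, round 2
    additional_spec={"wf_name": "neb_example"},
)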
def categorize_folder_items(folder_items):
"""
Categorize submission items into three lists: CDM, PII, UNKNOWN
:param folder_items: list of filenames in a submission folder (name of folder excluded)
:return: a tuple with three separate lists - (cdm files, pii files, unknown files)
"""
found_cdm_files = []
unknown_files = []
found_pii_files = []
for item in folder_items:
if _is_cdm_file(item):
found_cdm_files.append(item)
elif _is_pii_file(item):
found_pii_files.append(item)
else:
if not (_is_known_file(item) or _is_string_excluded_file(item)):
unknown_files.append(item)
return found_cdm_files, found_pii_files, unknown_files | 1,976 |
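A small usage sketch with made-up filenames; which names land in each bucket depends on the _is_cdm_file, _is_pii_file and related helpers defined elsewhere in this module.

folder_items = ["person.csv", "measurement.csv", "pii_name.csv", "notes.txt"]
cdm_files, pii_files, unknown_files = categorize_folder_items(folder_items)
print(cdm_files, pii_files, unknown_files)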
def _broadcast_all(indexArrays, cshape):
"""returns a list of views of 'indexArrays' broadcast to shape 'cshape'"""
result = []
for i in indexArrays:
if isinstance(i, NDArray) and i._strides is not None:
result.append(_broadcast(i, cshape))
else:
result.append(i)
return tuple(result) | 1,977 |
def _none_tozero_array(inarray, refarray):
"""Repair an array which is None with one which is not
by just buiding zeros
Attributes
inarray: numpy array
refarray: numpy array
"""
if inarray is None:
if _check_ifarrays([refarray]):
inarray = np.zeros_like(refarray)
else:
if not _check_ifarrays([inarray]):
inarray = None
return inarray | 1,978 |
def dpuGetExceptionMode():
"""
Get the exception handling mode for runtime N2Cube
    Returns: Current exception handling mode for N2Cube APIs.
Available values include:
- N2CUBE_EXCEPTION_MODE_PRINT_AND_EXIT
- N2CUBE_EXCEPTION_MODE_RET_ERR_CODE
"""
return pyc_libn2cube.pyc_dpuGetExceptionMode() | 1,979 |
def clean_links(links, category):
"""
clean up query fields for display as category buttons to browse by
:param links: list of query outputs
:param category: category of search from route
:return: list of cleansed links
"""
cleansedlinks = []
for item in links:
# remove blanks
if item == "" or item == "-":
continue
else:
            # crop chromosome location output to e.g. 13p (check if substrate)
if category[:3] == 'Sub':
item = re.search("[\d|X|Y]+[pq]", item).group(0)
# remove forward slashes
item = item.replace("/", "&F&")
if item not in cleansedlinks:
cleansedlinks.append(item)
# sort the links
cleansedlinks.sort()
return cleansedlinks | 1,980 |
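An illustrative call: for a substrate-style category the function crops chromosome locations down to the arm, so the hypothetical inputs below reduce to '13p' and 'Xq'.

links = ["13p11.2", "Xq28", "13p13", "", "-"]
print(clean_links(links, "Substrate location"))  # ['13p', 'Xq'] after de-duplication and sorting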
def doAllSol(inst,sol):
"""writes out a csv file for all attributes
of a given instrument sol and insturment
Returns: El,Stereo,SeqID,Sclk,Frame,Mission,
Sol,RMC,OOV,Inst,LocType,DataProduct,QUAT,
ObsType,Eas,Az,Elev,Method,Nor,Cpnt"""
print "Entering doAllSol.BulkInstLoc.InstLocUber.py "
flist = findFiles(inst,sol)
if len(flist) == 0:
print "No data products for Sol: " + str(sol) + ", Skipping to Next Sol"
else:
filename = sol + '_' + inst +'.csv'
print "- - - - - START OF DOALLSOL.LOOP: Here is filename from doAllSol.InsLocUber.py: ",filename
header = dict.keys(LocArray)
i = 0
print "- - - - - START OF DOALLSOL.LOOP: HERE IS THE HEADER AND I: ", header," AND: ",i
with open(filename,'a') as f:
for files in flist:
apid = parseVicarLabel.getApIdName(files) #make this more efficient
print "File: " + files + ", has APID: " + apid
if apid == "McamLRecoveredProduct":
print "APID_NAME McamLRecoveredProduct is invalid."
elif apid == "McamRRecoveredProduct":
print "APID_NAME McamRRecoveredProduct is invalid."
elif apid == "McamLRecoveredThumbnail":
print "APID_NAME McamLRecoveredThumbnail is invalid."
elif apid == "McamRRecoveredThumbnail":
print "APID_NAME McamRRecoveredThumbnail is invalid."
elif apid == "RADSendData":
print "APID_NAME RADSendData is invalid."
elif apid == "MrdiRecoveredProduct":
print "APID_NAME MrdiRecoveredProduct is invalid."
else:
# continue
print "filess: ",files
loc = InstLoc.InstLocDB(files)
#print "Here is files from doAllSol.InstLocUber.py: ",files
print "- - - - - DOALLSOL.LOOP: Here is files from doAllSol.InstLocUber.py: ",files," AND HERE IS LOC: ",loc
w = csv.DictWriter(f, loc.keys())
print "- - - - - DOALLSOL.LOOP: HERE IS W: ",w," WHICH IS CSV.DICTWRITER USING F: ",f," AND LOC.KEYS"
while i < 1:
w.writeheader()
i += 1
print "- - - - - DOALLSOL.LOOP: HERE IS I: ",i
w = csv.DictWriter(f, loc.keys())
w.writerow(loc)
print "Leaving doAllSol.InstLocUber.py with this w variable: ",w | 1,981 |
def set_log_level(log_level: str = "None") -> None:
"""Convenience function to set up logging.
Args:
log_level (str): Can be one of None, Critical, Error, Warn, Info, Debug.
"""
configure_logging(log_level) | 1,982 |
def as_bool(value: Any, schema: Optional[BooleanType] = None) -> bool:
"""Parses value as boolean"""
schema = schema or BooleanType()
value = value.decode() if isinstance(value, bytes) else value
if value:
value = str(value).lower()
value = BOOLEANS.get(value)
validation.validate(schema.as_dict(), value)
return value | 1,983 |
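Illustrative calls only; the exact strings accepted depend on the module's BOOLEANS mapping and on how BooleanType validation treats unmapped values, neither of which is shown here.

as_bool("TRUE")   # -> True, assuming BOOLEANS maps 'true' to True
as_bool(b"0")     # -> False, bytes are decoded and lowercased first (assuming '0' maps to False)
as_bool("")       # empty input skips the mapping; the schema validation decides whether that passes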
def get_raw_feature(
column: Text, value: slicer_lib.FeatureValueType,
boundaries: Dict[Text, List[float]]
) -> Tuple[Text, slicer_lib.FeatureValueType]:
"""Get raw feature name and value.
Args:
column: Raw or transformed column name.
value: Raw or transformed column value.
boundaries: Dictionary containing quantile boundaries of features keyed by
column name.
Returns:
Tuple of raw column name and raw column value.
"""
if column.startswith(auto_slice_key_extractor.TRANSFORMED_FEATURE_PREFIX):
raw_feature = column[len(auto_slice_key_extractor.TRANSFORMED_FEATURE_PREFIX
):]
(start, end) = auto_slice_key_extractor.get_bucket_boundary(
value, boundaries[raw_feature])
return (raw_feature, _format_boundary(start, end))
return (column, value) | 1,984 |
def ndmi(nir: Union[xr.DataArray, np.ndarray, float, int],
swir1: Union[xr.DataArray, np.ndarray, float, int]) -> \
Union[xr.DataArray, np.ndarray, float, int]:
"""
Normalized difference moisture index.
Sentinel-2: B8A, B11
Parameters
----------
nir : xr.DataArray or np.ndarray or float or int
Near infrared band acquisition.
swir1 : xr.DataArray or np.ndarray or float or int
Short wave infrared band acquisition.
Returns
-------
same as input:
Normalised difference moisture index.
"""
return utils.normalized_difference(nir, swir1) | 1,985 |
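A minimal numpy sketch, assuming utils.normalized_difference computes the usual (nir - swir1) / (nir + swir1); real inputs would be Sentinel-2 B8A and B11 reflectances.

import numpy as np

nir = np.array([0.45, 0.50, 0.30])
swir1 = np.array([0.25, 0.20, 0.35])
print(ndmi(nir, swir1))  # elementwise (nir - swir1) / (nir + swir1), here ~[0.286, 0.429, -0.077]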
def _log_failed_job(resource_group, job):
"""Logs information about failed job
:param str resource_group: resource group name
:param models.Job job: failed job.
"""
logger.warning('The job "%s" in resource group "%s" failed.', job.name, resource_group)
info = job.execution_info # type: models.JobPropertiesExecutionInfo
if info:
logger.warning('Job failed with exit code %d at %s; execution took %s', info.exit_code,
str(info.end_time), str(info.end_time - info.start_time))
errors = info.errors
if errors:
for e in errors:
details = '<none>'
if e.details:
details = '\n' + '\n'.join(['{0}: {1}'.format(d.name, d.value) for d in e.details])
logger.warning('Error message: %s\nDetails:\n %s', e.message, details)
sys.exit(info.exit_code)
logger.warning('Failed job has no execution info') | 1,986 |
def coerce_kw_type(kw, key, type_, flexi_bool=True):
"""If 'key' is present in dict 'kw', coerce its value to type 'type\_' if
necessary. If 'flexi_bool' is True, the string '0' is considered false
when coercing to boolean.
"""
if key in kw and type(kw[key]) is not type_ and kw[key] is not None:
if type_ is bool and flexi_bool:
kw[key] = asbool(kw[key])
else:
kw[key] = type_(kw[key]) | 1,987 |
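A short usage example; the boolean branch delegates to asbool, which is assumed to treat '0' and 'false' as False.

kw = {"port": "5432", "echo": "0", "timeout": None}
coerce_kw_type(kw, "port", int)       # kw["port"] -> 5432
coerce_kw_type(kw, "echo", bool)      # kw["echo"] -> False ('0' is falsy when flexi_bool is True)
coerce_kw_type(kw, "timeout", float)  # None values are left untouched
print(kw)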
def _collect_data_and_enum_definitions(parsed_models: dict) -> dict[str, dict]:
"""
Collect all data and enum definitions that are referenced as interface messages or as a nested type within an interface message.
Args:
parsed_models: A dict containing models parsed from an AaC yaml file.
Returns:
A dict of data message type keys to data message parsed model values
"""
def collect_nested_types(interface_data_message_types: list[str]):
nested_types = []
for message_type in interface_data_message_types:
data_model = parsed_models[message_type]["data"]
for field in data_model.get("fields"):
field_type = field.get("type")
if field_type in parsed_models:
nested_types.append(field_type)
return list(set(nested_types))
def collect_behaviors(model_with_behaviors):
return util.search(model_with_behaviors, ["model", "behavior"])
def convert_behavior_io_to_data_type(behavior_io_model):
return behavior_io_model.get("type")
def collect_data_message_types(behavior_model):
inputs = behavior_model.get("input") or []
outputs = behavior_model.get("output") or []
return list(map(convert_behavior_io_to_data_type, inputs + outputs))
model_definitions = util.get_models_by_type(parsed_models, "model")
behaviors = list(flatten(map(collect_behaviors, model_definitions.values())))
interface_data_message_types = list(set(flatten(map(collect_data_message_types, behaviors))))
all_definitions_types_to_generate = interface_data_message_types + collect_nested_types(interface_data_message_types)
return {data_message_type: parsed_models[data_message_type] for data_message_type in all_definitions_types_to_generate} | 1,988 |
def _list_goals(context, message):
"""Show all installed goals."""
context.log.error(message)
# Execute as if the user had run "./pants goals".
return Phase.execute(context, 'goals') | 1,989 |
def values(df, varname):
"""Values and counts in index order.
df: DataFrame
    varname: string column name
returns: Series that maps from value to frequency
"""
return df[varname].value_counts().sort_index() | 1,990 |
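A short pandas example of the helper above.

import pandas as pd

df = pd.DataFrame({"age": [30, 41, 30, 25, 41, 41]})
print(values(df, "age"))  # index 25 -> 1, 30 -> 2, 41 -> 3, in index order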
def get_or_none(l, n):
"""Get value or return 'None'"""
try:
return l[n]
except (TypeError, IndexError):
return 'None' | 1,991 |
def pfam_clan_to_pdb(clan):
"""get a list of associated PDB ids for given pfam clan access key.
:param clan: pfam accession key of clan
:type clan: str
:return: List of associated PDB ids
    :rtype: list"""
    url = 'http://pfam.xfam.org/clan/' + clan + '/structures'
    pattern = '/structure/[A-Z0-9]{4}'
return _xfam_to(url,pattern) | 1,992 |
def fib(n):
"""Compute the nth Fibonacci number.
>>> fib(8)
21
"""
if n == 0:
return 0
elif n == 1:
return 1
else:
return fib(n-2) + fib(n-1) | 1,993 |
def go_prefix(prefix):
"""go_prefix sets the Go import name to be used for this workspace."""
_go_prefix_rule(name = "go_prefix",
prefix = prefix,
visibility = ["//visibility:public" ]
) | 1,994 |
def _is_cache_dir_appropriate(cache_dir, cache_file):
"""
Determine if a directory is acceptable for building.
A directory is suitable if any of the following are true:
- it doesn't exist
- it is empty
- it contains an existing build cache
"""
if os.path.exists(cache_dir):
files = os.listdir(cache_dir)
if cache_file in files:
return True
return not bool(files)
return True | 1,995 |
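A quick check against a temporary directory; the cache file name is arbitrary here.

import os
import tempfile

with tempfile.TemporaryDirectory() as d:
    print(_is_cache_dir_appropriate(d, "build.cache"))   # True: directory is empty
    open(os.path.join(d, "stray.txt"), "w").close()
    print(_is_cache_dir_appropriate(d, "build.cache"))   # False: non-empty and no cache file
    open(os.path.join(d, "build.cache"), "w").close()
    print(_is_cache_dir_appropriate(d, "build.cache"))   # True: existing build cache found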
def run_cmd(cmd, **kwargs):
"""Run a command using parameters kwargs."""
flags = [k for k, v in kwargs.items() if v is None]
kwargs = {k: v for k, v in kwargs.items() if v is not None}
run_cmd_str(dict_to_cmd(cmd, flags, **kwargs)) | 1,996 |
def executeCodeIn(code, namespace):
"""Execute the final translated Python code in the given namespace."""
exec(code, namespace) | 1,997 |
def score_tours_absolute(problems: List[N_TSP], tours: List[Union[int, NDArray]]) -> NDArray:
"""Calculate tour lengths for a batch of tours.
Args:
problems (List[N_TSP]): list of TSPs
tours (List[Union[int, NDArray]]): list of tours (in either index or segment format)
Returns:
NDArray: tour lengths
"""
    result = np.ndarray((len(problems),), dtype=float)
for i, (p, t) in enumerate(zip(problems, tours)):
result[i] = p.score(t)
return result | 1,998 |
def has(pred: Pred, seq: Seq) -> bool:
"""
    Return True if the sequence has at least one item that satisfies the predicate.
"""
for x in seq:
if pred(x):
return True
return False | 1,999 |
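A tiny usage example; Pred and Seq are the module's type aliases for a predicate and an iterable.

print(has(lambda x: x % 2 == 0, [1, 3, 5, 6]))  # True, 6 satisfies the predicate
print(has(str.isupper, "abc"))                  # False, no uppercase characters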