content | id |
---|---|
def mixlogistic_invcdf(y, *, logits, means, logscales, mix_dim,
tol=1e-8, max_bisection_iters=60, init_bounds_scale=100.):
"""
inverse cumulative distribution function of a mixture of logistics, via bisection
"""
if _FORCE_ACCURATE_INV_CDF:
tol = min(tol, 1e-14)
max_bisection_iters = max(max_bisection_iters, 200)
init_bounds_scale = max(init_bounds_scale, 100.)
return mixlogistic_invlogcdf(y.log(), logits=logits, means=means, logscales=logscales, mix_dim=mix_dim,
tol=tol, max_bisection_iters=max_bisection_iters, init_bounds_scale=init_bounds_scale) | 5,358,400 |
def FormatException(exc_info):
"""Gets information from exception info tuple.
Args:
exc_info: exception info tuple (type, value, traceback)
Returns:
exception description in a list - wsgi application response format.
"""
return [cgitb.handler(exc_info)] | 5,358,401 |
def cli(**cli_kwargs):
""" Run a model with a specific pre-transform for all tiles in a slide (tile_images)
\b
Inputs:
input_slide_image: slide image (virtual slide formats compatible with openslide, .svs, .tif, .scn, ...)
input_slide_tiles: slide tiles (manifest tile files, .tiles.csv)
\b
Outputs:
slide_tiles
\b
Example:
run_tissue_detection 10001.svs 10001/tiles
-rmg 0.5 -nc 8
-rq 'otsu_score > 0.1 or stain0_score > 0.1'
-o 10001/filtered_tiles
"""
cli_runner( cli_kwargs, _params_, detect_tissue) | 5,358,402 |
def trackers_init(box, vid_path, image):
"""Initialize a single tracker"""
tracker = cv2.TrackerCSRT_create()
tracker.init(image, box)
return tracker, cv2.VideoCapture(vid_path) | 5,358,403 |
def parse_comment(comment: str, env: Env) -> None:
"""Parse RLE comments and update user environment accordingly.
Parameters
----------
comment: str
RLE comment to parse.
env: `dict` [str, `Any`]
Environment dictionary generated by ``parse_file``.
Notes
-----
This function parses comments according to information on LifeWiki. Check
the LifeWiki page on the RLE file-format for more information:
https://www.conwaylife.com/wiki/Run_Length_Encoded##_lines
"""
comment = comment[1:] # Remove hash
if comment.startswith("P") or comment.startswith("R"):
env["start"] = tuple([int(x)
for x in DIGIT_REG.findall(comment)][:2])
elif comment.startswith("r"):
env["rule"] = comment[2:] | 5,358,404 |
def get_image(id: Optional[int] = None,
name: Optional[str] = None,
slug: Optional[str] = None,
source: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetImageResult:
"""
Get information on an image for use in other resources (e.g. creating a Droplet
based on snapshot). This data source provides all of the image properties as
configured on your DigitalOcean account. This is useful if the image in question
is not managed by the provider or you need to utilize any of the image's data.
An error is triggered if zero or more than one result is returned by the query.
## Example Usage
Get the data about a snapshot:
```python
import pulumi
import pulumi_digitalocean as digitalocean
example1 = digitalocean.get_image(name="example-1.0.0")
```
Reuse the data about a snapshot to create a Droplet:
```python
import pulumi
import pulumi_digitalocean as digitalocean
example_image = digitalocean.get_image(name="example-1.0.0")
example_droplet = digitalocean.Droplet("exampleDroplet",
image=example_image.id,
region="nyc2",
size="s-1vcpu-1gb")
```
Get the data about an official image:
```python
import pulumi
import pulumi_digitalocean as digitalocean
example2 = digitalocean.get_image(slug="ubuntu-18-04-x64")
```
:param int id: The id of the image
:param str name: The name of the image.
:param str slug: The slug of the official image.
:param str source: Restrict the search to one of the following categories of images:
"""
__args__ = dict()
__args__['id'] = id
__args__['name'] = name
__args__['slug'] = slug
__args__['source'] = source
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('digitalocean:index/getImage:getImage', __args__, opts=opts, typ=GetImageResult).value
return AwaitableGetImageResult(
created=__ret__.created,
description=__ret__.description,
distribution=__ret__.distribution,
error_message=__ret__.error_message,
id=__ret__.id,
image=__ret__.image,
min_disk_size=__ret__.min_disk_size,
name=__ret__.name,
private=__ret__.private,
regions=__ret__.regions,
size_gigabytes=__ret__.size_gigabytes,
slug=__ret__.slug,
source=__ret__.source,
status=__ret__.status,
tags=__ret__.tags,
type=__ret__.type) | 5,358,405 |
def _infer_added_params(kw_params):
"""
Infer values for proplot's "added" parameters from stylesheets.
"""
kw_proplot = {}
mpl_to_proplot = {
'font.size': ('tick.labelsize',),
'axes.titlesize': (
'abc.size', 'suptitle.size', 'title.size',
'leftlabel.size', 'rightlabel.size',
'toplabel.size', 'bottomlabel.size',
),
'text.color': (
'abc.color', 'suptitle.color', 'tick.labelcolor', 'title.color',
'leftlabel.color', 'rightlabel.color',
'toplabel.color', 'bottomlabel.color',
),
}
for key, params in mpl_to_proplot.items():
if key in kw_params:
value = kw_params[key]
for param in params:
kw_proplot[param] = value
return kw_proplot | 5,358,406 |
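For illustration, a quick check of the mapping above (the input values are arbitrary):

kw = _infer_added_params({'font.size': 9.0, 'text.color': 'gray'})
assert kw['tick.labelsize'] == 9.0
assert kw['abc.color'] == 'gray' and kw['suptitle.color'] == 'gray'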
def test_bot_extra_mode_args(mockbot, ircfactory, caplog):
"""Test warning on extraneous MODE args."""
irc = ircfactory(mockbot)
irc.bot._isupport = isupport.ISupport(chanmodes=("b", "k", "l", "mnt", tuple()))
irc.bot.modeparser.chanmodes = irc.bot.isupport.CHANMODES
irc.channel_joined("#test", ["Alex", "Bob", "Cheryl"])
mode_msg = ":Sopel!bot@bot MODE #test +m nonsense"
mockbot.on_message(mode_msg)
assert mockbot.channels["#test"].modes["m"]
assert "Too many arguments received for MODE" in caplog.text | 5,358,407 |
def create_table_description(config: ConfigLoader):
""" creates the description for the pytables table used for dataloading """
n_sample_values = int(config.SAMPLING_RATE * config.SAMPLE_DURATION)
table_description = {
COLUMN_MOUSE_ID: tables.Int16Col(),
COLUMN_LABEL: tables.StringCol(10)
}
for c in config.CHANNELS:
table_description[c] = tables.Float32Col(shape=n_sample_values)
return table_description | 5,358,408 |
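A sketch of how the description might be built, using a stand-in object in place of a real ConfigLoader (the attribute names come from the function body; the values here are made up):

from types import SimpleNamespace
import tables  # PyTables

fake_config = SimpleNamespace(SAMPLING_RATE=64, SAMPLE_DURATION=4,
                              CHANNELS=['EEG1', 'EEG2', 'EMG'])
desc = create_table_description(fake_config)
print(sorted(desc))  # the two fixed columns plus one Float32Col(shape=256) per channel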
def get_target_rank_list(daos_object):
"""Get a list of target ranks from a DAOS object.
Note:
The DaosObj function called is not part of the public API
Args:
daos_object (DaosObj): the object from which to get the list of targets
Raises:
DaosTestError: if there is an error obtaining the target list from the
object
Returns:
list: list of targets for the specified object
"""
try:
daos_object.get_layout()
return daos_object.tgt_rank_list
except DaosApiError as error:
raise DaosTestError(
"Error obtaining target list for the object: {}".format(error)) | 5,358,409 |
def find_global_best(particle_best=[]):
    """
    Searches the personal bests of all particles to find the global best.
    :param particle_best: list of the particles' personal best solutions.
    :return: a copy of the best particle found.
    """
    best_found = None
    for particle in particle_best:
if best_found is None:
best_found = copy(particle)
elif particle.total_cost < best_found.total_cost:
best_found = copy(particle)
print('\nBest found: ', best_found)
return best_found | 5,358,410 |
def test_should_invoke_main_nuki_nukis(monkeypatch, project_dir):
"""Should create a project and exit with 0 code on cli invocation."""
monkeypatch.setenv('PYTHONPATH', '.')
test_dir = os.path.join(
os.path.abspath(os.path.dirname(__file__)),
'..',
'fixtures/fake-repo-tmpl-nukis',
)
monkeypatch.chdir(test_dir)
output_dirs = ['fake-nuki-templated', 'fake-nuki2-templated']
for o in output_dirs:
if os.path.isdir(o):
shutil.rmtree(o)
exit_code = subprocess.check_call(
[sys.executable, '-m', 'cookiecutter.cli.cli_parser', '.', '--no-input']
)
assert exit_code == 0
assert os.path.isdir(project_dir)
for o in output_dirs:
if os.path.isdir(o):
shutil.rmtree(o) | 5,358,411 |
def extract_handler(args: argparse.Namespace) -> None:
"""
Save environment variables to file.
"""
result = env_string(map(parameter_to_env, get_parameters(args.path)), args.export)
with args.outfile as ofp:
print(result, file=ofp) | 5,358,412 |
def rename_to_monet_latlon(ds):
"""Short summary.
Parameters
----------
ds : type
Description of parameter `ds`.
Returns
-------
type
Description of returned object.
"""
if "lat" in ds.coords:
return ds.rename({"lat": "latitude", "lon": "longitude"})
elif "Latitude" in ds.coords:
return ds.rename({"Latitude": "latitude", "Longitude": "longitude"})
elif "Lat" in ds.coords:
return ds.rename({"Lat": "latitude", "Lon": "longitude"})
elif "grid_lat" in ds.coords:
return ds.rename({"grid_lat": "latitude", "grid_lon": "longitude"})
else:
return ds | 5,358,413 |
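A small usage sketch with a synthetic xarray object (assuming xarray is importable):

import xarray as xr

ds = xr.Dataset(coords={"Lat": [10.0, 20.0], "Lon": [30.0, 40.0]})
renamed = rename_to_monet_latlon(ds)
assert set(renamed.coords) == {"latitude", "longitude"}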
async def test_api_state_change_push(opp, mock_api_client):
"""Test if we can push a change the state of an entity."""
opp.states.async_set("test.test", "not_to_be_set")
events = []
@op.callback
def event_listener(event):
"""Track events."""
events.append(event)
opp.bus.async_listen(const.EVENT_STATE_CHANGED, event_listener)
await mock_api_client.post(
const.URL_API_STATES_ENTITY.format("test.test"), json={"state": "not_to_be_set"}
)
await opp.async_block_till_done()
assert len(events) == 0
await mock_api_client.post(
const.URL_API_STATES_ENTITY.format("test.test"),
json={"state": "not_to_be_set", "force_update": True},
)
await opp.async_block_till_done()
assert len(events) == 1 | 5,358,414 |
def payments_reset():
""" Removes all payments from the database """
Payment.remove_all()
return make_response('', status.HTTP_204_NO_CONTENT) | 5,358,415 |
def get_smallerI(x, i):
    """Return True if the length of string x is smaller than or equal to i."""
    return len(x) <= i
def _ParseSourceContext(remote_url, source_revision):
"""Parses the URL into a source context blob, if the URL is a git or GCP repo.
Args:
remote_url: The remote URL to parse.
source_revision: The current revision of the source directory.
Returns:
An ExtendedSourceContext suitable for JSON.
"""
# Assume it's a Git URL unless proven otherwise.
context = None
# Now try to interpret the input as a Cloud Repo URL, and change context
# accordingly if it looks like one. Assume any seemingly malformed URL is
# a valid Git URL, since the inputs to this function always come from Git.
#
# A cloud repo URL can take three forms:
# 1: https://<hostname>/id/<repo_id>
# 2: https://<hostname>/p/<project_id>
# 3: https://<hostname>/p/<project_id>/r/<repo_name>
#
# There are two repo ID types. The first type is the direct repo ID,
# <repo_id>, which uniquely identifies a repository. The second is the pair
# (<project_id>, <repo_name>) which also uniquely identifies a repository.
#
# Case 2 is equivalent to case 3 with <repo_name> defaulting to "default".
match = re.match(_CLOUD_REPO_PATTERN, remote_url)
if match:
# It looks like a GCP repo URL. Extract the repo ID blob from it.
id_type = match.group('id_type')
if id_type == 'id':
raw_repo_id = match.group('project_or_repo_id')
# A GCP URL with an ID can't have a repo specification. If it has
# one, it's either malformed or it's a Git URL from some other service.
if not match.group('repo_name'):
context = {
'cloudRepo': {
'repoId': {
'uid': raw_repo_id
},
'revisionId': source_revision}}
elif id_type == 'p':
# Treat it as a project name plus an optional repo name.
project_id = match.group('project_or_repo_id')
repo_name = match.group('repo_name') or 'default'
context = {
'cloudRepo': {
'repoId': {
'projectRepoId': {
'projectId': project_id,
'repoName': repo_name}},
'revisionId': source_revision}}
# else it doesn't look like a GCP URL
if not context:
context = {'git': {'url': remote_url, 'revisionId': source_revision}}
return ExtendContextDict(context) | 5,358,417 |
def decrypt_and_verify(message, sender_key, private_key):
"""
Decrypts and verifies a message using a sender's public key name
Looks for the sender's public key in the public_keys/ directory.
Looks for your private key as private_key/private.asc
The ASN.1 specification for a FinCrypt message resides in asn1spec.py
Raises exceptions if key files are not found, or are malformed.
:param message: Message to decrypt (bytes)
:param private_key: Decrypter's private key (file like object)
:param sender_key: Sender's public key (file like object)
:return: Tuple (decrypted message (bytes), whether the message was verified (boolean))
If message was unable to be decrypted, the tuple will be (None, False)
"""
try:
decryption_key = read_private_key(private_key.read())
except Exception:
raise FinCryptDecodingError('Private key file is malformed.')
try:
sender_key = read_public_key(sender_key.read())
except Exception:
raise FinCryptDecodingError('Sender key file is malformed.')
try:
rsc = reedsolomon.RSCodec(8)
message = bytes(rsc.decode(message)[0])
decoded, _ = decode_ber(message, asn1Spec=FinCryptMessage())
decoded = encode_native(decoded)
except Exception:
return None, False
try:
decrypted_message = decrypt_message(decryption_key['k'], decoded['key'], decoded['message'])
except Exception:
decrypted_message = None
try:
authenticated = authenticate_message(sender_key['kx'], sender_key['ky'], decrypted_message,
decoded['signature'])
except Exception:
authenticated = False
return decrypted_message, authenticated | 5,358,418 |
def add_obstacle(pos: list) -> None:
"""Adds obstacles"""
grid[pos[0]][pos[1]].passable = False | 5,358,419 |
def find_best_margin(args):
""" return `best_margin / 0.1` """
set_global_seeds(args['seed'])
dataset = DataLoader(args['dataset'], args)
X_train, X_test, X_val, y_train, y_test, y_val = dataset.prepare_train_test_val(args)
results = []
for margin in MARGINS:
model = Perceptron(feature_dim=X_train.shape[-1], margin=margin)
model.fit(X_train, y_train)
results.append(model.score(X_val, y_val))
return results | 5,358,420 |
def get_user_balances(userAddress):
"""
    :param userAddress: address of the user whose pool balances are requested.
    :return: JSON response with the user's pool balances, or an error message with a 500 status.
"""
try:
data = get_request_data(request) or {}
from_block = data.get("fromBlock", int(os.getenv("BFACTORY_BLOCK", 0)))
ocean = Ocean(ConfigProvider.get_config())
result = ocean.pool.get_user_balances(userAddress, from_block)
return Response(json.dumps(result), 200, content_type="application/json")
except Exception as e:
logger.error(f"pools/user/{userAddress}: {str(e)}")
return f"Get pool user balances failed: {str(e)}", 500 | 5,358,421 |
def unfold(raw_log_line):
"""Take a raw syslog line and unfold all the multiple levels of
newline-escaping that have been inflicted on it by various things.
Things that got python-repr()-ized, have '\n' sequences in them.
Syslog itself looks like it uses #012.
"""
lines = raw_log_line \
.replace('#012', '\n') \
.replace('\\n', '\n') \
.splitlines()
return lines | 5,358,422 |
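For example (the second escape in the input is a literal backslash-n, as produced by repr()):

raw = 'worker error#012  File "job.py", line 7\\nValueError: bad input'
assert unfold(raw) == ['worker error',
                       '  File "job.py", line 7',
                       'ValueError: bad input']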
def metis(hdf5_file_name, N_clusters_max):
"""METIS algorithm by Karypis and Kumar. Partitions the induced similarity graph
passed by CSPA.
Parameters
----------
hdf5_file_name : string or file handle
N_clusters_max : int
Returns
-------
labels : array of shape (n_samples,)
A vector of labels denoting the cluster to which each sample has been assigned
as a result of the CSPA heuristics for consensus clustering.
Reference
---------
G. Karypis and V. Kumar, "A Fast and High Quality Multilevel Scheme for
Partitioning Irregular Graphs"
In: SIAM Journal on Scientific Computing, Vol. 20, No. 1, pp. 359-392, 1999.
"""
file_name = wgraph(hdf5_file_name)
labels = sgraph(N_clusters_max, file_name)
subprocess.call(['rm', file_name])
return labels | 5,358,423 |
def test_puller_create_ok(started_puller):
"""
Create a PullerActor with a good configuration
"""
assert is_actor_alive(started_puller) | 5,358,424 |
def run(args):
"""Builds a training data set of physics model errors based on the
parameters supplied by the CLI.
:param args: The command line arguments
:type args: argparse.Namespace
"""
logger.info('Loading input DataFrame...')
input_df = pd.read_parquet(args.input_path)
logger.info('Predicting orbits...')
physics_pred_df = predict_orbits(input_df,
last_n_days=args.last_n_days,
n_pred_days=args.n_pred_days)
logger.info('Calculating physical model error...')
physics_pred_df = calc_physics_error(physics_pred_df)
logger.info('Serializing results...')
physics_pred_df.to_parquet(args.output_path) | 5,358,425 |
def _mcs_single(mol, mols, n_atms):
"""Get per-molecule MCS distance vector."""
dists_k = []
n_atm = float(mol.GetNumAtoms())
    n_incomp = 0  # Number of searches terminated by the timeout (i.e., incomplete)
for l in range(0, len(mols)):
# Set timeout to halt exhaustive search, which could take minutes
result = FindMCS([mol, mols[l]], completeRingsOnly=True,
ringMatchesRingOnly=True, timeout=10)
dists_k.append(1. - result.numAtoms /
((n_atm + n_atms[l]) / 2))
if result.canceled:
n_incomp += 1
return np.array(dists_k), n_incomp | 5,358,426 |
def new_transaction():
"""
    Create a new transaction.
:return:
"""
values = request.get_json()
    # Check that the required fields are present in the POSTed data
required = ['sender', 'recipient', 'amount']
if not all(k in values for k in required):
return 'Missing values', 400
    # Create a new transaction
index = blockchain.new_transaction(values['sender'], values['recipient'], values['amount'])
    response = {'message': f'Transaction will be added to Block {index}'}
return jsonify(response), 201 | 5,358,427 |
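A client-side sketch of posting a transaction to this endpoint; the route and port are assumptions, since the snippet above does not show the @app.route decorator:

import requests

payload = {'sender': 'address-a', 'recipient': 'address-b', 'amount': 5}
resp = requests.post('http://localhost:5000/transactions/new', json=payload)  # assumed route
print(resp.status_code, resp.json())  # expect 201 and the confirmation message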
async def get_favicon():
"""Return favicon"""
return FileResponse(path="assets/kentik_favicon.ico", media_type="image/x-icon") | 5,358,428 |
def geocentric_rotation(sphi, cphi, slam, clam):
"""
This rotation matrix is given by the following quaternion operations
qrot(lam, [0,0,1]) * qrot(phi, [0,-1,0]) * [1,1,1,1]/2
or
qrot(pi/2 + lam, [0,0,1]) * qrot(-pi/2 + phi , [-1,0,0])
where
qrot(t,v) = [cos(t/2), sin(t/2)*v[1], sin(t/2)*v[2], sin(t/2)*v[3]]
"""
M = np.zeros(9)
# Local X axis (east) in geocentric coords
M[0] = -slam; M[3] = clam; M[6] = 0;
# Local Y axis (north) in geocentric coords
M[1] = -clam * sphi; M[4] = -slam * sphi; M[7] = cphi;
# Local Z axis (up) in geocentric coords
M[2] = clam * cphi; M[5] = slam * cphi; M[8] = sphi;
return M | 5,358,429 |
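Since the three groups of assignments fill the columns of a local East/North/Up basis, the reshaped matrix should be orthogonal; a quick numerical check (assuming numpy is imported as np, as the function body requires):

import numpy as np

phi, lam = np.radians(30.0), np.radians(45.0)
M = geocentric_rotation(np.sin(phi), np.cos(phi), np.sin(lam), np.cos(lam))
R = M.reshape(3, 3)          # columns are the local East, North, Up axes
assert np.allclose(R @ R.T, np.eye(3))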
def get_price_sma(
ohlcv: DataFrame,
window: int = 50,
price_col: str = "close",
) -> Series:
"""
Price to SMA.
"""
return pd.Series(
ohlcv[price_col] / get_sma(ohlcv, window),
name="Price/SMA{}".format(window),
) | 5,358,430 |
def map_to_closest(multitrack, target_programs, match_len=True, drums_first=True):
"""
    Keep the tracks closest to target_programs and map them to the corresponding
    programs available in target_programs.
multitrack (pypianoroll.Multitrack): Track to normalize.
target_programs (list): List of available programs.
match_len (bool): If True set multitrack track length to length of target_programs.
(return only the len(target_programs) closest tracks in multitrack).
"""
new_multitrack = deepcopy(multitrack)
for track in new_multitrack.tracks:
min_dist = inf
for target in target_programs:
dist = abs(track.program - target)
if dist < min_dist:
min_dist = dist
track.program = target
track.min_dist = min_dist
if match_len:
length = len(target_programs)
new_multitrack.tracks.sort(key=lambda x: x.min_dist)
new_multitrack.tracks = new_multitrack.tracks[:length]
if drums_first:
new_multitrack.tracks.sort(key=lambda x: not x.is_drum)
return new_multitrack | 5,358,431 |
def train_naive(args, X_train, y_train, X_test, y_test, rng, logger=None):
"""
Compute the time it takes to delete a specified number of
samples from a naive model sequentially.
"""
# initial naive training time
model = get_naive(args)
start = time.time()
model = model.fit(X_train, y_train)
before_train_time = time.time() - start
logger.info('\n[{}] before train time: {:.3f}s'.format('naive', before_train_time))
# predictive performance of the naive model
auc, acc, ap = exp_util.performance(model, X_test, y_test, logger=logger, name='naive')
# naive train after deleting data
delete_indices = rng.choice(np.arange(X_train.shape[0]), size=args.n_delete, replace=False)
new_X_train = np.delete(X_train, delete_indices, axis=0)
new_y_train = np.delete(y_train, delete_indices)
# after training time
model = get_naive(args)
start = time.time()
model = model.fit(new_X_train, new_y_train)
after_train_time = time.time() - start
logger.info('[{}] after train time: {:.3f}s'.format('naive', after_train_time))
# interpolate sequential updates
total_time = ((before_train_time + after_train_time) / 2) * args.n_delete
initial_utility = auc, acc, ap
return total_time, initial_utility | 5,358,432 |
def helperFunction():
"""A helper function created to return a value to the test."""
value = 10 > 0
return value | 5,358,433 |
def app_config(app_config):
"""Get app config."""
app_config['RECORDS_FILES_REST_ENDPOINTS'] = {
'RECORDS_REST_ENDPOINTS': {
'recid': '/files'
}
}
app_config['FILES_REST_PERMISSION_FACTORY'] = allow_all
app_config['CELERY_ALWAYS_EAGER'] = True
return app_config | 5,358,434 |
def ts(timestamp_string: str):
"""
Convert a DataFrame show output-style timestamp string into a datetime value
which will marshall to a Hive/Spark TimestampType
:param timestamp_string: A timestamp string in "YYYY-MM-DD HH:MM:SS" format
:return: A datetime object
"""
return datetime.strptime(timestamp_string, '%Y-%m-%d %H:%M:%S') | 5,358,435 |
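For example:

from datetime import datetime

assert ts('2021-03-01 12:30:00') == datetime(2021, 3, 1, 12, 30, 0)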
def align_to_screenshot(
text_block,
screenshot_path,
output_path=None,
show_text=True,
show_guide_lines=True,
show_bounding_boxes=False,
):
"""
Given a `eyekit.text.TextBlock` and the path to a PNG screenshot file,
produce an image showing the original screenshot overlaid with the text
block (shown in green). If no output path is provided, the output image is
written to the same directory as the screenshot file. This is useful for
establishing `eyekit.text.TextBlock` parameters (position, font size,
etc.) that match what participants actually saw in your experiment.
"""
_validate.is_TextBlock(text_block)
screenshot_path = _pathlib.Path(screenshot_path)
if not screenshot_path.exists():
raise ValueError(f"Screenshot file does not exist: {screenshot_path}")
if screenshot_path.suffix[1:].upper() != "PNG":
raise ValueError("Screenshot must be PNG file")
surface = _cairo.ImageSurface(_cairo.FORMAT_ARGB32, 1, 1).create_from_png(
str(screenshot_path)
)
context = _cairo.Context(surface)
screen_width = surface.get_width()
screen_height = surface.get_height()
context.set_source_rgb(0.60392, 0.80392, 0.19607)
context.set_font_face(text_block._font.toy_font_face)
context.set_font_size(text_block._font.size)
if show_guide_lines:
context.set_line_width(2)
context.move_to(text_block.position[0], 0)
context.line_to(text_block.position[0], screen_height)
context.stroke()
for line in text_block.lines():
if show_guide_lines:
context.move_to(0, line.baseline)
context.line_to(screen_width, line.baseline)
context.stroke()
context.set_dash([8, 4])
if show_text:
context.move_to(line._x_tl, line.baseline) # _x_tl is unpadded x_tl
context.show_text(line.text)
if show_bounding_boxes:
context.set_dash([])
for word in text_block.words():
context.rectangle(*word.box)
context.stroke()
if output_path is None:
output_path = screenshot_path.parent / f"{screenshot_path.stem}_eyekit.png"
else:
output_path = _pathlib.Path(output_path)
if not output_path.parent.exists():
raise ValueError(f"Output path does not exist: {output_path.parent}")
if output_path.suffix[1:].upper() != "PNG":
raise ValueError("Output must be PNG file")
surface.write_to_png(str(output_path)) | 5,358,436 |
def test_mock_given_message(uqcsbot: MockUQCSBot):
"""
Test !mock works for given text.
"""
uqcsbot.post_message(TEST_CHANNEL_ID, f'!mock {LONG_MESSAGE}')
messages = uqcsbot.test_messages.get(TEST_CHANNEL_ID, [])
assert len(messages) == 2
assert messages[-1]['text'].lower() == LONG_MESSAGE.lower()
assert messages[-1]['text'] != LONG_MESSAGE | 5,358,437 |
def test_get_deck_private_saved(client: TestClient, deck1):
"""Unauthenticated users must not be able to access private decks via show_saved"""
response = client.get(f"/v2/decks/{deck1.id}", params={"show_saved": True})
assert response.status_code == status.HTTP_403_FORBIDDEN | 5,358,438 |
def test_list_unsigned_int_min_length_nistxml_sv_iv_list_unsigned_int_min_length_1_1(mode, save_output, output_format):
"""
Type list/unsignedInt is restricted by facet minLength with value 5.
"""
assert_bindings(
schema="nistData/list/unsignedInt/Schema+Instance/NISTSchema-SV-IV-list-unsignedInt-minLength-1.xsd",
instance="nistData/list/unsignedInt/Schema+Instance/NISTXML-SV-IV-list-unsignedInt-minLength-1-1.xml",
class_name="NistschemaSvIvListUnsignedIntMinLength1",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
) | 5,358,439 |
def boxes(frame, data, f, parameters=None, call_num=None):
"""
Boxes places a rotated rectangle on the image that encloses the contours of specified particles.
Notes
-----
This method requires you to have used contours for the tracking and run boxes
in postprocessing.
Parameters
----------
cmap_type
Options are 'static' or 'dynamic'
cmap_column
Name of column containing data to specify colour in dynamic mode,
cmap_max
Specifies max data value for colour map in dynamic mode
cmap_scale
Scale factor for colour map
colour
Colour to be used for static cmap_type (B,G,R) values from 0-255
classifier_column
None selects all particles, column name of classifier values to specify subset of particles
classifier
The value in the classifier column which applies to subset (True or False)
thickness
Thickness of box. -1 fills the box in
Args
----
frame
This is the unmodified frame of the input movie
data
This is the dataframe that stores all the tracked data
f
frame index
parameters
Nested dictionary like object (same as .param files or output from general.param_file_creator.py)
call_num
Usually None but if multiple calls are made modifies method name with get_method_key
Returns
-----------
annotated frame : np.ndarray
"""
try:
method_key = get_method_key('boxes', call_num=call_num)
thickness = get_param_val(parameters[method_key]['thickness'])
subset_df = _get_class_subset(data, f, parameters, method=method_key)
box_pts = subset_df[['box_pts']].values
if np.shape(box_pts)[0] == 1:
df_empty = np.isnan(box_pts[0])
if np.all(df_empty):
#0 boxes
return frame
colours = colour_array(subset_df, f, parameters, method=method_key)
sz = np.shape(frame)
for index, box in enumerate(box_pts):
frame = _draw_contours(frame, box, col=colours[index],
thickness=int(get_param_val(parameters[method_key]['thickness'])))
return frame
except Exception as e:
raise BoxesError(e) | 5,358,440 |
def process_states(states):
"""
Separate list of states into lists of depths and hand states.
:param states: List of states.
:return: List of depths and list of hand states; each pair is from the same state.
"""
depths = []
hand_states = []
for state in states:
depths.append(state[0])
hand_states.append(state[1])
depths = np.array(depths, dtype=np.float32)
hand_states = np.array(hand_states, dtype=np.int32)
return depths, hand_states | 5,358,441 |
def upload_county_names(url, gcs_bucket, filename):
"""Uploads county names and FIPS codes from census to GCS bucket."""
url_params = get_census_params_by_county(['NAME'])
url_file_to_gcs(url, url_params, gcs_bucket, filename) | 5,358,442 |
def uniform_crop(images, size, spatial_idx, boxes=None, scale_size=None):
"""
Perform uniform spatial sampling on the images and corresponding boxes.
Args:
images (tensor): images to perform uniform crop. The dimension is
`num frames` x `channel` x `height` x `width`.
        size (int): size of height and width to crop the images.
spatial_idx (int): 0, 1, or 2 for left, center, and right crop if width
is larger than height. Or 0, 1, or 2 for top, center, and bottom
crop if height is larger than width.
boxes (ndarray or None): optional. Corresponding boxes to images.
Dimension is `num boxes` x 4.
        scale_size (int): optional. If not None, resize the images to scale_size before
performing any crop.
Returns:
cropped (tensor): images with dimension of
`num frames` x `channel` x `size` x `size`.
cropped_boxes (ndarray or None): the cropped boxes with dimension of
`num boxes` x 4.
"""
assert spatial_idx in [0, 1, 2]
ndim = len(images.shape)
if ndim == 3:
images = images.unsqueeze(0)
height = images.shape[2]
width = images.shape[3]
if scale_size is not None:
if width <= height:
width, height = scale_size, int(height / width * scale_size)
else:
width, height = int(width / height * scale_size), scale_size
images = torch.nn.functional.interpolate(
images,
size=(height, width),
mode="bilinear",
align_corners=False,
)
y_offset = int(math.ceil((height - size) / 2))
x_offset = int(math.ceil((width - size) / 2))
if height > width:
if spatial_idx == 0:
y_offset = 0
elif spatial_idx == 2:
y_offset = height - size
else:
if spatial_idx == 0:
x_offset = 0
elif spatial_idx == 2:
x_offset = width - size
cropped = images[
:, :, y_offset : y_offset + size, x_offset : x_offset + size
]
cropped_boxes = (
crop_boxes(boxes, x_offset, y_offset) if boxes is not None else None
)
if ndim == 3:
cropped = cropped.squeeze(0)
return cropped, cropped_boxes | 5,358,443 |
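A shape-level sanity check, assuming torch, math, and the helper crop_boxes from the same module are available:

import torch

frames = torch.rand(8, 3, 256, 320)                             # frames x channel x height x width
cropped, boxes = uniform_crop(frames, size=224, spatial_idx=1)  # center crop
assert cropped.shape == (8, 3, 224, 224) and boxes is None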
def plot_xr_complex_on_plane(
var: xr.DataArray,
marker: str = "o",
label: str = "Data on imaginary plane",
cmap: str = "viridis",
c: np.ndarray = None,
xlabel: str = "Real{}{}{}",
ylabel: str = "Imag{}{}{}",
legend: bool = True,
ax: object = None,
**kwargs,
) -> Tuple[Figure, Axes]:
"""Plots complex data on the imaginary plane. Points are colored by default
according to their order in the array.
Parameters
----------
var
1D array of complex data.
marker
Marker used for the scatter plot.
label
Data label for the legend.
cmap
The colormap to use for coloring the points.
c
Color of the points. Defaults to an array of integers.
    xlabel
        Label of the x axis.
    ylabel
        Label of the y axis.
legend
Calls :meth:`~matplotlib.axes.Axes.legend` if ``True``.
ax
The matplotlib axes. If ``None`` a new axes (and figure) is created.
"""
if ax is None:
_, ax = plt.subplots()
if c is None:
c = np.arange(0, len(var))
ax.scatter(var.real, var.imag, marker=marker, label=label, c=c, cmap=cmap, **kwargs)
unit_str = get_unit_from_attrs(var)
ax.set_xlabel(xlabel.format(" ", var.name, unit_str))
ax.set_ylabel(ylabel.format(" ", var.name, unit_str))
if legend:
ax.legend()
return ax.get_figure(), ax | 5,358,444 |
def imread(filename, imread=None, preprocess=None):
"""Read a stack of images into a dask array
Parameters
----------
filename: string
A globstring like 'myfile.*.png'
imread: function (optional)
Optionally provide custom imread function.
Function should expect a filename and produce a numpy array.
Defaults to ``skimage.io.imread``.
preprocess: function (optional)
Optionally provide custom function to preprocess the image.
Function should expect a numpy array for a single image.
Examples
--------
>>> from dask.array.image import imread
>>> im = imread('2015-*-*.png') # doctest: +SKIP
>>> im.shape # doctest: +SKIP
(365, 1000, 1000, 3)
Returns
-------
Dask array of all images stacked along the first dimension. All images
will be treated as individual chunks
"""
imread = imread or sk_imread
filenames = sorted(glob(filename))
if not filenames:
raise ValueError("No files found under name %s" % filename)
name = "imread-%s" % tokenize(filenames, map(os.path.getmtime, filenames))
sample = imread(filenames[0])
if preprocess:
sample = preprocess(sample)
keys = [(name, i) + (0,) * len(sample.shape) for i in range(len(filenames))]
if preprocess:
values = [
(add_leading_dimension, (preprocess, (imread, fn))) for fn in filenames
]
else:
values = [(add_leading_dimension, (imread, fn)) for fn in filenames]
dsk = dict(zip(keys, values))
chunks = ((1,) * len(filenames),) + tuple((d,) for d in sample.shape)
return Array(dsk, name, chunks, sample.dtype) | 5,358,445 |
def _set_mode_commands(emacs_state: Dict) -> None:
"""Extract & set the current `-mode` commands."""
defined_commands = emacs_state.get(DEFINED_COMMANDS_KEY, [])
context.lists["user.emacs_mode_commands"] = _extract_mode_commands(defined_commands) | 5,358,446 |
def importPredictions_Tombo(infileName, chr_col=0, start_col=1, readid_col=3, strand_col=5, meth_col=4, baseFormat=1,
score_cutoff=(-1.5, 2.5), output_first=False, include_score=False, filterChr=HUMAN_CHR_SET,
save_unified_format=False, outfn=None):
"""
We checked input as 0-based start format.
Return dict of key='chr1 123 +', and values=list of [1 1 0 0 1 1], in which 0-unmehylated, 1-methylated.
Note that the function requires per read stats, not frequencies of methylation.
### Example input format from Tombo
chr1 48020 48020 3526811b-6958-49f8-b78c-a205c1b5fc6e 1.185219591257949 + TATTACACCCG
chr1 48022 48022 3526811b-6958-49f8-b78c-a205c1b5fc6e 1.6267354150537658 + TTACACCCGTT
chr1 48023 48023 3526811b-6958-49f8-b78c-a205c1b5fc6e 2.6122662196889728 + TACACCCGTTA
chr1 48024 48024 3526811b-6958-49f8-b78c-a205c1b5fc6e 2.771131774766473 + ACACCCGTTAA
chr1 48041 48041 3526811b-6958-49f8-b78c-a205c1b5fc6e 6.524775544143312 + GATTTCTAAAT
chr1 48048 48048 3526811b-6958-49f8-b78c-a205c1b5fc6e 1.9142728191641216 + AAATGCATTGA
chr1 48054 48054 3526811b-6958-49f8-b78c-a205c1b5fc6e 1.8675210090110548 + ATTGACATTTG
......
chr1 8447736 8447736 c9339e26-1898-4483-a312-b78c3fafc6a9 8.073560995614967 - CTGTGCTGTGT
chr1 8447745 8447745 c9339e26-1898-4483-a312-b78c3fafc6a9 2.4467964154940858 - GTTGACCGTGT
chr1 8447746 8447746 c9339e26-1898-4483-a312-b78c3fafc6a9 1.966921521322515 - TTGACCGTGTA
chr1 8447754 8447754 c9339e26-1898-4483-a312-b78c3fafc6a9 5.387457000225035 - GTATGCAATGG
chr1 8447761 8447761 c9339e26-1898-4483-a312-b78c3fafc6a9 -0.8580941645036908 - ATGGACACAGA
============
"""
if score_cutoff is None:
score_cutoff = (-1.5, 2.5)
infile, lines = open_file_gz_or_txt(infileName)
if save_unified_format:
outf = gzip.open(outfn, 'wt')
outf.write(f"ID\tChr\tPos\tStrand\tScore\n")
cpgDict = defaultdict(list)
row_count = 0
meth_cnt = 0
unmeth_cnt = 0
for row in tqdm(infile, total=lines, desc="Import-Tombo"):
tmp = row.strip().split("\t")
if tmp[chr_col] not in filterChr:
continue
if output_first:
logger.debug(f'row = {list(enumerate(tmp))}')
output_first = False
if baseFormat == 1:
try:
start = int(tmp[start_col]) + 1
strand = tmp[strand_col]
if strand == '-':
start = start + 1
except:
logger.error(f" ####Tombo parse error at row={row}")
continue
elif baseFormat == 0:
try:
start = int(tmp[start_col])
strand = tmp[strand_col]
if strand == '-':
start = start + 1
except Exception as e:
logger.error(f" ####Tombo parse error at row={row}, exception={e}")
continue
else:
logger.error(
f"###\timportPredictions_Tombo InputValueError: baseCount value set to '{baseFormat}'. It should be equal to 0 or 1")
sys.exit(-1)
if strand not in ['-', '+']:
raise Exception(f'The file [{infileName}] can not recognized strand-info from row={row}, please check it')
try:
methCallTombo = float(tmp[meth_col])
except Exception as e:
logger.error(f" ####Tombo parse error at row={row}, exception={e}")
continue
meth_score = -methCallTombo
if save_unified_format:
# output to 1-based for meteore, ref: https://github.com/comprna/METEORE/blob/master/script_in_snakemake/extract_tombo_per_read_results.py
outf.write(f"{tmp[readid_col]}\t{tmp[chr_col]}\t{start}\t{tmp[strand_col]}\t{methCallTombo}\n")
key = (tmp[chr_col], start, strand)
if methCallTombo < score_cutoff[0]: # below -1.5 is methylated by default
meth_indicator = 1
meth_cnt += 1
        elif methCallTombo > score_cutoff[1]:  # above 2.5 is unmethylated by default
meth_indicator = 0
unmeth_cnt += 1
else:
continue
if include_score:
cpgDict[key].append((meth_indicator, meth_score))
else:
cpgDict[key].append(meth_indicator)
row_count += 1
infile.close()
if save_unified_format:
outf.close()
logger.debug(f'Save METEORE output format to {outfn}')
logger.debug(
f"###\timportPredictions_Tombo SUCCESS: {row_count:,} methylation calls (meth-calls={meth_cnt:,}, unmeth-call={unmeth_cnt:,}) mapped to {len(cpgDict):,} CpGs with score_cutoff={score_cutoff} from {infileName} file")
return cpgDict | 5,358,447 |
def recommend_with_rating(user, train):
"""
    Predict user u's rating of item i.
    :param user: the user
    :param train: the training set
    :return: the recommendation list
"""
rank = {}
ru = train[user]
for item in _movie_set:
if item in ru:
continue
rank[item] = __predict(user, item)
return rank.iteritems() | 5,358,448 |
def nl_to_break( text ):
"""
Text may have newlines, which we want to convert to <br />
when formatting for HTML display
"""
text=text.replace("<", "<") # To avoid HTML insertion
text=text.replace("\r", "")
text=text.replace("\n", "<br />")
return text | 5,358,449 |
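For example, with the escaping above:

assert nl_to_break("x < y\r\ndone") == "x &lt; y<br />done"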
def get_page_state(url):
"""
Checks page's current state by sending HTTP HEAD request
:param url: Request URL
:return: ("ok", return_code: int) if request successful,
("error", return_code: int) if error response code,
(None, error_message: str) if page fetching failed (timeout, invalid URL, ...)
"""
try:
response = requests.head(url, verify=False, timeout=10)
except requests.exceptions.RequestException as exception:
logging.error(exception)
return None, "Error fetching page"
if response.status_code >= 400:
return "error", response.status_code
return "ok", response.status_code | 5,358,450 |
def unique(list_, key=lambda x: x):
"""efficient function to uniquify a list preserving item order"""
seen = set()
result = []
for item in list_:
seenkey = key(item)
if seenkey in seen:
continue
seen.add(seenkey)
result.append(item)
return result | 5,358,451 |
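For example:

assert unique([3, 1, 3, 2, 1]) == [3, 1, 2]
assert unique(["Foo", "foo", "Bar"], key=str.lower) == ["Foo", "Bar"]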
def date_is_older(date_str1, date_str2):
"""
Checks to see if the first date is older than the second date.
    :param date_str1: the first date string (any format ``dateutil`` can parse).
    :param date_str2: the second date string.
    :return: True if date_str1 is strictly older (earlier) than date_str2.
"""
date1 = dateutil.parser.parse(date_str1)
date2 = dateutil.parser.parse(date_str2)
# set or normalize the timezone
target_tz = pytz.timezone('UTC')
if date1.tzinfo is None:
date1 = target_tz.localize(date1)
else:
date1 = target_tz.normalize(date1)
if date2.tzinfo is None:
date2 = target_tz.localize(date2)
else:
date2 = target_tz.normalize(date2)
return date1 < date2 | 5,358,452 |
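For example, mixing a timezone-aware and a naive timestamp:

assert date_is_older("2020-01-01T00:00:00Z", "2021-06-01 12:00:00")
assert not date_is_older("2022-01-01", "2021-06-01")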
def shorten_namespace(elements, nsmap):
"""
Map a list of XML tag class names on the internal classes (e.g. with shortened namespaces)
:param classes: list of XML tags
:param nsmap: XML nsmap
:return: List of mapped names
"""
names = []
_islist = True
if not isinstance(elements, (list, frozenset)):
elements = [elements]
_islist = False
for el in elements:
for key, value in nsmap.items():
if value in el:
if key == "cim":
name = el.split(value)[-1]
name = name[1:] if name.startswith("}") else name
elif "{"+value+"}" in el:
name = el.replace("{"+value+"}", key+"_")
else:
name = el.replace(value, key+"_")
names.append(name)
if el.startswith("#"):
names.append(el.split("#")[-1])
if not _islist and len(names) == 1:
names = names[0]
return names | 5,358,453 |
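A small sketch with a made-up namespace map:

nsmap = {"cim": "http://example.org/CIM#"}          # hypothetical namespace
tag = "{http://example.org/CIM#}ACLineSegment"
assert shorten_namespace(tag, nsmap) == "ACLineSegment"
assert shorten_namespace([tag], nsmap) == ["ACLineSegment"]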
def list_select_options_stream_points():
""" Return all data_points under data_stream """
product_uid = request.args.get('productID', type=str)
query = DataStream.query
if product_uid:
query = query.filter(DataStream.productID == product_uid)
streams_tree = []
data_streams = query.many()
for data_stream in data_streams:
data_points = []
for data_point in data_stream.dataPoints:
select_option = {
'label': data_point.dataPointName,
'value': data_point.dataPointID
}
data_points.append(select_option)
streams_tree.append({
'label': data_stream.streamName,
'value': data_stream.streamID,
'children': data_points
})
return jsonify(streams_tree) | 5,358,454 |
def test_add_edit_token_with_wrong_swapped_for(globaldb):
"""Test that giving a non-existing swapped_for token in the DB raises InputError
This can only be unit tested since via the API, marshmallow checks for Asset existence already
"""
# To unit test it we need to even hack it a bit. Make a new token, add it in the DB
# then delete it and then try to add a new one referencing the old one. Since we
# need to obtain a valid EthereumToken object
address_to_delete = make_ethereum_address()
token_to_delete = EthereumToken.initialize(
address=address_to_delete,
decimals=18,
name='willdell',
symbol='DELME',
)
token_to_delete_id = 'DELMEID1'
globaldb.add_asset(
asset_id=token_to_delete_id,
asset_type=AssetType.ETHEREUM_TOKEN,
data=token_to_delete,
)
asset_to_delete = Asset(token_to_delete_id)
assert globaldb.delete_ethereum_token(address_to_delete) == token_to_delete_id
# now try to add a new token with swapped_for pointing to a non existing token in the DB
with pytest.raises(InputError):
globaldb.add_asset(
asset_id='NEWID',
asset_type=AssetType.ETHEREUM_TOKEN,
data=EthereumToken.initialize(
address=make_ethereum_address(),
swapped_for=asset_to_delete,
),
)
# now edit a new token with swapped_for pointing to a non existing token in the DB
bat_custom = globaldb.get_ethereum_token(A_BAT.ethereum_address)
bat_custom = EthereumToken.initialize(
address=A_BAT.ethereum_address,
decimals=A_BAT.decimals,
name=A_BAT.name,
symbol=A_BAT.symbol,
started=A_BAT.started,
swapped_for=asset_to_delete,
coingecko=A_BAT.coingecko,
cryptocompare=A_BAT.cryptocompare,
protocol=None,
underlying_tokens=None,
)
with pytest.raises(InputError):
globaldb.edit_ethereum_token(bat_custom) | 5,358,455 |
def func2():
"""
:type: None
:rtype: List[float]
"""
return [math.pi, math.pi / 2, math.pi / 4, math.pi / 8] | 5,358,456 |
def get_playlist_name(pl_id):
"""returns the name of the playlist with the given id"""
sql = """SELECT * FROM playlists WHERE PlaylistId=?"""
cur.execute(sql, (pl_id,))
return cur.fetchone()[1] | 5,358,457 |
def weight(collection):
"""Choose an element from a dict based on its weight and return its key.
Parameters:
- collection (dict): dict of elements with weights as values.
Returns:
string: key of the chosen element.
"""
# 1. Get sum of weights
weight_sum = sum([value for value in collection.values()])
# 2. Generate random number between 1 and sum of weights
random_value = random.randint(1, weight_sum)
# 3. Iterate through items
for key, value in collection.items():
# 4. Subtract weight of each item from random number
random_value -= value
# 5. Compare with 0, if <= 0, that item has been chosen
if random_value <= 0:
return key
# 6. Else continue subtracting
# Should not reach here.
raise ValueError("Invalid argument value.") | 5,358,458 |
def procyon(path,dirname):
"""
calls the procyon decompiler from command line
"""
process = subprocess.Popen(["java","-jar", common.rootDir + "/lib/procyon/procyon-decompiler-0.5.30.jar", path, "-o ", dirname+"2"], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
try:
while True:
line = process.stdout.readline()
if not line:
break
if "Decompiling" in line:
common.counter2 = common.counter2 + 1
pub.sendMessage('decompile', procyon=round(common.counter2*100/common.count))
except Exception as e:
logger.debug(e.message) | 5,358,459 |
def knnsearch(y, x, k) :
""" Finds k closest points in y to each point in x.
Parameters
----------
x : (n,3) float array
A point cloud.
y : (m,3) float array
Another point cloud.
k : int
Number of nearest neighbors one wishes to compute.
Returns
-------
ordered_neighbors : (n,k) int array
List of k nearest neighbors to each point in x.
    dist : (n,k) float array
List of distances between each nearest neighbor and the corresponding point in x.
"""
x, y = map(np.asarray, (x, y))
tree =spatial.cKDTree(y)
ordered_neighbors = tree.query(x, k)[1] #sz x, k
ID = np.transpose(np.matlib.repmat(np.arange(np.shape(x)[0]), k,1))
dist = np.sum((x[ID,:]-y[ordered_neighbors,:])**2,axis=2)**.5
return ordered_neighbors, dist | 5,358,460 |
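A shape check with random point clouds, assuming numpy, numpy.matlib, and scipy.spatial are imported as the function body expects:

import numpy as np

rng = np.random.default_rng(0)
y = rng.random((100, 3))   # reference cloud
x = rng.random((20, 3))    # query points
neighbors, dist = knnsearch(y, x, k=4)
assert neighbors.shape == (20, 4) and dist.shape == (20, 4)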
def test_model_constraint():
""" A retrieval with just the model constraint should converge
to the model constraint. """
Grid0 = pyart.io.read_grid(pydda.tests.EXAMPLE_RADAR0)
""" Make fake model grid of just U = 1 m/s everywhere"""
Grid0.fields["U_fakemodel"] = deepcopy(Grid0.fields["corrected_velocity"])
Grid0.fields["V_fakemodel"] = deepcopy(Grid0.fields["corrected_velocity"])
Grid0.fields["W_fakemodel"] = deepcopy(Grid0.fields["corrected_velocity"])
Grid0.fields["U_fakemodel"]["data"] = np.ones(
Grid0.fields["U_fakemodel"]["data"].shape)
Grid0.fields["V_fakemodel"]["data"] = np.zeros(
Grid0.fields["V_fakemodel"]["data"].shape)
Grid0.fields["W_fakemodel"]["data"] = np.zeros(
Grid0.fields["W_fakemodel"]["data"].shape)
u_init = np.zeros(Grid0.fields["U_fakemodel"]["data"].shape)
v_init = np.zeros(Grid0.fields["V_fakemodel"]["data"].shape)
w_init = np.zeros(Grid0.fields["W_fakemodel"]["data"].shape)
new_grids = pydda.retrieval.get_dd_wind_field(
[Grid0], u_init, v_init, w_init, Co=0.0, Cx=0.0, Cy=0.0, Cm=0.0,
Cmod=1.0, mask_outside_opt=False, filt_iterations=0,
vel_name='corrected_velocity', refl_field='reflectivity',
model_fields=['fakemodel'])
np.testing.assert_allclose(new_grids[0].fields["u"]["data"],
Grid0.fields["U_fakemodel"]["data"],
atol=1e-2)
np.testing.assert_allclose(new_grids[0].fields["v"]["data"],
Grid0.fields["V_fakemodel"]["data"],
atol=1e-2)
np.testing.assert_allclose(new_grids[0].fields["w"]["data"],
Grid0.fields["W_fakemodel"]["data"],
atol=1e-2) | 5,358,461 |
def segmentation_gaussian_measurement(
y_true,
y_pred,
gaussian_sigma=3,
measurement=keras.losses.binary_crossentropy):
""" Apply metric or loss measurement incorporating a 2D gaussian.
Only works with batch size 1.
Loop and call this function repeatedly over each sample
to use a larger batch size.
# Arguments
y_true: is assumed to be [label, x_img_coord, y_image_coord]
y_pred: is expected to be a 2D array of labels
with shape [1, img_height, img_width, 1].
"""
with K.name_scope(name='grasp_segmentation_gaussian_loss') as scope:
if keras.backend.ndim(y_true) == 4:
# sometimes the dimensions are expanded from 2 to 4
# to meet Keras' expectations.
# In that case reduce them back to 2
y_true = K.squeeze(y_true, axis=-1)
y_true = K.squeeze(y_true, axis=-1)
print('y_pred: ', y_pred)
print('y_true: ', y_true)
# y_true should have shape [batch_size, 3] here,
# label, y_height_coordinate, x_width_coordinate become shape:
# [batch_size, 1]
label = K.expand_dims(y_true[:, 0])
print('label: ', label)
y_height_coordinate = K.expand_dims(y_true[:, 1])
x_width_coordinate = K.expand_dims(y_true[:, 2])
# label = K.reshape(label, [1, 1])
print('label: ', label)
image_shape = tf.Tensor.get_shape(y_pred)
y_true_img = tile_vector_as_image_channels(label, image_shape)
y_true_img = K.cast(y_true_img, 'float32')
loss_img = measurement(y_true_img, y_pred)
y_pred_shape = K.int_shape(y_pred)
if len(y_pred_shape) == 3:
y_pred_shape = y_pred_shape[:-1]
if len(y_pred_shape) == 4:
y_pred_shape = y_pred_shape[1:3]
def batch_gaussian(one_y_true):
# def batch_gaussian(y_height_coord, x_width_coord):
# weights = gaussian_kernel_2D(size=y_pred_shape, center=(y_height_coord, x_width_coord), sigma=gaussian_sigma)
# weights = gaussian_kernel_2D(size=y_pred_shape, center=(y_height_coordinate, x_width_coordinate), sigma=gaussian_sigma)
return gaussian_kernel_2D(size=y_pred_shape, center=(one_y_true[0], one_y_true[1]), sigma=gaussian_sigma)
weights = K.map_fn(batch_gaussian, y_true)
loss_img = K.flatten(loss_img)
weights = K.flatten(weights)
weighted_loss_img = tf.multiply(loss_img, weights)
loss_sum = K.sum(weighted_loss_img)
loss_sum = K.reshape(loss_sum, [1, 1])
return loss_sum | 5,358,462 |
def default_check_tput(exp, metrics, case, scores=default_target_score,
real_bw=0):
"""The default test case for tput metrics unless overridden by the user.
Scoring semantics: Suppose a test is N seconds long. Not all flows
necessarily start or end at the same time. However, we assume that during
any second N, at least one flow is active during that time. Thus, during
each second, there will be non-zero link utilization. We compute the average
link utilization across all seconds of the test, from [0, exp.dur]. We
compare this average to a per-test minimum threshold.
Args:
exp: The experiment object.
metrics: The metrics dictionary.
case: The TestCase object.
scores: Is either the score dictionary, or a function that produces the
score dictionary based on an experiment.
real_bw: Real bandwidth value (e.g., bw overwritten by qdisc for ECN)
"""
target_dict = _produce_scores(scores, exp)
target_score = target_dict['tput']
tputs = metrics['tputs'].as_array()
LOG.debug('default_check_tput: target=%s, tputs_array=%s',
target_score, tputs)
case.expect(min(tputs) != 0, 'minimal throughput should not be 0')
# Figure out the expected bottleneck bw (or first one, if several).
if isinstance(exp.bw, Bandwidth):
bwlink = exp.bw.downlink
elif isinstance(exp.bw, VarBandwidth):
bwlink = exp.bw.bws[0].downlink
if real_bw:
bwlink = real_bw
# Use min as the target bandwidth if there was a policer.
if exp.policer is not None:
bwpolicer = exp.policer.bw
LOG.debug('bwlink=%s, bwpolicer=%s', bwlink, bwpolicer)
if bwpolicer < bwlink and bwpolicer > 0:
bwlink = bwpolicer
LOG.debug('bwlink=%s, rtt=%s, #conn=%s', bwlink, exp.rtt, exp.nconns())
case.expect(bwlink != 0, 'bw.downlink should not be 0')
tput_timeline_data = False
for tput in tputs:
if not isinstance(tput, float):
tput_timeline_data = True
break
if not tput_timeline_data:
tput_timeline = {0: tputs}
else:
tput_timeline = {}
# Each (per-port) entry is a list of (timestamp, tput) values.
for port_timeline in tputs:
for timestamp, tput_mbps in port_timeline:
# Don't analyze past the configured end of the experiment.
if timestamp > exp.dur:
break
if timestamp not in tput_timeline:
tput_timeline[timestamp] = []
tput_timeline[timestamp].append(tput_mbps)
scores = []
for timestamp in sorted(tput_timeline.keys()):
tputs_for_timestamp = tput_timeline[timestamp]
tputs_sum = sum(tputs_for_timestamp)
tput_score = 100 * tputs_sum / bwlink
scores.append(tput_score)
LOG.debug('bw=%s, timestamp=%s, tputs_sum=%s, score=%.1f, target=%s',
bwlink, timestamp, tputs_sum, tput_score, target_score)
avg_tput_score = sum(scores) / len(scores)
# Check tput score by comparing with expected threshold.
case.expect(avg_tput_score >= target_score,
'low tput: avg score=%.1f, bw=%s, tputs=%s, tput target=%s'
% (avg_tput_score, bwlink, tputs_sum, target_score))
for cc in _BBR:
bbr_ports = metrics['tputs'].cc_ports(cc)
# Check lock_on_bws, which is the bw when bbr exiting startup
target_score = target_dict['lock_on_bw']
lock_on_bws = metrics['lock_on_bw']
for port in bbr_ports:
lock_on_bw = lock_on_bws.get(port)
fair_share = float(bwlink) / float(exp.nconns())
lock_on_bw_score = 100.0 * lock_on_bw / fair_share
LOG.debug('port=%s, lock_on_bw=%s, score=%.1f, target=%s',
port, lock_on_bw, lock_on_bw_score, target_score)
case.expect(lock_on_bw_score >= target_score,
'premature lock-on port=%s, lock_on_bw=%s, '
'score=%.1f, lock_on_bw target=%.1f' % (
port, lock_on_bw, lock_on_bw_score,
target_score)) | 5,358,463 |
def set_lr(optimizer, lr):
""" set learning rate """
for g in optimizer.param_groups:
g['lr'] = lr | 5,358,464 |
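Usage sketch with a PyTorch optimizer:

import torch

params = [torch.nn.Parameter(torch.zeros(3))]
opt = torch.optim.SGD(params, lr=0.1)
set_lr(opt, 0.01)
assert all(g['lr'] == 0.01 for g in opt.param_groups)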
def load_AACHEN_PARAMS(AHCHEN_h5_file, log_file_indicator):
"""
    This module extracts parameters trained with the framework https://github.com/rwth-i6/returnn
    and the neural network proposed in the mdlstm demo structure.
Args:
AHCHEN_h5_file: file in format hdf5 generated with https://github.com/rwth-i6/returnn framework.
log_file_indicator: _io.TextIOWrapper of the file where logs will be written
Returns:
The parameters of each layer of the network.
"""
import h5py
import numpy as np
try:
print('Loading AACHEN params from h5 file: ' + str(AHCHEN_h5_file))
log_file_indicator.write('\nLoading AACHEN params from h5 file: ' + str(AHCHEN_h5_file) + '.\n')
fh5 = h5py.File(AHCHEN_h5_file, 'r')
except OSError:
print('File not found: ' + str(AHCHEN_h5_file))
log_file_indicator.write('\n Exception, file not found: ' + str(AHCHEN_h5_file) + '\n')
print('Closing')
log_file_indicator.close()
sys.exit(1)
else:
w_conv0 = fh5['conv0']['W_conv0'][:]
w_conv0 = w_conv0.transpose([2, 3, 1, 0]) # AACHEN shape (n_out, n_in, filter_size), tensorflow shape (filter_size, n_in, n_out)
b_conv0 = fh5['conv0']['b_conv0'][:]
w_conv1 = fh5['conv1']['W_conv1'][:]
w_conv1 = w_conv1.transpose([2, 3, 1, 0]) # AACHEN shape (n_out, n_in, filter_size), tensorflow shape (filter_size, n_in, n_out)
b_conv1 = fh5['conv1']['b_conv1'][:]
w_conv2 = fh5['conv2']['W_conv2'][:]
w_conv2 = w_conv2.transpose([2, 3, 1, 0]) # AACHEN shape (n_out, n_in, filter_size), tensorflow shape (filter_size, n_in, n_out)
b_conv2 = fh5['conv2']['b_conv2'][:]
w_conv3 = fh5['conv3']['W_conv3'][:]
w_conv3 = w_conv3.transpose([2, 3, 1, 0]) # AACHEN shape (n_out, n_in, filter_size), tensorflow shape (filter_size, n_in, n_out)
b_conv3 = fh5['conv3']['b_conv3'][:]
w_conv4 = fh5['conv4']['W_conv4'][:]
w_conv4 = w_conv4.transpose([2, 3, 1, 0]) # AACHEN shape (n_out, n_in, filter_size), tensorflow shape (filter_size, n_in, n_out)
b_conv4 = fh5['conv4']['b_conv4'][:]
# 1 AACHEN --> dwd-fwd
V_x1_mdlstm0, V_h1_mdlstm0, V_v1_mdlstm0 = fh5['mdlstm0']['W1_mdlstm0'][:], fh5['mdlstm0']['U1_mdlstm0'][:], fh5['mdlstm0']['V1_mdlstm0'][:]
W_df_mdlstm0 = np.concatenate((V_x1_mdlstm0, V_h1_mdlstm0, V_v1_mdlstm0), axis=0)
b_df_mdlstm0 = fh5['mdlstm0']['b1_mdlstm0']
# 2 AACHEN --> uwd-fwd
V_x2_mdlstm0, V_h2_mdlstm0, V_v2_mdlstm0 = fh5['mdlstm0']['W2_mdlstm0'][:], fh5['mdlstm0']['U2_mdlstm0'][:], fh5['mdlstm0']['V2_mdlstm0'][:]
W_uf_mdlstm0 = np.concatenate((V_x2_mdlstm0, V_h2_mdlstm0, V_v2_mdlstm0), axis=0)
b_uf_mdlstm0 = fh5['mdlstm0']['b2_mdlstm0']
# 2 AACHEN --> dwd-bwd
V_x3_mdlstm0, V_h3_mdlstm0, V_v3_mdlstm0 = fh5['mdlstm0']['W3_mdlstm0'][:], fh5['mdlstm0']['U3_mdlstm0'][:], fh5['mdlstm0']['V3_mdlstm0'][:]
W_db_mdlstm0 = np.concatenate((V_x3_mdlstm0, V_h3_mdlstm0, V_v3_mdlstm0), axis=0)
b_db_mdlstm0 = fh5['mdlstm0']['b3_mdlstm0']
# 2 AACHEN --> uwd-bwd
V_x4_mdlstm0, V_h4_mdlstm0, V_v4_mdlstm0 = fh5['mdlstm0']['W4_mdlstm0'][:], fh5['mdlstm0']['U4_mdlstm0'][:], fh5['mdlstm0']['V4_mdlstm0'][:]
W_ub_mdlstm0 = np.concatenate((V_x4_mdlstm0, V_h4_mdlstm0, V_v4_mdlstm0), axis=0)
b_ub_mdlstm0 = fh5['mdlstm0']['b4_mdlstm0']
# 1 AACHEN --> dwd-fwd
V_x1_mdlstm1, V_h1_mdlstm1, V_v1_mdlstm1 = fh5['mdlstm1']['W1_mdlstm1'][:], fh5['mdlstm1']['U1_mdlstm1'][:], fh5['mdlstm1']['V1_mdlstm1'][:]
W_df_mdlstm1 = np.concatenate((V_x1_mdlstm1, V_h1_mdlstm1, V_v1_mdlstm1), axis=0)
b_df_mdlstm1 = fh5['mdlstm1']['b1_mdlstm1']
# 2 AACHEN --> uwd-fwd
V_x2_mdlstm1, V_h2_mdlstm1, V_v2_mdlstm1 = fh5['mdlstm1']['W2_mdlstm1'][:], fh5['mdlstm1']['U2_mdlstm1'][:], fh5['mdlstm1']['V2_mdlstm1'][:]
W_uf_mdlstm1 = np.concatenate((V_x2_mdlstm1, V_h2_mdlstm1, V_v2_mdlstm1), axis=0)
b_uf_mdlstm1 = fh5['mdlstm1']['b2_mdlstm1']
# 2 AACHEN --> dwd-bwd
V_x3_mdlstm1, V_h3_mdlstm1, V_v3_mdlstm1 = fh5['mdlstm1']['W3_mdlstm1'][:], fh5['mdlstm1']['U3_mdlstm1'][:], fh5['mdlstm1']['V3_mdlstm1'][:]
W_db_mdlstm1 = np.concatenate((V_x3_mdlstm1, V_h3_mdlstm1, V_v3_mdlstm1), axis=0)
b_db_mdlstm1 = fh5['mdlstm1']['b3_mdlstm1']
# 2 AACHEN --> uwd-bwd
V_x4_mdlstm1, V_h4_mdlstm1, V_v4_mdlstm1 = fh5['mdlstm1']['W4_mdlstm1'][:], fh5['mdlstm1']['U4_mdlstm1'][:], fh5['mdlstm1']['V4_mdlstm1'][:]
W_ub_mdlstm1 = np.concatenate((V_x4_mdlstm1, V_h4_mdlstm1, V_v4_mdlstm1), axis=0)
b_ub_mdlstm1 = fh5['mdlstm1']['b4_mdlstm1']
# 1 AACHEN --> dwd-fwd
V_x1_mdlstm2, V_h1_mdlstm2, V_v1_mdlstm2 = fh5['mdlstm2']['W1_mdlstm2'][:], fh5['mdlstm2']['U1_mdlstm2'][:], fh5['mdlstm2']['V1_mdlstm2'][:]
W_df_mdlstm2 = np.concatenate((V_x1_mdlstm2, V_h1_mdlstm2, V_v1_mdlstm2), axis=0)
b_df_mdlstm2 = fh5['mdlstm2']['b1_mdlstm2']
# 2 AACHEN --> uwd-fwd
V_x2_mdlstm2, V_h2_mdlstm2, V_v2_mdlstm2 = fh5['mdlstm2']['W2_mdlstm2'][:], fh5['mdlstm2']['U2_mdlstm2'][:], fh5['mdlstm2']['V2_mdlstm2'][:]
W_uf_mdlstm2 = np.concatenate((V_x2_mdlstm2, V_h2_mdlstm2, V_v2_mdlstm2), axis=0)
b_uf_mdlstm2 = fh5['mdlstm2']['b2_mdlstm2']
# 2 AACHEN --> dwd-bwd
V_x3_mdlstm2, V_h3_mdlstm2, V_v3_mdlstm2 = fh5['mdlstm2']['W3_mdlstm2'][:], fh5['mdlstm2']['U3_mdlstm2'][:], fh5['mdlstm2']['V3_mdlstm2'][:]
W_db_mdlstm2 = np.concatenate((V_x3_mdlstm2, V_h3_mdlstm2, V_v3_mdlstm2), axis=0)
b_db_mdlstm2 = fh5['mdlstm2']['b3_mdlstm2']
# 2 AACHEN --> uwd-bwd
V_x4_mdlstm2, V_h4_mdlstm2, V_v4_mdlstm2 = fh5['mdlstm2']['W4_mdlstm2'][:], fh5['mdlstm2']['U4_mdlstm2'][:], fh5['mdlstm2']['V4_mdlstm2'][:]
W_ub_mdlstm2 = np.concatenate((V_x4_mdlstm2, V_h4_mdlstm2, V_v4_mdlstm2), axis=0)
b_ub_mdlstm2 = fh5['mdlstm2']['b4_mdlstm2']
# 1 AACHEN --> dwd-fwd
V_x1_mdlstm3, V_h1_mdlstm3, V_v1_mdlstm3 = fh5['mdlstm3']['W1_mdlstm3'][:], fh5['mdlstm3']['U1_mdlstm3'][:], fh5['mdlstm3']['V1_mdlstm3'][:]
W_df_mdlstm3 = np.concatenate((V_x1_mdlstm3, V_h1_mdlstm3, V_v1_mdlstm3), axis=0)
b_df_mdlstm3 = fh5['mdlstm3']['b1_mdlstm3']
# 2 AACHEN --> uwd-fwd
V_x2_mdlstm3, V_h2_mdlstm3, V_v2_mdlstm3 = fh5['mdlstm3']['W2_mdlstm3'][:], fh5['mdlstm3']['U2_mdlstm3'][:], fh5['mdlstm3']['V2_mdlstm3'][:]
W_uf_mdlstm3 = np.concatenate((V_x2_mdlstm3, V_h2_mdlstm3, V_v2_mdlstm3), axis=0)
b_uf_mdlstm3 = fh5['mdlstm3']['b2_mdlstm3']
# 2 AACHEN --> dwd-bwd
V_x3_mdlstm3, V_h3_mdlstm3, V_v3_mdlstm3 = fh5['mdlstm3']['W3_mdlstm3'][:], fh5['mdlstm3']['U3_mdlstm3'][:], fh5['mdlstm3']['V3_mdlstm3'][:]
W_db_mdlstm3 = np.concatenate((V_x3_mdlstm3, V_h3_mdlstm3, V_v3_mdlstm3), axis=0)
b_db_mdlstm3 = fh5['mdlstm3']['b3_mdlstm3']
# 2 AACHEN --> uwd-bwd
V_x4_mdlstm3, V_h4_mdlstm3, V_v4_mdlstm3 = fh5['mdlstm3']['W4_mdlstm3'][:], fh5['mdlstm3']['U4_mdlstm3'][:], fh5['mdlstm3']['V4_mdlstm3'][:]
W_ub_mdlstm3 = np.concatenate((V_x4_mdlstm3, V_h4_mdlstm3, V_v4_mdlstm3), axis=0)
b_ub_mdlstm3 = fh5['mdlstm3']['b4_mdlstm3']
# 1 AACHEN --> dwd-fwd
V_x1_mdlstm4, V_h1_mdlstm4, V_v1_mdlstm4 = fh5['mdlstm4']['W1_mdlstm4'][:], fh5['mdlstm4']['U1_mdlstm4'][:], fh5['mdlstm4']['V1_mdlstm4'][:]
W_df_mdlstm4 = np.concatenate((V_x1_mdlstm4, V_h1_mdlstm4, V_v1_mdlstm4), axis=0)
b_df_mdlstm4 = fh5['mdlstm4']['b1_mdlstm4']
# 2 AACHEN --> uwd-fwd
V_x2_mdlstm4, V_h2_mdlstm4, V_v2_mdlstm4 = fh5['mdlstm4']['W2_mdlstm4'][:], fh5['mdlstm4']['U2_mdlstm4'][:], fh5['mdlstm4']['V2_mdlstm4'][:]
W_uf_mdlstm4 = np.concatenate((V_x2_mdlstm4, V_h2_mdlstm4, V_v2_mdlstm4), axis=0)
b_uf_mdlstm4 = fh5['mdlstm4']['b2_mdlstm4']
# 3 AACHEN --> dwd-bwd
V_x3_mdlstm4, V_h3_mdlstm4, V_v3_mdlstm4 = fh5['mdlstm4']['W3_mdlstm4'][:], fh5['mdlstm4']['U3_mdlstm4'][:], fh5['mdlstm4']['V3_mdlstm4'][:]
W_db_mdlstm4 = np.concatenate((V_x3_mdlstm4, V_h3_mdlstm4, V_v3_mdlstm4), axis=0)
b_db_mdlstm4 = fh5['mdlstm4']['b3_mdlstm4']
# 4 AACHEN --> uwd-bwd
V_x4_mdlstm4, V_h4_mdlstm4, V_v4_mdlstm4 = fh5['mdlstm4']['W4_mdlstm4'][:], fh5['mdlstm4']['U4_mdlstm4'][:], fh5['mdlstm4']['V4_mdlstm4'][:]
W_ub_mdlstm4 = np.concatenate((V_x4_mdlstm4, V_h4_mdlstm4, V_v4_mdlstm4), axis=0)
b_ub_mdlstm4 = fh5['mdlstm4']['b4_mdlstm4']
W_dense = fh5['output']['W_in_mdlstm4_output']
b_dense = fh5['output']['b_output']
return [w_conv0, b_conv0,
w_conv1, b_conv1,
w_conv2, b_conv2,
w_conv3, b_conv3,
w_conv4, b_conv4,
W_df_mdlstm0, b_df_mdlstm0, W_uf_mdlstm0, b_uf_mdlstm0, W_db_mdlstm0, b_db_mdlstm0, W_ub_mdlstm0, b_ub_mdlstm0,
W_df_mdlstm1, b_df_mdlstm1, W_uf_mdlstm1, b_uf_mdlstm1, W_db_mdlstm1, b_db_mdlstm1, W_ub_mdlstm1, b_ub_mdlstm1,
W_df_mdlstm2, b_df_mdlstm2, W_uf_mdlstm2, b_uf_mdlstm2, W_db_mdlstm2, b_db_mdlstm2, W_ub_mdlstm2, b_ub_mdlstm2,
W_df_mdlstm3, b_df_mdlstm3, W_uf_mdlstm3, b_uf_mdlstm3, W_db_mdlstm3, b_db_mdlstm3, W_ub_mdlstm3, b_ub_mdlstm3,
W_df_mdlstm4, b_df_mdlstm4, W_uf_mdlstm4, b_uf_mdlstm4, W_db_mdlstm4, b_db_mdlstm4, W_ub_mdlstm4, b_ub_mdlstm4,
W_dense, b_dense] | 5,358,465 |
def prep_doc_id_context( doc_id: str, usr_first_name: str, usr_is_authenticated: bool ) -> dict:
""" Preps context for record_edit.html template when a doc_id (meaning a `Citation` id) is included.
Called by views.edit_record() """
log.debug( 'starting prep_doc_id_context()' )
log.debug( f'doc_id, ``{doc_id}``' )
context = { 'user_first_name': usr_first_name, 'user_is_authenticated': usr_is_authenticated }
session = make_session()
common_data: dict = prepare_common_data( session )
context.update( common_data ) # merges common_data key-vals into context
doc = session.query( models_alch.Citation ).get( doc_id )
log.debug( f'doc, ``{pprint.pformat(doc.__dict__)}``' )
context['rec_id'] = None
try:
context['doc_display'] = doc.display
    except Exception:
log.exception( 'doc.display not available; traceback follows; processing will continue' )
context['doc_display'] = 'display not available'
context['doc_id'] = doc.id
log.debug( f'context (first 1000 characters), ``{pprint.pformat(context)[0:1000]}``' )
return context | 5,358,466 |
def request_pull(repo, requestid, username=None, namespace=None):
"""View a pull request with the changes from the fork into the project."""
repo = flask.g.repo
_log.info("Viewing pull Request #%s repo: %s", requestid, repo.fullname)
if not repo.settings.get("pull_requests", True):
flask.abort(404, description="No pull-requests found for this project")
request = pagure.lib.query.search_pull_requests(
flask.g.session, project_id=repo.id, requestid=requestid
)
if not request:
flask.abort(404, description="Pull-request not found")
if request.remote:
repopath = pagure.utils.get_remote_repo_path(
request.remote_git, request.branch_from
)
parentpath = pagure.utils.get_repo_path(request.project)
else:
repo_from = request.project_from
parentpath = pagure.utils.get_repo_path(request.project)
repopath = parentpath
if repo_from:
repopath = pagure.utils.get_repo_path(repo_from)
repo_obj = pygit2.Repository(repopath)
orig_repo = pygit2.Repository(parentpath)
diff_commits = []
diff = None
# Closed pull-request
if request.status != "Open":
commitid = request.commit_stop
try:
for commit in repo_obj.walk(commitid, pygit2.GIT_SORT_NONE):
diff_commits.append(commit)
if commit.oid.hex == request.commit_start:
break
except KeyError:
# This happens when repo.walk() cannot find commitid
pass
if diff_commits:
            # Use the parent of the first commit in the PR as the start
            # point when it has one; otherwise point to that commit itself
start = diff_commits[-1].oid.hex
if diff_commits[-1].parents:
start = diff_commits[-1].parents[0].oid.hex
            # If the start and end commits are the same, we are dealing with
            # a single commit that has no parent, so just diff that one
            # commit
if start == diff_commits[0].oid.hex:
diff = diff_commits[0].tree.diff_to_tree(swap=True)
else:
diff = repo_obj.diff(
repo_obj.revparse_single(start),
repo_obj.revparse_single(diff_commits[0].oid.hex),
)
else:
try:
diff_commits, diff = pagure.lib.git.diff_pull_request(
flask.g.session, request, repo_obj, orig_repo
)
except pagure.exceptions.PagureException as err:
flask.flash("%s" % err, "error")
except SQLAlchemyError as err: # pragma: no cover
flask.g.session.rollback()
_log.exception(err)
flask.flash(
"Could not update this pull-request in the database", "error"
)
if diff:
diff.find_similar()
form = pagure.forms.MergePRForm()
trigger_ci_pr_form = pagure.forms.TriggerCIPRForm()
# we need to leave out all members of trigger_ci_conf that have
# "meta" set to False or meta["requires_project_hook_attr"] condition
# defined and it's not met
trigger_ci_conf = pagure_config["TRIGGER_CI"]
if not isinstance(trigger_ci_conf, dict):
trigger_ci_conf = {}
trigger_ci = {}
# make sure all the backrefs are set properly on repo
pagure.lib.plugins.get_enabled_plugins(repo)
for comment, meta in trigger_ci_conf.items():
if not meta:
continue
cond = meta.get("requires_project_hook_attr", ())
if cond and not pagure.utils.project_has_hook_attr_value(repo, *cond):
continue
trigger_ci[comment] = meta
committer = False
if request.project_from:
committer = pagure.utils.is_repo_committer(request.project_from)
else:
committer = pagure.utils.is_repo_committer(request.project)
can_rebase_branch = not request.remote_git and committer
can_delete_branch = (
pagure_config.get("ALLOW_DELETE_BRANCH", True) and can_rebase_branch
)
return flask.render_template(
"repo_pull_request.html",
select="requests",
requestid=requestid,
repo=repo,
username=username,
repo_obj=repo_obj,
pull_request=request,
diff_commits=diff_commits,
diff=diff,
mergeform=form,
subscribers=pagure.lib.query.get_watch_list(flask.g.session, request),
tag_list=pagure.lib.query.get_tags_of_project(flask.g.session, repo),
can_rebase_branch=can_rebase_branch,
can_delete_branch=can_delete_branch,
trigger_ci=trigger_ci,
trigger_ci_pr_form=trigger_ci_pr_form,
flag_statuses_labels=json.dumps(pagure_config["FLAG_STATUSES_LABELS"]),
) | 5,358,467 |
def make_matrix(points: List[float], degree: int) -> List[List[float]]:
"""Return a nested list representation of a matrix consisting of the basis
elements of the polynomial of degree n, evaluated at each of the points.
In other words, each row consists of 1, x, x^2, ..., x^n, where n is the degree,
and x is a value in points.
Preconditions:
- degree < len(points)
>>> make_matrix([1, 2, 3], 2)
[[1, 1, 1], [1, 2, 4], [1, 3, 9]]
"""
matrix = []
for point in points:
row = [point ** index for index in range(degree + 1)]
matrix.append(row)
return matrix | 5,358,468 |
def verify(request, token, template_name='uaccounts/verified.html'):
"""Try to verify email address using given token."""
try:
verification = verify_token(token, VERIFICATION_EXPIRES)
except VerificationError:
return redirect('uaccounts:index')
if verification.email.profile != request.user.profile:
return redirect('uaccounts:index')
verification.email.verified = True
verification.email.save()
verification.delete()
return render(request, template_name) | 5,358,469 |
def set_logger(logger: logging.Logger) -> None:
"""
Removes all other loggers, and adds stream_handler and file_handler to the given logger
This is being used for sanic logging, because for some weird reason sanic does not allow you
to pass in a custom logging object. Instead, you have to modify their logger, or use logging.dictConfig.
Args:
        logger: The logger object to modify
"""
for handler in logger.handlers:
logger.removeHandler(handler)
logger.addHandler(stream_handler)
logger.addHandler(file_handler) | 5,358,470 |
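# Hypothetical usage sketch for set_logger() above (not part of the original
# source); it assumes the module-level stream_handler and file_handler that
# the function body references are already configured.
import logging
sanic_error_logger = logging.getLogger("sanic.error")
set_logger(sanic_error_logger)  # replace sanic's handlers with ours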
def run_train(params: dict) -> Tuple[threading.Thread, threading.Thread]:
"""Train a network on a data generator.
params -> dictionary.
Required fields:
* model_name
* generator_name
* dataset_dir
* tile_size
* clf_name
* checkpoints_dir
* summaries_dir
Returns prefetch thread & model.fit thread"""
assert 'model_name' in params
assert 'generator_name' in params
Model = ModelFactory.get_model(params['model_name'])
Generator = GeneratorFactory.get_generator(params['generator_name'])
model = Model(**params)
feed = Generator(**params)
pf = PreFetch(feed)
t1 = threading.Thread(target=pf.fetch)
t2 = threading.Thread(target=model.fit, args=(pf,))
t1.start()
t2.start()
return t1,t2 | 5,358,471 |
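# Illustrative call for run_train() above. The parameter values are
# assumptions for the sketch, not taken from the original project; the model
# and generator names must exist in ModelFactory / GeneratorFactory.
example_params = {
    'model_name': 'unet',
    'generator_name': 'tiles',
    'dataset_dir': '/data/tiles',
    'tile_size': 256,
    'clf_name': 'baseline',
    'checkpoints_dir': './checkpoints',
    'summaries_dir': './summaries',
}
fetch_thread, fit_thread = run_train(example_params)
fetch_thread.join()
fit_thread.join()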
def request(url, method='GET', headers=None, original_ip=None, debug=False,
logger=None, **kwargs):
"""Perform a http request with standard settings.
A wrapper around requests.request that adds standard headers like
User-Agent and provides optional debug logging of the request.
Arguments that are not handled are passed through to the requests library.
:param string url: The url to make the request of.
:param string method: The http method to use. (eg. 'GET', 'POST')
:param dict headers: Headers to be included in the request. (optional)
:param string original_ip: Mark this request as forwarded for this ip.
(optional)
:param bool debug: Enable debug logging. (Defaults to False)
:param logging.Logger logger: A logger to output to. (optional)
:raises exceptions.ClientException: For connection failure, or to indicate
an error response code.
:returns: The response to the request.
"""
if not headers:
headers = dict()
if not logger:
logger = _logger
headers.setdefault('User-Agent', USER_AGENT)
if original_ip:
headers['Forwarded'] = "for=%s;by=%s" % (original_ip, USER_AGENT)
if debug:
string_parts = ['curl -i']
if method:
string_parts.append(' -X %s' % method)
string_parts.append(' %s' % url)
if headers:
for header in six.iteritems(headers):
string_parts.append(' -H "%s: %s"' % header)
logger.debug("REQ: %s" % "".join(string_parts))
data = kwargs.get('data')
if data:
logger.debug("REQ BODY: %s\n" % data)
try:
resp = requests.request(
method,
url,
headers=headers,
**kwargs)
except requests.ConnectionError:
msg = 'Unable to establish connection to %s' % url
raise exceptions.ClientException(msg)
if debug:
logger.debug("RESP: [%s] %s\nRESP BODY: %s\n",
resp.status_code, resp.headers, resp.text)
if resp.status_code >= 400:
logger.debug("Request returned failure status: %s",
resp.status_code)
raise exceptions.from_response(resp, method, url)
return resp | 5,358,472 |
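# Hedged usage sketch for request() above; the URL, token and IP are
# placeholders, and the exceptions module is the one already imported by this
# file.
try:
    resp = request('https://example.com/v1/status',
                   method='GET',
                   headers={'X-Auth-Token': 'TOKEN'},
                   original_ip='203.0.113.7',
                   debug=True)
    print(resp.status_code)
except exceptions.ClientException as exc:
    print('request failed: %s' % exc)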
def AddDestAddressGroups(parser):
"""Adds a destination address group to this rule."""
parser.add_argument(
'--dest-address-groups',
type=arg_parsers.ArgList(),
metavar='DEST_ADDRESS_GROUPS',
required=False,
hidden=True,
help=(
'Dest address groups to match for this rule. '
            'Can only be specified if DIRECTION is egress.'
)) | 5,358,473 |
def adjust_seconds_fr(samples_per_channel_in_frame,fs,seconds_fr,num_frame):
"""
Get the timestamp for the first sample in this frame.
Parameters
----------
samples_per_channel_in_frame : int
number of sample components per channel.
fs : int or float
sampling frequency.
seconds_fr : int or float
seconds for this frame (from frame header)
num_frame : int
frame number (from frame header).
Returns
-------
    time_first_sample : float
timestamp [s] corresponding to the first sample of this frame.
"""
seconds_per_frame=samples_per_channel_in_frame/float(fs)
time_first_sample=float(seconds_fr)+num_frame*seconds_per_frame
return(time_first_sample) | 5,358,474 |
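# Worked example for adjust_seconds_fr() above; the numbers are illustrative.
# 20000 sample components per channel at fs = 10000 Hz means each frame spans
# 2 s, so frame 3 of second 100 starts at 100 + 3 * 2 = 106 s.
t0 = adjust_seconds_fr(samples_per_channel_in_frame=20000, fs=10000,
                       seconds_fr=100, num_frame=3)
assert t0 == 106.0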
def create_parser():
"""
Construct the program options
"""
parser = argparse.ArgumentParser(
prog="xge",
description="Extract, transform and load GDC data onto UCSC Xena",
)
parser.add_argument(
"--version",
action="version",
version="%(prog)s {v}".format(v=__version__),
)
subparsers = parser.add_subparsers(
help="Sub-parsers for xena-gdc-ETL", dest="subcomm"
)
# equal_matrices subparser
equality_parser = subparsers.add_parser(
"xena-eql", help="Test the equality of 2 Xena matrices."
)
equality_parser.add_argument(
"df1", type=str, help='Directory for the first matrix.'
)
equality_parser.add_argument(
"df2", type=str, help='Directory for the second matrix.'
)
# gdc_check_new subparser
gdc_check_new_parser = subparsers.add_parser(
"gdc-check-new",
description="Check GDC's list of updated files and summarize "
"impacted project(s), data_type(s) and "
"analysis.workflow_type(s).",
)
gdc_check_new_parser.add_argument(
'url',
type=str,
metavar='URL',
help='URL for GDC\'s list of updated files. It can be a compressed '
'file with a supported extension, which includes ".gz", ".bz2", '
'".zip", or "xz". New files should be listed under a column named'
' by "New File UUID".',
)
# merge-xena subparser
merge_xena_subparser = subparsers.add_parser(
"merge-xena",
        description='Pipeline for merging Xena matrices of the same data '
'type.',
)
merge_xena_subparser.add_argument(
'-f',
'--files',
type=str,
nargs='+',
required=True,
        help='A list of paths for Xena matrix files to be merged. All paths '
'in this list support UNIX style pathname pattern expansion with '
'"glob". Files will be read by pandas.read_csv with sep="\t".',
)
merge_xena_subparser.add_argument(
'-t',
'--datatype',
type=str,
required=True,
        help='One data type code indicating the data type in matrices to be '
'merged. Supported data type codes include: {}'.format(
str(valid_dtype)
),
)
merge_xena_subparser.add_argument(
'-o',
'--outdir',
type=str,
default='.',
help='A directory to put the merged matrix. Defaults to the current '
'working directory of python.',
)
merge_xena_subparser.add_argument(
'-n',
'--name',
type=str,
default=None,
help='Filename for the merged matrix. Defaults to None. If None, the '
'filename will be derived from the cohort name and the data type. '
'Check "-t" and "-c" options for details.',
)
merge_xena_subparser.add_argument(
'-c',
'--cohort',
type=str,
default=None,
help='A cohort name for the merged matrix. Defaults to None. If '
'None, it will be set to a format of "MergedCohort<date>" by default. '
'For example, "MergedCohort{}".'.format(
date.today().strftime('%m%d%Y')
),
)
# Subcommand for full ETL (download, transform, and metadata)
etlparser = subparsers.add_parser(
'etl',
help='Download and transform GDC data into Xena matrix, '
'and generate corresponding metadata.',
epilog='Supported data types are: {}'.format(str(valid_dtype)),
)
etlparser.add_argument(
'-r',
'--root',
type=str,
default='.',
help='Root directory for imported data.',
)
etlparser.add_argument(
'-D',
'--delete',
action='store_true',
help='Deletes raw data upon generation of Xena_matrix.',
)
projects_group = etlparser.add_mutually_exclusive_group()
projects_group.add_argument(
'-p',
'--projects',
type=str,
nargs='+',
        help='GDC project ID(s) to be imported; or "all" if all projects on '
'GDC are going to be imported. Defaults to "all".',
default=['all'],
)
projects_group.add_argument(
'-P',
'--not-projects',
type=str,
nargs='+',
        help='Import all projects on GDC except projects specified by this '
'option. This option and the "-p" option are mutually exclusive.',
default=[],
)
datatype_group = etlparser.add_mutually_exclusive_group()
datatype_group.add_argument(
'-t',
'--datatype',
type=str,
nargs='+',
        help='Data type code(s) to be imported; or "all" if all supported '
'types are going to be imported. Defaults to "all".',
default=['all'],
)
datatype_group.add_argument(
'-T',
'--not-datatype',
type=str,
nargs='+',
        help='Import all supported data types except those specified by this '
'option. This option and the "-t" option are mutually exclusive.',
default=[],
)
# Subcommand for making metadata
metaparser = subparsers.add_parser(
'metadata',
help='Generate metadata for a Xena matrix',
epilog='Supported data types are: {}'.format(str(valid_dtype)),
)
metaparser.add_argument(
'-p',
'--project',
type=str,
required=True,
help='The project of the matrix.',
)
metaparser.add_argument(
'-t',
'--datatype',
type=str,
required=True,
help='One data type code for the matrix.',
)
metaparser.add_argument(
'-m', '--matrix', type=str, required=True, help='Path to a Xena matrix'
)
metaparser.add_argument(
'-r',
'--release',
type=float,
required=True,
help='GDC data release number.',
)
return parser | 5,358,475 |
def add_header(unicode_csv_data, new_header):
"""
    Prepend the given header (joined with commas) to the CSV rows and return the result as an iterator.
"""
final_iterator = [",".join(new_header)]
for row in unicode_csv_data:
final_iterator.append(row)
return iter(final_iterator) | 5,358,476 |
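# Small usage sketch for add_header() above; the rows and header are made up.
rows = ['1,alice', '2,bob']
for line in add_header(rows, ['id', 'name']):
    print(line)  # prints "id,name", then the original rows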
def get_most_energetic(particles):
"""Get most energetic particle. If no particle with a non-NaN energy is
found, returns a copy of `NULL_I3PARTICLE`.
Parameters
----------
    particles : ndarray of dtype I3PARTICLE_T
Returns
-------
most_energetic : shape () ndarray of dtype I3PARTICLE_T
"""
return get_best_filter(
particles=particles, filter_function=true_filter, cmp_function=more_energetic,
) | 5,358,477 |
def parse_file(path):
"""Parses a file for ObjectFiles.
Args:
path: String path to the file.
Returns:
List of ObjectFile objects parsed from the given file.
"""
if sys.platform.startswith('linux') or sys.platform.startswith('cygwin'):
# Assume Linux has GNU objdump. This has the options:
# -t (list symbols), -C (de-mangle symbol names)
objdump_args = ['objdump', '-t', '-C']
elif sys.platform.startswith('darwin'):
# Assume OSX has LLVM objdump. This has the options:
# -t (list symbols)
objdump_args = ['objdump', '-t']
objdump_args.append(path)
with StreamingProcess(objdump_args) as proc:
# Find the first non-blank line.
first_line = proc.peek()
while not first_line:
try:
proc.next()
first_line = proc.peek()
except StopIteration:
return []
# Is this an archive?
match = re.match(r'^.*[Aa]rchive\s+(.+):$', first_line)
if match:
# In this format we have to skip this descriptive line.
proc.next()
return parse_archive(match.group(1), proc)
# Some objdumps format archives differently.
match = re.match(r'^(.+)\((.+)\):\s+file format', first_line)
if match:
return parse_archive(match.group(1), proc)
# Otherwise maybe it's an object file?
match = re.match(r'^(.+):\s+file format', first_line)
if match:
return [parse_object_file(match.group(1), proc)]
# Otherwise it's not an archive or object file.
return [] | 5,358,478 |
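# Hypothetical invocation of parse_file() above; the path is a placeholder and
# the ObjectFile / StreamingProcess helpers are assumed to be defined elsewhere
# in the same module, as the function body implies.
for obj in parse_file('build/libexample.a'):
    print(obj)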
def stack(tup, axis=0, out=None):
"""Stacks arrays along a new axis.
Args:
tup (sequence of arrays): Arrays to be stacked.
axis (int): Axis along which the arrays are stacked.
out (cupy.ndarray): Output array.
Returns:
cupy.ndarray: Stacked array.
.. seealso:: :func:`numpy.stack`
"""
return concatenate([cupy.expand_dims(x, axis) for x in tup], axis, out) | 5,358,479 |
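# Usage sketch for the cupy stack() wrapper above (assumes cupy is installed
# and a GPU is available).
a = cupy.arange(6).reshape(2, 3)
b = cupy.arange(6, 12).reshape(2, 3)
s = stack((a, b), axis=0)
assert s.shape == (2, 2, 3)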
def winning_pipeline(mydata,mytestdata,myfinalmodel,feature_selection_done = True,myfeatures =None,numerical_attributes = None):
"""
If feature _selection has not been performed:
Function performs Cross Validation (with scaling within folds) on the data passed through.
Scales the data with RobustScaler() and Imputes the data with IterativeImputer(). Additionally adds clusters for the cities latitude and longitude
Else:
Performs Cross-Validation given the estimator on a subset of the features of mydata which were passed through to myfeatures
Arguments
@myestimator: sklearn estimator
@mydata: training data with missing values and is not scaled)
@myfolds: number of folds for cross validation
@feature_selection_done: Boolean flag indicating if feature_selection has been done to the data in `mydata`
@myfeatures: list of informative features from features
@checknoise: Whether scoring for Cross-Validation should be Explained Variance
"""
# part 1 create location feature for training data using optics clustering
optics_df = mydata[['Latitude','Longitude']].copy()
clust = OPTICS(min_samples=50, xi=.05, min_cluster_size=.05)
clust.fit(optics_df)
#
optics_df['clust_label'] = clust.labels_
#
location_max = np.max(optics_df.clust_label.unique())
#optics labels noisy samples as -1 need to replace for successful onehotencoding
optics_df['clust_label'].replace([-1],location_max+1,inplace=True)
#one hot encoding and combining to mydata
enc = OneHotEncoder(categories='auto')
optics_df_1hot = enc.fit_transform(optics_df[['clust_label']])
location_labels = ['cluster' + str(l) for l in optics_df.clust_label.unique()]
optics_df_1hot = pd.DataFrame(optics_df_1hot.todense(),index = optics_df.index,columns= location_labels )
#part1done cluster columns added
mydata = pd.concat([mydata,optics_df_1hot],axis=1)
    # part 2: drop unnecessary columns in our case
mydata_labels = mydata['med_rental_rate'].copy()
mydata = mydata.drop('med_rental_rate',axis =1)
if feature_selection_done:
mydata = mydata.loc[:,myfeatures].copy()
else:
mydata = mydata.drop(['city','Latitude','Longitude','change_hunits','studio_1000_1499', 'studio_1500_more',
'studio_750_999', 'onebed_1000_1499', 'onebed_1500_more',
'onebed_750_999', 'twobed_1000_1499', 'twobed_1500_more',
'twobed_750_999', 'threebed_1000_1499', 'threebed_1500_more',
'threebed_750_999'],axis=1)
imputer = IterativeImputer(max_iter = 10 ,random_state =22,min_value=0)
imputed_dat = imputer.fit_transform(mydata)
    # scale only the numerical attributes, i.e. everything except the cluster columns appended earlier
imputed_dat = pd.DataFrame(imputed_dat,columns=mydata.columns)
ct = ColumnTransformer(
[('scale1',RobustScaler(),numerical_attributes)],
remainder = 'passthrough')
X_train_prepped = ct.fit_transform(imputed_dat)
#to pickle
processed_training_data = X_train_prepped.copy()
    # now for the test data
# part 1 create location feature for test data using optics clustering
optics_df = mytestdata[['Latitude','Longitude']].copy()
clust = OPTICS(min_samples=50, xi=.05, min_cluster_size=.05)
clust.fit(optics_df)
#
optics_df['clust_label'] = clust.labels_
#
location_max = np.max(optics_df.clust_label.unique())
#optics labels noisy samples as -1 need to replace for successful onehotencoding
optics_df['clust_label'].replace([-1],location_max+1,inplace=True)
#one hot encoding and combining to mydata
enc = OneHotEncoder(categories='auto')
optics_df_1hot = enc.fit_transform(optics_df[['clust_label']])
location_labels = ['cluster' + str(l) for l in optics_df.clust_label.unique()]
optics_df_1hot = pd.DataFrame(optics_df_1hot.todense(),index = optics_df.index,columns= location_labels )
#part1done cluster columns added
mytestdata = pd.concat([mytestdata,optics_df_1hot],axis=1)
    # part 2: drop unnecessary columns in our case
mytest_data_labels = mytestdata['med_rental_rate'].copy()
mytestdata = mytestdata.drop('med_rental_rate',axis =1)
if feature_selection_done:
mytestdata = mytestdata.loc[:,myfeatures].copy()
else:
        mytestdata = mytestdata.drop(['city','Latitude','Longitude','change_hunits','studio_1000_1499', 'studio_1500_more',
                                      'studio_750_999', 'onebed_1000_1499', 'onebed_1500_more',
                                      'onebed_750_999', 'twobed_1000_1499', 'twobed_1500_more',
                                      'twobed_750_999', 'threebed_1000_1499', 'threebed_1500_more',
                                      'threebed_750_999'],axis=1)
    # prepare the test data
imputed_testdata = imputer.transform(mytestdata)
imputed_testdata = pd.DataFrame(imputed_testdata,columns=mytestdata.columns)
mytestdata_prepared = ct.transform(imputed_testdata)
#to pickle
processed_test_data = mytestdata_prepared.copy()
#make final predictions
myfinalmodel.fit(X_train_prepped,mydata_labels)
final_predictions = myfinalmodel.predict(mytestdata_prepared)
final_mse = mean_squared_error(mytest_data_labels,final_predictions)
final_rmse = np.sqrt(final_mse)
final_expvar = explained_variance_score(mytest_data_labels,final_predictions)
return {'final_rmse':final_rmse,'final_predictions':final_predictions,'final_expvar':final_expvar,'myfinalmodel':myfinalmodel,
'processed_training_data':processed_training_data,'processed_test_data':processed_test_data} | 5,358,480 |
def calc_single_d(chi_widths, chis, zs, z_widths, z_SN, use_chi=True):
"""Uses single_m_convergence with index starting at 0 and going along the entire line of sight.
Inputs:
chi_widths -- the width of the comoving distance bins.
chis -- the mean comoving distances of each bin.
zs -- the mean redshift of each bin, for the scale factor.
z_SN -- the reshift of the SN.
use_chi -- boolean that determined whether equal comoving distance or redshift bins are used.
"""
comoving_to_SN = b_comoving(0, z_SN)
chi_SN = comoving_to_SN[-1]
convergence = np.linspace(0, 0, len(chis))
mass = MSOL * 10 ** 15
for i in range(0, len(chis)):
if use_chi:
convergence[i] = single_d_convergence(chi_widths, chis, zs, i, 1, chi_SN)
else:
convergence[i] = single_d_convergence_z(z_widths, chis, zs, i, 1, chi_SN)
return convergence | 5,358,481 |
def test_info__pkg_not_found(reset_sys_argv, capfd):
"""Ensure the error message when a package is not found."""
sys.argv = ["py-info", "some-random-non-existant-package"]
commands.info()
out, err = capfd.readouterr()
assert not out
assert "The package some-random-non-existant-package was not found." in err | 5,358,482 |
def ppo(
client, symbol, timeframe="6m", col="close", fastperiod=12, slowperiod=26, matype=0
):
"""This will return a dataframe of Percentage Price Oscillator for the given symbol across the given timeframe
Args:
client (pyEX.Client): Client
symbol (string): Ticker
timeframe (string): timeframe to use, for pyEX.chart
col (string): column to use to calculate
fastperiod (int): fast period to calculate across
slowperiod (int): slow period to calculate across
matype (int): moving average type (0-sma)
Returns:
DataFrame: result
"""
df = client.chartDF(symbol, timeframe)
ppo = t.PPO(df[col].values, fastperiod, slowperiod, matype)
return pd.DataFrame({col: df[col].values, "ppo": ppo}) | 5,358,483 |
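# Hedged example for ppo() above; assumes pyEX is installed and a valid API
# token (placeholder shown) — the ticker and timeframe are illustrative.
import pyEX
client = pyEX.Client(api_token='YOUR_TOKEN', version='sandbox')
df_ppo = ppo(client, 'AAPL', timeframe='6m', col='close')
print(df_ppo.tail())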
def ellipse(x, y, width, height, fill_color="", stroke_color="", stroke_width=-1):
"""Draws an ellipse on the given coordinate with the given width, height and color
Args:
x (float): Horizontal coordinate.
y (float): Vertical coordinate.
width (float): Width of the ellipse.
height (float): Height of the ellipse.
fill_color (Color, optional): The color by which to fill the ellipse.
stroke_color (Color, optional): The color of the ellipse's outline
stroke_width (float, optional): The width of the outline.
"""
raise NotImplementedError("ellipse() not implemented") | 5,358,484 |
def getRandomCoin():
""" returns a randomly generated coin """
coinY = random.randrange(20, int(BASEY * 0.6))
coinX = SCREENWIDTH + 100
return [
{'x': coinX, 'y': coinY},
] | 5,358,485 |
def is_success(msg):
"""
Whether message is success
:param msg:
:return:
"""
return msg['status'] == 'success' | 5,358,486 |
def scrape_data(url):
"""
scrapes relevant data from given url
@param {string} url
@return {dict} {
url : link
links : list of external links
title : title of the page
description : sample text
}
"""
http = httplib2.Http()
try:
status, response = http.request(url)
except Exception as e:
return None
# get links
links = []
for link in BeautifulSoup(response, "html.parser", parse_only=SoupStrainer('a')):
if link.has_attr('href'):
links.append(link['href'])
# get description
soup = BeautifulSoup(response, "html.parser")
description = soup.find('meta', attrs={'name':'og:description'}) or soup.find('meta', attrs={'property':'description'}) or soup.find('meta', attrs={'name':'description'})
if description:
description = description.get('content')
# return dictionary
return {
"url" : url,
"links" : links,
"title" : BeautifulSoup(response, "html.parser"),
"description" : description
} | 5,358,487 |
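# Minimal usage sketch for scrape_data() above; the URL is a placeholder.
info = scrape_data('https://example.com')
if info is not None:
    print(info['title'])
    print(len(info['links']), 'links found')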
def list_strip_comments(list_item: list, comment_denominator: str = '#') -> list:
"""
Strips all items which are comments from a list.
:param list_item: The list object to be stripped of comments.
:param comment_denominator: The character with which comment lines start with.
:return list: A cleaned list object.
"""
_output = list()
for _item in list_item:
        if not _item.startswith(comment_denominator):
_output.append(_item)
return _output | 5,358,488 |
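# Quick usage example for list_strip_comments() above.
lines = ['# a comment', 'keep me', '# another comment', 'and me']
assert list_strip_comments(lines) == ['keep me', 'and me']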
def guess_number(name):
"""User defined function which performs the all the operations and prints the result"""
guess_limit = 0
magic_number = randint(1, 20)
while guess_limit < 6: # perform the multiple guess operations and print output
user_guess = get_input("Take a guess: ")
if 0 < user_guess <= 20: # condition that allows the numbers only if in between 1 to 20
guess_limit += 1
if user_guess == magic_number:
print(f"Good job, {name}! You guessed my number in {guess_limit} guesses!")
break
elif user_guess < magic_number:
print("Your Guess is too low")
elif user_guess > magic_number:
print("Your Guess is too high")
else:
print("Try again, Your number must have be in the range of 1 to 20!!")
else:
print(f"The number I was thinking of was {magic_number}")
return 0 | 5,358,489 |
def start(start_running_time, coins_for_benchmark, df_simulation, positions_history, capital_history):
"""
function "get statistics" was build before in version one. and this function arrange the dataframe to
be sent to "get statistics" correctly. acc = [{date_time,capital},..],benchmark = [{date_time,capital},..]
:param simulation_result_with_benchmark: dataframe with acc capital and benchmarks capital example pd.DataFrame(
# data={'date_time': ['1483228800', '1483232400', '1483236000', '1483239600'],
# 'capital': [123, 213, 342, 44324], 'benchmark_BTC': [222, 222, 222, 222],
:return: dataframe {'subject': ['ACC-BTC', 'ACC-ETH'],
# 'alpha': [123, 213, 342, 44324], 'betta': [222, 222, 222, 222],
# 'benchmark_ETH': [222, 222, 222, 222], 'rsquared': [222, 222, 222, 222],
"""
# simulation_result_with_benchmark = pd.DataFrame(
# data={'date_time': ['1483228800', '1483232400', '1483236000', '1483239600'],
# 'capital': [123, 213, 342, 44324], 'benchmark_BTC': [222, 222, 222, 222],
# 'benchmark_ETH': [222, 222, 222, 222]})
# final_result1 = {'subject': ['ACC-BTC', 'ACC-ETH'],
# 'alpha': [123, 213, 342, 44324], 'betta': [222, 222, 222, 222],
# 'benchmark_ETH': [222, 222, 222, 222], 'rsquared': [222, 222, 222, 222],
# 'standard_deviation': [222, 222, 222, 222], 'sharp_ratio': [222, 222, 222, 222]}
path = os.path.join(Consts.PATH_TO_WRITE_RESULT,
TimeHelper.epoch_to_date_time(start_running_time).strftime(Consts.WRITE_DATE_FORMAT),
str(df_simulation.index[0]))
os.makedirs(path, exist_ok=True)
positions_history.to_csv(os.path.join(path, 'positions.csv'), index=False)
df_simulation.index.names = ['simulation_id']
df_simulation.to_csv(os.path.join(path, 'simulation.csv'))
capital_history['date_time'] = pd.to_datetime(capital_history['date_time'], format=Consts.READ_DATE_FORMAT).apply(
lambda x: TimeHelper.datetime_to_epoch(x))
capital_history['capital'] = capital_history['liquid_capital'] + capital_history['shorts_capital'] + \
capital_history['long_capital']
# acc_capital = capital_history.drop(capital_history.columns.difference(['capital', 'date_time']), 1).to_dict(
# 'record')
acc_capital_with_banchmark_df, benchmark_column_names = add_benchmarks(capital_history, coins_for_benchmark,
df_simulation['amount_of_capital'].iloc[0])
acc_capital_with_banchmark_df['date_time'] = acc_capital_with_banchmark_df['date_time'].apply(
lambda x: TimeHelper.epoch_to_date_time(x))
acc_capital_with_banchmark_df.set_index('date_time', inplace=True)
acc_capital_with_banchmark_df.sort_index(inplace=True)
acc_capital_with_banchmark_df.to_csv(os.path.join(path, 'capital_history.csv'))
analytics_result_list = []
for column_benchmark in benchmark_column_names:
temp_obj = get_statistics(df_simulation.index[0], 'ACC-{}'.format(column_benchmark),
acc_capital_with_banchmark_df['capital'].values,
acc_capital_with_banchmark_df[column_benchmark].values)
analytics_result_list.append(temp_obj)
analytics_result_df = pd.DataFrame(
data=analytics_result_list)
analytics_result_df['ACC_ROI'] = (acc_capital_with_banchmark_df['capital'].values[-1] /
acc_capital_with_banchmark_df['capital'].values[0]) - 1
last_row_capital_history = capital_history.tail(1).copy()
last_row_capital_history['simulation_id'] = df_simulation.index[0]
last_row_capital_history.set_index(['simulation_id'], inplace=True)
analytics_result_df.set_index(['simulation_id'], inplace=True)
analytics_result_df = analytics_result_df.join(df_simulation).join(last_row_capital_history)
path_to_analytics = os.path.join(Consts.PATH_TO_WRITE_RESULT,
TimeHelper.epoch_to_date_time(start_running_time).strftime(
Consts.WRITE_DATE_FORMAT), 'analytics')
os.makedirs(path_to_analytics, exist_ok=True)
path_to_create_result = os.path.join(path_to_analytics, 'Simulation__{}__Analytics.csv'.format(df_simulation.index[0]))
analytics_result_df.to_csv(path_to_create_result , index=True)
logger.info('Finish creating analytics file for simulation ID: {}, you can look in: {}'.format(df_simulation.index[0], path_to_create_result)) | 5,358,490 |
def writerformat(extension):
"""Returns the writer class associated with the given file extension."""
return writer_map[extension] | 5,358,491 |
def index_like(index):
"""
    Return True unless the index looks like a default RangeIndex
    (unnamed, starting at 0 with step 1 and covering the full length).
"""
return not (isinstance(index, pd.RangeIndex) and
index._start == 0 and
index._stop == len(index) and
index._step == 1 and index.name is None) | 5,358,492 |
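# Usage sketch for index_like() above; assumes pandas imported as pd and a
# pandas version that still exposes the private RangeIndex attributes
# (_start/_stop/_step) the function relies on.
df_default = pd.DataFrame({'a': [1, 2, 3]})            # default RangeIndex
df_named = df_default.set_index(pd.Index([5, 6, 7]))   # non-trivial index
assert not index_like(df_default.index)
assert index_like(df_named.index)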
def zplsc_c_absorbtion(t, p, s, freq):
"""
Description:
Calculate Absorption coeff using Temperature, Pressure and Salinity and transducer frequency.
This Code was from the ASL MatLab code LoadAZFP.m
Implemented by:
2017-06-23: Rene Gelinas. Initial code.
    :param t: water temperature
    :param p: pressure
    :param s: salinity
    :param freq: transducer frequency in kHz
:return: sea_abs
"""
# Calculate relaxation frequencies
t_k = t + 273.0
f1 = 1320.0*t_k * np.exp(-1700/t_k)
f2 = 1.55e7*t_k * np.exp(-3052/t_k)
# Coefficients for absorption equations
k = 1 + p/10.0
a = 8.95e-8 * (1 + t*(2.29e-2 - 5.08e-4*t))
b = (s/35.0)*4.88e-7*(1+0.0134*t)*(1-0.00103*k + 3.7e-7*(k*k))
c = 4.86e-13*(1+t*((-0.042)+t*(8.53e-4-t*6.23e-6)))*(1+k*(-3.84e-4+k*7.57e-8))
freqk = freq*1000
sea_abs = (a*f1*(freqk**2))/((f1*f1)+(freqk**2))+(b*f2*(freqk**2))/((f2*f2)+(freqk**2))+c*(freqk**2)
return sea_abs | 5,358,493 |
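# Worked call for zplsc_c_absorbtion() above; the temperature, pressure,
# salinity and frequency values are illustrative only.
alpha = zplsc_c_absorbtion(t=10.0, p=100.0, s=35.0, freq=120)
print('absorption coefficient:', alpha)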
def contrastive_img_summary(episode_tuple, agent, summary_writer, train_step):
"""Generates image summaries for the augmented images."""
_, sim_matrix = agent.contrastive_metric_loss(
episode_tuple, return_representation=True)
sim_matrix = tf.expand_dims(tf.expand_dims(sim_matrix, axis=0), axis=-1)
with summary_writer.as_default(), \
common.soft_device_placement(), \
tf.compat.v2.summary.record_if(True):
tf.summary.image('Sim matrix', sim_matrix, step=train_step) | 5,358,494 |
def main():
"""Main program driving measurement of benchmark size"""
# Establish the root directory of the repository, since we know this file is
# in that directory.
gp['rootdir'] = os.path.abspath(os.path.dirname(__file__))
# Parse arguments using standard technology
parser = build_parser()
args = parser.parse_args()
# Establish logging
setup_logging(args.logdir, 'size')
log_args(args)
# Check args are OK (have to have logging and build directory set up first)
validate_args(args)
# Find the benchmarks
benchmarks = find_benchmarks()
log_benchmarks(benchmarks)
# Collect the size data for the benchmarks
raw_data, rel_data = collect_data(benchmarks)
# We can't compute geometric SD on the fly, so we need to collect all the
# data and then process it in two passes. We could do the first processing
# as we collect the data, but it is clearer to do the three things
# separately. Given the size of datasets with which we are concerned the
# compute overhead is not significant.
if raw_data:
embench_stats(benchmarks, raw_data, rel_data)
log.info('All benchmarks sized successfully')
else:
log.info('ERROR: Failed to compute size benchmarks')
sys.exit(1) | 5,358,495 |
def estimate_quintic_poly(x, y):
"""Estimate degree 5 polynomial coefficients.
"""
return estimate_poly(x, y, deg=5) | 5,358,496 |
def add(A, B):
"""
Return the sum of Mats A and B.
>>> A1 = Mat([[1,2,3],[1,2,3]])
>>> A2 = Mat([[1,1,1],[1,1,1]])
>>> B = Mat([[2,3,4],[2,3,4]])
>>> A1 + A2 == B
True
>>> A2 + A1 == B
True
>>> A1 == Mat([[1,2,3],[1,2,3]])
True
>>> zero = Mat([[0,0,0],[0,0,0]])
>>> B + zero == B
True
"""
assert A.size == B.size
return Mat([[Acol + Bcol for index, (Acol, Bcol) in enumerate(zip(Arow, Brow))] for index, (Arow, Brow) in enumerate(zip(A.store, B.store))]) | 5,358,497 |
def hindu_lunar_event(l_month, tithi, tee, g_year):
"""Return the list of fixed dates of occurrences of Hindu lunar tithi
prior to sundial time, tee, in Hindu lunar month, l_month,
in Gregorian year, g_year."""
l_year = hindu_lunar_year(
hindu_lunar_from_fixed(gregorian_new_year(g_year)))
date1 = hindu_tithi_occur(l_month, tithi, tee, l_year)
date2 = hindu_tithi_occur(l_month, tithi, tee, l_year + 1)
return list_range([date1, date2],
gregorian_year_range(g_year)) | 5,358,498 |
def plot_featdrop_single(drop_curve, chance_level, font=20, title_font=30, title="Place Holder", single=True, verbose=False):
"""Plots a single feature dropping cure
Parameters:
-----------
single: bool
if True it will make a new figure within the function, defaults to True
"""
Len_Test = len(drop_curve)
Test1 = np.arange(0, Len_Test, 1)
Test2 = np.arange(0, Len_Test + 1, 1)
if verbose:
print("Chance Level is: ", chance_level)
# Test1 = Test1[::-1]
# Main Dropping Curve
# fig= plt.figure(figsize=(15,15))
if single:
plt.figure(figsize=(7, 7)) # Create Figure and Set Size
plt.plot(Test1[::-1], drop_curve, color='black', label='10 ms') # Main Drop Curve
# plt.errorbar(Test1, Syll_DC, yerr= Syll_StdERR, color= 'black', linestyle=' ') # Error Bars
# black_patch2 = mpatches.Patch(color='black', label='Bin Width = 10 ms') # Set Patches
# Plot Chance
plt.plot(Test2, chance_level * np.ones(Test2.shape), '--k', linewidth=5)
# Axis Labels
plt.title(title, fontsize=title_font)
plt.xlabel('No. of Channels', fontsize=font)
plt.ylabel('Accuracy', fontsize=font)
    # Format Annotating Ticks
plt.tick_params(axis='both', which='major', labelsize=font)
plt.tick_params(axis='both', which='minor', labelsize=font)
plt.ylim(0, 1.0)
# plt.xlim(0,17) | 5,358,499 |