content (stringlengths 35–416k) | sha1 (stringlengths 40–40) | id (int64 0–710k)
---|---|---|
def args2command(*args):
""" Convert positional arguments to a list of stripped strings. """
if None in args or "" in args:
print("args:", args)
raise ValueError("None or empty values not allowed in args!")
return [str(_).strip() for _ in args] | 688fed2c2146583f05deb75a5c832aac6c971cbd | 4,771 |
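A brief usage sketch (illustrative values only) showing how mixed arguments are normalised into a command list:
cmd = args2command("ffmpeg", "-i", " input.mp4 ", 2, "out.mp4")
print(cmd)  # ['ffmpeg', '-i', 'input.mp4', '2', 'out.mp4']
# args2command(None, "x") raises ValueError.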
def selected_cells(self):
"""Get the selected cells. Synchronous, so returns a list.
Returns:
A list of Cells.
"""
cells = []
generator = self.selected_cells_async()
for chunk in generator:
for value in chunk.cells:
cells.append(value)
return cells | 523e77757acf8755b32ac0d283fd8864d6784ff1 | 4,772 |
def _trim_name(image):
"""Remove the slash at the end of the filename."""
return image[:-1] if image[-1] == '/' else image | 823dd63920673352a18d73f83190853d5a234483 | 4,773 |
import os
def check_for_pyd_so(file_path):
""" Checks if a file with .pyd or .so extension exists """
return os.path.isfile(file_path + '.pyd') or os.path.isfile(file_path + '.so') | a060f2350c11a3d34b59054c9cd95acc594b781b | 4,774 |
import os
import requests
def am_api_post_json(api_path, data):
"""
POST json to the Archivematica API
:param api_path: URL path to request (without hostname, e.g. /api/v2/location/)
:param data: Dict of data to post
:returns: dict of json data returned by request
"""
am_url = os.environ["ARCHIVEMATICA_URL"]
am_user = os.environ["ARCHIVEMATICA_USERNAME"]
am_api_key = os.environ["ARCHIVEMATICA_API_KEY"]
am_headers = {"Authorization": f"ApiKey {am_user}:{am_api_key}"}
url = f"{am_url}{api_path}"
print(f"URL: {url}; Data: {data}")
response = requests.post(url, json=data, headers=am_headers)
print(f"Response: {response}")
response_json = response.json()
print(f"Response JSON: {response_json}")
return response_json | 94428a059e322246c35d38e690667f52bf842663 | 4,775 |
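A usage sketch, assuming the three ARCHIVEMATICA_* environment variables are already set; the host, credentials and payload below are hypothetical placeholders:
import os
os.environ.setdefault("ARCHIVEMATICA_URL", "http://localhost:62080")  # hypothetical host
os.environ.setdefault("ARCHIVEMATICA_USERNAME", "demo")               # hypothetical user
os.environ.setdefault("ARCHIVEMATICA_API_KEY", "secret")              # hypothetical key
result = am_api_post_json("/api/v2/location/", {"description": "test location"})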
import os
def collect_checkpoint_paths(checkpoint_dir):
"""
Generates a list of paths to each checkpoint file found in a folder.
Note:
- This function assumes that checkpoint paths were written as relative paths.
Arguments:
checkpoint_dir (string):
Path to the models checkpoint directory from which to collect checkpoints.
Returns:
paths (:obj:`list` of :obj:`string`):
List of paths to each checkpoint file.
"""
listing_file = os.path.join(checkpoint_dir, 'checkpoint')
lines = []
# Collect all lines from the checkpoint listing file, closing it afterwards.
with open(listing_file, 'r') as listing:
for line in listing:
lines.append(line.strip())
# Discard the first line since it only points to the latest checkpoint.
lines = lines[1:]
# Extract the checkpoints path and global step from each line.
# NOTE: This function assumes that all checkpoint paths are relative.
# all_model_checkpoint_paths: "model.ckpt-<global-step>"
# Remove "all_model_checkpoint_paths: " from each line.
lines = [line.replace('all_model_checkpoint_paths: ', '') for line in lines]
# Remove surrounding quotation marks (" .. ") from each line.
lines = [line.replace('"', '') for line in lines]
# Extract the global step from each line.
# steps = [int(line.split('-', 1)[-1]) for line in lines]
# Build absolute paths to each checkpoint file.
paths = [os.path.join(checkpoint_dir, line) for line in lines]
return paths | 8c477535a77dc989b31b30d3a4487c7219efbfb3 | 4,776 |
def extract_name_from_uri_or_curie(item, schema=None):
"""Extract name from uri or curie
:arg str item: a URI or CURIE
:arg dict schema: a JSON-LD representation of schema
"""
# if schema is provided, look into the schema for the label
if schema:
name = [record["rdfs:label"] for record in schema["@graph"] if record['@id'] == item]
if name:
return name[0]
else:
return extract_name_from_uri_or_curie(item)
# handle curie, get the last element after ":"
elif 'http' not in item and len(item.split(":")) == 2:
return item.split(":")[-1]
# handle URI, get the last element after "/"
elif len(item.split("//")[-1].split('/')) > 1:
return item.split("//")[-1].split('/')[-1]
# otherwise, raise ValueError
else:
raise ValueError('{} should be converted to either URI or curie'.format(item)) | 08125457496c9d563f96a4f2a54a560c56c01af8 | 4,777 |
from datetime import datetime
def datetime_to_serial(dt):
"""
Converts the given datetime to the Excel serial format
"""
if dt.tzinfo:
raise ValueError("Doesn't support datetimes with timezones")
temp = datetime(1899, 12, 30)
delta = dt - temp
return delta.days + (float(delta.seconds) + float(delta.microseconds) / 1E6) / (60 * 60 * 24) | 3142bfc9d33ddf782c0a6485898e6ed6bcc00418 | 4,778 |
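For example, noon on 1 January 2024 (a naive datetime): 2024-01-01 lies 45292 days after the 1899-12-30 epoch, and the half day adds 0.5:
from datetime import datetime
print(datetime_to_serial(datetime(2024, 1, 1, 12, 0)))  # 45292.5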
from copy import deepcopy
def compute_transitive_closure(graph):
"""Compute the transitive closure of a directed graph using Warshall's
algorithm.
:arg graph: A :class:`collections.abc.Mapping` representing a directed
graph. The dictionary contains one key representing each node in the
graph, and this key maps to a :class:`collections.abc.MutableSet` of
nodes that are connected to the node by outgoing edges. This graph may
contain cycles. This object must be picklable. Every graph node must
be included as a key in the graph.
:returns: The transitive closure of the graph, represented using the same
data type.
.. versionadded:: 2020.2
"""
# Warshall's algorithm
closure = deepcopy(graph)
# (assumes all graph nodes are included in keys)
for k in graph.keys():
for n1 in graph.keys():
for n2 in graph.keys():
if k in closure[n1] and n2 in closure[k]:
closure[n1].add(n2)
return closure | 62a7191759614f495f5297379544fa3cdf77fcfa | 4,779 |
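A small worked example: in the chain a → b → c, the closure adds the implied edge a → c:
graph = {"a": {"b"}, "b": {"c"}, "c": set()}
print(compute_transitive_closure(graph))
# {'a': {'b', 'c'}, 'b': {'c'}, 'c': set()} (set print order may vary)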
def dereference(reference_buffer, groups):
"""
find a reference within a group
"""
if reference_buffer:
ref_number = int(''.join(reference_buffer)) - 1
return groups[ref_number % len(groups)] + ' '
return '' | c76234051e81a16f44690de46435e9856996d677 | 4,780 |
from typing import List
def _get_public_props(obj) -> List[str]:
"""Return the list of public props from an object."""
return [prop for prop in dir(obj) if not prop.startswith('_')] | 7b3be3e186bc009329ed417c6685fb2503a7c993 | 4,781 |
def remap(value, oldMin, oldMax, newMin, newMax):
"""
Remaps the value to a new min and max value
Args:
value: value to remap
oldMin: old min of range
oldMax: old max of range
newMin: new min of range
newMax: new max of range
Returns:
The remapped value in the new range
"""
return newMin + (((value - oldMin) / (oldMax - oldMin)) * (newMax - newMin)) | c0e53ce2b2169b08d271f7077e552762c572cf1f | 4,783 |
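For instance, a value halfway through [0, 10] lands halfway through the new range:
print(remap(5, 0, 10, 0, 100))   # 50.0
print(remap(0.25, 0, 1, -1, 1))  # -0.5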
def _get_active_tab(visible_tabs, request_path):
"""
return the tab that claims the longest matching url_prefix
if one tab claims
'/a/{domain}/data/'
and another tab claims
'/a/{domain}/data/edit/case_groups/'
then the second tab wins because it's a longer match.
"""
matching_tabs = sorted(
(url_prefix, tab)
for tab in visible_tabs
for url_prefix in tab.url_prefixes
if request_path.startswith(url_prefix)
)
if matching_tabs:
_, tab = matching_tabs[-1]
return tab | ac9cd34d4b4ee1c1c0356499b389c1f6a7195585 | 4,785 |
import os
def path_normalize(path, target_os=None):
"""Normalize path (like os.path.normpath) for given os.
>>> from piecutter.engines.jinja import path_normalize
>>> path_normalize('foo/bar')
'foo/bar'
>>> path_normalize('foo/toto/../bar')
'foo/bar'
Currently, this is using os.path, i.e. the separator and rules for the
computer running Jinja2 engine. A NotImplementedError exception will be
raised if 'os' argument differs from 'os.name'.
>>> import os
>>> os.name == 'posix' # Sorry if you are running tests on another OS.
True
>>> path_normalize('foo/bar', target_os='nt') # Doctest: +ELLIPSIS
Traceback (most recent call last):
...
NotImplementedError: Cannot join path with "nt" style. Host OS is "posix".
"""
if target_os and target_os != os.name:
raise NotImplementedError('Cannot join path with "{target}" style. '
'Host OS is "{host}".'.format(
target=target_os,
host=os.name))
return os.path.normpath(path) | 581713d5ffa48db4f0c368a69ad2cfc932f92a51 | 4,786 |
import io
import gzip
def gzip_bytes(bytes_obj):
"""byte: Compress a string as gzip in memory.
"""
if isinstance(bytes_obj, (str,)):
bytes_obj = bytes_obj.encode()
out_ = io.BytesIO()
with gzip.GzipFile(fileobj=out_, mode='w') as fo:
fo.write(bytes_obj)
return out_ | 68d0a6b3c64b8633a3084114f617ccd792a688f9 | 4,791 |
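A round-trip sketch; since the function returns the BytesIO object, the compressed payload comes from .getvalue():
import gzip
buf = gzip_bytes("hello world")
print(gzip.decompress(buf.getvalue()))  # b'hello world'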
def _get_pattern_nts(rule):
"""
Return a list of NT names present in given rule.
"""
nt_names = []
for bt in rule.ipattern.bits:
if bt.is_nonterminal():
nt_name = bt.nonterminal_name()
nt_names.append(nt_name)
return nt_names | e690e9187aaff0cf3138444db085e15adfda3847 | 4,792 |
def stopping_player(bot, state):
""" A Player that just stands still. """
return bot.position | 72628e39d26760eedc9a0e85a8279ac530ab851d | 4,793 |
def before_after_text(join_set, index, interval_list):
"""
Extracts any preceding or following markup to be joined to an interval's text.
"""
before_text, after_text = '', ''
# Checking if we have some preceding or following markup to join with.
if join_set:
if index > 0:
before_text = ''.join(character
for character in interval_list[index - 1][2]
if character in join_set)
if index < len(interval_list) - 1:
after_text = ''.join(character
for character in interval_list[index + 1][2]
if character in join_set)
return before_text, after_text | b2c63fe1e7ea5bb204e41b27bc79d2c81964369a | 4,795 |
import typing
import requests
def download_file_from_google_drive(
gdrive_file_id: typing.AnyStr,
destination: typing.AnyStr,
chunk_size: int = 32768
) -> typing.AnyStr:
"""
Downloads a file from google drive, bypassing the confirmation prompt.
Args:
gdrive_file_id: ID string of the file to download from google drive.
destination: where to save the file.
chunk_size: chunk size for gradual downloads.
Returns:
The path to the downloaded file.
"""
# taken from this StackOverflow answer: https://stackoverflow.com/a/39225039
URL = "https://docs.google.com/uc?export=download"
session = requests.Session()
response = session.get(URL, params={'id': gdrive_file_id}, stream=True)
token = None
for key, value in response.cookies.items():
if key.startswith('download_warning'):
token = value
if token:
params = {'id': gdrive_file_id, 'confirm': token}
response = session.get(URL, params=params, stream=True)
with open(destination, "wb") as f:
for chunk in response.iter_content(chunk_size):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
return destination | 29cdcc509aa21a6f2ae14ed18f2c0523bbdbd5a4 | 4,796 |
def run_epoch(session, model, eval_op=None, verbose=False):
"""Runs the model on the given data."""
costs = 0.0
iters = 0
state = session.run(model.initial_state)
fetches = {
"cost": model.cost,
"final_state": model.final_state,
"accuracy":model.accuracy,
"y_new":model.y_new,
"y_target":model.y_target
}
accuracys = 0.0
if eval_op is not None:
fetches["eval_op"] = eval_op
output_y = []
for step in range(model.input.epoch_size):
feed_dict = {}
feed_dict[model.initial_state] = state
vals = session.run(fetches, feed_dict)
cost = vals["cost"]
state = vals["final_state"]
accuracy = vals["accuracy"]
y_new = vals["y_new"]
y_target = vals["y_target"]
costs += cost
accuracys += accuracy
#iters += model.input.num_steps
iters = iters + 1
for i in range(model.input.batch_size):
if y_new[i,0] == 0:
output_y.append(1)
else:
output_y.append(0)
return costs, accuracys / iters, output_y | a69ed33e930245118e0d4054a10d6c1fd61cc0da | 4,797 |
def lines_in_file(filename: str) -> int:
"""
Count the number of lines in a file
:param filename: A string containing the relative or absolute path to a file
:returns: The number of lines in the file
"""
with open(filename, "r") as f:
return len(f.readlines()) | d71b5c8de1b4eb9a45988e06c17a129f4a19f221 | 4,799 |
import click
def validate_input_parameters(live_parameters, original_parameters):
"""Return validated input parameters."""
parsed_input_parameters = dict(live_parameters)
for parameter in parsed_input_parameters.keys():
if parameter not in original_parameters:
click.echo(
click.style('Given parameter - {0}, is not in '
'reana.yaml'.format(parameter),
fg='red'),
err=True)
del live_parameters[parameter]
return live_parameters | 226b95d0d9b42e586e395107def239d4e61c057a | 4,800 |
import re
def md_changes(seq, md_tag):
"""Recreates the reference sequence of a given alignment to the extent that the
MD tag can represent.
Note:
Used in conjunction with `cigar_changes` to recreate the
complete reference sequence
Args:
seq (str): aligned segment sequence
md_tag (str): MD tag for associated sequence
Returns:
ref_seq (str): a version of the aligned segment's reference sequence given \
the changes reflected in the MD tag
Raises:
ValueError: if MD tag is None
Example:
>>> md_changes('CTTATATTGGCCTT', '3C4AT4')
'CTTCTATTATCCTT'
"""
if md_tag is None:
raise ValueError('No MD tag found or given for sequence')
ref_seq = ''
last_md_pos = 0
for mo in re.finditer(r'(?P<matches>\d+)|(?P<del>\^\w+?(?=\d))|(?P<sub>\w)', md_tag):
mo_group_dict = mo.groupdict()
if mo_group_dict['matches'] is not None:
matches = int(mo_group_dict['matches'])
ref_seq += seq[last_md_pos:last_md_pos + matches]
last_md_pos += matches
elif mo_group_dict['del'] is not None:
deletion = mo_group_dict['del']
ref_seq += deletion[1:]
elif mo_group_dict['sub'] is not None:
substitution = mo_group_dict['sub']
ref_seq += substitution
last_md_pos += 1
else:
pass
return ref_seq | f8591d0084f6c10c9bbd1a39b3f9e13cfe952e68 | 4,801 |
def get_auto_scaling_group(asg, asg_name: str):
"""Get boto3 Auto Scaling Group by name or raise exception"""
result = asg.describe_auto_scaling_groups(AutoScalingGroupNames=[asg_name])
groups = result["AutoScalingGroups"]
if not groups:
raise Exception("Auto Scaling Group {} not found".format(asg_name))
return groups[0] | 07176e538cdb265ae86b16a5d36bf1b274f45c19 | 4,802 |
def join_epiweek(year, week):
""" return an epiweek from the (year, week) pair """
return year * 100 + week | fdbc50f8a953ef7307e9558019b3c2b50bc65be4 | 4,803 |
def cleanline(line):
"""去除讀入資料中的換行符與 ',' 結尾
"""
line = line.strip('\n')
line = line.strip(',')
return line | a4149663e2c3966c5d9be22f4aa009109e4a67ca | 4,804 |
def _is_match(option, useful_options, find_perfect_match):
"""
returns True if 'option' is among the useful_options
"""
for useful_option in useful_options:
if all(o in useful_option for o in option):
if not find_perfect_match or len(set(useful_option)) == len(set(option)):
return True
return False | bff60e1320744c16747926071afb3ee02022c55c | 4,805 |
def _map_channels_to_measurement_lists(snirf):
"""Returns a map of measurementList index to measurementList group name."""
prefix = "measurementList"
data_keys = snirf["nirs"]["data1"].keys()
mls = [k for k in data_keys if k.startswith(prefix)]
def _extract_channel_id(ml):
return int(ml[len(prefix) :])
return {_extract_channel_id(ml): ml for ml in mls} | d6d83c01baec5f345d58fff8a0d0107a40b8db37 | 4,806 |
import sys
def delcolumn(particles, columns, metadata):
"""
With dataframes, stating dataframe1 = dataframe2 only creates
a reference. Therefore, we must create a copy if we want to leave
the original dataframe unmodified.
"""
nocolparticles = particles.copy()
#Loop through each passed column to delete them
for c in columns:
#Check if the column doesn't exist.
#Consider doing the check in decisiontree.py
if c not in nocolparticles:
print("\n>> Error: the column \"" + c + "\" does not exist.\n")
sys.exit()
"""
The .drop can be used to drop a whole column.
The "1" tells .drop that it is the column axis that we want to drop
inplace means we want the dataframe to be modified instead of creating an assignment
"""
nocolparticles.drop(c, 1, inplace=True)
#We nead to remove that column header too. The heads are the third
#metadata (i.e. metadata[3])
metadata[3].remove(c)
return(nocolparticles, metadata) | cff587aa460d0478f750a3323b66e20d9c52f85a | 4,807 |
def partition(lst, size):
"""Partition list @lst into @size evenly-sized, interleaved (round-robin) sublists."""
return [lst[i::size] for i in range(size)] | af7071a5aac36a51f449f153df145d9218808a4a | 4,808 |
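A quick demonstration of the round-robin behaviour: elements are dealt into @size strided sublists, not contiguous chunks:
print(partition([1, 2, 3, 4, 5, 6, 7], 3))
# [[1, 4, 7], [2, 5], [3, 6]]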
import functools
import sys
import logging
def BestEffort(func):
"""Decorator to log and dismiss exceptions if one if already being handled.
Note: This is largely a workaround for the lack of support of exception
chaining in Python 2.7, this decorator will no longer be needed in Python 3.
Typical usage would be in |Close| or |Disconnect| methods, to dismiss but log
any further exceptions raised if the current execution context is already
handling an exception. For example:
class Client(object):
def Connect(self):
# code to connect ...
@exc_util.BestEffort
def Disconnect(self):
# code to disconnect ...
client = Client()
try:
client.Connect()
except:
client.Disconnect()
raise
If an exception is raised by client.Connect(), and then a second exception
is raised by client.Disconnect(), the decorator will log the second exception
and let the original one be re-raised.
Otherwise, in Python 2.7 and without the decorator, the second exception is
the one propagated to the caller; while information about the original one,
usually more important, is completely lost.
Note that if client.Disconnect() is called in a context where an exception
is *not* being handled, then any exceptions raised within the method will
get through and be passed on to callers for them to handle in the usual way.
The decorator can also be used on cleanup functions meant to be called on
a finally block, however you must also include an except-raise clause to
properly signal (in Python 2.7) whether an exception is being handled; e.g.:
@exc_util.BestEffort
def cleanup():
# do cleanup things ...
try:
process(thing)
except:
raise # Needed to let cleanup know if an exception is being handled.
finally:
cleanup()
Failing to include the except-raise block has the same effect as not
including the decorator at all. Namely: exceptions during |cleanup| are
raised and swallow any prior exceptions that occurred during |process|.
"""
@functools.wraps(func)
def Wrapper(*args, **kwargs):
exc_type = sys.exc_info()[0]
if exc_type is None:
# Not currently handling an exception; let any errors raise exceptions
# as usual.
func(*args, **kwargs)
else:
# Otherwise, we are currently handling an exception, dismiss and log
# any further cascading errors. Callers are responsible to handle the
# original exception.
try:
func(*args, **kwargs)
except Exception: # pylint: disable=broad-except
logging.exception(
'While handling a %s, the following exception was also raised:',
exc_type.__name__)
return Wrapper | dec08ab8fc1d367203df2e6c2f0507bf880ba503 | 4,809 |
import os
def get_ext(path):
"""
Given a path return the file extension.
**Positional Arguments:**
path: The file whose path we assess
"""
return os.path.splitext(path)[1] | f088e63bde8924fc2bac50950e05384878f637b7 | 4,810 |
def get_biggan_stats():
""" precomputed biggan statistics """
center_of_mass = [137 / 255., 127 / 255.]
object_size = [213 / 255., 210 / 255.]
return center_of_mass, object_size | 6576e13b7a68369e90b2003171d946453bafd212 | 4,812 |
def get_input_var_value(soup, var_id):
"""Get the value from text input variables.
Use when you see this HTML format:
<input id="wired_config_var" ... value="value">
Args:
soup (soup): soup pagetext that will be searched.
var_id (string): The id of a var, used to find its value.
Returns:
(string): The value of the variable
"""
try:
var_value = soup.find('input', {'id': var_id}).get('value')
return var_value
except AttributeError:
print('\nERROR: <' + var_id + '> not found!\nPagesoup:\n\n', soup)
raise LookupError | 5a9dd65a285c62e0e5e79584858634cb7b0ece75 | 4,813 |
import os
def _create_file(path):
"""Opens file in write mode. It also creates intermediate directories if
necessary.
"""
dirname = os.path.dirname(path)
if dirname:
os.makedirs(dirname, exist_ok=True)
return open(path, 'w') | 448e26c24c48bf654402a9fe35ef28eb7906dd31 | 4,814 |
def prob_get_expected_after_certain_turn(turns_later: int, turns_remain: int,
tiles_expect: int) -> float:
"""The probability of get expected tile after `turns_later` set of turns.
:param turns_later: Get the expected tile after `turns_after` set of turns
:param turns_remain: The remaining turns
:param tiles_expect: The number of expected tiles
:return: Probability
"""
tiles_remain = 4 * turns_remain + 14
if tiles_expect > turns_later:
greater = tiles_remain - turns_later
less = tiles_remain - tiles_expect
else:
greater = tiles_remain - tiles_expect
less = tiles_remain - turns_later
numerator, denominator = 1, 1
i, j = less, greater
while i > tiles_remain - turns_later - tiles_expect:
numerator = numerator * i
i = i - 1
while j > greater:
denominator = denominator * j
j = j - 1
return numerator / denominator | 6575c22302b73b58b2bd9aad5068ffe723fb5fe3 | 4,815 |
def table_from_bool(ind1, ind2):
"""
Given two boolean arrays, return the 2x2 contingency table
ind1, ind2 : array-like
Arrays of the same length
"""
return [
sum(ind1 & ind2),
sum(ind1 & ~ind2),
sum(~ind1 & ind2),
sum(~ind1 & ~ind2),
] | 497ce6ad1810386fedb6ada9ba87f0a5baa6318a | 4,818 |
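A sketch using NumPy boolean arrays (the & operator assumes array-like inputs; plain Python lists would fail):
import numpy as np
ind1 = np.array([True, True, False, False])
ind2 = np.array([True, False, True, False])
tp, fp, fn, tn = table_from_bool(ind1, ind2)
print(tp, fp, fn, tn)  # 1 1 1 1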
import pandas
def get_sub_title_from_series(ser: pandas.Series, decimals: int = 3) -> str:
"""pandas.Seriesから、平均値、標準偏差、データ数が記載されたSubTitleを生成する。"""
mean = round(ser.mean(), decimals)
std = round(ser.std(), decimals)
sub_title = f"μ={mean}, α={std}, N={len(ser)}"
return sub_title | 45c227e7ddd203872f015e4a95532c8acb80d54f | 4,819 |
def human_size(numbytes):
"""converts a number of bytes into a readable string by humans"""
KB = 1024
MB = 1024*KB
GB = 1024*MB
TB = 1024*GB
if numbytes >= TB:
amount = numbytes / TB
unit = "TiB"
elif numbytes >= GB:
amount = numbytes / GB
unit = "GiB"
elif numbytes >= MB:
amount = numbytes / MB
unit = "MiB"
elif numbytes >= KB:
amount = numbytes / KB
unit = "KiB"
else:
amount = numbytes
unit = "B"
return "%.3f%s" % (amount, unit) | 733fdff47350072b9cfcaf72a2de85f8a1d58cc6 | 4,820 |
import argparse
import time
def parse_args():
"""
Parse command-line arguments to train and evaluate a multimodal network for activity recognition on MM-Fit.
:return: Populated namespace.
"""
parser = argparse.ArgumentParser(description='MM-Fit Demo')
parser.add_argument('--data', type=str, default='mm-fit/',
help='location of the dataset')
parser.add_argument('--unseen_test_set', default=False, action='store_true',
help='if set to true the unseen test set is used for evaluation')
parser.add_argument('--epochs', type=int, default=25,
help='number of training epochs')
parser.add_argument('--lr', type=float, default=1e-3,
help='learning rate')
parser.add_argument('--batch_size', type=int, default=128,
help='batch size')
parser.add_argument('--eval_every', type=int, default=1,
help='how often to eval model (in epochs)')
parser.add_argument('--early_stop', type=int, default=20,
help='stop after this number of epoch if the validation loss did not improve')
parser.add_argument('--checkpoint', type=int, default=10,
help='how often to checkpoint model parameters (epochs)')
parser.add_argument('--multimodal_ae_wp', type=str, default='',
help='file path for the weights of the multimodal autoencoder part of the model')
parser.add_argument('--model_wp', type=str, default='',
help='file path for weights of the full model')
parser.add_argument('--window_length', type=int, default=5,
help='length of data window in seconds')
parser.add_argument('--window_stride', type=float, default=0.2,
help='length of window stride in seconds')
parser.add_argument('--target_sensor_sampling_rate', type=float, default=50,
help='Sampling rate of sensor input signal (Hz)')
parser.add_argument('--skeleton_sampling_rate', type=float, default=30,
help='sampling rate of input skeleton data (Hz)')
parser.add_argument('--layers', type=int, default=3,
help='number of FC layers')
parser.add_argument('--hidden_units', type=int, default=200,
help='number of hidden units')
parser.add_argument('--ae_layers', type=int, default=3,
help='number of autoencoder FC layers')
parser.add_argument('--ae_hidden_units', type=int, default=200,
help='number of autoencoder hidden units')
parser.add_argument('--embedding_units', type=int, default=100,
help='number of hidden units')
parser.add_argument('--dropout', type=float, default=0.0,
help='dropout percentage')
parser.add_argument('--ae_dropout', type=float, default=0.0,
help='multimodal autoencoder dropout percentage')
parser.add_argument('--num_classes', type=int, default=None,
help='number of output classes')
parser.add_argument('--name', type=str, default='mmfit_demo_' + str(int(time.time())),
help='name of experiment')
parser.add_argument('--output', type=str, default='output/',
help='path to output folder')
return parser.parse_args() | 6be79c2b83a294dc9f34da4acdbd6c8b0e568a8b | 4,821 |
def sample_duration(sample):
"""Returns the duration of the sample (in seconds)
:param sample:
:return: number
"""
return sample.duration | 9aaddb69b106ad941e3d1172c8e789b4969da99d | 4,824 |
import win32com.client as win32
def excel_col_w_fitting(excel_path, sheet_name_list):
"""
This function makes all column widths of an Excel file auto-fit the column content.
:param excel_path: The Excel file's path.
:param sheet_name_list: The sheet names of the Excel file.
:return: None; the file is saved with auto-fitted column widths.
"""
excel = win32.gencache.EnsureDispatch('Excel.Application')
work_book = excel.Workbooks.Open(excel_path)
for sheet_name in sheet_name_list:
work_sheet = work_book.Worksheets(sheet_name)
work_sheet.Columns.AutoFit()
work_book.Save()
excel.Application.Quit()
return None | 57de5aa63317d4fae4c1f60b607082b8de1f5f91 | 4,825 |
def annotate(f, expr, ctxt):
"""
f: function argument
expr: expression
ctxt: context
:returns: type of expr
"""
t = f(expr, ctxt)
expr.type = t
return t | d8fb524f6ca2fbddef78aa150733e768d0e3da01 | 4,826 |
def report_count_table_sort(s1, s2):
""" """
# Sort order: Class and scientific name.
columnsortorder = [0, 2, 3, 6] # Class, species, size class and trophy.
#
for index in columnsortorder:
s1item = s1[index]
s2item = s2[index]
# Empty strings should be at the end.
if (s1item != '') and (s2item == ''): return -1
if (s1item == '') and (s2item != ''): return 1
if s1item < s2item: return -1
if s1item > s2item: return 1
#
return 0 | cf207e4e8f524e48f99422017b17e643b66a9e78 | 4,827 |
def zero_check(grid):
"""Take a 2d grid and calculates number of 0 entries."""
zeros = 0
for row in grid:
for element in row:
if element == 0:
zeros += 1
return zeros | 0d69a948eef96937f8a5033256c3c4d9f22ce14d | 4,828 |
def __str__(self, indent=0, func_role="obj"):
"""
our own __str__
"""
out = []
out += self._str_signature()
out += self._str_index() + ['']
out += self._str_summary()
out += self._str_extended_summary()
out += self._str_param_list('Parameters')
out += self._str_options('Options')
out += self._str_returns()
for param_list in ('Other Parameters', 'Raises', 'Warns'):
out += self._str_param_list(param_list)
out += self._str_warnings()
out += self._str_see_also(func_role)
out += self._str_section('Notes')
out += self._str_references()
out += self._str_examples()
for param_list in ('Attributes', 'Methods'):
out += self._str_member_list(param_list)
out = self._str_indent(out, indent)
return '\n'.join(out) | 3e55fccb76f8e200ef7e57366c2ccd9609975959 | 4,829 |
def create_gw_response(app, wsgi_env):
"""Create an api gw response from a wsgi app and environ.
"""
response = {}
buf = []
result = []
def start_response(status, headers, exc_info=None):
result[:] = [status, headers]
return buf.append
appr = app(wsgi_env, start_response)
close_func = getattr(appr, 'close', None)
try:
buf.extend(list(appr))
finally:
close_func and close_func()
response['body'] = ''.join(buf)
response['statusCode'] = result[0].split(' ', 1)[0]
response['headers'] = {}
for k, v in result[1]:
response['headers'][k] = v
if 'Content-Length' not in response['headers']:
response['headers']['Content-Length'] = str(len(response['body']))
if 'Content-Type' not in response['headers']:
response['headers']['Content-Type'] = 'text/plain'
return response | 73dd8459cbf9b79655137536ff42195ba62c1372 | 4,830 |
import json
def decode_classnames_json(preds, top=5):
"""
Returns class code, class name and probability for each class amongst top=5 for each prediction in preds
e.g.
[[('n01871265', 'tusker', 0.69987053), ('n02504458', 'African_elephant', 0.18252705), ... ]]
"""
if len(preds.shape) != 2 or preds.shape[1] != 1000:
raise ValueError('`decode_classnames_json` expects '
'a batch of predictions '
'(i.e. a 2D array of shape (samples, 1000)). '
'Found array with shape: ' + str(preds.shape))
with open('imagenet_class_index.json') as data_file:
data = json.load(data_file)
results = []
for pred in preds:
top_indices = pred.argsort()[-top:][::-1]
result = [tuple(data[str(i)]) + (pred[i],) for i in top_indices]
results.append(result)
return results | 807bed051300801a5e6a92bbc96324a66050f6c0 | 4,831 |
def _has_letter(pw):
"""
Password must contain a letter
:param pw: password string
:return: boolean
"""
return any(character.isalpha() for character in pw) | 2f8eea521e8ca88001b2ecc3bc2501af8b14bbc8 | 4,832 |
import math
def closest_power_of_two(n):
"""Returns the closest power of two (linearly) to n.
See: http://mccormick.cx/news/entries/nearest-power-of-two
Args:
n: Value to find the closest power of two of.
Returns:
Closest power of two to "n".
"""
return pow(2, int(math.log(n, 2) + 0.5)) | 50d78d2a6de4f689ce268a95df97aae72dbd81ac | 4,833 |
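Worked examples; the rounding happens in log space, so 23 (log2 ≈ 4.52) rounds up to 32 while 20 (log2 ≈ 4.32) rounds down to 16:
print(closest_power_of_two(23))  # 32
print(closest_power_of_two(20))  # 16
print(closest_power_of_two(64))  # 64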
def divisors(num):
"""
Takes a number and returns all divisors of the number, ordered least to greatest
:param num: int
:return: list (int)
"""
result = []
for x in range(1, num + 1):
if num % x == 0:
result.append(x)
return result | 848ed77fa92ae1c55d90a5236f0d9db6ae2f377c | 4,834 |
def NumVisTerms(doc):
"""Number of visible terms on the page"""
_, terms = doc
return len(terms) | a6b762f314732d90c2371adf9472cf80117adae5 | 4,835 |
def getIpAddress():
"""Returns the IP address of the computer the client is running on,
as it appears to the client.
See also: system.net.getExternalIpAddress().
Returns:
str: Returns the IP address of the local machine, as it sees it.
"""
return "127.0.0.1" | d6aefaa4027a899344c762bc7df5ce40a5dbde4e | 4,836 |
import re
def verify_message( message ):
"""Verifies that a message is valid. i.e. it's similar to: 'daily-0400/20140207041736'"""
r = re.compile( "^[a-z]+(-[0-9])?-([a-z]{3})?[0-9]+/[0-9]+" )
return r.match( message ) | f25a37a5e3f076a647c0a03c26d8f2d2a8fd7b2e | 4,838 |
def get_all_unicode_chars():
"""Get all unicode characters."""
all_unicode_chars = []
i = 0
while True:
try:
all_unicode_chars.append(chr(i))
except ValueError:
break
i += 1
return all_unicode_chars | da63b26dd082987937b17fdfffb1219726d9d2c6 | 4,839 |
def ModifyListRequest(instance_ref, args, req):
"""Parse arguments and construct list backups request."""
req.parent = instance_ref.RelativeName()
if args.database:
database = instance_ref.RelativeName() + '/databases/' + args.database
req.filter = 'database="{}"'.format(database)
return req | fde5a06cde30ed1cf163299dc8ae5f0e826e3f9d | 4,840 |
import sys
import locale
def output_encoding(outfile=None):
"""Determine the encoding to use for output written to `outfile` or stdout."""
if outfile is None:
outfile = sys.stdout
encoding = (
getattr(outfile, "encoding", None) or
getattr(sys.__stdout__, "encoding", None) or
locale.getpreferredencoding()
)
return encoding | 872f8147a139c3747dda31254cf0a31d397baad7 | 4,841 |
from datetime import datetime
def day_start(src_time):
"""Return the beginning of the day of the specified datetime"""
return datetime(src_time.year, src_time.month, src_time.day) | 2bcc7b136e5cb1e7929e6655daf67b07dbbaa542 | 4,842 |
def fixextensions(peeps, picmap, basedir="."):
"""replaces image names with ones that actually exist in picmap"""
fixed = [peeps[0].copy()]
missing = []
for i in range(1, len(peeps)):
name, ext = peeps[i][2].split(".", 1)
if (name in picmap):
fixed.append(peeps[i].copy())
fixed[i][2] = picmap[name]
else:
missing.append(i)
return fixed, missing | d2af911aacea80f7e25cbdde0f5dfad0f1757aee | 4,843 |
def align_decision_ref(id_human, title):
""" In German, decisions are either referred to as 'Beschluss' or
'Entscheidung'. This function shall align the term used in the
title with the term used in id_human.
"""
if 'Beschluss' in title:
return id_human
return id_human.replace('Beschluss ', 'Entscheidung ') | ac4f584b8e008576816d9a49dba58bc9c9a6dbc4 | 4,845 |
def get_headers(soup):
"""get nutrient headers from the soup"""
headers = {'captions': [], 'units': []}
footer = soup.find('tfoot')
for cell in footer.findAll('td', {'class': 'nutrient-column'}):
div = cell.find('div')
headers['units'].append(div.text)
headers['captions'].append(div.previous_sibling.strip())
return headers | 5e7772a8830271f800791c75ef7ceecc98aba2bb | 4,846 |
def odd_numbers_list(n):
""" Returns the list of n first odd numbers """
return [2 * k - 1 for k in range(1, n + 1)] | 2066cf07e926e41d358be0012a7f2a248c5987a7 | 4,847 |
def domain_delete(domainName): # noqa: E501
"""domain_delete
Remove the domain # noqa: E501
:param domainName:
:type domainName: str
:rtype: DefaultMessage
"""
return 'do some magic!' | a0865aa2ff4902ac5cf8a8c0ea9eb62e792af56b | 4,848 |
def _aprime(pHI,pFA):
"""recursive private function for calculating A'"""
pCR = 1 - pFA
# use recursion to handle
# cases below the diagonal defined by pHI == pFA
if pFA > pHI:
return 1 - _aprime(1-pHI ,1-pFA)
# Pollack and Norman's (1964) A' measure
# formula from Grier 1971
if pHI == 0 or pFA == 1:
# in both of these cases pHI == pFA
return .5
return .5 + (pHI - pFA)*(1 + pHI - pFA)/(4*pHI*(1 - pFA)) | 3694dcdbc5da2c12bece51e85988245a60ebe811 | 4,849 |
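A quick check of the measure: chance performance (pHI == pFA) gives 0.5, and a strong detector approaches 1:
print(_aprime(0.5, 0.5))  # 0.5 (chance)
print(_aprime(0.9, 0.1))  # ≈ 0.944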
import os
import yaml
from typing import OrderedDict
import sys
def get_tags_from_playbook(playbook_file):
"""Get available tags from Ansible playbook"""
tags = []
playbook_path = os.path.dirname(playbook_file)
with open(playbook_file) as playbook_fp:
playbook = yaml.safe_load(playbook_fp)
for item in playbook:
if 'import_playbook' in item:
import_playbook = os.path.join(playbook_path,
item['import_playbook'])
imported_tags = get_tags_from_playbook(import_playbook)
tags.extend(imported_tags)
elif 'tags' in item:
if isinstance(item['tags'], (list, )):
tags.extend(item['tags'])
else:
tags.append(item['tags'])
else:
print(item)
# Remove duplicates while maintaining order
tags = list(OrderedDict.fromkeys(tags))
if tags.count('always') > 0:
tags.remove('always')
if len(tags) == 0:
sys.stderr.write('%s has no tags\n' % playbook_file)
return tags | 0ecb9b80b29a776729bebc8f0f4da135e12c116a | 4,850 |
import math
def rads_to_degs(rad):
"""Helper radians to degrees"""
return rad * 180.0 / math.pi | 1be742aa4010e2fc5678e6f911dcb21b0b4d1b59 | 4,853 |
import torch
import itertools
def splat_feat_nd(init_grid, feat, coords):
"""
Args:
init_grid: B X nF X W X H X D X ..
feat: B X nF X nPt
coords: B X nDims X nPt in [-1, 1]
Returns:
grid: B X nF X W X H X D X ..
"""
wts_dim = []
pos_dim = []
grid_dims = init_grid.shape[2:]
B = init_grid.shape[0]
F = init_grid.shape[1]
n_dims = len(grid_dims)
grid_flat = init_grid.view(B, F, -1)
for d in range(n_dims):
pos = coords[:, [d], :] * grid_dims[d] / 2 + grid_dims[d] / 2
pos_d = []
wts_d = []
for ix in [0, 1]:
pos_ix = torch.floor(pos) + ix
safe_ix = (pos_ix > 0) & (pos_ix < grid_dims[d])
safe_ix = safe_ix.type(pos.dtype)
wts_ix = 1 - torch.abs(pos - pos_ix)
wts_ix = wts_ix * safe_ix
pos_ix = pos_ix * safe_ix
pos_d.append(pos_ix)
wts_d.append(wts_ix)
pos_dim.append(pos_d)
wts_dim.append(wts_d)
l_ix = [[0, 1] for d in range(n_dims)]
for ix_d in itertools.product(*l_ix):
wts = torch.ones_like(wts_dim[0][0])
index = torch.zeros_like(wts_dim[0][0])
for d in range(n_dims):
index = index * grid_dims[d] + pos_dim[d][ix_d[d]]
wts = wts * wts_dim[d][ix_d[d]]
index = index.long()
grid_flat.scatter_add_(2, index.expand(-1, F, -1), feat * wts)
grid_flat = torch.round(grid_flat)
return grid_flat.view(init_grid.shape) | 24e798dd9cdaf51988c5229442bef4ebed14c4be | 4,854 |
def scan(this, accumulator, seed=None):
"""Applies an accumulator function over an observable sequence and
returns each intermediate result. The optional seed value is used as
the initial accumulator value.
For aggregation behavior with no intermediate results, see OutputThing.aggregate.
1 - scanned = source.scan(lambda acc, x: acc + x)
2 - scanned = source.scan(lambda acc, x: acc + x, 0)
Keyword arguments:
accumulator -- An accumulator function to be invoked on each element.
seed -- [Optional] The initial accumulator value.
Returns an observable sequence containing the accumulated values.
"""
has_seed = False
if seed is not None:
has_seed = True
has_accumulation = [False]
accumulation = [None]
def calculate(x):
if has_accumulation[0]:
accumulation[0] = accumulator(accumulation[0], x)
else:
accumulation[0] = accumulator(seed, x) if has_seed else x
has_accumulation[0] = True
return accumulation[0]
return this.map(calculate) | 4d69686f41c93549208b2e0721e14d95c7c52321 | 4,855 |
def _is_si_object(storage_instance):
"""
Helper method for determining if a storage instance is object.
Args:
storage_instance:
Returns: (Bool) True if object, False if not.
"""
si_type = storage_instance.get("service_configuration", None)
if si_type is None:
# object not supported on storage instance
return False
elif si_type == "object":
return True
else:
return False | 3cc2591bb0391e6d9d62197d0bb593f5006215c8 | 4,856 |
import os
def bookmark_fn(outdir):
"""Single line text file storing the epoch,brick,batch number of last ckpt"""
return os.path.join(outdir,'ckpts',
'last_epoch_brick_batch.txt') | 38680f4f723e8d724c115dddd6dbd0b7100d909a | 4,858 |
def set_model_weights(model, weights):
"""Set the given weights to keras model
Args:
model : Keras model instance
weights (dict): Dictionary of weights
Return:
Keras model instance with weights set
"""
for key in weights.keys():
model.get_layer(key).set_weights(weights[key])
return model | 0adb7294348af379df0d2a7ce2101a6dc3a43be4 | 4,859 |
def compare_two_data_lists(data1, data2):
"""
Gets two lists and returns the size of the set difference of the two lists.
But if one of them is None (file loading error) then the return value is None
"""
if data1 is None or data2 is None:
return None
return len(set(data1).difference(data2)) | 6e926d3958544d0d8ce1cbcb54c13535c74ab66b | 4,860 |
from typing import Any
from datetime import datetime
def get_on_request(field: Any, default_value: Any) -> Any:
"""
Get a field's value.
Args:
field: the field
default_value: substituted if the field is empty
Return:
the field value, or the default
"""
if isinstance(field, datetime):
if field.timestamp() < 10:
return default_value
if field:
return field
return default_value | 598f47d996618cfcf3790fe7497c6d51508efc48 | 4,861 |
def envfile_to_params(data):
"""
Converts environment file content into a dictionary with all the parameters.
If your input looks like:
# comment
NUMBER=123
KEY="value"
Then the generated dictionary will be the following:
{
"NUMBER": "123",
"KEY": "value"
}
"""
params = filter(lambda x: len(x) == 2, map(lambda x: x.strip().split("="), data.splitlines()))
return { k: v[1:-1] if v.startswith('"') and v.endswith('"') else v for (k, v) in params } | 03d3b4eb7ea5552938e6d42dcfd4554a1fe89422 | 4,862 |
def check_records(msg: dict) -> int:
"""
Returns the number of records
sent in the SQS message
"""
records = 0
if msg is not None:
records = len(msg[0])
if records != 1:
raise ValueError("Not expected single record")
return records | 7036f943b733ca34adaaa5ff917b3eb246075422 | 4,863 |
import random
def eight_ball():
""" Magic eight ball.
:return: A random answer.
:rtype: str
"""
answers = [
'It is certain', 'It is decidedly so', 'Not a fucking chance!', 'without a doubt', 'Yes definitely',
'I suppose so', 'Maybe', ' No fucking way!', 'Sure :D', 'hahahaha no you plank! :P ', 'Ohhh yes!',
'You may rely on it', 'As I see it, yes', 'Most likely', 'Outlook good', 'Yes', 'Signs point to yes',
'Try again', 'Ask again later', 'Better not tell you now as you may cry like a little girl',
'Cannot predict now', 'Fucking dead right!', 'Ohhhh most definitely',
'Concentrate and ask again', 'Don\'t count on it', 'My reply is no', 'My sources say no',
'Outlook not so good', 'Very doubtful', 'Possibly, but I think you need to chillout!'
]
return random.choice(answers) | 728aea44a111a25d878ec7686038d993fe49f71c | 4,864 |
def valid_octet (oct):
""" Validates a single IP address octet.
Args:
oct (int): The octet to validate
Returns:
bool: True if the octet is valid, otherwise false
"""
return 0 <= oct <= 255 | 9dd2346bb5df5bc00bb360013abe40b8039bdc45 | 4,865 |
def str_to_bool(string):
"""
Parses string into boolean
"""
string = string.lower()
return string in ("true", "yes") | e7c1645ab3ba59fc4721872df76f406c571cab8f | 4,866 |
import glob
def does_name_exist(name):
""" check if a file with that name already exists """
return len(glob.glob('./photos/'+name+'.*')) > 0 | c377f5fdb15d1d88ba6082c9be0e0400f5a8094d | 4,867 |
def return_one(result):
"""return one statement"""
return " return " + result | 94298fd5811877fa9e6a84cb061fc6244f3fda3b | 4,869 |
import functools
import warnings
def warns(message, category=None):
"""警告装饰器
:param message: 警告信息
:param category: 警告类型:默认是None
:return: 装饰函数的对象
"""
def _(func):
@functools.wraps(func)
def warp(*args, **kwargs):
warnings.warn(message, category, stacklevel=2)
return func(*args, **kwargs)
return warp
return _ | 4c481dc7eeb42751aef07d87ab9da34b04c573f4 | 4,873 |
def build_sub_lattice(lattice, symbol):
"""Generate a sub-lattice of the lattice based on equivalent atomic species.
Args:
lattice (ASE crystal class): Input lattice
symbol (string): Symbol of species identifying sub-lattice
Returns:
list of lists:
sub_lattice: Cartesian coordinates of the sub-lattice of symbol
"""
sub_lattice = []
i = 0
atomic_labels = lattice.get_chemical_symbols()
positions = lattice.get_scaled_positions()
for atom in atomic_labels:
if atom == symbol:
sub_lattice.append(positions[i])
i = i + 1
return sub_lattice | 7e7748c31f7f082b2e5ec6f21d0a56f60d5ec06c | 4,874 |
def format_percent(percentage, pos):
"""
Formats percentages for the 'x' axis of a plot.
:param percentage: The fraction between 0.0 and 1.0
:type percentage: float
:param pos: The position argument
:type pos: int
:return: A formatted percentage string
:rtype: str
"""
# pylint: disable=unused-argument
return '{:.0f}%'.format(percentage * 100.) | d8566ce36b21adb351141ac72413b927e0f02c11 | 4,875 |
def get_transpose_graph(graph):
"""Get the transpose graph"""
transpose = {node: set() for node in graph.keys()}
for node, target_nodes in graph.items():
for target_node in target_nodes:
transpose[target_node].add(node)
return transpose | f7f8e083659e4214d79472961c7240778f37268d | 4,878 |
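For example, reversing the two outgoing edges of node 'a':
graph = {"a": {"b", "c"}, "b": set(), "c": set()}
print(get_transpose_graph(graph))
# {'a': set(), 'b': {'a'}, 'c': {'a'}}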
import re
def check_date_mention(tweet):
"""Check the tweet to see if there is a valid date mention for the
three dates of pyconopenspaces: 5/11, 5/12, 5/13. Quick fix to override
SUTime defaulting to today's date and missing numeric info about event's date
"""
date_pat = re.compile(r"5/\d{2}")
valid_dates = ["5/11", "5/12", "5/13"]
dates = [d for d in tweet.split() if date_pat.match(d) and d in valid_dates]
return dates if len(dates) == 1 else False | 67c0de3beac5036d8b7aefa161b82a15257da04f | 4,879 |
import argparse
def parse(args):
"""[--starved <int>] [--control <int>] [--other <int>]"""
parser = argparse.ArgumentParser()
parser.add_argument('--control', metavar='level', type=int, default=2)
parser.add_argument('--other', metavar='level', type=int, default=1)
parser.add_argument('--starved', metavar='level', type=int, default=0)
return parser.parse_args(args) | 6e316e3337406a4b7918474a6497c8fa03d02696 | 4,880 |
def get_all_with_given_response(rdd, response='404'):
"""
Return a rdd only with those requests
that received the response code entered.
Default set to '404'.
return type: pyspark.rdd.PipelinedRDD
"""
def status_iterator(ln):
try:
status = ln.split(' ')[-2]
return status == response
except IndexError:
return False
return rdd.filter(status_iterator) | 8268095938bbc35a6418f557af033a458f041c89 | 4,881 |
def s3_put_bucket_website(s3_obj, bucketname, website_config):
"""
Boto3 client based Put bucket website function
Args:
s3_obj (obj): MCG or OBC object
bucketname (str): Name of the bucket
website_config (dict): Website configuration info
Returns:
dict : PutBucketWebsite response
"""
return s3_obj.s3_client.put_bucket_website(
Bucket=bucketname, WebsiteConfiguration=website_config
) | a60d95ef43e5a3643edeb6dacb2b149fef1892d9 | 4,883 |
def pretvori_v_sekunde(niz):
"""
Convert a string representing a track length in hh:mm:ss format into the number of seconds.
"""
h, m, s = map(int, niz.split(":"))
return s + m*60 + h*3600 | db0cc5872109b15e635b2b1e8731a5343d63f518 | 4,885 |
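For example, a track length of one hour, two minutes and three seconds:
print(pretvori_v_sekunde("01:02:03"))  # 3723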
import logging
def _get_profiling_data(filename):
"""Read a given file and parse its content for profiling data."""
data, timestamps = [], []
try:
with open(filename, "r") as f:
file_data = f.readlines()
except Exception:
logging.error("Could not read profiling data.", exc_info=True)
raise SystemExit(1)
for line in file_data:
if line == "\n":
continue
line = line.strip()
line_data = line.split(" ")
if len(line_data) != 3:
continue
_, mem_usage, timestamp = line.split(" ")
data.append(float(mem_usage))
timestamps.append(float(timestamp))
if not data:
logging.error("No samples to parse in {}.".format(filename))
raise SystemExit(1)
return {"data": data, "timestamp": timestamps} | 85f434c9aa22d60bae06205162623cde83e5a716 | 4,886 |
import os
import glob
def get_analytics_zoo_classpath():
"""
Get and return the jar path for analytics-zoo if exists.
"""
if os.getenv("BIGDL_CLASSPATH"):
return os.environ["BIGDL_CLASSPATH"]
jar_dir = os.path.abspath(__file__ + "/../../")
jar_paths = glob.glob(os.path.join(jar_dir, "share/lib/*.jar"))
if jar_paths:
assert len(jar_paths) == 1, "Expecting one jar: %s" % len(jar_paths)
return jar_paths[0]
return "" | e56bf7e81d42de6a20e8f77159a39e78ac150804 | 4,887 |
def make_title(raw_input):
"""Capitalize and strip"""
return raw_input.title().strip() | 517977638d72a8e5c8026147246739231be6258f | 4,889 |
import math
def get_goal_sample_rate(start, goal):
"""Modifie la probabilité d'obtenir directement le but comme point selon la distance entre le départ et le but.
Utile pour la précision et les performances."""
try :
dx = goal[0]-start[0]
dy = goal[1]-start[1]
d = math.sqrt(dx * dx + dy * dy)
except TypeError:
goal_sample_rate = 5
return goal_sample_rate
if d < 600 :
goal_sample_rate = (10-d/140)**2
else :
goal_sample_rate = 30
return goal_sample_rate | a48ad7adba534455a149142cfeae9c47e3a25677 | 4,890 |
def get_key(item, key_length):
"""
key + value = item
number of words of key = key_length
function returns key
"""
word = item.strip().split()
if key_length == 0: # fix
return item
elif len(word) == key_length:
return item
else:
return ' '.join(word[0:key_length]) | 6407d98d62a4d83bf577e82be696b6aee1f6d2e8 | 4,892 |
from datetime import datetime
import pytz
def get_timestamp(request):
""" hhs_oauth_server.request_logging.RequestTimeLoggingMiddleware
adds request._logging_start_dt
we grab it or set a timestamp and return it.
"""
if not hasattr(request, '_logging_start_dt'):
return datetime.now(pytz.utc).isoformat()
else:
return request._logging_start_dt | f3117a66ebfde0b1dc48591e0665c3d7120826fd | 4,894 |
from typing import Optional
from typing import List
def human_size(bytes: int | float, units: Optional[List[str]] = None) -> str:
"""
Convert bytes into a more human-friendly format
:param bytes: int
Number of bytes
:param units: Optional[List[str]]
units used
:return: str
Return size in human friendly format: <number> <size_unit>
"""
if units is None:
units = ['bytes', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB']
return f"{round(bytes, 2)} " + units[0] if bytes < 1024 else human_size(bytes / 1024, units[1:]) | 9b652f0a09024c22dcefa5909c17f7b14d0183f4 | 4,895 |
def average_price(offers):
"""Returns the average price of a set of items.
The first item is ignored as this is hopefully underpriced.
The last item is ignored as it is often greatly overpriced.
IMPORTANT: It is important to only trade items with are represented on the market in great numbers.
This is due to the fact that with lower competition between sellers, the prices are often non-competitive.
Keyword arguments:
offers -- A list of offers from which to find the average price."""
if len(offers) > 1:
remove_last_item = (True if (len(offers) > 3) else False)
cheapest_item = offers[0]['price']
if remove_last_item:
sum_ = sum(x['price'] for x in offers[1:-1])
else:
sum_ = sum(x['price'] for x in offers[1:])
return sum_ / (len(offers) - (2 if remove_last_item else 1)) | 4849996d13e4c00d845f5fb6a5a150397c9b84f0 | 4,896 |
def convert_mg_l_to_mymol_kg(o2, rho_0=1025):
"""Convert oxygen concentrations in ml/l to mymol/kg."""
converted = o2 * 1/32000 * rho_0/1000 * 1e6
converted.attrs["units"] = "$\mu mol/kg$"
return converted | 5925cf1f5629a0875bdc777bc3f142b9a664a144 | 4,900 |
def make_f_beta(beta):
"""Create a f beta function
Parameters
----------
beta : float
The beta to use where a beta of 1 is the f1-score or F-measure
Returns
-------
function
A function to compute the f_beta score
"""
beta_2 = beta**2
coeff = (1 + beta_2)
def f(global_, local_, node):
"""Compute the f-measure
Parameters
----------
global_ : np.array
All of the scores for a given query
local_ : np.array
The scores for the query at the current node
node : skbio.TreeNode
The current node being evaluated
"""
p = len(global_) / len(local_)
r = len(local_) / node.ntips
return coeff * (p * r) / ((beta_2 * p) + r)
return f | f0e6993ac956171c58415e1605706c453d3e6d61 | 4,901 |
def sort_by_rank_change(val):
"""
Sorter by rank change
:param val: node
:return: nodes' rank value
"""
return abs(float(val["rank_change"])) | ff5730e7cc765949dcfcfd4a3da32947ce3a411a | 4,904 |