content (stringlengths 35 to 416k) | sha1 (stringlengths 40) | id (int64 0 to 710k) |
---|---|---|
def do_sizes_match(imgs):
"""Returns if sizes match for all images in list."""
return len([*filter(lambda x: x.size != x.size[0], imgs)]) > 0 | 7da30972ecfd4d3cac3d21ff380255865ec3b5c8 | 2,279 |
def cli(ctx, invocation_id):
"""Get a summary of an invocation, stating the number of jobs which succeed, which are paused and which have errored.
Output:
The invocation summary.
For example::
{'states': {'paused': 4, 'error': 2, 'ok': 2},
'model': 'WorkflowInvocation',
'id': 'a799d38679e985db',
'populated_state': 'ok'}
"""
return ctx.gi.invocations.get_invocation_summary(invocation_id) | 94197a9c55c0d37b311585fdfce9d615c6986cb5 | 2,280 |
import numpy as np
def remove_observations_mean(data,data_obs,lats,lons):
"""
Subtracts observations from the model data to calculate model biases
"""
### Import modules
### Remove observational data
databias = data - data_obs[np.newaxis,np.newaxis,:,:,:]
return databias | 8f0cf60137660878f57dc35caa8c23896944d6ab | 2,281 |
def price_sensitivity(results):
"""
Calculate the price sensitivity of a strategy
results
results dataframe or any dataframe with the columns
open, high, low, close, profit
returns
the percentage of returns sensitive to open price
Note
-----
Price sensitivity is calculated by
1) Calculating the profit in cases where open=high and open=low
2) Dividing these profits by the total profits
A high percentage indicates that most of your orders may not get
executed at the LIMIT price since the stock tends have a sharp
movement when open=low or open=high. A value of 1 indicates that
all returns are sensitive to prices
This is somewhat a rough measure and it doesn't take into account
whether you BUY or SELL
"""
profit = results["profit"].sum()
sen1 = results.query("open==low")["profit"].sum()
sen2 = results.query("open==high")["profit"].sum()
return (sen1 + sen2) / profit | 02ab811bf689e760e011db6d091dcb7c3079f0d1 | 2,282 |
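As an illustration of the ratio described in the docstring above, a minimal sketch with a hand-made pandas DataFrame (invented numbers; assumes pandas is available and price_sensitivity is in scope):
import pandas as pd
trades = pd.DataFrame({
"open": [100, 102, 105],
"high": [101, 102, 108],
"low": [100, 99, 104],
"close": [100, 101, 107],
"profit": [10, -5, 20],
})
# open == low in row 0 (profit 10), open == high in row 1 (profit -5)
print(price_sensitivity(trades))  # (10 + -5) / 25 = 0.2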
def _understand_err_col(colnames):
"""Get which column names are error columns
Examples
--------
>>> colnames = ['a', 'a_err', 'b', 'b_perr', 'b_nerr']
>>> serr, terr = _understand_err_col(colnames)
>>> np.allclose(serr, [1])
True
>>> np.allclose(terr, [2])
True
>>> serr, terr = _understand_err_col(['a', 'a_nerr'])
Traceback (most recent call last):
...
ValueError: Missing positive error...
>>> serr, terr = _understand_err_col(['a', 'a_perr'])
Traceback (most recent call last):
...
ValueError: Missing negative error...
"""
shift = 0
serr = []
terr = []
for i, col in enumerate(colnames):
if col.endswith("_err"):
# The previous column, but they're numbered from 1!
# Plus, take shift into account
serr.append(i - shift)
shift += 1
elif col.endswith("_perr"):
terr.append(i - shift)
if len(colnames) == i + 1 or not colnames[i + 1].endswith('_nerr'):
raise ValueError("Missing negative error")
shift += 2
elif col.endswith("_nerr") and not colnames[i - 1].endswith('_perr'):
raise ValueError("Missing positive error")
return serr, terr | 2fab9346a3ea8fa6e84e406856eef8ad14ad9f66 | 2,283 |
import os
def _resolve_dir(env_name, dflt_dir):
"""Resolve a directory given the override env var and
its default directory. And if '~' is used to indicate
the home directory, then expand that."""
folder = os.environ.get(env_name, dflt_dir)
if folder is not None:
return os.path.expanduser(folder)
return None | 677c9b3bab970c56f1b3ea0ac8cff75d083e5328 | 2,284 |
import torch
def biband_mask(n: int, kernel_size: int, device: torch.device, v=-1e9):
"""compute mask for local attention with kernel size.
Args:
n (int): the input length.
kernel_size (int): The local attention kernel size.
device (torch.device): the device to move the mask to.
Returns: torch.Tensor. shape: [n,n]. The masked locations are -1e9
and unmasked locations are 0.
"""
if kernel_size is None:
return None
half = kernel_size // 2
mask1 = torch.ones(n, n).triu(diagonal=-half)
mask2 = torch.ones(n, n).tril(diagonal=half)
mask = mask1 * mask2
mask = (1 - mask) * v
return mask.to(device) | ab3a5f25f9fe0f83579d0492caa2913a13daa2d7 | 2,285 |
def get_gitlab_scripts(data):
"""GitLab is nice, as far as I can tell its files have a
flat hierarchy with many small job entities"""
def flatten_nested_string_lists(data):
"""helper function"""
if isinstance(data, str):
return data
elif isinstance(data, list):
return "\n".join([flatten_nested_string_lists(item) for item in data])
else:
raise ValueError(
f"unexpected data type {type(data)} in script section: {data}"
)
result = {}
for jobkey in data:
if not isinstance(data[jobkey], dict):
continue
for section in ["script", "before_script", "after_script"]:
if section in data[jobkey]:
script = data[jobkey][section]
result[f"{jobkey}/{section}"] = flatten_nested_string_lists(script)
return result | ad73c1ea6d4edcbce51eea18de317d7ab2d5e536 | 2,287 |
import os
def page_is_dir(path) -> bool:
"""
Tests whether a path corresponds to a directory
arguments:
path -- a path to a file
returns:
True if the path represents a directory else False
"""
return os.path.isdir(path) | bb52f6f09110e085fbb4cd8aeb9d03b36fe07b84 | 2,288 |
def str_cell(cell):
"""Get a nice string of given Cell statistics."""
result = f"-----Cell ({cell.x}, {cell.y})-----\n"
result += f"sugar: {cell.sugar}\n"
result += f"max sugar: {cell.capacity}\n"
result += f"height/level: {cell.level}\n"
result += f"Occupied by Agent {cell.agent.id if cell.agent else None}\n"
return result | d62801290321d5d2b8404dbe6243f2f0ae03ecef | 2,290 |
def get_reachable_nodes(node):
"""
returns a list with all the nodes from the tree with root *node*
"""
ret = []
stack = [node]
while len(stack) > 0:
cur = stack.pop()
ret.append(cur)
for c in cur.get_children():
stack.append(c)
return ret | c9ffaca113a5f85484433f214015bf93eea602d1 | 2,291 |
def get_type(k):
"""Takes a dict. Returns undefined if not keyed, otherwise returns the key type."""
try:
v = {
'score': '#text',
'applicant': 'str',
'applicant_sort': 'str',
'author': 'str',
'author_sort': 'str',
'brief': 'bool',
'city': 'str',
'daNumber': 'str',
'dateCommentPeriod': 'date',
'dateReplyComment': 'date',
'dateRcpt': 'date',
'disseminated': 'date',
'exParte': 'bool',
'fileNumber': 'str',
'id': 'long',
'lawfirm': 'str',
'lawfirm_sort': 'str',
'modified': 'date',
'pages': 'int',
'proceeding': 'str',
'reportNumber': 'str',
'regFlexAnalysis': 'bool',
'smallBusinessImpact': 'bool',
'stateCd': 'str',
'submissionType': 'str',
'text': 'str',
'viewingStatus': 'str',
'zip': 'str'
}[k]
except KeyError:
v = False
return v | fec3b7e04531dd202c46366f096f687160c68320 | 2,292 |
def f(i):
"""Add 2 to a value
Args:
i ([int]): integer value
Returns:
[int]: integer value
"""
return i + 2 | 72b5d99f3b2132054805ab56872cf2199b425b20 | 2,293 |
import argparse
def check_template_path(path):
"""
Argument checker, check if template exists and get the content
"""
try:
with open(path) as template:
tmp = template.read()
return tmp
except OSError:
raise argparse.ArgumentTypeError("Invalid template path!") | 5af832dd38490a79c6fd014f0db2b839d866e838 | 2,295 |
def estimate_label_width(labels):
"""
Given a list of labels, estimate the width in pixels
and return in a format accepted by CSS.
Necessarily an approximation, since the font is unknown
and is usually proportionally spaced.
"""
max_length = max([len(l) for l in labels])
return "{0}px".format(max(60,int(max_length*7.5))) | 1e22ad939973373a669841dd5cc318d6927249ca | 2,299 |
def count_num_peps(filename):
"""
Count the number of peptide sequences in FASTA file.
"""
with open(filename) as f:
counter = 0
for line in f:
if line.startswith(">"):
counter += 1
return counter | c062a22cd925f29d8793ab364a74cf05cbae2a66 | 2,300 |
import re
def get_variables(examples):
"""Convert a code string to a list of variables.
We assume a variable is a 'word' with only alphanumeric characters in it."""
variables = [" ".join(re.split(r"\W+", text)) for text in examples["text"]]
return {"variables": variables} | 385a4fb3a73a432e6afa9aa69330f950246f48d0 | 2,301 |
def _stored_data_paths(wf, name, serializer):
"""Return list of paths created when storing data"""
metadata = wf.datafile(".{}.alfred-workflow".format(name))
datapath = wf.datafile(name + "." + serializer)
return [metadata, datapath] | 5f01d804db9f1848cc13e701a56e51c06dccdb31 | 2,302 |
def ascii_to_walls(char_matrix):
"""
A parser to build a gridworld from a text file.
Each grid has ONE start and goal location.
A reward of +1 is positioned at the goal location.
:param char_matrix: Matrix of characters ('#' for walls, ' ' for empty cells).
:return: wall locations and empty locations, each as a list of (row, col) tuples.
"""
grid_size = len(char_matrix[0])
assert(len(char_matrix) == grid_size), 'Mismatch in the columns.'
for row in char_matrix:
assert(len(row) == grid_size), 'Mismatch in the rows.'
# ...
wall_locs = []
empty = []
for r in range(grid_size):
for c in range(grid_size):
char = char_matrix[r][c]
if char == '#':
wall_locs.append((r, c))
elif char == ' ':
empty.append((r, c))
else:
raise ValueError('Unknown character {} in grid.'.format(char))
# Attempt to make the desired gridworld.
return wall_locs, empty | 9f6520625623bd446923e374a1a5a557038dfd48 | 2,303 |
import re
def get_number_location(
input : str,
):
# endregion get_number_location header
# region get_number_location docs
"""
get the string indices of all numbers that occur on the string
format example: [ ( 0, 1 ), ( 4, 6 ), ( 9, 9 ) ]
both begin and end are inclusive, in contrast with the way the std_lib does it
which is begin(inclusive), end(exclusive)
"""
# endregion get_number_location docs
# region get_number_location implementation
locations = []
for match in re.finditer(r"\d+", input):
# match start is inclusive
position_start = match.start()
# match end is exclusive
position_end = match.end() - 1
locations.append((position_start, position_end))
...
return locations | de035f640dd33dc96b4072bdc925efc649285121 | 2,304 |
import re
def is_valid_slug(slug):
"""Returns true iff slug is valid."""
VALID_SLUG_RE = re.compile(r"^[a-z0-9\-]+$")
return bool(VALID_SLUG_RE.match(slug)) | 439349f0689cd53fb2f7e89b2b48b90aa79dae80 | 2,305 |
def get_customer_key():
""" Reutrn the key of the sample customer from file """
customer_file = open("sample_customer", "r")
customer_key = customer_file.readline().rstrip("\n")
customer_file.close()
return customer_key | 2b63c671aa6f8dd5fe6fbd9d58394e8c178901f5 | 2,306 |
def thesaurus(*args, sort=False) -> dict:
"""Формирует словарь, в котором ключи — первые буквы слов,
а значения — списки, содержащие слова, начинающиеся с соответствующей буквы
:param *args: перечень слов
:param sort: признак необходимости сортировки словаря по алфавиту (True - сортировать, False - не сортировать)
:return: словарь слов по первым буквам"""
if sort:
args = sorted(list(args)) # Changed in version 3.7: Dictionary order is guaranteed to be insertion order
dict_out = {}
for word in args:
dict_value = dict_out.setdefault(word[0], list())
if word not in dict_value:
dict_value.append(word)
dict_out[word[0]] = dict_value
return dict_out | 2e02e4f98a85eaa19a9374d5dfba82dd855b9636 | 2,307 |
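A minimal usage sketch for the function above, with made-up words, showing how entries are grouped under their first letters:
print(thesaurus("apple", "avocado", "banana", "apple", sort=True))
# {'a': ['apple', 'avocado'], 'b': ['banana']}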
def config(live_server, django_user_model):
"""Create a user and return an auth_token config matching that user."""
user = django_user_model.objects.create(
email='jathan@localhost', is_superuser=True, is_staff=True
)
data = {
'email': user.email,
'secret_key': user.secret_key,
'auth_method': 'auth_token',
'url': live_server.url + '/api',
# 'api_version': API_VERSION,
'api_version': '1.0', # Hard-coded.
}
return data | 031648b92a8347f8cc5e14213eda85c9ed73d3ee | 2,308 |
import os
def in_bazel() -> bool:
"""Return whether running under bazel."""
return os.environ.get("TEST_WORKSPACE", "") != "" | f0f697d894ed0e8bf7309591a6775632b76c2ec8 | 2,309 |
def note_favorite(note):
"""
get the status of the note as a favorite
returns True if the note is marked as a favorite
False otherwise
"""
if 'favorite' in note:
return note['favorite']
return False | 503f4e3abaab9d759070c725cdf783d62d7c05d2 | 2,310 |
import math
def erfc(x):
"""Complementary error function (via `http://bit.ly/zOLqbc`_)"""
z = abs(x)
t = 1. / (1. + z / 2.)
r = t * math.exp(-z * z - 1.26551223 + t * (1.00002368 + t * (
0.37409196 + t * (0.09678418 + t * (-0.18628806 + t * (
0.27886807 + t * (-1.13520398 + t * (1.48851587 + t * (
-0.82215223 + t * 0.17087277
)))
)))
)))
return 2. - r if x < 0 else r | fd2a44142042e81ef1fc5f649186a41ae4a152b0 | 2,311 |
def get_source_fields(client, source_table):
"""
Gets column names of a table in bigquery
:param client: BigQuery client
:param source_table: fully qualified table name.
returns as a list of column names.
"""
return [f'{field.name}' for field in client.get_table(source_table).schema] | abc161f252c03647a99a6d2151c00288b176a4e7 | 2,312 |
def has_user_based_permission(obj, user, allow_superuser=True, allow_staff=False):
"""
Based on obj.get_user(), checks if provided user is that user.
Accounts for superusers and staff.
"""
if hasattr(obj, "get_user"):
obj_user = obj.get_user()
# User is logged in
if user.is_authenticated:
# If staff or superuser or the object's own user, then yes.
if (allow_staff and user.is_staff) \
or (allow_superuser and user.is_superuser) \
or obj_user == user:
return True
return False | bcedf697280a75575e9d0202d1a6a65161a873ad | 2,313 |
def select_id_from_scores_dic(id1, id2, sc_dic,
get_worse=False,
rev_filter=False):
"""
Based on ID to score mapping, return better (or worse) scoring ID.
>>> id1 = "id1"
>>> id2 = "id2"
>>> id3 = "id3"
>>> sc_dic = {'id1' : 5, 'id2': 3, 'id3': 3}
>>> select_id_from_scores_dic(id1, id2, sc_dic)
'id1'
>>> select_id_from_scores_dic(id1, id2, sc_dic, get_worse=True)
'id2'
>>> select_id_from_scores_dic(id1, id2, sc_dic, rev_filter=True, get_worse=True)
'id1'
>>> select_id_from_scores_dic(id1, id2, sc_dic, rev_filter=True)
'id2'
>>> select_id_from_scores_dic(id2, id3, sc_dic)
False
"""
sc_id1 = sc_dic[id1]
sc_id2 = sc_dic[id2]
if sc_id1 > sc_id2:
if rev_filter:
if get_worse:
return id1
else:
return id2
else:
if get_worse:
return id2
else:
return id1
elif sc_id1 < sc_id2:
if rev_filter:
if get_worse:
return id2
else:
return id1
else:
if get_worse:
return id1
else:
return id2
else:
return False | f2fa5f33eead47288c92715ce358581a72f18361 | 2,317 |
def add_args(parser):
"""Add arguments to the argparse.ArgumentParser
Args:
parser: argparse.ArgumentParser
Returns:
parser: a parser added with args
"""
# Training settings
parser.add_argument(
"--task",
type=str,
default="train",
metavar="T",
help="the type of task: train or denoise",
)
parser.add_argument(
"--datadir",
type=str,
metavar="DD",
help="data directory for training",
)
parser.add_argument(
"--noisy_wav",
type=str,
metavar="NW",
help="path to noisy wav",
)
parser.add_argument(
"--denoised_wav",
type=str,
default="denoised_sample.wav",
metavar="DW",
help="path to denoised wav",
)
parser.add_argument(
"--pretrained",
type=str,
default=None,
metavar="PT",
help="path to pre-trainedmodel",
)
parser.add_argument(
"--saved_model_path",
type=str,
default="model.pth",
metavar="SMP",
help="path to trained model",
)
parser.add_argument(
"--partition_ratio",
type=float,
default=1 / 3,
metavar="PR",
help="partition ratio for trainig (default: 1/3)",
)
parser.add_argument(
"--batch_size",
type=int,
default=5,
metavar="BS",
help="input batch size for training (default: 5)",
)
parser.add_argument(
"--lr",
type=float,
default=0.001,
metavar="LR",
help="learning rate (default: 0.3)",
)
parser.add_argument(
"--momentum",
type=float,
default=0.9,
metavar="M",
help="momentum (default: 0.9)",
)
parser.add_argument(
"--noise_amp",
type=float,
default=0.01,
metavar="NA",
help="amplitude of added noise for trainign (default: 0.01)",
)
parser.add_argument(
"--split_sec",
type=float,
default=1.0,
metavar="SS",
help="interval for splitting [sec]",
)
parser.add_argument(
"--epochs",
type=int,
default=5,
metavar="EP",
help="how many epochs will be trained",
)
parser.add_argument(
"--sampling_rate",
type=int,
default=16000,
metavar="SR",
help="sampling rate",
)
parser.add_argument(
"--log_interval",
type=int,
default=2,
metavar="LI",
help="log interval",
)
parser.add_argument(
"--path_to_loss",
type=str,
default=None,
metavar="PL",
help="path to png filw which shows the transtion of loss",
)
return parser | cfebbfb6e9821290efdc96aaf0f7a7470e927c70 | 2,318 |
import numpy
def interp_xzplane(y, u, y_target=0.0):
"""Perform linear interpolation of the 3D data at given y-location.
Parameters
----------
y : numpy.ndarray of floats
The y-coordinates along a vertical gridline as a 1D array.
u : numpy.ndarray of floats
The 3D data.
y_target : float (optional)
The y-coordinate at which to interpolate the data.
Returns
-------
u_target : numpy.ndarray of floats
The 2D interpolated data.
"""
idx = numpy.where(y >= y_target)[0][0]
y0, y1 = y[idx - 1], y[idx]
u0, u1 = u[:, idx - 1, :], u[:, idx, :]
u_target = u0 + (y_target - y0) * (u1 - u0) / (y1 - y0)
return u_target | 77f8b559c64eb2b33723a2a8e540f4d783364c84 | 2,321 |
def liste_vers_paires(l):
"""
Convert a list(list(str)) structure into a list of [str, str] pairs
:param l: list of lists of strings
:return: list of all within-sublist pairs
"""
res = []
for i in l:
taille_i = len(i)
for j in range(taille_i-1):
for k in range(j+1, taille_i):
res.append([i[j], i[k]])
return res | 5f40e032fb9aba22656565d958ccfac828512b77 | 2,322 |
from typing import List
from typing import Dict
from typing import Any
def assert_typing(
input_text_word_predictions: List[Dict[str, Any]]
) -> List[Dict[str, str]]:
"""
this is only to ensure correct typing, it does not actually change anything
Args:
input_text_word_predictions: e.g. [
{"char_start": 0, "char_end": 7, "token": "example", "tag": "O"},
..
]
Returns:
input_text_word_predictions_str: e.g. [
{"char_start": "0", "char_end": "7", "token": "example", "tag": "O"},
..
]
"""
return [
{k: str(v) for k, v in input_text_word_prediction.items()}
for input_text_word_prediction in input_text_word_predictions
] | 0835bad510241eeb2ee1f69ac8abeca711ebbf53 | 2,323 |
import typing
def _sanitize_bool(val: typing.Any, /) -> bool:
"""Sanitize argument values to boolean."""
if isinstance(val, str):
return val.lower() == 'true'
return bool(val) | b41c52b6e61bcc6ec8b78138f4a5ee58f7284ca3 | 2,325 |
def isSameLinkedList(linked_list1, linked_list2):
"""
Check whether two linked lists are the same.
Args:
linked_list1: -
linked_list2: -
"""
while linked_list1 and linked_list2:
if linked_list1.val != linked_list2.val:
return False
linked_list1, linked_list2 = linked_list1.next, linked_list2.next
return linked_list1 is None and linked_list2 is None | cb41ed64b61f49c97104939fc1b1869e872f8234 | 2,326 |
import os
import re
def get_date_folders():
"""
Return a list of the directories used for backing up the database.
"""
directories_in_curdir = list(filter(os.path.isdir, os.listdir(os.getcwd())))
date_folders = [
d for d in directories_in_curdir if re.match(r"([0-9]+(-[0-9]+)+)", d)
]
return date_folders | 127d087888a6cd2dc2786365206a20e495a092ff | 2,327 |
def float_or_none(string):
""" Returns float number iff string represents one, else return None. TESTS OK 2020-10-24. """
try:
return float(string)
except (ValueError, TypeError):
return None | 8cc4437841f67e5b2f884ca566f3e6870dcd7649 | 2,328 |
def load_region_maps(region_file):
"""Extracts creates a map from PHI region id to a continuous region id."""
region_ids = [] # Used mainly for eval
region_ids_inv = {} # Used in data loader
region_names_inv = {} # Used in eval
for l in region_file.read().strip().split('\n'):
tok_name_id, _ = l.strip().split(';') # second field is frequency, unused
region_name, region_id = tok_name_id.split('_')
region_name = region_name.strip()
region_id = int(region_id)
# Ignore unknown regions:
if ((region_name == 'Unknown Provenances' and region_id == 884) or
(region_name == 'unspecified subregion' and region_id == 885) or
(region_name == 'unspecified subregion' and region_id == 1439)):
continue
region_ids.append(region_id)
region_ids_inv[region_id] = len(region_ids_inv)
region_names_inv[len(region_names_inv)] = region_name
return {
'ids': region_ids,
'ids_inv': region_ids_inv,
'names_inv': region_names_inv
} | 201240ce485b4039b12741bb03c547de7976c99a | 2,329 |
import re
import glob
import os
def split_fortran_files(source_dir, subroutines=None):
"""Split each file in `source_dir` into separate files per subroutine.
Parameters
----------
source_dir : str
Full path to directory in which sources to be split are located.
subroutines : list of str, optional
Subroutines to split. (Default: all)
Returns
-------
fnames : list of str
List of file names (not including any path) that were created
in `source_dir`.
Notes
-----
This function is useful for code that can't be compiled with g77 because of
type casting errors which do work with gfortran.
Created files are named: ``original_name + '_subr_i' + '.f'``, with ``i``
starting at zero and ending at ``num_subroutines_in_file - 1``.
"""
if subroutines is not None:
subroutines = [x.lower() for x in subroutines]
def split_file(fname):
with open(fname, 'rb') as f:
lines = f.readlines()
subs = []
need_split_next = True
# find lines with SUBROUTINE statements
for ix, line in enumerate(lines):
m = re.match(rb'^\s+subroutine\s+([a-z0-9_]+)\s*\(', line, re.I)
if m and line[0] not in b'Cc!*':
if subroutines is not None:
subr_name = m.group(1).decode('ascii').lower()
subr_wanted = (subr_name in subroutines)
else:
subr_wanted = True
if subr_wanted or need_split_next:
need_split_next = subr_wanted
subs.append(ix)
# check if no split needed
if len(subs) <= 1:
return [fname]
# write out one file per subroutine
new_fnames = []
num_files = len(subs)
for nfile in range(num_files):
new_fname = fname[:-2] + '_subr_' + str(nfile) + '.f'
new_fnames.append(new_fname)
with open(new_fname, 'wb') as fn:
if nfile + 1 == num_files:
fn.writelines(lines[subs[nfile]:])
else:
fn.writelines(lines[subs[nfile]:subs[nfile+1]])
return new_fnames
exclude_pattern = re.compile('_subr_[0-9]')
source_fnames = [f for f in glob.glob(os.path.join(source_dir, '*.f'))
if not exclude_pattern.search(os.path.basename(f))]
fnames = []
for source_fname in source_fnames:
created_files = split_file(source_fname)
if created_files is not None:
for cfile in created_files:
fnames.append(os.path.basename(cfile))
return fnames | aa709fcd2b73921b19c8d1e3235a30867112f2ea | 2,330 |
import re
def expand_parameters(host, params):
"""Expand parameters in hostname.
Examples:
* "target{N}" => "target1"
* "{host}.{domain} => "host01.example.com"
"""
pattern = r"\{(.*?)\}"
def repl(match):
param_name = match.group(1)
return params[param_name]
return re.sub(pattern, repl, host) | 04f62924fdc77b02f3a393e5cc0c5382d1d4279a | 2,332 |
import re
def _skip_comments_and_whitespace(lines, idx):
###############################################################################
"""
Starting at idx, return next valid idx of lines that contains real data
"""
if (idx == len(lines)):
return idx
comment_re = re.compile(r'^[#!]')
lines_slice = lines[idx:]
for line in lines_slice:
line = line.strip()
if (comment_re.match(line) is not None or line == ""):
idx += 1
else:
return idx
return idx | b2b794681859eaa22dfc1807211bf050423cd107 | 2,333 |
def named_payload(name, parser_fn):
"""Wraps a parser result in a dictionary under given name."""
return lambda obj: {name: parser_fn(obj)} | 259525b93d056e045b0f8d5355d4028d67bfac45 | 2,334 |
def unpack_puzzle_input(dir_file: str) -> tuple[list, list]:
"""
Args:
dir_file (str): location of .txt file to pull data from
Returns:
bingo numbers and bingo cards in list format
"""
with open(dir_file, "r") as file:
content = file.read().splitlines()
bingo_numbers = [int(i) for i in content[0].split(",")]
bingo_cards = []
for index in range(2, len(content)):
if content[index-1] == '':
bingo_cards.append([[int(i) for i in content[index].split()]])
elif content[index] != '':
bingo_cards[-1].append([int(i) for i in content[index].split()])
return bingo_numbers, bingo_cards | 47ea8846233aabf1bc8e07f22e9993b7a5a328e1 | 2,336 |
def validateRange(rangeStr : str) -> bool:
"""Validates the range argument"""
# type cast and compare
try:
# get range indices
ranges = rangeStr.split(",", 1)
rangeFrom = 0 if ranges[0] == "" else int(ranges[0])
rangeTo = 0 if ranges[1] == "" else int(ranges[1])
# check first if both ranges are not set
# using the -r , hack
if ranges == ["", ""]:
return False
# check if any of the range param is set
# and do testing per side
# if either range start/end is set and is <= 0:
if (ranges[0] != "" and rangeFrom < 0) or\
(ranges[1] != "" and rangeTo < 0):
return False
elif (ranges[0] != "") and (ranges[1] != ""):
# if both are set, do conditions here
# if from == to or from > to or from,to <=0, fail
if (rangeFrom == rangeTo) or\
(rangeFrom > rangeTo) or\
((rangeFrom <= 0) or (rangeTo <= 0)):
return False
except (ValueError, IndexError, AttributeError):
return False
return True | 375d80ef61c429a4e22df7321d223fe18939f597 | 2,337 |
from operator import add
def update_log_ip_dict_per_ingress_egress_point(flow_ingress_asn, flow_ip, origin_asn, ip_prefix, country_code, flow_bytes, flow_packets, d_ipsrc_level_analysis_perpoint):
"""
Account for unique IPAddresses, BGP prefixes, origin_asn per ingress/egress points.
:param flow_ingress_asn:
:param flow_ip:
:param origin_asn:
:param ip_prefix:
:param d_ipsrc_level_analysis_perpoint:
:return: dict of dict {'1234': {('10.10.10.1', 23456, '10.0.0.0/8'): [1]},
'5678': {('181.3.50.1', 98765, '181.3.50.0/20'): [1]}, ...}
"""
k = (flow_ip, origin_asn, ip_prefix, country_code)
values = [1, flow_bytes, flow_packets]
flow_ingress_asn = frozenset(flow_ingress_asn)
if flow_ingress_asn not in d_ipsrc_level_analysis_perpoint.keys():
d_ipsrc_level_analysis_perpoint[flow_ingress_asn] = dict()
d_ipsrc_level_analysis_perpoint[flow_ingress_asn][k] = values
else:
if k not in d_ipsrc_level_analysis_perpoint[flow_ingress_asn]:
d_ipsrc_level_analysis_perpoint[flow_ingress_asn][k] = values
else:
d_ipsrc_level_analysis_perpoint[flow_ingress_asn][k] = list(map(add, d_ipsrc_level_analysis_perpoint[flow_ingress_asn][k], values))
return d_ipsrc_level_analysis_perpoint | ad6ccefd62b11f3cf1a7b5e452789ddf22fcad55 | 2,338 |
def _4_graphlet_contains_3star(adj_mat):
"""Check if a given graphlet of size 4 contains a 3-star"""
return (4 in [a.sum() for a in adj_mat]) | 307f03707d1a7032df0ccb4f7951eec0c75832fe | 2,339 |
def get_sentence_content(sentence_token):
"""Extrac sentence string from list of token in present in sentence
Args:
sentence_token (tuple): contains length of sentence and list of all the token in sentence
Returns:
str: setence string
"""
sentence_content = ''
for word in sentence_token[1]:
sentence_content += word.text
return sentence_content | 4f6f1bb557bb508e823704fc645c2901e5f8f03f | 2,340 |
import os
def _parse_filename(filename):
"""Parse meta-information from given filename.
Parameters
----------
filename : str
A Market 1501 image filename.
Returns
-------
(int, int, str, str) | NoneType
Returns a tuple with the following entries:
* Unique ID of the individual in the image
* Index of the camera which has observed the individual
* Filename without extension
* File extension
Returns None if the given filename is not a valid filename.
"""
filename_base, ext = os.path.splitext(filename)
if '.' in filename_base:
# Some images have double filename extensions.
filename_base, ext = os.path.splitext(filename_base)
if ext != ".jpg":
return None
person_id, cam_seq, frame_idx, detection_idx = filename_base.split('_')
return int(person_id), int(cam_seq[1]), filename_base, ext | 61d8b721a594a802de8abc1c30a316fd1995a14e | 2,341 |
def sequence_generator(data, look_back = 50):
"""\
Description:
------------
Input data for LSTM: Convert to user trajectory (maximum length: look back)
"""
train,test, valid = [],[],[]
unique_users = set(data[:,0])
items_per_user = {int(user):[0 for i in range(look_back)] for user in unique_users}
for (idx,row) in enumerate(data):
user,item,time = int(row[0]),int(row[1]),row[2]
items_per_user[user] = items_per_user[user][1:]+[item+1]
current_items = items_per_user[user]
if row[3]==0:
train.append([current_items[:-1],current_items[-1]])
elif row[3]==2:
test.append([current_items[:-1],current_items[-1]])
else:
valid.append([current_items[:-1],current_items[-1]])
return train, test, valid | 688e572edf1b6d2dea2f069742b01c10ec36f928 | 2,342 |
def option_not_exist_msg(option_name, existing_options):
""" Someone is referencing an option that is not available in the current package
options
"""
result = ["'options.%s' doesn't exist" % option_name]
result.append("Possible options are %s" % existing_options or "none")
return "\n".join(result) | 7ffa0afa81483d78a1ed0d40d68831e09710b7e1 | 2,343 |
def string_unquote(value: str):
"""
Method to unquote a string
Args:
value: the value to unquote
Returns:
unquoted string
"""
if not isinstance(value, str):
return value
return value.replace('"', "").replace("'", "") | e062c012fc43f9b41a224f168de31732d885b21f | 2,347 |
def refresh_blind_balances(wallet, balances, storeback=True):
""" Given a list of (supposedly) unspent balances, iterate over each one
and verify it's status on the blockchain. Each balance failing
this verification updates own status in the database (if storeback is True).
Returns a list of TRULY unspent balances.
"""
rpc = wallet.rpc
unspent = [ ]
for balance in balances:
result = rpc.get_blinded_balances([balance["commitment"]])
if len(result) == 0:
if storeback:
wallet.modifyBlindBalance(balance["commitment"], used=True)
else:
unspent.append(balance)
return unspent | 2d468827ae32d359b323921d5933796ada22d627 | 2,350 |
import configparser
def read_section(section, fname):
"""Read the specified section of an .ini file."""
conf = configparser.ConfigParser()
conf.read(fname)
val = {}
try:
val = dict((v, k) for v, k in conf.items(section))
return val
except configparser.NoSectionError:
return None | 65d6b81b45fc7b75505dd6ee4dda19d13ebf7095 | 2,351 |
def _helper_fit_partition(self, pnum, endog, exog, fit_kwds,
init_kwds_e={}):
"""handles the model fitting for each machine. NOTE: this
is primarily handled outside of DistributedModel because
joblib cannot handle class methods.
Parameters
----------
self : DistributedModel class instance
An instance of DistributedModel.
pnum : scalar
index of current partition.
endog : array_like
endogenous data for current partition.
exog : array_like
exogenous data for current partition.
fit_kwds : dict-like
Keywords needed for the model fitting.
init_kwds_e : dict-like
Additional init_kwds to add for each partition.
Returns
-------
estimation_method result. For the default,
_est_regularized_debiased, a tuple.
"""
temp_init_kwds = self.init_kwds.copy()
temp_init_kwds.update(init_kwds_e)
model = self.model_class(endog, exog, **temp_init_kwds)
results = self.estimation_method(model, pnum, self.partitions,
fit_kwds=fit_kwds,
**self.estimation_kwds)
return results | 30b7e6d48c2f0fa3eb2d2486fee9a87dad609886 | 2,352 |
import sys
def new(option):
"""
Create a new message queue object; options must contain the type of
queue (which is the name of the child class), see above.
"""
options = option.copy()
qtype = options.pop("type", "DQS")
try:
__import__("messaging.queue.%s" % (qtype.lower()))
except SyntaxError:
raise SyntaxError("error importing dirq type: %s" % qtype)
except ImportError:
raise ImportError(
"you must install %s dependencies before using this module" %
(qtype, ))
try:
module = sys.modules["messaging.queue.%s" % (qtype.lower())]
return getattr(module, qtype)(**options)
except KeyError:
pass
raise ValueError("queue type not valid: %s" % qtype) | 9e285f4bee5442a41c10b32158595da5e03707de | 2,353 |
def get_users(metadata):
"""
Pull users, handles hidden user errors
Parameters:
metadata: sheet of metadata from mwclient
Returns:
the list of users
"""
users = []
for rev in metadata:
try:
users.append(rev["user"])
except (KeyError):
users.append(None)
return users | 48dbae6a63019b0e4c2236a97e147102fe4d8758 | 2,354 |
import argparse
def get_args():
"""Parse command-line arguments."""
parser = argparse.ArgumentParser(description="Expression aggregator")
parser.add_argument(
"-e", "--expressions", nargs="+", help="Expressions", required=True
)
parser.add_argument(
"-d", "--descriptors", nargs="+", help="Descriptors", required=True
)
parser.add_argument("-s", "--source", help="Source", required=True)
parser.add_argument(
"-t", "--expression-type", help="Expression type", required=True
)
parser.add_argument("-g", "--group-by", help="Group by", required=True)
parser.add_argument("-a", "--aggregator", help="Aggregator")
parser.add_argument("-b", "--box-plot-output", help="Box plot output file name")
parser.add_argument(
"-l", "--log-box-plot-output", help="Log box plot output file name"
)
parser.add_argument(
"-x", "--expressions-output", help="Expressions output file name"
)
return parser.parse_args() | a33401b0407ca8538f09918c8ec9074ca21e2438 | 2,355 |
import os
def get_tempdir() -> str:
"""Get the directory where temporary files are stored."""
return next((os.environ[var] for var in (
'XDG_RUNTIME_DIR', 'TMPDIR', 'TMP', 'TEMP'
) if var in os.environ), '/tmp') | 95c90d9f297bbd76e1f083d07058db1b46c275ba | 2,356 |
def get_user_input(prompt: str, current_setting: str):
"""
Get user input
:param prompt: prompt to display
:param current_setting: current value
:return:
"""
if current_setting != '':
print(f'-- Current setting: {current_setting}')
use_current = '/return to use current'
else:
use_current = ''
user_ip = ''
while user_ip == '':
user_ip = input(f'{prompt} [q to quit{use_current}]: ')
if user_ip.lower() == 'q':
break
if user_ip == '' and current_setting != '':
user_ip = current_setting
return user_ip | 358bd937db4ae111eb515385f0f61391a7ae665c | 2,359 |
def get_config(cfg, name):
"""Given the argument name, read the value from the config file.
The name can be multi-level, like 'optimizer.lr'
"""
name = name.split('.')
suffix = ''
for item in name:
assert item in cfg, f'attribute {item} not cfg{suffix}'
cfg = cfg[item]
suffix += f'.{item}'
return cfg | 4b0a8eedb057a26d67cd5c9f7698c33754b29249 | 2,361 |
def str_to_size(size_str):
"""
Receives a human size (i.e. 10GB) and converts to an integer size in
mebibytes.
Args:
size_str (str): human size to be converted to integer
Returns:
int: formatted size in mebibytes
Raises:
ValueError: in case size provided in invalid
"""
if size_str is None:
return None
# no unit: assume mebibytes as default and convert directly
if size_str.isnumeric():
return int(size_str)
size_str = size_str.upper()
# check if size is non-negative number
if size_str.startswith('-'):
raise ValueError(
'Invalid size format: {}'.format(size_str)) from None
# decimal units are converted to bytes and then to mebibytes
dec_units = ('KB', 'MB', 'GB', 'TB')
for index, unit in enumerate(dec_units):
# unit used is different: try next
if not size_str.endswith(unit):
continue
try:
size_int = int(size_str[:-2]) * pow(1000, index+1)
except ValueError:
raise ValueError(
'Invalid size format: {}'.format(size_str)) from None
# result is returned in mebibytes
return int(size_int / pow(1024, 2))
# binary units are just divided/multipled by powers of 2
bin_units = ('KIB', 'MIB', 'GIB', 'TIB')
for index, unit in enumerate(bin_units):
# unit used is different: try next
if not size_str.endswith(unit):
continue
try:
size_int = int(int(size_str[:-3]) * pow(1024, index-1))
except ValueError:
raise ValueError(
'Invalid size format: {}'.format(size_str)) from None
return size_int
raise ValueError(
'Invalid size format: {}'.format(size_str)) from None | 0051b7cf55d295a4fffcc41ed5b0d900243ef2da | 2,362 |
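A few illustrative conversions to mebibytes, following the decimal and binary branches above (values implied by the code, not taken from the dataset):
print(str_to_size("512"))   # 512  (no unit: already mebibytes)
print(str_to_size("1GB"))   # 953  (1_000_000_000 bytes / 1024**2, truncated)
print(str_to_size("2GIB"))  # 2048 (binary unit: 2 * 1024)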
from datetime import datetime
def date_convert(value):
"""
Convert a date string into the database date type
:param value: date string in '%Y/%m/%d' format
:return: parsed date, or today's date if parsing fails
"""
try:
create_date = datetime.strptime(value, '%Y/%m/%d').date()
except Exception as e:
create_date = datetime.now().date()
return create_date | 40d7a213a8aeed692940bbb285fdad1bbb5b65a6 | 2,363 |
import imghdr
def get_img_content(session,
file_url,
extension=None,
max_retry=3,
req_timeout=5):
"""
Returns:
(data, actual_ext)
"""
retry = max_retry
while retry > 0:
try:
response = session.get(file_url, timeout=req_timeout)
except Exception as e:
print(f'Exception caught when downloading file {file_url}, '
f'error: {e}, remaining retry times: {retry - 1}')
else:
if response.status_code != 200:
print(f'Response status code {response.status_code}, '
f'file {file_url}')
break
# get the response byte
data = response.content
if isinstance(data, str):
print('Converting str to byte, later remove it.')
data = data.encode()
actual_ext = imghdr.what(extension, data)
actual_ext = 'jpg' if actual_ext == 'jpeg' else actual_ext
# do not download original gif
if actual_ext == 'gif' or actual_ext is None:
return None, actual_ext
return data, actual_ext
finally:
retry -= 1
return None, None | 156005420ebc1503d5cf7a194051b93d9fccb8ed | 2,364 |
def decay_value(base_value, decay_rate, decay_steps, step):
""" decay base_value by decay_rate every decay_steps
:param base_value:
:param decay_rate:
:param decay_steps:
:param step:
:return: decayed value
"""
return base_value*decay_rate**(step/decay_steps) | c593f5e46d7687fbdf9760eb10be06dca3fb6f7b | 2,366 |
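A one-line worked example of the exponential decay formula above, with invented numbers:
print(decay_value(1.0, 0.5, 10, 20))  # 1.0 * 0.5 ** (20 / 10) = 0.25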
import os
def get_filenames(data_dir, mode, valid_id, pred_id, overlap_step, patch_size):
"""Returns a list of filenames."""
if mode == 'train':
train_files = [
os.path.join(data_dir, 'subject-%d.tfrecords' % i)
for i in range(1, 11)
if i != valid_id
]
for f in train_files:
assert os.path.isfile(f), \
('Run generate_tfrecord.py to generate training files.')
return train_files
elif mode == 'valid':
valid_file = os.path.join(data_dir,
'subject-%d-valid-%d-patch-%d.tfrecords' % (valid_id, overlap_step, patch_size))
assert os.path.isfile(valid_file), \
('Run generate_tfrecord.py to generate the validation file.')
return [valid_file]
elif mode == 'pred':
pred_file = os.path.join(data_dir,
'subject-%d-pred-%d-patch-%d.tfrecords' % (pred_id, overlap_step, patch_size))
assert os.path.isfile(pred_file), \
('Run generate_tfrecord.py to generate the prediction file.')
return [pred_file] | 255a89254c860d7bbd7941da017e7e015406cf8d | 2,367 |
import pytz
def local_tz2() -> pytz.BaseTzInfo:
"""
Second timezone for the second user
"""
return pytz.timezone("America/Los_Angeles") | d841f3ea06334540b8dca6fd2c2a2e823227fa37 | 2,370 |
def crc16(data):
"""CRC-16-CCITT computation with LSB-first and inversion."""
crc = 0xffff
for byte in data:
crc ^= byte
for bits in range(8):
if crc & 1:
crc = (crc >> 1) ^ 0x8408
else:
crc >>= 1
return crc ^ 0xffff | 2560f53c1f2b597d556a0b63462ef56f0c972db2 | 2,371 |
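A quick usage sketch; the constants above (init 0xffff, reflected polynomial 0x8408, final inversion) appear to match the CRC-16/X-25 style of checksum, shown here on the conventional check string:
print(hex(crc16(b"123456789")))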
def _read_dino_waterlvl_metadata(f, line):
"""read dino waterlevel metadata
Parameters
----------
f : text wrapper
line : str
line with meta dictionary keys
meta_dic : dict (optional)
dictionary with metadata
Returns
-------
meta : dict
dictionary with metadata
"""
meta_keys = line.strip().split(",")
meta_values = f.readline().strip().split(",")
meta = {}
for key, value in zip(meta_keys, meta_values):
key = key.strip()
if key in ["X-coordinaat", "Y-coordinaat"]:
if key == "X-coordinaat":
meta["x"] = float(value)
elif key == "Y-coordinaat":
meta["y"] = float(value)
elif key == "Locatie":
meta["locatie"] = value
meta["name"] = value
return meta | 949535f4fc677a7d0afc70a76e377ccefcc8943f | 2,372 |
def _read_unicode_table(instream, separator, startseq, encoding):
"""Read the Unicode table in a PSF2 file."""
raw_table = instream.read()
entries = raw_table.split(separator)[:-1]
table = []
for point, entry in enumerate(entries):
split = entry.split(startseq)
code_points = [_seq.decode(encoding) for _seq in split]
# first entry is separate code points, following entries (if any) are sequences
table.append([_c for _c in code_points[0]] + code_points[1:])
return table | e27e59b57d10cb20dd4ddc832c65cb8802984d44 | 2,373 |
def reverse(array):
"""Return `array` in reverse order.
Args:
array (list|string): Object to process.
Returns:
list|string: Reverse of object.
Example:
>>> reverse([1, 2, 3, 4])
[4, 3, 2, 1]
.. versionadded:: 2.2.0
"""
# NOTE: Using this method to reverse object since it works for both lists
# and strings.
return array[::-1] | 5eb096d043d051d4456e08fae91fb52048686992 | 2,375 |
def help_text_metadata(label=None, description=None, example=None):
"""
Standard interface to help specify the required metadata fields for helptext to
work correctly for a model.
:param str label: Alternative name for the model.
:param str description: Long description of the model.
:param example: A concrete example usage of the model.
:return dict: Dictionary of the help text metadata
"""
return {
'label': label,
'description': description,
'example': example
} | a1fb9c9a9419fe7ce60ed77bc6fadc97ed4523f8 | 2,376 |
def split_function(vector, column, value):
"""
Split function
"""
return vector[column] >= value | c6129422fd5bf0b16229e6346adde5f50b203e7b | 2,377 |
def _maven_artifact(
group,
artifact,
version,
ownership_tag = None,
packaging = None,
classifier = None,
exclusions = None,
neverlink = None,
testonly = None,
tags = None,
flatten_transitive_deps = None,
aliases = None):
"""Defines maven artifact by coordinates.
Args:
group: The Maven artifact coordinate group name (ex: "com.google.guava").
artifact: The Maven artifact coordinate artifact name (ex: "guava").
version: The Maven artifact coordinate version name (ex: "1.20.1").
ownership_tag: 3rd party dependency owner responsible for its maintenance.
packaging:The Maven artifact coordinate packaging name (ex: "jar").
classifier: The Maven artifact coordinate classifier name (ex: "jdk11").
exclusions: Artifact dependencies to be excluded from resolution closure.
neverlink: neverlink value to set,
testonly: testonly value to set.
tags: Target tags.
flatten_transitive_deps: Define all transitive deps as direct deps.
aliases: aliases that will point to this dep.
"""
maven_artifact = {}
maven_artifact["group"] = group
maven_artifact["artifact"] = artifact
maven_artifact["version"] = version
maven_artifact["aliases"] = aliases
maven_artifact["tags"] = tags
maven_artifact["flatten_transitive_deps"] = flatten_transitive_deps
if packaging != None:
maven_artifact["packaging"] = packaging
if classifier != None:
maven_artifact["classifier"] = classifier
if exclusions != None:
maven_artifact["exclusions"] = exclusions
if neverlink != None:
maven_artifact["neverlink"] = neverlink
if testonly != None:
maven_artifact["testonly"] = testonly
if ownership_tag != None:
maven_artifact["ownership_tag"] = ownership_tag
return maven_artifact | 9f97cd8cadfc3ad1365cb6d291634a9362fea4e8 | 2,378 |
import toml
import itertools
from pathlib import Path
def load_plate(toml_path):
"""\
Parse a TOML-formatted configuration file defining how each well in a
particular plate should be interpreted.
Below is a list of the keys that are understood in the configuration file:
'xlsx_path' [string]
The path to the XLSX file containing the plate reader data, relative to
the configuration file itself. If not specified, this script will look
for a file with the same name as the configuration file, but the
'.xlsx' extension, e.g. 'abc.xlsx' if the config file is 'abc.toml'.
'template' [string]
The path to another TOML file that should be interpreted as containing
default values for all possible settings.
'notes' [string]
A string that will be printed every time the file is visualized. This
is meant to reminder the user of any details relating to this
particular experiment (e.g. mistakes) that might affect interpretation
of the data.
The following keys relate to particular wells. Each of these keys can be
specified in any of four kinds of block: [well.A1], [row.A], [col.1], and
[plate]. The [well] block allows values to be set for individual wells ('A1'
in this example). The [row] and [col] blocks allow values to be set for
whole rows and columns ('A' and '1' in these examples). The [plate] block
allows values to be set for the whole plate. The same value can be set
multiple times, in which case the value from the most specific block will
take precedence.
"""
def recursive_merge(layout, defaults, overwrite=False):
for key, default in defaults.items():
if isinstance(default, dict):
layout.setdefault(key, {})
recursive_merge(layout[key], default)
else:
if overwrite or key not in layout:
layout[key] = default
def do_load_paths(toml_path, expected_ext='.xlsx'):
toml_path = Path(toml_path).resolve()
layout = toml.load(str(toml_path))
# Resolve the path(s) to actual data.
if 'path' in layout and 'paths' in layout:
raise ValueError(f"{toml_path} specifies both 'path' and 'paths'")
elif 'path' in layout:
path = toml_path.parent / layout['path']
layout['paths'] = {'default': path}
elif 'paths' in layout:
layout['paths'] = {
k: toml_path.parent / v
for k, v in layout['paths'].items()
}
else:
default_path = toml_path.with_suffix(expected_ext)
if default_path.exists():
layout['paths'] = {'default': default_path}
# Include a remote file if one is specified.
if 'template' in layout:
layout['template'] = toml_path.parent / layout['template']
template = do_load_paths(layout['template'])
recursive_merge(layout, template)
return layout
layout = do_load_paths(toml_path)
# Apply any row or column defaults.
if 'well' not in layout:
layout['well'] = {}
rows = layout.get('row', {})
cols = layout.get('col', {})
# Create new wells implied by the 'row' and 'col' blocks.
for row, col in itertools.product(rows, cols):
layout['well'].setdefault(f'{row}{col}', {})
# Update any existing wells.
for well in layout.get('well', {}):
row, col = well[:1], well[1:]
recursive_merge(layout['well'][well], rows.get(row, {}))
recursive_merge(layout['well'][well], cols.get(col, {}))
# Apply any plate-wide defaults.
layout.setdefault('plate', {}),
for well in layout.get('well', {}):
recursive_merge(layout['well'][well], layout['plate'])
# If the experiment has any notes, print them out.
if 'notes' in layout:
print(toml_path)
print(layout['notes'].strip())
print()
return layout | cc92a9dae783de915628984979119ca9d2b591a2 | 2,379 |
def gen_accel_table(table_def):
"""generate an acceleration table"""
table = []
for i in range(1001):
table.append(0)
for limit_def in table_def:
range_start, range_end, limit = limit_def
for i in range(range_start, range_end + 1):
table[i] = limit
return table | 53d96db86068d893dfbb216e9e1283535cad9412 | 2,380 |
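An illustrative table definition (made-up limits): each (start, end, limit) triple fills that inclusive index range with its limit:
table = gen_accel_table([(0, 100, 5), (101, 1000, 10)])
print(table[0], table[50], table[500])  # 5 5 10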
import os
def issue_config_exists(repo_path):
"""
returns True if the issue template config.yml file exists in the repo_path
"""
path_to_config = repo_path + "/.github/ISSUE_TEMPLATE/config.yml"
return os.path.exists(path_to_config) | 129b5b47304a60a6c10a8740dda1459c816f6ea1 | 2,381 |
from typing import List
def get_povm_object_names() -> List[str]:
"""Return the list of valid povm-related object names.
Returns
-------
List[str]
the list of valid povm-related object names.
"""
names = ["pure_state_vectors", "matrices", "vectors", "povm"]
return names | cb80899b9b3a4aca4bfa1388c6ec9c61c59978a4 | 2,383 |
def get_dotted_field(input_dict: dict, accessor_string: str) -> dict:
"""Gets data from a dictionary using a dotted accessor-string.
Parameters
----------
input_dict : dict
A nested dictionary.
accessor_string : str
The value in the nested dict.
Returns
-------
dict
Data from the dictionary.
"""
current_data = input_dict
for chunk in accessor_string.split("."):
current_data = current_data.get(chunk, {})
return current_data | 2c82c0512384810e77a5fb53c73f67d2055dc98e | 2,384 |
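A short usage sketch of the dotted accessor above, on a toy nested dict:
cfg = {"optimizer": {"lr": 0.1}}
print(get_dotted_field(cfg, "optimizer.lr"))       # 0.1
print(get_dotted_field(cfg, "optimizer.missing"))  # {}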
import re
def separa_frases(sentenca):
"""[A funcao recebe uma sentenca e devolve uma lista das frases dentro da sentenca]
Arguments:
sentenca {[str]} -- [recebe uma frase]
Returns:
[lista] -- [lista das frases contidas na sentença]
"""
return re.split(r'[,:;]+', sentenca) | d3ac427172e34054119659adc55295ac27965e6c | 2,385 |
def as_actor(input, actor) :
"""Takes input and actor, and returns [as
<$actor>]$input[endas]."""
if " " in actor :
repla = "<%s>"%actor
else :
repla = actor
return "[as %s]%s[endas]" % (repla, input) | dc9bd33bd6b2156f4fa353db2a0b01bfa6dd1357 | 2,386 |
def _format_param(name, optimizer, param):
"""Return correctly formatted lr/momentum for each param group."""
if isinstance(param, (list, tuple)):
if len(param) != len(optimizer.param_groups):
raise ValueError("expected {} values for {}, got {}".format(
len(optimizer.param_groups), name, len(param)))
return param
else:
return [param] * len(optimizer.param_groups) | 52904bdfb1cba7fe3175606bf77f5e46b3c7df80 | 2,387 |
def get_map_with_square(map_info, square):
"""
build string of the map with its top left
bigger square without obstacle full
"""
map_string = ""
x_indices = list(range(square["x"], square["x"] + square["size"]))
y_indices = list(range(square["y"], square["y"] + square["size"]))
M = map_info["matrix"]
for y in range(map_info["line_num"]):
if map_string:
map_string += '\n'
for x in range(map_info["line_len"]):
if M[y][x]:
map_string += map_info["obstacle_char"]
elif x in x_indices and y in y_indices:
map_string += map_info["full_char"]
else:
map_string += map_info["empty_char"]
return map_string | 20d405edd8e5e86e943c297455ebfbeb54b669f8 | 2,388 |
import requests
def get_vlan_groups(url, headers):
"""
Get dictionary of existing vlan groups
"""
vlan_groups = []
api_url = f"{url}/api/ipam/vlan-groups/"
response = requests.request("GET", api_url, headers=headers)
all_vlan_groups = response.json()["results"]
for vlan_group in all_vlan_groups:
vlan_group_info = dict()
vlan_group_info["name"] = vlan_group["name"]
vlan_group_info["state"] = "present"
if vlan_group["site"] is not None:
vlan_group_info["site"] = vlan_group["site"]["name"]
else:
vlan_group_info["site"] = None
vlan_groups.append(vlan_group_info)
return vlan_groups | c0494708e4d2cb5b61a8e4c7ac4136051b1903c7 | 2,389 |
def operating_cf(cf_df):
"""Checks if the latest reported OCF (Cashflow) is positive.
Explanation of OCF: https://www.investopedia.com/terms/o/operatingcashflow.asp
cf_df = Cashflow Statement of the specified company
"""
cf = cf_df.iloc[cf_df.index.get_loc("Total Cash From Operating Activities"),0]
if (cf > 0):
return True
else:
return False | ed6a849fa504b79cd65c656d9a1318aaaeed52bf | 2,390 |
def func(var):
"""Function"""
return var + 1 | a6ca4247f7f7307c384708ed9535046e4ec7d4e3 | 2,392 |
def flanking_regions_fasta_deletion(genome, dataframe, flanking_region_size):
"""
Makes batch processing possible, pulls down small region
of genome for which to design primers around.
This is based on the chromosome and position of input file.
Each Fasta record will contain:
>Sample_Gene_chr:posStart-posStop
Seq of flanking region upstream of SV + seq of flanking region downstream of SV
Args:
genome (list): genome list of tuples (header, seq).
dataframe (pandas object): dataframe with sample info.
flanking_region_size (int): length of sequence upstream and downstream of
input coordinate position to pull as sequence to design primers around.
"""
output = []
for headers, seqs in genome:
chrm = str(headers)
seq = str(seqs)
for gene, sample, chrom, start, stop in zip(dataframe.Gene, dataframe.Sample, dataframe.Chr,
dataframe.PosStart, dataframe.PosStop):
if str(chrom) == chrm:
header = str(str(sample)+"_"+str(gene)+"_"+\
str(chrom)+":"+str(start)+"-"+str(stop)+"__")
flank_seq = seq[int(start)-int(flanking_region_size):int(start)+1]\
+seq[int(stop):(int(stop)+int(flanking_region_size))]
output.append((header, flank_seq.upper()))
return output | a20da206630d1f2fb002c5ca63eab9f240b1f1d5 | 2,393 |
def get_ref(cube):
"""Gets the 8 reflection symmetries of a nd numpy array"""
L = []
L.append(cube[:,:,:])
L.append(cube[:,:,::-1])
L.append(cube[:,::-1,:])
L.append(cube[::-1,:,:])
L.append(cube[:,::-1,::-1])
L.append(cube[::-1,:,::-1])
L.append(cube[::-1,::-1,:])
L.append(cube[::-1,::-1,::-1])
return L | 683ef2c7c0a312e4cf891f191452f9c29f6bc1fd | 2,395 |
def _is_json_mimetype(mimetype):
"""Returns 'True' if a given mimetype implies JSON data."""
return any(
[
mimetype == "application/json",
mimetype.startswith("application/") and mimetype.endswith("+json"),
]
) | 9c2580ff4a783d9f79d6f6cac41befb516c52e9f | 2,396 |
from datetime import datetime
def make_request(action, data, token):
"""Make request based on passed arguments and timestamp."""
return {
'action': action,
'time': datetime.now().timestamp(),
'data': data,
'token': token
} | 60e511f7b067595bd698421adaafe37bbf8e59e1 | 2,397 |
def _unflattify(values, shape):
"""
Unflattifies parameter values.
:param values: The flattened array of values that are to be unflattified
:type values: torch.Tensor
:param shape: The shape of the parameter prior
:type shape: torch.Size
:rtype: torch.Tensor
"""
if len(shape) < 1 or values.shape[1:] == shape:
return values
return values.reshape(values.shape[0], *shape) | e885517419eb48fd1a4ebdf14a8fa3b19f3c5444 | 2,398 |
def get_unique_chemical_names(reagents):
"""Get the unique chemical species names in a list of reagents.
The concentrations of these species define the vector space in which we sample possible experiments
:param reagents: a list of perovskitereagent objects
:return: a list of the unique chemical names in all of the reagent
"""
chemical_species = set()
if isinstance(reagents, dict):
reagents = [v for v in reagents.values()]
for reagent in reagents:
chemical_species.update(reagent.chemicals)
return sorted(list(chemical_species)) | ae5d6b3bdd8e03c47b9c19c900760c8c2b83d0a0 | 2,399 |
def max_votes(x):
"""
Return the maximum occurrence of predicted class.
Notes
-----
If number of class 0 prediction is equal to number of class 1 predictions, NO_VOTE will be returned.
E.g.
Num_preds_0 = 25,
Num_preds_1 = 25,
Num_preds_NO_VOTE = 0,
returned vote : "NO_VOTE".
"""
if x['Num_preds_0'] > x['Num_preds_1'] and x['Num_preds_0'] > x['Num_preds_NO_VOTE']:
return 0
elif x['Num_preds_1'] > x['Num_preds_0'] and x['Num_preds_1'] > x['Num_preds_NO_VOTE']:
return 1
else:
return 'NO_VOTE' | 2eadafdaf9e9b4584cd81685a5c1b77a090e4f1c | 2,401 |
def dict2obj(d):
"""Given a dictionary, return an object with the keys mapped to attributes
and the values mapped to attribute values. This is recursive, so nested
dictionaries are nested objects."""
top = type('dict2obj', (object,), d)
seqs = tuple, list, set, frozenset
for k, v in d.items():
if isinstance(v, dict):
setattr(
top,
k, dict2obj(v)
)
elif isinstance(v, seqs):
setattr(
top,
k, type(v)(dict2obj(sj) if isinstance(sj, dict) else sj for sj in v)
)
else:
setattr(top, k, v)
return top | ccfa713dc130024427872eb6f2017a0383e3bc01 | 2,403 |
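A brief usage sketch of the nested conversion above, with a made-up dictionary:
obj = dict2obj({"a": 1, "b": {"c": 2}, "tags": [{"d": 3}]})
print(obj.a, obj.b.c, obj.tags[0].d)  # 1 2 3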
def _get_log_time_scale(units):
"""Retrieves the ``log10()`` of the scale factor for a given time unit.
Args:
units (str): String specifying the units
(one of ``'fs'``, ``'ps'``, ``'ns'``, ``'us'``, ``'ms'``, ``'sec'``).
Returns:
The ``log10()`` of the scale factor for the time unit.
"""
scale = {"fs": -15, "ps": -12, "ns": -9, "us": -6, "ms": -3, "sec": 0}
units_lwr = units.lower()
if units_lwr not in scale:
raise ValueError(f"Invalid unit ({units}) provided")
else:
return scale[units_lwr] | 2371aab923aacce9159bce6ea1470ed49ef2c72f | 2,404 |
from typing import Dict
from typing import Any
from typing import Tuple
def verify_block_arguments(
net_part: str,
block: Dict[str, Any],
num_block: int,
) -> Tuple[int, int]:
"""Verify block arguments are valid.
Args:
net_part: Network part, either 'encoder' or 'decoder'.
block: Block parameters.
num_block: Block ID.
Return:
block_io: Input and output dimension of the block.
"""
block_type = block.get("type")
if block_type is None:
raise ValueError(
"Block %d in %s doesn't a type assigned.", (num_block, net_part)
)
if block_type == "transformer":
arguments = {"d_hidden", "d_ff", "heads"}
elif block_type == "conformer":
arguments = {
"d_hidden",
"d_ff",
"heads",
"macaron_style",
"use_conv_mod",
}
if block.get("use_conv_mod", None) is True and "conv_mod_kernel" not in block:
raise ValueError(
"Block %d: 'use_conv_mod' is True but "
" 'conv_mod_kernel' is not specified" % num_block
)
elif block_type == "causal-conv1d":
arguments = {"idim", "odim", "kernel_size"}
if net_part == "encoder":
raise ValueError("Encoder does not support 'causal-conv1d.'")
elif block_type == "conv1d":
arguments = {"idim", "odim", "kernel_size"}
if net_part == "decoder":
raise ValueError("Decoder does not support 'conv1d.'")
else:
raise NotImplementedError(
"Wrong type. Currently supported: "
"causal-conv1d, conformer, conv-nd or transformer."
)
if not arguments.issubset(block):
raise ValueError(
"%s in %s in position %d: Expected block arguments : %s."
" See tutorial page for more information."
% (block_type, net_part, num_block, arguments)
)
if block_type in ("transformer", "conformer"):
block_io = (block["d_hidden"], block["d_hidden"])
else:
block_io = (block["idim"], block["odim"])
return block_io | cead023afcd72d1104e02b2d67406b9c47102589 | 2,405 |
def set_up_s3_encryption_configuration(kms_arn=None):
"""
Use the default SSE-S3 configuration for the journal export if a KMS key ARN was not given.
:type kms_arn: str
:param kms_arn: The Amazon Resource Name to encrypt.
:rtype: dict
:return: The encryption configuration for JournalS3Export.
"""
if kms_arn is None:
return {'ObjectEncryptionType': 'SSE_S3'}
return {'ObjectEncryptionType': {'S3ObjectEncryptionType': 'SSE_KMS', 'KmsKeyArn': kms_arn}} | dd8663c17e040423a08c772fd9ca64d25abd2850 | 2,406 |
def filesystem_entry(filesystem):
"""
Filesystem tag {% filesystem_entry filesystem %} is used to display a single
filesystem.
Arguments
---------
filesystem: filesystem object
Returns
-------
A context which maps the filesystem object to filesystem.
"""
return {'filesystem': filesystem} | 3afbd0b8ee9e72ab8841ca5c5517396650d2a898 | 2,409 |
def init_brats_metrics():
"""Initialize dict for BraTS Dice metrics"""
metrics = {}
metrics['ET'] = {'labels': [3]}
metrics['TC'] = {'labels': [1, 3]}
metrics['WT'] = {'labels': [1, 2, 3]}
for _, value in metrics.items():
value.update({'tp':0, 'tot':0})
return metrics | 755dc706f7090d78dac18a989745041b8617a9d6 | 2,410 |
def gather_squares_triangles(p1,p2,depth):
""" Draw Square and Right Triangle given 2 points,
Recurse on new points
args:
p1,p2 (float,float) : absolute position on base vertices
depth (int) : decrementing counter that terminates recursion
return:
squares [(float,float,float,float)...] : absolute positions of
vertices of squares
triangles [(float,float,float)...] : absolute positions of
vertices of right triangles
"""
# Break Recursion if depth is met
if depth == 0:
return [],[]
# Generate Points
pd = (p2[0] - p1[0]),(p1[1] - p2[1])
p3 = (p2[0] - pd[1]),(p2[1] - pd[0])
p4 = (p1[0] - pd[1]),(p1[1] - pd[0])
p5 = (p4[0] + (pd[0] - pd[1])/2),(p4[1] - (pd[0] + pd[1])/2)
# Gather Points further down the tree
squares_left,triangles_left = gather_squares_triangles(p4,p5,depth-1)
squares_right,triangles_right = gather_squares_triangles(p5,p3,depth-1)
# Merge and Return
squares = [[p1,p2,p3,p4]]+squares_left+squares_right
triangles = [[p3,p4,p5]]+triangles_left+triangles_right
return squares,triangles | de4e720eb10cb378f00086a6e8e45886746055c0 | 2,411 |