content | sha1 | id |
---|---|---|
def calc_buckets(
    start: float,
    upper_bound: float,
    /,
    *,
    increment_ratio: float = 1.20
) -> tuple[float, ...]:
    """Calculate histogram buckets on a logarithmic scale."""
    # See https://amplitude.com/blog/2014/08/06/optimal-streaming-histograms
    # for more details.
    result: list[float] = []
    while start <= upper_bound:
        result.append(start)
        start *= increment_ratio
    return tuple(result) | 541fdb81b150a81d24b515acaf53365afb4b62b4 | 10,341 |
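# Usage sketch (hypothetical values, not part of the dataset row above):
# doubling buckets from 1.0 up to 10.0.
buckets = calc_buckets(1.0, 10.0, increment_ratio=2.0)
assert buckets == (1.0, 2.0, 4.0, 8.0)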
def from_hexpoints(s):
    """Parses a string from its codepoints as hex.
    Given a string containing one or more code points (as hex values),
    parse it to a string. For example, the input "006E 006F" produces
    output "no".
    """
    return "".join([chr(int(cp, 16)) for cp in s.split()]) | 55b5088d000cf90059ca93d7e3895aee043d554e | 10,342 |
def basicFitness(individual, env):
    """
    The trivial case, where fitness is just the result of passing through
    the environment.
    """
    return individual.result | 7d108bac92ce390699b66e1ead5b08080856c5be | 10,344 |
def get_metadata_from_attributes(Object, skip_attributes=None, custom_classes=None):
    """
    Get metadata dict from attributes of an object.
    Parameters
    ----------
    Object :
        object from which the attributes are taken.
    skip_attributes : list, optional
        If given, these attributes are skipped (in addition to the methods of the class of Object).
        The default is None.
    custom_classes : dict, optional
        Dict where keys are classes and values are functions that specify how
        objects of this class should be stored in metadata_dict. The default is None.
    Returns
    -------
    metadata_dict : dict
        dict where keys-values are attributes from Object.
    """
    if skip_attributes is None:
        skip_attributes = []
    skip_attributes += dir(type(Object))  # methods of the class will be skipped as attributes
    if custom_classes is None:
        custom_classes = {}
    metadata_dict = {}
    for a in dir(Object):
        if a not in skip_attributes:
            a_val = getattr(Object, a)
            if a_val is None:
                metadata_dict[a] = "None"
            elif type(a_val) in custom_classes:
                # treat class as specified in custom_classes dict
                metadata_dict[a] = custom_classes[type(a_val)](a_val)
            elif callable(a_val):
                # only get docstrings from callables
                metadata_dict[a] = a_val.__doc__
            else:
                metadata_dict[a] = a_val
    return metadata_dict | 0c6eacae223ea94e128ff5b507fcaa5a71034c38 | 10,351 |
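# Usage sketch (the Sensor class is a hypothetical example, not from the dataset):
# instance attributes survive, and None is stored as the string "None".
class Sensor:
    def __init__(self):
        self.name = "probe-1"
        self.rate = 10.0
        self.tag = None

meta = get_metadata_from_attributes(Sensor())
assert meta == {"name": "probe-1", "rate": 10.0, "tag": "None"}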
import zipfile
import json
def _GetExtensionInfoFromCRX(crx_path):
    """Parse an extension archive and return information.
    Note:
        The extension name returned by this function may not be valid
        (e.g. in the case of a localized extension name). Its use is just
        meant to be informational.
    Args:
        crx_path: path to crx archive to look at.
    Returns:
        Tuple consisting of:
        (crx_version, extension_name)"""
    crx_zip = zipfile.ZipFile(crx_path)
    manifest_contents = crx_zip.read('manifest.json')
    decoded_manifest = json.loads(manifest_contents)
    crx_version = decoded_manifest['version']
    extension_name = decoded_manifest['name']
    return (crx_version, extension_name) | 8a5f2ef2547c67d65195334df89d589fbff54dcf | 10,352 |
def update_state(td_state, job_results):
    """
    Updates the torsiondrive state with the compute jobs. The state is updated in place.
    Parameters
    ----------
    td_state : dict
        The current torsiondrive state
    job_results : dict
        A dictionary of completed jobs and job IDs
    Returns
    -------
    dict
        The updated torsiondrive state (the same dict, modified in place)
    """
    for grid_id_str, job_result_tuple_list in job_results.items():
        if grid_id_str not in td_state['grid_status']:
            td_state['grid_status'][grid_id_str] = []
        td_state['grid_status'][grid_id_str] += job_result_tuple_list
    return td_state | 476d19fb9946045dc555f815d680d0475f253003 | 10,353 |
from typing import Tuple
def yaml_remove_split(docstr: str) -> Tuple[str, str]:
    """Extract the parameter summary within :yaml: tags in a docstring,
    and clean up the :yaml: tags in the full docstring.
    Return the cleaned-up docstring and the summary version."""
    key = ":yaml:"
    summary = ""
    i_start = docstr.find(key)
    while i_start >= 0:
        i_key_stop = i_start + len(key) + 1  # end of :yaml:`
        i_stop = docstr.find("`", i_key_stop) + 1  # end of content after it
        fullkey = docstr[i_start:i_stop]  # includes `content` after key
        summary = docstr[i_key_stop : (i_stop - 1)]  # just the content
        docstr = docstr.replace(fullkey, summary)
        # Search for any other keys:
        i_start = docstr.find(key)
    return docstr, summary | e0e3e34871c56c5cc176fcd371e5402846771078 | 10,354 |
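# Usage sketch (hypothetical docstring, not from the dataset): the tag and
# backticks are stripped, and the tagged content is returned as the summary.
doc = "Sets the grid. :yaml:`Number of grid points` per side."
full, short = yaml_remove_split(doc)
assert full == "Sets the grid. Number of grid points per side."
assert short == "Number of grid points"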
def year_isnt_int(YEARS):
    """A function to test if any of the years is not an integer"""
    return any(type(x) != int for x in YEARS) | d23d093f4655846d07228019b4110e2ef3180c69 | 10,359 |
import json
def isJsonDict(s):
    """
    Take a string and determine if it is valid JSON encoding a dict.
    """
    try:
        data = json.loads(s)
        return isinstance(data, dict)
    except ValueError:
        return False | 222841fc39f78a45f0c682a83282e714a26dc0ed | 10,361 |
import json
def read_dependencies(file_path):
    """Reads a json file and creates an iterable of unique dependencies.
    Args:
        file_path: The path to the runtime dependencies file.
    Returns:
        An iterable with unique dependencies.
    """
    deps = None
    with open(file_path) as deps_file:
        deps = json.load(deps_file)
    deps_set = set()
    for _, dep_list in deps.items():
        deps_set.update(dep_list)
    return deps_set | 52756a443b3a7b0ee6ed1a932b3d1b7984fe4189 | 10,364 |
def get_face_rectangles(azure_response):
    """
    Returns the rectangles corresponding to the faces detected by Azure
    :param azure_response: Response from Azure Face request as dictionary.
    :return: The rectangles of any detected face with the format: (width, height, left, top)
    """
    result = []
    for face in azure_response:
        result.append(face.face_rectangle)
    return result | d07e40f1b4c648c52ea660179f8d8c1f4957f0db | 10,367 |
def retr_spacegroup_number(xtalstr, ini0):
    """
    Retrieve the space group number.
    When choosing real space grids some space groups imply restrictions
    on the number of points along the different lattice vectors.
    To impose the correct restrictions we need to know the space group
    number of the current system. This function adds this number to
    the dictionary.
    """
    ini0["spacegroup"] = xtalstr.spacegroup_number
    return ini0 | f86827aa089b79c054fe8c4d78c5a6da48f0463e | 10,368 |
def snd(pair):
    """Return the second element of pair
    """
    return pair[1] | a7893d60324ebdd358a0a0bb86bd2842d2617629 | 10,374 |
def avoid_multiple_update(func):
    """Decorator to prevent handlers from updating multiple times."""
    def wrapper(self, attr, old, new):
        if self.in_update:
            return
        self.in_update = True
        func(self, attr, old, new)
        self.in_update = False
    return wrapper | d35e9add88ddb62d1368ff0fbf94e9637518c249 | 10,385 |
def unsigned_to_signed(seq, width):
    """
    Convert a sequence of unsigned integers to signed (two's complement)
    values of the given bit width.
    """
    out = []
    k = pow(2, width)
    for x in seq:
        if x >= k / 2:
            x -= k
        out.append(x)
    return out | b7b90ff4de576bee2e91af27f6fc3b509608ef34 | 10,387 |
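# Usage sketch (hypothetical 8-bit values, not part of the dataset row above):
assert unsigned_to_signed([0, 127, 128, 255], 8) == [0, 127, -128, -1]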
def containerhasprefix(obj, prefix):
    """
    Returns whether the name of obj's container starts with prefix.
    """
    return obj.container.name.startswith(prefix) | dcf4b14cfa1ac15355e7db5110be55b623e35f20 | 10,389 |
import requests
def check_user_permission(access_token, job_requires):
    """
    Check if the user has permission to run the job or not
    Args:
        access_token(str): the access token
        job_requires(dict): requirements so that the job can run
            {
                "arborist_url": "http://arborist-service",
                "job_access_req": (
                    [
                        {"resource": "/sower", "action": {"service": "job", "method": "access"}},
                        {"resource": "/programs", "action": {"service": "indexd", "method": "write"}},
                    ],
                )
            }
    Returns:
        bool: if the user has permission to run the job or not
        dict: a message log
    """
    params = {
        "user": {"token": access_token},
        "requests": job_requires["job_access_req"],
    }
    response = requests.post(
        "{}/auth/request".format(job_requires["arborist_url"].strip("/")),
        headers={"content-type": "application/json"},
        json=params,
    )
    if response.status_code != 200:
        return (
            False,
            {"message": "Cannot run the job. Detail: {}".format(response.json())},
        )
    elif not response.json()["auth"]:
        return (False, {"message": "User does not have privilege to run the job"})
    else:
        return True, {"message": "OK"} | c1069364b8d59965f78367e64b88206062c60b2b | 10,392 |
def to_upper(string):
    """The upper case version of a string"""
    return string.upper() | 8d3bb60b7b4704479cb642b9ab910f7a1469fd44 | 10,394 |
def trimmed_split(s, seps=(";", ",")):
    """Given a string s, split it by the first of the seps that occurs in it."""
    for sep in seps:
        if sep not in s:
            continue
        data = [item.strip() for item in s.strip().split(sep)]
        return data
    return [s] | 192c75f7e346860010031cf9e621d7bb5664dde4 | 10,398 |
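# Usage sketch (hypothetical inputs): the first matching separator wins, and a
# string with no separator comes back as a one-element list.
assert trimmed_split("a; b ;c") == ["a", "b", "c"]
assert trimmed_split("x,y") == ["x", "y"]
assert trimmed_split("plain") == ["plain"]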
def descendant_selected(node):
    """Returns true if this node or a descendant of it is selected.
    Use as e.g.: {% if descendant_selected node %}
    """
    if node.selected:
        return True
    for child in node.children:
        if descendant_selected(child):
            return True
    return False | 974b95b8d658ec173ccbbf9488f309529c1f0d86 | 10,399 |
def fileNumber(filePath):
    """
    Get the number of the file as an int. foo.0080 would return 80
    """
    num = filePath.split('.')[-1]
    return int(num) | 731918660f0c145c15ba99c2f5eb8be9745e8576 | 10,404 |
def get_top_adj(full_results, num):
    """
    Takes the dictionary of results from run_adj_analysis and the number of top results to return.
    Returns the top num adjectives associated with male pronouns and female pronouns.
    :param full_results: dictionary from result of run_adj_analysis
    :param num: number of top results to return per gender
    :return: tuple of lists of top adjectives associated with male pronouns and female pronouns, respectively
    """
    male_adj = []
    female_adj = []
    for adj, val in full_results.items():
        male_adj.append((val[0] - val[1], adj))
        female_adj.append((val[1] - val[0], adj))
    male_top = sorted(male_adj, reverse=True)[0:num]
    female_top = sorted(female_adj, reverse=True)[0:num]
    return male_top, female_top | 0eddb189f79bc9b18f2994f2a99aef1b07181691 | 10,405 |
def manhattan_distance(x, y):
    """ Returns the Manhattan (City Block) distance between two lists
    """
    return sum(abs(a - b) for a, b in zip(x, y)) | 4887024603a8fe3398ec80a17d1d70fbe15fdfab | 10,407 |
import copy
import collections
def get_db_data(relation_data, unprefixed):
    """Organize database requests into a collections.OrderedDict
    :param relation_data: shared-db relation data
    :type relation_data: dict
    :param unprefixed: Prefix to use for requests without a prefix. This should
                       be unique for each side of the relation to avoid
                       conflicts.
    :type unprefixed: str
    :returns: Ordered dict of databases and users
    :rtype: collections.OrderedDict
    """
    # Deep copy to avoid unintentionally changing relation data
    settings = copy.deepcopy(relation_data)
    databases = collections.OrderedDict()
    # Clear non-db related elements
    if "egress-subnets" in settings.keys():
        settings.pop("egress-subnets")
    if "ingress-address" in settings.keys():
        settings.pop("ingress-address")
    if "private-address" in settings.keys():
        settings.pop("private-address")
    singleset = {"database", "username", "hostname"}
    if singleset.issubset(settings):
        settings["{}_{}".format(unprefixed, "hostname")] = (
            settings["hostname"])
        settings.pop("hostname")
        settings["{}_{}".format(unprefixed, "database")] = (
            settings["database"])
        settings.pop("database")
        settings["{}_{}".format(unprefixed, "username")] = (
            settings["username"])
        settings.pop("username")
    for k, v in settings.items():
        db = k.split("_")[0]
        x = "_".join(k.split("_")[1:])
        if db not in databases:
            databases[db] = collections.OrderedDict()
        databases[db][x] = v
    return databases | 0e2a30624f35f49119ae9bd275153d5e9fdf7503 | 10,412 |
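# Usage sketch (hypothetical relation data): unprefixed keys get the given
# prefix, and non-db keys such as egress-subnets are dropped.
rel = {
    "database": "keystone",
    "username": "admin",
    "hostname": "10.0.0.1",
    "egress-subnets": "10.0.0.0/24",
}
dbs = get_db_data(rel, unprefixed="keystone")
assert dbs["keystone"]["database"] == "keystone"
assert dbs["keystone"]["hostname"] == "10.0.0.1"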
def get_username(prompt: str) -> str:
    """Prompt the user for a username"""
    username = input(prompt)
    return username | c6d6119ba9b2b1ec9408501afb04b513aa9b4965 | 10,416 |
def remove_low_information_features(feature_matrix, features=None):
    """Select features that have at least 2 unique values and that are not all null
    Args:
        feature_matrix (:class:`pd.DataFrame`): DataFrame whose columns are feature names and rows are instances
        features (list[:class:`featuretools.FeatureBase`] or list[str], optional): List of features to select
    Returns:
        (feature_matrix, features)
    """
    keep = [c for c in feature_matrix
            if (feature_matrix[c].nunique(dropna=False) > 1 and
                feature_matrix[c].dropna().shape[0] > 0)]
    feature_matrix = feature_matrix[keep]
    if features is not None:
        features = [f for f in features
                    if f.get_name() in feature_matrix.columns]
        return feature_matrix, features
    return feature_matrix | 0377b2b87d04ddaa89e332ed72c12a05b4a1c3e6 | 10,417 |
def diff_template(page, label=None):
    """
    Return a Template:Diff2 string for the given Page.
    """
    if label is None:
        label = page.title()
    return f"{{{{Diff2|{page.latest_revision_id}|{label}}}}}" | 48cca6ad78ce2e0f2e1ce4902d56734fccf45030 | 10,421 |
import math
def calc_entropy(data, base=2):
    """
    Calculate the entropy of data. Using documentation from
    scipy.stats.entropy as the basis for this code
    (https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.entropy.html).
    :param data: Measure the entropy of this object
    :param base: Logarithm base for the entropy calculation
    :return: Calculated entropy value
    """
    if not data:
        return 0
    # calculate frequency list
    chars = set(data)
    frequencies = [float(data.count(ch)) / len(data) for ch in chars]
    # calculate shannon entropy
    H = -sum([freq * math.log(freq) / math.log(base) for freq in frequencies])
    return H | 72a9d111120415471c7d54e11c862ce8ebde4a55 | 10,424 |
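# Usage sketch (hypothetical input): two symbols at equal frequency give
# exactly one bit of entropy.
assert abs(calc_entropy("aabb") - 1.0) < 1e-12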
def count_unplayed_cards(r, progress):
    """Returns the number of cards which are not yet played, including cards
    which are unplayable because all those cards (or cards of a value below it)
    are already discarded"""
    n = 0
    for suit in r.suits:
        n += 5 - progress[suit]
    return n | 445b1b3d07aff4c2010fc482cb7f5b55a8250bac | 10,426 |
def toFENhash(fen):
    """ Removes the two last parts of the FEN notation
    """
    return ' '.join(fen.split(" ")[:-2]) | bc2575ce35f8d2678753369d2b3a4d050388891f | 10,428 |
import math
def gcd(*nums):
    """
    Find the greatest common divisor (GCD) of a list of numbers.
    Args:
        *nums (tuple[int]): The input numbers.
    Returns:
        gcd_val (int): The value of the greatest common divisor (GCD).
    Examples:
        >>> gcd(12, 24, 18)
        6
        >>> gcd(12, 24, 18, 42, 600, 66, 666, 768)
        6
        >>> gcd(12, 24, 18, 42, 600, 66, 666, 768, 101)
        1
        >>> gcd(12, 24, 18, 3)
        3
    """
    gcd_val = nums[0]
    for num in nums[1:]:
        gcd_val = math.gcd(gcd_val, num)
    return gcd_val | ea3b2f55ab1db530a2c0e5d472c053b9e538f3e0 | 10,436 |
import cmath
def rotate_points(points, phase_shift):
    """
    Rotate points about the origin.
    Arguments:
        points: iterable(complex)
            Points to rotate in the complex plane.
        phase_shift:
            Magnitude of rotation in radians.
    Returns:
        rotated_points: list(complex)
            Points rotated about the origin.
    """
    rotated_points = []
    for point in points:
        rad, phase = cmath.polar(point)
        rotated_points.append(cmath.rect(rad, phase + phase_shift))
    return rotated_points | e9ae43774bc8f5ac770413f6e758d41223fb3c00 | 10,438 |
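# Usage sketch (hypothetical input): rotating 1+0j by pi/2 lands near 0+1j,
# up to floating-point error in the polar round trip.
import cmath
rotated = rotate_points([1 + 0j], cmath.pi / 2)
assert abs(rotated[0] - 1j) < 1e-12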
def tupleize(func):
    """A decorator that tuple-izes the result of a function. This is useful
    when the evaluation function returns a single value.
    """
    def wrapper(*args, **kargs):
        # the trailing comma wraps the single return value in a 1-tuple
        return func(*args, **kargs),
    return wrapper | 2a2a9d709177868bd47571f86ae026d666b2593b | 10,439 |
import torch
def rejoin(chunked, initial_shape):
    """
    Rejoins chunked tensor, removing the padding as necessary
    >>> eq = lambda a, b: torch.all(torch.lt(torch.abs(torch.add(a, -b)), 1e-12))
    >>> x = torch.arange(end=4) + 3
    >>> y = torch.arange(end=15) + 2
    >>> mesh = x.view(-1, 1) @ y.view(1, -1)
    >>> mesh = torch.stack([mesh, mesh + 1, mesh + 2], dim=0)
    First we create an array. I don't know why I created it in such a silly way.
    Next, we'll show that chunking/rejoining result in the exact same array,
    despite the fact that some of the chunks are padded!
    >>> mesh.shape
    torch.Size([3, 4, 15])
    >>> chunks(mesh, 3).shape
    torch.Size([1, 2, 5, 3, 3, 3])
    >>> rejoined = rejoin(chunks(mesh, 3), mesh.shape)
    >>> rejoined.shape
    torch.Size([3, 4, 15])
    >>> torch.equal(mesh, rejoined)
    True
    Great! Now we can try specifying a chunk size that is larger than the
    maximum dimension, and it still works.
    >>> initial = torch.arange(512).view(8, 8, 8)
    >>> chunked = chunks(initial, 9)
    >>> reconstructed = rejoin(chunked, (8, 8, 8))
    >>> torch.equal(initial, reconstructed)
    True
    :param chunked: a chunked tensor created by `chunks`
    :param initial_shape: the initial shape of the tensor before chunking
    :return: tensor in the shape `initial_shape`, dimensions `i` and
        `i + len(initial_shape)` are joined
    """
    indices = []
    padded_shape = []
    for i in range(len(initial_shape)):
        indices.append(i)
        indices.append(i + len(initial_shape))
        padded_shape.append(chunked.shape[i] * chunked.shape[len(initial_shape) + i])
    repermuted = chunked.permute(*indices)
    padded = repermuted.reshape(*padded_shape)
    for i, s in enumerate(initial_shape):
        padded = torch.narrow(padded, i, 0, s)
    return padded | 6bcf5bf07b813b79245b50c72e67a98e575df5f9 | 10,441 |
def transient_provider(func):
    """
    Decorator to mark a provider as transient
    """
    func.transient = True
    return func | 2f540fc3099c3fc71ac49ce44dbd69a042b9e39f | 10,442 |
def validate_keep(keep):
    """validates the value of the keep parameter
    If it's not coercible to an int or equal to the special string value 'all',
    raise an error. Otherwise, return `keep`.
    :param keep: value to validate
    :type keep: int or str
    :return: the validated value of keep
    :rtype: either an int or the special value 'all'
    :raises ValueError: if `keep` is a string that can't be coerced to an int
    """
    if keep != 'all':
        keep = int(keep)
    return keep | 5a1d03140eeab9bef1f3ae417c3d3fc77b8499bd | 10,443 |
def levensthein_dist(input_command: str, candidate: str) -> int:
    """
    Implement the Levenshtein distance algorithm to determine, in case of a non-existing handle,
    if there's a very similar command to suggest.
    :param input_command: The non-existing handle the user gave as input
    :param candidate: The (possibly similar) alternative command
    :return: The similarity between the two strings measured by the Levenshtein distance
    """
    if not input_command or not candidate:
        return max(len(input_command), len(candidate))  # at least one string is empty
    dp_table = [[0 for col in range(len(input_command) + 1)] for row in range(len(candidate) + 1)]
    dp_table[0] = list(range(0, len(input_command) + 1))
    for i in range(1, len(candidate) + 1):
        dp_table[i][0] = i
    # now choose the minimum Levenshtein distance from the three options delete/replace/insert
    # if chars are the same -> the distance is the same as for the substrings of input_command
    # and candidate without these chars
    for i in range(1, len(candidate) + 1):
        for j in range(1, len(input_command) + 1):
            # choose minimum edit distance from delete, replace or insert at current substring
            if input_command[j - 1] == candidate[i - 1]:
                dp_table[i][j] = dp_table[i - 1][j - 1]
            else:
                dp_table[i][j] = min(min(dp_table[i][j - 1], dp_table[i - 1][j - 1]), dp_table[i - 1][j]) + 1
    return dp_table[len(candidate)][len(input_command)] | 02506be8655f97a60665a507cfa62cb9703590ef | 10,446 |
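# Usage sketch (classic textbook pair, hypothetical here): "kitten" can be
# turned into "sitting" with three single-character edits.
assert levensthein_dist("kitten", "sitting") == 3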
from datetime import datetime
def _is_start_date_before_end_date(start: datetime, end: datetime) -> bool:
    """Whether the start date is on or before the end date.
    Args:
        start: The start date of an event.
        end: The end date of an event.
    Returns:
        True if valid, otherwise returns False.
    """
    return start <= end | 4a296d6673f6beb704b590893088c50a97184764 | 10,450 |
def ConvertIndexListToSet(index_list):
    """Creates a set containing the (1-based) indices of all '1' entries in the
    index list
    """
    return set(i + 1 for i, j in enumerate(index_list) if j == 1) | 78d0769de4b22aabd0d0ea2f906958a929da5299 | 10,452 |
from typing import Tuple
def decrypt(text_enc: Tuple[int, int]) -> str:
    """Function that decrypts the tuple of tokens
    and re-converts them into a string.
    :param text_enc: the tuple of the text encrypted
    :return: the text decrypted
    """
    encrypted = text_enc[0] ^ text_enc[1]
    decrypted = encrypted.to_bytes((encrypted.bit_length() + 7) // 8, 'big')
    return decrypted.decode() | 0496d90818ef310b885341dad2d91823eadf97e2 | 10,453 |
def get_mentored_team(mentor, hackathon):
    """ Retrieve all mentored teams for a Judge at a Hackathon """
    mentored_teams = mentor.mentored_teams.filter(
        hackathon=hackathon).order_by('display_name')
    return mentored_teams | de6e25b3c116b4bca4c4559b3a204d46ed5b38ba | 10,461 |
def get_ranges(uid_number, inc=0):
    """
    Return two block ranges to be used to create subnets for
    Atmosphere users.
    NOTE: If you change MAX_SUBNET then you should likely change
    the related math.
    """
    MAX_SUBNET = 4064  # Note: 16 * 254
    n = uid_number % MAX_SUBNET
    # 16-31
    block1 = (n + inc) % 16 + 16
    # 1-254 (integer division keeps block2 an int under Python 3)
    block2 = ((n + inc) // 16) % 254 + 1
    return (block1, block2) | b364a9051a8c8a9ea68dcee76fc7b993e30c13af | 10,462 |
def returnRectangles(a, x):
    r""" Returns 2D discrete integral array using the rectangle method
    The calculation for each array element is
    :math:`\Delta y_i = 0.5(a_{i-1}+a_{i})(x_{i}-x_{i-1})`
    Parameters
    ----------
    a : numpy.ndarray
        Description: Array of y(x) function with N+1 elements
    x : numpy.ndarray
        Description: x-coordinate array with N+1 elements
    Returns
    --------
    numpy.ndarray
    """
    return 0.5*(a[1:]+a[:-1])*(x[1:]-x[:-1]) | dd9931bcc2eae7d6712d4e809b23e3145ee4555b | 10,463 |
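# Usage sketch (hypothetical arrays): per-interval areas for y = x**2 sampled
# at x = 0, 1, 2.
import numpy as np
x = np.array([0.0, 1.0, 2.0])
a = x ** 2
assert np.allclose(returnRectangles(a, x), [0.5, 2.5])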
def strip_id(url):
    """Get MP database ID from url."""
    url = url.split('/')
    return url[-2] | fa4ae6a3f58067b99fd5f89a2d1f10b49bc27f5e | 10,464 |
def find_channel(channel_name, source):
    """
    Search for channel by name on the given object
    """
    name = channel_name.lower()
    for channel in source.channels:
        if channel.name.lower() == name:
            return channel
    return None | 2b9feb9f60986392137deb6c8364e6c2d7e5efd4 | 10,466 |
def get_metadata(data):
    """Gets metadata from the input CloudFormation file content."""
    return data['Metadata'] | 5fed88a19704430272b73751938f34a3a7a09d8d | 10,468 |
import pytz
def to_naive_utc(dtime):
    """convert a datetime object to UTC and then remove the tzinfo; if
    the datetime is naive already, return it
    """
    if not hasattr(dtime, 'tzinfo') or dtime.tzinfo is None:
        return dtime
    dtime_utc = dtime.astimezone(pytz.UTC)
    dtime_naive = dtime_utc.replace(tzinfo=None)
    return dtime_naive | 4d76ef2a3a5f11efeeedb9f920bc15b55c996da3 | 10,471 |
def get_true_positives(data, classifier):
    """Find the total positives that also are selected by our classifier."""
    return data[data["foundSegment"] & classifier(data)].size | 2d1581e5f9ade4ff299557c76f3a9507c4dc5a55 | 10,474 |
def precision_at_k(vanilla_topk, fair_topk):
    """
    calculate precision @ K
    :param vanilla_topk: top K nodes in vanilla mining result
    :param fair_topk: top K nodes in debiased mining result
    :return: precision @ K
    """
    topk = set(fair_topk)
    groundtruth = set(vanilla_topk)
    return len(topk.intersection(groundtruth)) / len(topk) | 073abf75d0a66d492541d13c799b67c1b09662f8 | 10,478 |
import hashlib
import json
def param_to_hash(param_dict):
    """Generate a hash for a fixed hyperparameter setting"""
    config_hash = hashlib.md5(json.dumps(param_dict, sort_keys=True).encode("utf-8")).hexdigest()
    return config_hash | 5302000372023af0d2ea44ca3e5497741cd84e58 | 10,479 |
def lower(series):
    """Transform all text to lowercase."""
    return series.str.lower() | fd8f443ff8cb27700bceed3a2b2843befbc932a3 | 10,480 |
def parse_account(s):
    """ parse entity and account string """
    return s.strip().split('.') | acb14bd71a670e67b306f022792702dc2677539a | 10,482 |
def normalize_job_id(job_id):
    """Convert the job id into job_id, array_id."""
    job_id = job_id.split('.')[0]
    if '[' in job_id:
        job_id, array_id = job_id.split('[')
        job_id = job_id.strip('[]')
        array_id = array_id.strip('[]')
        if not array_id:
            array_id = None
    else:
        array_id = None
    return job_id, array_id | 6eb83b54f8a0cec8c094372a5e5446334a1863c3 | 10,485 |
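# Usage sketch (hypothetical scheduler IDs): array jobs are split into a base
# id and an array index; plain jobs get array_id None.
assert normalize_job_id("12345[7].server") == ("12345", "7")
assert normalize_job_id("678.server") == ("678", None)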
import random
def select_ip_from_network(network):
    """
    Randomly selects an IP address from a particular network
    :param network: Network from which to select the IP address
    :return: IP address as integer from network
    """
    max_address = network.address | ((1 << (32 - network.prefix)) - 1)
    return random.randint(network.address, max_address) | 7356b6cedd5bc78933484e0cba9a8f84d1507b07 | 10,487 |
import json
import requests
def query(query, experiment_id, host, verify_cert=True):
    """
    Query the MDML for an example of the data structure that your query will return.
    This is aimed at aiding in development of FuncX functions for use with the MDML.
    Parameters
    ----------
    query : list
        Description of the data to send funcx. See queries format in the documentation on GitHub
    experiment_id : string
        MDML experiment ID for which the data belongs
    host : string
        Host of the MDML instance
    verify_cert : bool
        Whether requests should verify the SSL cert
    Returns
    -------
    list
        Data structure that will be passed to FuncX
    """
    resp = requests.get(f"https://{host}:1880/query?query={json.dumps(query)}&experiment_id={experiment_id}", verify=verify_cert)
    return json.loads(resp.text) | 3080ab6322a2756fdc6cb4da8ccc81b96154cbb4 | 10,488 |
def GetPercentage(number, total):
    """Returns the float percentage that a number is of a total."""
    if not number:
        return 0
    return float(number) / total * 100 | 46e2a5b2b4a3fa648792a461852f84e886810a7f | 10,491 |
import re
def parse_billing_code_col(s: str):
    """Split billing code into separate fields for code_type and code. E.g., 'MS-DRG .... 001'."""
    l: list = s.split()
    if 'MS-DRG' in s:
        code_type, code = l[0], l[4]
    elif re.search('CPT|HCPCS', s):
        code_type, code = l[0], l[1]
    else:
        code_type, code = 'Other', None
    return code_type, code | 78865842a457c0226184ace94334d0934a0e0a5c | 10,493 |
def DetermineLocaleType(locale_str):
    """Determines the locale 'type' for a given locale name.
    Returns:
        (string) Always one of the following strings,
        'world' If the locale name refers to the world.
        'country' If the locale name looks like a country ID.
        'region' If the locale name looks like a region ID.
        'city' If the locale name looks like a city ID.
    """
    if locale_str == 'world':
        return 'world'
    depth_map = {1: 'country', 2: 'region', 3: 'city'}
    depth = len(locale_str.split('_'))
    return depth_map[depth] | 52ac86d8e011d9d01b6f31ed6a27d535a55b26a4 | 10,495 |
def get_model(instance):
    """Get model by instance."""
    return instance._meta.model | e386449ecd49d5874fb704c683c17d4492ea6f6c | 10,497 |
def force_slashend(path):
    """
    Return ``path`` suffixed with ``/`` (path is unchanged if it is already
    suffixed with ``/``).
    """
    if not path.endswith('/'):
        path = path + '/'
    return path | 2e2be0dbb88fb380e581f49af532ea7b5724d918 | 10,498 |
import ast
def load(symbol_id: str) -> ast.Name:
    """Returns an AST Name node that loads a variable."""
    return ast.Name(id=symbol_id, ctx=ast.Load()) | 94a76bdac89a9e7b1f766832dd2aef63469704ac | 10,503 |
def _NegGrad(_, grad):
    """Returns -grad."""
    return -grad | c2a2800e1d80c3151019425e3871c235485c9324 | 10,504 |
from collections import OrderedDict
def init_vec(f, names, data):
    """Initialize resizable 1d arrays to hold the outputs."""
    # Iterate over 'names' and columns of 'data'
    dset = [
        (name, f.create_dataset(name, data=d, maxshape=(None,)))
        for name, d in zip(names, data.T)
    ]  # -> list of (name, dataset) pairs
    return OrderedDict(dset) | ed024e176da68d1a6753da305cea0a36d21e0e7a | 10,505 |
def clean_url(url):
    """
    Reformat a URL with all querystrings stripped
    :param url: The URL
    :return: A clean URL
    """
    # str.find returns -1 when there is no '?', which would otherwise chop
    # the last character off the URL
    idx = url.find('?')
    return url[:idx] if idx != -1 else url | 56edc6db7b59e1550a68377dcdcb80a83b79e854 | 10,506 |
def level_is_rtl(lev):
    """
    Return True if `lev' is a Right-to-Left level, False otherwise.
    """
    return bool(lev & 1) | 5c3d6127bcef2f17b347c7c26a86f723f25ab6a7 | 10,510 |
def _read_timestamp(file):
    """Get start and end time from timestamp csv file."""
    try:
        with open(file, 'r') as f:
            rows = f.readlines()
            starttime, endtime = float(rows[0].split(",")[0]), float(rows[-1].split(",")[0])
            starttime, endtime = starttime / (10**3), endtime / (10**3)
    except IOError:
        starttime, endtime = "Nan", "Nan"
    return starttime, endtime | be403b62623f45e6cf43c6239c1c068a48987183 | 10,513 |
import torch
def generate_padding_mask(x: torch.Tensor, length: torch.Tensor) -> torch.Tensor:
    """
    Args:
        x: tensor of shape [batch_size, length]
        length: tensor of shape [batch_size]
    Returns:
        float tensor of shape [batch_size, length]
    """
    assert x.dim() == 2
    assert length.dim() == 1
    return (torch.arange(x.shape[1], device=x.device)[None, :] < length[:, None]).to(x.dtype) | d475cfa6eb81525745bed8613827c59bac57aa5d | 10,515 |
def object_func(x, A, b):
    """ Objective function for the optimization for L-BFGS-B
    """
    y = A * x - b
    return 0.5 * y.dot(y) | 8a9453f6f93f4b7c7e2f5202a2a81750df5c27df | 10,518 |
def bayesdb_generator_modelnos(bdb, generator_id):
    """Return list of model numbers associated with given `generator_id`."""
    sql = '''
        SELECT modelno FROM bayesdb_generator_model AS m
            WHERE generator_id = ?
            ORDER BY modelno ASC
    '''
    return [row[0] for row in bdb.sql_execute(sql, (generator_id,))] | e7cbb96679f25815df6a28e3eb89ad61e4b20e09 | 10,522 |
from typing import List
def format_terminal_call(cmd: List[str]) -> str:
    """
    Format commands to/from the terminal for readability
    :param cmd: List of strings much like sys.argv
    :return: Formatted string used for display purposes
    """
    return ' '.join(cmd).replace("--", " \\ \n\t--") | 63af43a7d8a5cb708f8a9f6d7467e62e378876b4 | 10,523 |
import yaml
def dict_to_yaml(dict_data):
    """Return YAML from dictionary.
    :param dict_data: Dictionary data
    :type dict_data: dict
    :returns: YAML dump
    :rtype: string
    """
    return yaml.dump(dict_data, default_flow_style=False) | eba7896f63d499ef6c55b057320933dc0ff86333 | 10,536 |
def findNearestDate(date_list, date):
    """
    Find the date in `date_list` nearest to the given date.
    Adapted from https://stackoverflow.com/a/32237949/3816498 .
    Parameters
    ----------
    date_list : array-like
        List of dates
    date : datetime
        The date, to which the nearest date in `date_list` should be found.
    Returns
    -------
    nearest_date : datetime
        Nearest date to `date` in `date_list`
    time_delta : float
        Time difference in minutes
    """
    nearest_date = min(date_list, key=lambda x: abs(x - date))
    time_delta = (nearest_date - date).total_seconds() / 60.
    return nearest_date, time_delta | 431f93212e6ffeddb994e727d94db8ccfe67315c | 10,538 |
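# Usage sketch (hypothetical datetimes): the nearest date wins, and the delta
# is signed (negative when the nearest date is earlier than the query).
from datetime import datetime
dates = [datetime(2020, 1, 1, 12, 0), datetime(2020, 1, 1, 13, 0)]
nearest, delta = findNearestDate(dates, datetime(2020, 1, 1, 12, 20))
assert nearest == datetime(2020, 1, 1, 12, 0) and delta == -20.0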
from typing import List
from typing import Dict
def compute_images2labels(images: List[str], labels: List[str]) -> Dict:
    """Maps all image paths to a list of labels.
    Args:
        images (List[str]): The list of image paths.
        labels (List[str]): The list of labels.
    Returns:
        Dict: The mapping between the image paths and the labels.
    """
    # Init images2labels dict
    images2labels = {}
    # Find label for each image
    for image_path in images:
        for label in labels:
            if f"/{label}/" in image_path:
                images2labels[image_path] = label
    return images2labels | 1af99cd93bea7530857502665d4ce388a9d8f9ba | 10,541 |
def sum_counts(fname, R1=False):
    """Collect the sum of all reads for all samples from a summary count file (e.g. from collect_counts)"""
    count = 0
    with open(fname, 'r') as infh:
        for line in infh:
            l = line.split()
            if R1:
                if l[2] == "R1":
                    count += float(l[3])
            else:
                count += float(l[3])
    return count | 6ce7e156e1100f3106836e0b34caf6750baa18e1 | 10,544 |
def coerce_bool(value):
    """Coerce a string to a bool, or to None"""
    clean_value = str(value).strip().lower()
    if clean_value in ["yes", "1", "true", "t"]:
        return True
    elif clean_value in ["no", "n0", "0", "false", "f"]:
        return False
    elif clean_value in ["", "na", "n/a", "none"]:
        return None
    else:
        raise ValueError("Could not determine truthiness of value {!r}".format(value)) | dd979b73717b2c86fe28cf7d1bdcc89020eda163 | 10,547 |
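# Usage sketch (hypothetical inputs): truthy/falsy/empty strings map to
# True/False/None; anything else raises ValueError.
assert coerce_bool("Yes") is True
assert coerce_bool(0) is False
assert coerce_bool("N/A") is None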
def count_consonants(string):
    """ Function which returns the count of
    all consonants in the string \"string\" """
    consonants = "bcdfghjklmnpqrstvwxz"
    counter = 0
    if string:
        for ch in string.lower():
            if ch in consonants:
                counter += 1
    return counter | 4a852b3ec9f8f660d71dde547cbffb0c25b1e209 | 10,548 |
def soft_contingency_table(resp1, resp2):
    """Compute the soft contingency table for two responsibility matrices
    Args:
        resp1 (numpy array): N x K_1 responsibility matrix - each row is a probability
            vector for one of the N items belonging to each of K_1 modes
        resp2 (numpy array): N x K_2 responsibility matrix - each row is a probability
            vector for one of the N items belonging to each of K_2 modes
    Returns:
        (numpy array): K_1 x K_2 contingency table for soft clustering - defined as
            resp1^T resp2
    """
    return resp1.T @ resp2 | 50ee20e05755d320fe9f130a6ca57728d1e1b5ad | 10,550 |
def allowed_request_lot_filters(lot_filters):
    """Create a set of (name, value) pairs for all form filters."""
    filters = set()

    def recursive_search(filters):
        more_filters = set()
        for f in filters:
            more_filters.add((f['name'], f['value']))
            children = f.get('children')
            if children:
                more_filters.update(recursive_search(children))
        return more_filters
    # recursive search to account for sub-filters (i.e. sub-categories)
    for section in lot_filters:
        filters.update(recursive_search(section['filters']))
    return filters | 1b265e0233e08d0cab4248f28424b5b26ad28340 | 10,551 |
def _check_if_StrNotBlank(string):
    """
    Check if a string is blank/empty
    Parameters
    ----------
    string : str
        the string to check
    Returns
    -------
    : boolean
        True if string is not blank/empty
        False if string is blank/empty
    """
    return bool(string and string.strip()) | e5de1d902f8e3931d23e04c6ba825b17d90e8d1d | 10,553 |
from math import asin, atan2, sqrt, degrees
def normal2SD(x, y, z):
    """Converts a normal vector to a plane (given as x,y,z)
    to a strike and dip of the plane using the Right-Hand-Rule.
    Input:
        x: The x-component of the normal vector
        y: The y-component of the normal vector
        z: The z-component of the normal vector
    Output:
        strike: The strike of the plane, in degrees clockwise from north
        dip: The dip of the plane, in degrees downward from horizontal
    """
    # Due to geologic conventions, positive angles are downwards
    z = -z
    # First convert the normal vector to spherical coordinates
    # (This is effectively a plunge/bearing of the normal vector)
    r = sqrt(x*x + y*y + z*z)
    plunge = degrees(asin(z/r))
    bearing = degrees(atan2(y, x))
    # Rotate bearing so that 0 is north instead of east
    bearing = 90 - bearing
    if bearing < 0:
        bearing += 360
    # If the plunge angle is upwards, get the opposite end of the line
    if plunge < 0:
        plunge = -plunge
        bearing -= 180
        if bearing < 0:
            bearing += 360
    # Now convert the plunge/bearing of the pole to the plane that it represents
    strike = bearing + 90
    dip = 90 - plunge
    if strike > 360:
        strike -= 360
    return strike, dip | 6b8bcfb9444352f8722aa1742909544202fa32d9 | 10,554 |
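# Usage sketch (hypothetical vector): a normal pointing due east (+x) gives a
# vertical north-south plane.
strike, dip = normal2SD(1.0, 0.0, 0.0)
assert (strike, dip) == (180.0, 90.0)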
def append_result(results, result_hash, item):
    """Append to results, creating an index if needed."""
    if result_hash not in results:
        results[result_hash] = [item]
    else:
        results[result_hash].append(item)
    return results | 1b2af69ad291f885a8f52ce95ba7e0d82792e835 | 10,556 |
def gen_range(start, end):
    """Return a list of the numbers between |start| and |end|.
    This filters out surrogate pairs.
    """
    return [x for x in range(start, end) if x < 0xd800 or x >= 0xe000] | 009b0af854b5ad6d8a06d01a24cf08b26a644dbc | 10,559 |
def _GetStarredIssues(cnxn, logged_in_user_id, services):
    """Get the set of issues that the logged in user has starred."""
    starred_iids = services.issue_star.LookupStarredItemIDs(
        cnxn, logged_in_user_id)
    return set(starred_iids) | 6185371138da1e79673d7347770073a18f82b099 | 10,568 |
import pytz
def AdaptReadableDatetime(date_obj):
    """Adapts a datetime.datetime object to its ISO-8601 date/time notation."""
    try:
        date_obj = date_obj.astimezone(pytz.utc)
    except ValueError:
        pass  # naive datetime object
    return date_obj.isoformat() | feceafb58995001acdeb2285fa5782bec7cc756d | 10,569 |
def prompt_choice(length, select_action, per_page):
    """
    Prompt the user for a choice of entry, to continue or to quit.
    An invalid choice will repeat the prompt.
    @param length: the largest choosable value
    @param select_action: description of what choosing an entry will result in.
    @param per_page: number of results to offer next. Set to 0 to hide "next"
        option.
    """
    prompt = 'What do you want to do? [{0}] to {1}, {2}[Q]uit: '.format(
        '1' if length == 1 else '1–{length}',
        select_action,
        '[N]ext {per_page}, ' if per_page else '')
    while True:
        choice = input(prompt.format(length=length, per_page=per_page))
        try:
            int_choice = int(choice)
        except ValueError:
            int_choice = None
        if choice.lower() == 'n' and per_page:
            return None
        elif choice.lower() == 'q':
            exit(0)
        elif int_choice and (1 <= int_choice <= length):
            return int_choice
        else:
            print('Invalid choice. Try again!') | 4b957479d96e5b8c642db4e888faf92c8c9cf945 | 10,570 |
from typing import Tuple
def info_to_table(rows: list) -> Tuple[list, list]:
    """ Formats raw row data into a table format that will be used
    with other render functions. This function is where column headers
    should be defined.
    Arguments:
        rows(list): Rows of data
    Return:
        List : List of column names
        List : Full table representation of data
    """
    columns = ["Address", "Name", "Type"]
    full_table = []
    for row in rows:
        # create table
        full_table.append([row["address"], row["name"], row["type"]])
    return columns, full_table | 3f92acccb3d93aa539ef34f136c557ca3ecb324b | 10,572 |
def _get_suggestions_index(name):
    """Returns suggestions index name for a regular index name."""
    return f'df_suggestions_{name}' | 4cd2294e89f05dfbefe65ff4604c43818880d6c9 | 10,580 |
def add_two_polynomials(polynomial_1: list, polynomial_2: list) -> list:
    """
    This function expects two `polynomials` and returns a `polynomial` that contains
    their `sum`.
    :param polynomial_1: First polynomial
    :param polynomial_2: Second polynomial
    :return: A polynomial representing the sum of the two polynomials
    """
    # declaring the polynomial that will be returned (the sum)
    return_polynomial = []
    # storing the length of the shortest polynomial via the inbuilt min() function
    minimum_length = min(len(polynomial_1), len(polynomial_2))
    # adding the coefficients for every power of X up until the shortest one ends and appending the sum
    for k in range(minimum_length):
        return_polynomial.append(polynomial_1[k] + polynomial_2[k])
    # figuring out which polynomial is longer and appending all of the coefficients that are left
    if len(polynomial_1) > len(polynomial_2):
        # using the inbuilt function range() to iterate through the coefficients that are left
        for k in range(len(polynomial_2), len(polynomial_1)):
            return_polynomial.append(polynomial_1[k])
    else:
        # I intentionally checked both for '>' and '<' in order to rule out the case in which they are equal
        if len(polynomial_1) < len(polynomial_2):
            # using the inbuilt function range() to iterate through the coefficients that are left
            for k in range(len(polynomial_1), len(polynomial_2)):
                return_polynomial.append(polynomial_2[k])
    return return_polynomial | e2fed8be5f35f1c306b78b69c608f90668aeb2f1 | 10,587 |
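# Usage sketch (hypothetical coefficient lists, lowest power first):
# (1 + 2x) + (3 + 4x + 5x^2) = 4 + 6x + 5x^2.
assert add_two_polynomials([1, 2], [3, 4, 5]) == [4, 6, 5]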
def dict_from_two_lists(keys: list, values: list):
    """Creates a dictionary from a list of keys and a list of values.
    Examples:
        >>> keys = ('bztar', 'gztar', 'tar', 'xztar', 'zip')\n
        >>> values = ('.tbz2', '.tgz', '.tar', '.txz', '.zip')\n
        >>> newdict = dict_from_two_lists(keys, values)\n
        >>> pprint(newdict)\n
        {'bztar': '.tbz2',
         'gztar': '.tgz',
         'tar': '.tar',
         'xztar': '.txz',
         'zip': '.zip'}
    Args:
        keys (list): Reference the keys list
        values (list): Reference the values list
    Returns:
        dict: Returns a dictionary
    """
    result = {k: v for k, v in zip(keys, values)}
    return result | 3865a8e5a890dc00e69ea3feafc161f8617697ff | 10,590 |
def hashable_index(tuple_idx):
    """Return a hashable representation of a tuple of slice objects
    We add this because the slice object in python is not hashable.
    Parameters
    ----------
    tuple_idx : tuple
        A tuple of slice/int objects
    Returns
    -------
    ret : tuple
        A hashable representation of the slice data
    """
    l = []
    for ele in tuple_idx:
        if isinstance(ele, slice):
            l.append(ele.__reduce__())
        else:
            l.append(ele)
    return tuple(l) | e83d4db426053cc64f9ffda3b938bb85395e4741 | 10,591 |
import torch
def gaussian_kernel1d(
    kernel_size: int, sigma: float, device: torch.device, dtype: torch.dtype
):
    """1D Gaussian kernel."""
    khalf = (kernel_size - 1) / 2.0
    x = torch.linspace(-khalf, khalf, steps=kernel_size, dtype=dtype, device=device)
    pdf = torch.exp(-0.5 * (x / sigma).pow(2))
    return pdf / pdf.sum() | f76fc18500160510f162e93bb68803a36ce4633a | 10,594 |
def add_class(add, class_):
    """Add to a CSS class attribute.
    The string `add` will be added to the classes already in `class_`, with
    a space if needed. `class_` can be None::
        >>> add_class("foo", None)
        'foo'
        >>> add_class("foo", "bar")
        'bar foo'
    Returns the amended class string.
    """
    if class_:
        class_ += " "
    else:
        class_ = ""
    return class_ + add | b5f12ea7a5c573b65ebbd84d987a5de5090e33d0 | 10,602 |
import re
def to_lower_camel_case(str_to_convert):
    """
    This function will convert any string with spaces or underscores to a lower camel case string
    :param str_to_convert: target string
    :return: converted string
    """
    if type(str_to_convert) is not str:
        raise TypeError("The method only takes str as its input")
    str_to_convert = str_to_convert.replace("_", " ")
    tmp = re.split(r'\s|-', str_to_convert)
    return "".join([item.lower() for i, item in enumerate(tmp) if i == 0] +
                   [item.capitalize() for i, item in enumerate(tmp) if i != 0]) | 8bfd591fcbfcff51b463596266cba1403f2d6153 | 10,603 |
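# Usage sketch (hypothetical inputs): spaces, underscores, and hyphens all act
# as word boundaries.
assert to_lower_camel_case("hello world") == "helloWorld"
assert to_lower_camel_case("snake_case-name") == "snakeCaseName"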
def ordinal_suffix(n):
    """Return the ordinal suffix for a positive integer
    >>> ordinal_suffix(0)
    ''
    >>> ordinal_suffix(1)
    'st'
    >>> ordinal_suffix(2)
    'nd'
    >>> ordinal_suffix(3)
    'rd'
    >>> ordinal_suffix(4)
    'th'
    >>> ordinal_suffix(11)
    'th'
    >>> ordinal_suffix(12)
    'th'
    >>> ordinal_suffix(13)
    'th'
    >>> ordinal_suffix(21)
    'st'
    >>> ordinal_suffix(22)
    'nd'
    >>> ordinal_suffix(23)
    'rd'
    >>> ordinal_suffix(101)
    'st'
    >>> ordinal_suffix(111)
    'th'
    >>> ordinal_suffix(112)
    'th'
    >>> ordinal_suffix(113)
    'th'
    >>> ordinal_suffix(121)
    'st'
    >>> ordinal_suffix(1111)
    'th'
    >>> ordinal_suffix(1322)
    'nd'
    >>> ordinal_suffix('horse')
    ''
    """
    try:
        n = int(n)
    except Exception:
        return ''
    if n < 1:
        return ''
    elif n >= 100:
        return ordinal_suffix(n % 100)
    elif 11 <= n <= 13:
        return 'th'
    elif n % 10 in (1, 2, 3):
        return ('st', 'nd', 'rd')[n % 10 - 1]
    else:
        return 'th' | 53617737aaf28c2d239301358f01d1b0cea9f6cb | 10,604 |
def _strftime(d):
    """
    Format a date the way Atom likes it (RFC3339?)
    """
    return d.strftime('%Y-%m-%dT%H:%M:%SZ%z') | 1eebf1bff9c68ba4649f1377f16b4b9feb737f01 | 10,608 |
def merge_consecutive_timestamps(timestamps):
    """
    Merges consecutive timestamps in a list if they're less than 2 seconds apart
    Example: [(0, 5), (5, 10), (20, 30)] gets combined into [(0, 10), (20, 30)]
    """
    result = []
    i = 0
    while i < len(timestamps):
        (start, end) = timestamps[i]
        # check if we're not already at the last element
        if i < len(timestamps) - 1:
            (start_next, end_next) = timestamps[i + 1]
            # merge if less than 2 seconds apart
            if abs(end - start_next) < 2:
                result.append((start, end_next))
                i += 1
            else:
                result.append((start, end))
        else:
            result.append((start, end))
        i += 1
    return result | 9ec3817dd62771d7269892ae590a160ea581fa53 | 10,617 |
import torch
def hard_sigmoid(tensor: torch.Tensor, inplace: bool = False) -> torch.Tensor:
    """
    Applies HardSigmoid function element-wise.
    See :class:`torchlayers.activations.HardSigmoid` for more details.
    Arguments:
        tensor :
            Tensor activated element-wise
        inplace :
            Whether operation should be performed `in-place`. Default: `False`
    Returns:
        torch.Tensor:
    """
    return torch.nn.functional.hardtanh(tensor, min_val=0, inplace=inplace) | 5f4d87749ddca014076f46e0af6e9b3c4308ddf7 | 10,621 |
import re
def scan(file, blocksize=10000):
    """Get the number of molecules in an sd file"""
    pat = re.compile("^[$][$][$][$]", re.MULTILINE)
    text = file.read(blocksize)
    count = 0
    while text:
        g = pat.findall(text)
        count += len(g)
        if text[-1] == "$" and text[-4] != '$':
            # a "$$$$" delimiter may straddle the block boundary; carry the
            # tail over into the next block so it is not missed
            next = text[-6:]
            text = "".join([next, file.read(blocksize)])
        else:
            text = file.read(blocksize)
    return count | e3b1af1e65e28146d609a0c2841f195ad783b59e | 10,622 |
def display_time(seconds, granularity=1):
    """ Turns seconds into days, hours, minutes and seconds.
    Granularity determines how many time units should be returned. E.g.:
        # 2 time units: day and hour
        93784, 2 = '1 day 2 hours'
        # 4 time units
        93784, 4 = '1 day 2 hours 3 minutes 4 seconds'
    """
    result = []
    intervals = (
        # weeks weren't necessary so I removed them
        ('days', 86400),    # 60 * 60 * 24
        ('hours', 3600),    # 60 * 60
        ('minutes', 60),
        ('seconds', 1),
    )
    for name, count in intervals:
        value = seconds // count
        if value:
            seconds -= value * count
            if value == 1:
                name = name.rstrip('s')
            result.append("{} {}".format(value, name))
    return ' '.join(result[:granularity]) | d4b94ffafcdbda99526ecc906ed8379d7fc5edab | 10,624 |
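# Usage sketch (hypothetical value): 93784 seconds is 1 day, 2 hours,
# 3 minutes and 4 seconds; the default granularity keeps only the largest unit.
assert display_time(93784, granularity=4) == '1 day 2 hours 3 minutes 4 seconds'
assert display_time(93784) == '1 day'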
def join_values(values):
    """Return the values as a space-delimited string."""
    return " ".join((str(v) for v in values)) | 60fd4acbafc4619c134ae67d224df408cad12ab0 | 10,626 |