content
stringlengths
39
14.9k
sha1
stringlengths
40
40
id
int64
0
710k
def add_base32_padding(base32_input_no_padding):
    """Return a base32 string padded with '=' to a multiple of 8 chars.

    Parameters:
        base32_input_no_padding (str): base32 string without padding
    """
    padded = base32_input_no_padding
    remainder = len(padded) % 8
    if remainder:
        padded += "=" * (8 - remainder)
    return padded
9f98d4c705b5932171954aee3fb90c34681ea7fc
10,627
def format_number(number):
    """
    Round number to two decimals.

    :param number: input number
    :return: number rounded to two decimals, as a string
    """
    return f"{float(number):.2f}"
621e48e46aef43c6ede38b62a3608b8e193d7df7
10,629
import math


def quantity_string(quantity, unit, computer_prefix=False):
    """Return a human-friendly string representing a quantity by adding
    prefixes and keeping the number of significant figures low.

    'computer_prefix' determines whether each prefix step represents 1024
    or 1000.

    Examples:
    >>> quantity_string(1024, "B", True)
    '1.0KB'
    >>> quantity_string(40000, "m", False)
    '40km'
    >>> quantity_string(0.01, "m", False)
    '0.010m'
    """
    if quantity == 0:
        return "0%s" % unit

    # Units like m, B, and Hz are typically written right after the number.
    # But if your unit is "file" or "image" then you'll want a space between
    # the number and the unit.
    if len(unit) > 2:
        space = " "
    else:
        space = ""

    if computer_prefix:
        prefixes = ["", "K", "M", "G", "T"]
        prefix_multiplier = 1024
    else:
        prefixes = ["", "k", "M", "G", "T"]
        prefix_multiplier = 1000

    divisor = 1
    for p in prefixes:
        # int() truncates toward zero, so for quantities < 1 `digits` goes
        # to 0 or negative, which still passes the <= 3 test and yields
        # extra decimal places via max(2 - digits, 0) below.
        # NOTE(review): assumes quantity > 0 — a negative quantity would
        # raise a math domain error in log10; confirm callers never pass one.
        digits = int(math.log10(quantity / divisor)) + 1
        if digits <= 3:
            # Keep roughly 3 significant figures: 2 - digits decimals,
            # clamped at 0 decimals for 3-digit values.
            format = "%%.%df%s%s%s" % (max(2 - digits, 0), space, p, unit)
            return format % (float(quantity) / divisor)
        divisor *= prefix_multiplier

    # No prefix available. Go scientific.
    return "%.2e%s%s"% (quantity, space, unit)
04aa8743e045c2aaf80115984c30f091d695de77
10,633
def vector_to_dictionary(vector, layers):
    """Convert a flat parameter vector of a model back into a dictionary.

    Arguments:
    vector -- one-dimensional vector in orders: "W1", "W2", "WL", "b1",
              "b2", "bL"
    layers -- list of (n_uints, activations) pairs that define network
              structure, including input layer X

    Returns:
    ret -- parameter dictionary, {"W1": ..., "WL": ..., "b1": ..., "bL": ...,
           "r1": ..., "rL": ...}
    """
    params = {}
    offset = 0
    n_layers = len(layers)

    # The vector stores every W first, then every b, then every r, so the
    # three passes must run in exactly this order.
    for prefix in ("W", "b", "r"):
        for layer in range(1, n_layers):
            rows = layers[layer][0]
            cols = layers[layer - 1][0] if prefix == "W" else 1
            count = rows * cols
            chunk = vector[offset:offset + count].copy()
            params[prefix + str(layer)] = chunk.reshape((rows, cols))
            offset += count
    return params
33f628463546892ae8127cad25bda1e7ae19a78a
10,637
def spawn_dates_times(df, spawn_dates=True, spawn_times=False):
    """
    Add date and/or time component columns derived from a datetime index.

    :param df: (pd.DataFrame) - dataframe with datetime index
    :param spawn_dates: (boolean) - whether to spawn year, month, day cols
    :param spawn_times: (boolean) - whether to spawn hour, minute, second cols
    :return df: (pd.DataFrame) - dataframe with datetime index
    """
    if spawn_dates:
        idx = df.index
        df = df.assign(year=idx.year, month=idx.month, day=idx.day)
    if spawn_times:
        idx = df.index
        df = df.assign(hour=idx.hour, minute=idx.minute, second=idx.second)
    return df
ee58b5117d65fa3f217b16973dcdc06918c5474b
10,645
def test_metathesis(word, anagram): """ Tests if a word and an anagram are a metathesis pair This is only true if the words are I. Anagrams II. Differ in two places """ count = 0 for para in zip(word, anagram): if para[0] != para[1]: count += 1 return count == 2
c26090d92b526ce7d2a84138e4248c884a19f5bc
10,652
def standardize(ds, dim="time"):
    """Standardize Dataset/DataArray to zero mean and unit standard deviation.

    .. math::
        \\frac{x - \\mu_{x}}{\\sigma_{x}}

    Args:
        ds (xarray object): Dataset or DataArray with variable(s) to
            standardize.
        dim (optional str): Which dimension to standardize over
            (default 'time').

    Returns:
        stdized (xarray object): Standardized variable(s).
    """
    mean = ds.mean(dim)
    std = ds.std(dim)
    return (ds - mean) / std
99de2a60e340a7ad451daec27aad028485d4aa59
10,653
def make_range_partition(min_val, max_val):
    """
    Returns a new partitioning function that partitions keys in the range
    *[min_val:max_val]* into equal sized partitions.

    The number of partitions is defined by the *partitions* parameter
    (the second element of the tuple passed to the returned function).
    """
    span = max_val - min_val

    # A real closure replaces the original eval()-built lambda string:
    # identical call signature (a (key, num_partitions, ...) tuple) and
    # arithmetic, but without generating and eval'ing source code.
    def partitioner(k_n_p):
        return int(round(float(int(k_n_p[0]) - min_val) / span * (k_n_p[1] - 1)))

    return partitioner
6b9c15f93312a913dff432f0954e409b69f97572
10,659
def completeTreeNames(tree, useBS = False ) :
    """Assign a name to every unnamed node of the tree.

    Takes:
        - tree (ete3.Tree)
        - useBS (bool) [default = False] : uses bootstrap to name nodes

    Returns:
        (ete3.Tree) : the tree, but where the nodes without a name now have
        one that correspond to their post-order OR their bootstrap
    """
    for i, n in enumerate(tree.traverse('postorder')):
        if n.name == "":
            # Removed a leftover debug print of n.support that polluted
            # stdout on every unnamed node.
            if useBS:
                n.name = str(int(n.support))
            else:
                n.name = str(i)
    return tree
07077fce3ea18ba40af578cd1b0768a71e7b16c8
10,664
def read_raw_parse(path):
    """
    Read GamParse's forum output into a list.

    :param path: path to a file containing GamParse output
    :return: a list containing the lines of the input file
    """
    with open(path, 'r') as source:
        contents = source.read()
    return contents.splitlines()
26926ca6a164c29681a0cf45a4114ffc5cff0fa9
10,668
import math


def position_check(inlat, inlon):
    """
    Simple check to make sure that the latitude and longitude are within
    the bounds specified by the ICOADS documentation. Latitude is between
    -90 and 90. Longitude is between -180 and 360.

    :param inlat: latitude
    :param inlon: longitude
    :type inlat: float
    :type inlon: float
    :return: 1 if either latitude or longitude is invalid, 0 otherwise
    :return type: integer
    """
    assert inlat is not None and not (math.isnan(inlat))
    assert inlon is not None and not (math.isnan(inlon))

    lat_ok = -90 <= inlat <= 90
    lon_ok = -180 <= inlon <= 360
    result = 0 if (lat_ok and lon_ok) else 1

    assert result == 1 or result == 0
    return result
7f9e6d92667cd81ad02b034fcba231fd4763f966
10,669
def normalize_email(email):
    """
    Email Address Normalization.

    1. Remove leading and trailing spaces.
    2. Convert all ASCII characters to lowercase.
    3. In gmail.com email addresses, remove the following characters from
       the username part of the email address:
        1) The period (. (ASCII code 46)).
        2) The plus sign (+ (ASCII code 43)) and all subsequent characters.
    """
    cleaned = email.strip().lower()
    user, domain = cleaned.split("@")
    if domain == "gmail.com":
        user = user.replace(".", "").partition("+")[0]
        cleaned = user + "@" + domain
    return cleaned
e128952a38cd699dffa55de0e486ee6bb245697a
10,670
def urljoin(url, suffix=""):
    """
    Will join url and its suffix

    Example:
        "https://google.com/", "/"    => "https://google.com/"
        "https://google.com", "/"     => "https://google.com/"
        "https://google.com", "api"   => "https://google.com/api"
        "https://google.com", "/api"  => "https://google.com/api"
        "https://google.com/", "api"  => "https://google.com/api"
        "https://google.com/", "/api" => "https://google.com/api"

    :type url: ``string``
    :param url: URL string (required)

    :type suffix: ``string``
    :param suffix: the second part of the url

    :rtype: ``string``
    :return: Full joined url
    """
    # Ensure exactly one "/" separates the two parts.
    if url[-1:] != "/":
        url = url + "/"
    if suffix.startswith("/"):
        suffix = suffix[1:]
    # The original contained a second, unreachable `return url + suffix`
    # inside the startswith branch; it has been removed.
    return url + suffix
cd8a81d7b427678330d1258fa5644f9d4cf631a0
10,671
def get_dot_file_path(gname):
    """
    For a graph named gname, this method returns the path to its dot file
    in the dot_atlas directory.

    Parameters
    ----------
    gname : str

    Returns
    -------
    str
    """
    return "dot_atlas/good_bad_trols_%s.dot" % gname
4ead9a2c3656718a8c088e879742962a9782ef02
10,677
def _invert_signs(signs): """ Shall we invert signs? Invert if first (most probable) term is negative. """ return signs[0] < 0
4135340cfbeb4fce67513a160b63304a3199cf1a
10,678
def parse_directive(source_text, directive):
    """
    <Purpose>
      Given the file source, 'source_text', this function will search for
      the given pragma directive.

    <Arguments>
      source_text: The source in which we are searching for a pragma
        directive.
      directive: The pragma directive we are searching for.

    <Exceptions>
      None

    <Side Effects>
      None

    <Returns>
      Return all relevant information for the specified directive:
      [(Directive, Type, Argument)... ]
    """
    marker = '#' + directive
    matches = []
    for raw_line in source_text.splitlines():
        if not raw_line.startswith(marker):
            continue
        remainder = raw_line[len(marker):].strip()
        pragma_type, _, arg = remainder.partition(' ')
        matches.append((directive, pragma_type, arg))
    return matches
1fb538a75a530ff5c9d368dcc3601be0419fc150
10,680
def safe_xml_tag_name(
    name: str, numeric_prefix: str = "tag-", empty_fallback: str = "empty-tag"
) -> str:
    """
    Returns a safe xml tag name by replacing invalid characters with a dash.

    :param name: The name that must be converted to a safe xml tag name.
    :param numeric_prefix: An xml tag name can't start with a number, so if
        that is the case, then this value will be prepended.
    :param empty_fallback: An xml tag name can't be empty, so if that is the
        case, then this fallback value will be returned.
    :return: A safe name that can be used as xml tag.
    """
    # Replace every non-alphanumeric character with a dash.
    candidate = "".join(c if c.isalnum() else "-" for c in name)

    # Collapse runs of dashes into a single dash.
    while "--" in candidate:
        candidate = candidate.replace("--", "-")

    # After collapsing there is at most one dash at each end.
    candidate = candidate.strip("-")

    if candidate and candidate[0].isnumeric():
        candidate = f"{numeric_prefix}{candidate}"
    return candidate if candidate else empty_fallback
fea34367fbc7f2a4b9dbe23d11c70ecb75dad3da
10,681
def tcl_prep_otaver(ota=None):
    """
    Prepare variables for OTA versus full check.

    :param ota: The starting version if OTA, None if not. Default is None.
    :type ota: str

    :return: (mode, fvver) tuple — mode 2 with the given version for OTA,
        mode 4 with the base version for a full check.
    """
    if ota is None:
        return 4, "AAA000"
    return 2, ota
b42c9af8abd9b6c2b361d906737429acf967182b
10,684
def get_name_from_key(key) -> str:
    """Given a dask collection's key, extract the collection name.

    Parameters
    ----------
    key: string or tuple
        Dask collection's key, which must be either a single string or a
        tuple whose first element is a string (commonly referred to as a
        collection's 'name'),

    Examples
    --------
    >>> get_name_from_key("foo")
    'foo'
    >>> get_name_from_key(("foo-123", 1, 2))
    'foo-123'
    """
    if isinstance(key, str):
        return key
    if isinstance(key, tuple) and key and isinstance(key[0], str):
        return key[0]
    raise TypeError(f"Expected str or tuple[str, Hashable, ...]; got {key}")
8a5b46a85000325932c043eb4a94864fef2d6dd4
10,688
def _to_int_or_none(value):
    """Coerce *value* to int; unparsable strings map to None."""
    try:
        return int(value)
    except (ValueError):
        return None


def process(proc_data):
    """
    Final processing to conform to the schema.

    Parameters:
        proc_data: (Dictionary) raw structured data to process

    Returns:
        Dictionary. Structured data with the following schema:
        {
          "uid":     {"id": integer, "name": string},
          "gid":     {"id": integer, "name": string},
          "groups":  [{"id": integer, "name": string}, ...],
          "context": {"user": string, "role": string,
                      "type": string, "level": string}
        }
    """
    # The three original near-identical try/except blocks are collapsed
    # into one helper; behavior (int or None on ValueError) is unchanged.
    for key in ('uid', 'gid'):
        if key in proc_data and 'id' in proc_data[key]:
            proc_data[key]['id'] = _to_int_or_none(proc_data[key]['id'])

    for group in proc_data.get('groups', []):
        if 'id' in group:
            group['id'] = _to_int_or_none(group['id'])

    return proc_data
b97d35b93ea08d6adcd69e0fa84c9b59be8d4419
10,691
def get_tag_type(tagtype, pairs):
    """
    Given a list of (word, tag) pairs, return a list of words whose tag
    starts with *tagtype* (e.g. 'NN', 'JJ', 'VB').
    """
    words = []
    for word, tag in pairs:
        if tag.startswith(tagtype):
            words.append(word)
    return words
383515704788e0fd6bcfd7d7f21e77be18397163
10,692
def read_data_from(file_: str) -> list:
    """Read boarding pass data from file, one entry per line."""
    with open(file_, "r") as handle:
        contents = handle.read()
    return contents.splitlines()
7315c4c284cdd2e9e1b66776c26eccabe13acdb4
10,693
import ast


def get_auxiliary_name(node: ast.AST, aux_symbol_id: str) -> str:
    """
    Generates a name for auxiliary variables.

    :param node: the ast node that originates the auxiliary symbol
    :param aux_symbol_id: the id name of the auxiliary symbol
    :return: the unique name to the symbol.
    """
    return f"{aux_symbol_id}_{id(node)}"
d67fc8d70553265a7d5345e97877e07caf36e19a
10,695
def search_for_pod_name(details: dict, operator_id: str):
    """Get operator pod name.

    Args:
        details (dict): workflow manifest from pipeline runtime
        operator_id (str): operator id

    Returns:
        dict: id and status of pod; None when no matching node is found
    """
    try:
        status = details['status']
        if 'nodes' in status:
            for node in status['nodes'].values():
                if node['displayName'] == operator_id:
                    return {
                        'name': node['id'],
                        'status': node['phase'],
                        'message': node['message'],
                    }
    except KeyError:
        # Malformed manifests fall through to an implicit None.
        pass
cc5bc532a1875145452fbe71cca54f840257c90d
10,696
import inspect


def is_mod_class(mod, cls):
    """Checks if a class in a module was declared in that module.

    Args:
        mod: the module
        cls: the class
    """
    if not inspect.isclass(cls):
        return False
    return inspect.getmodule(cls) == mod
7a9b228995d2bf46467ef75823a1aad26d16df0e
10,706
from datetime import datetime


def getDateTime(timestamp):
    """
    Converts a millisecond timestamp to a datetime.

    :param timestamp: string (or integer) value
    :return: datetime value
    """
    seconds = int(timestamp) / 1e3
    return datetime.fromtimestamp(seconds)
2197806bf7372305cd048295c8c63a0269625262
10,707
def _get_mass_dict(factor=1000000000, type=int):
    """
    Return a Dictionary containing the masses of each aminoacid

    We explicitly convert them by a factor of 1 000 000 000 (default) into
    integers

    The values are taken from:
    https://proteomicsresource.washington.edu/protocols06/masses.php

    :param factor: multiplier applied to every mass before conversion;
        with the default 10**9 the masses are stored as integers to avoid
        float accumulation error.
    :param type: callable used to convert each scaled mass.
        NOTE(review): this parameter shadows the builtin ``type``; kept as
        is for interface compatibility.
    :return: dict mapping a one-letter amino-acid code to a
        (MONO_MASS, AVG_MASS) tuple.
    """
    return dict(
        # In format: AA = (MONO_MASS, AVG_MASS)
        G=(type(57.021463735 * factor), type(57.05132 * factor)),
        A=(type(71.037113805 * factor), type(71.0779 * factor)),
        S=(type(87.032028435 * factor), type(87.0773 * factor)),
        P=(type(97.052763875 * factor), type(97.11518 * factor)),
        V=(type(99.068413945 * factor), type(99.13106 * factor)),
        T=(type(101.047678505 * factor), type(101.10388 * factor)),
        C=(type(103.009184505 * factor), type(103.1429 * factor)),
        L=(type(113.084064015 * factor), type(113.15764 * factor)),
        I=(type(113.084064015 * factor), type(113.15764 * factor)),
        N=(type(114.042927470 * factor), type(114.10264 * factor)),
        D=(type(115.026943065 * factor), type(115.0874 * factor)),
        Q=(type(128.058577540 * factor), type(128.12922 * factor)),
        K=(type(128.094963050 * factor), type(128.17228 * factor)),
        E=(type(129.042593135 * factor), type(129.11398 * factor)),
        M=(type(131.040484645 * factor), type(131.19606 * factor)),
        H=(type(137.058911875 * factor), type(137.13928 * factor)),
        F=(type(147.068413945 * factor), type(147.17386 * factor)),
        U=(type(150.953633405 * factor), type(150.3079 * factor)),
        R=(type(156.101111050 * factor), type(156.18568 * factor)),
        Y=(type(163.063328575 * factor), type(163.17326 * factor)),
        W=(type(186.079312980 * factor), type(186.2099 * factor)),
        O=(type(237.147726925 * factor), type(237.29816 * factor)),
        # Special Aminoacids
        J=(type(113.084064015 * factor), type(113.1594 * factor)),
        X=(type(0.0 * factor), type(0.0 * factor)),  # Unknown Amino Acid
        Z=(type(128.55059 * factor), type(128.6231 * factor)),
        B=(type(114.53495 * factor), type(114.5962 * factor)),
        # Custom start and end points
        __start__=(type(0), type(0)),
        __end__=(type(0), type(0)),
    )
fabdf445765acb1bde082ba63df9b22b06669ab9
10,714
import hmac


def is_authenticated(request, secret):
    """
    Verify whether the user is authenticated.

    Args:
        request (tornado.httputil.HTTPRequest): The request
        secret (str): The secret to use for authentication
    """
    # See https://api.slack.com/authentication/verifying-requests-from-slack for more info
    timestamp = request.headers["X-Slack-Request-Timestamp"]
    body = request.body.decode()
    basestring = "v0:{}:{}".format(timestamp, body).encode()
    mac = hmac.new(key=secret.encode(), msg=basestring, digestmod="sha256")
    expected = ("v0=" + mac.hexdigest()).encode()
    provided = request.headers["X-Slack-Signature"].encode()
    # Constant-time comparison prevents timing side channels.
    return hmac.compare_digest(expected, provided)
4d8915018f5d4e97934a581a79bb935533714817
10,716
def chi2_fun(theta, parameters_to_fit, event):
    """
    Calculate chi2 for given values of parameters.

    Keywords :
        theta: *np.ndarray*
            Vector of parameter values, e.g.,
            `np.array([5380., 0.5, 20.])`.

        parameters_to_fit: *list* of *str*
            List of names of parameters corresponding to theta, e.g.,
            `['t_0', 'u_0', 't_E']`.

        event: *MulensModel.Event*
            Event which has datasets for which chi2 will be calculated.

    Returns :
        chi2: *float*
            Chi2 value for given model parameters.
    """
    # Push the trial parameter values into the model, then let the event
    # object do the chi2 computation.
    model_parameters = event.model.parameters
    for name, value in zip(parameters_to_fit, theta):
        setattr(model_parameters, name, value)
    return event.get_chi2()
14f74f3cf64770dc1cb7e335880f445eb75ca007
10,719
def link(content, target):
    """Corresponds to ``[content](target)`` in the markup.

    :param content: HTML that will go inside the tags.
    :param target: a full URL, or a local ``filename.html#subtitle`` URL
    """
    return '<a href="{}">{}</a>'.format(target, content)
c0355f78db31edccf7e904b3696a169980fa796b
10,720
def get_version_details(path):
    """Parses version file.

    :param path: path to version file
    :return: version details as a dict (dunder keys stripped)
    """
    with open(path, "r") as reader:
        lines = reader.readlines()

    details = {}
    for line in lines:
        parts = line.split(" = ")
        key = parts[0].replace("__", "")
        details[key] = parts[1].strip().replace("'", "")
    return details
6ea7019e4e39b5c315e085369c6ab2bd6729d6bb
10,727
def size_to_bytes(size, largeur=6):
    """Convert a size into a bytes string with a K, M, G or T suffix.

    ``largeur`` is the minimum field width of the numeric part.
    """
    tera = 1073741824. * 1024.
    if size > tera:
        return b"%*.2fT" % (largeur, size / tera)
    if size > 1073741824:
        return b"%*.2fG" % (largeur, size / 1073741824.)
    if size > 1048576:
        return b"%*.2fM" % (largeur, size / 1048576.)
    if size > 1024:
        return b"%*.2fK" % (largeur, size / 1024.)
    return b"%*dB" % (largeur, size)
f05e74d89b710936a8253f13b9e6d804f9004b6b
10,728
def excess_entropy_fast(text: str, H_single, H_pair):
    """
    Calculates excess entropy of given string in O(n) time complexity.

    :param text: an input tokenized string
    :param H_single: a function that calculates H(i, x_i)
    :param H_pair: a function that calculates
        H(x_i | x_{i-1}) = H(i, x_{i-1}, x_i)
    :return: a float value which is equal to excess entropy of given
        input string
    """
    return sum(
        H_single(i + 1, text[i + 1]) - H_pair(i + 1, text[i], text[i + 1])
        for i in range(len(text) - 1)
    )
1e590f7577fa9b9185160eea26d3900476f56bf0
10,737
from typing import Set import ast def _h5attr2set(attr: str) -> Set[str]: """Convert an HDF5 attribute to a list of strings""" if not attr or attr == "set()": return set() return ast.literal_eval(attr)
55aa07126efe42fa1f3437ce6206e72db58c7fd3
10,738
def velmodellayers_sdsu(ifile):
    """
    Input a SDSU type velocity model file and return number of layers
    as defined by SDSU.

    This is designed for use in the SDSU code which required the number
    of layers in a file.
    """
    # Context manager guarantees the file is closed even if iteration
    # fails (the original left the handle open on error).
    with open(ifile, "r") as infile:
        lincount = sum(1 for _ in infile)
    # The first two lines are treated as header; files with fewer than
    # 3 lines contain no layers.
    return lincount - 2 if lincount >= 3 else 0
922f9443b3b30cbe58639ab51ab9f183205d0dec
10,739
from typing import Any def to_qualified_name(obj: Any) -> str: """ Given an object, returns its fully-qualified name, meaning a string that represents its Python import path Args: - obj (Any): an importable Python object Returns: - str: the qualified name """ return obj.__module__ + "." + obj.__qualname__
45824d1f84a96f254274e7fe40f2ed9546ccb346
10,740
def get_index(search, names):
    """
    Find the index of *search* in *names*, a list of 'Key|Value' strings.

    Matching compares against the Key part only; returns None if absent.
    """
    for position, entry in enumerate(names):
        key = entry.split('|')[0]
        if key == search:
            return position
    return None
fbfc6b71b75172e2980a604f53e602c9b3cb9a84
10,742
def _map_args(repo, args): """ Maps a set of arguments to a predefined set of values. Currently only __REPO__ is support and will be replaced with the repository name. :param repo: The repo name used for mapping. :type repo: str :param args: An array of arguments to map. :type args: list :rtype: list """ arg_map = {'__REPO__': repo} mapped_args = [] for arg in args: mapped_args.append(arg_map.get(arg, arg)) return mapped_args
0e31510a764c3f6dca4726daa4e8716bdc7328db
10,753
def R_curv(deltaT_sub, r_min, radius, Q_drop):
    """Thermal resistance due to drop curvature.

    Parameters
    ----------
    deltaT_sub: float
        temperature difference to the cooled wall in K
    r_min: float
        minimum droplet radius in m
    radius: float
        radius of drop in m
    Q_drop: float
        rate of heat flow through drop in W

    Returns
    ----------
    R_curv: float
        thermal resistance due drop curvature in K/W
    """
    curvature_term = deltaT_sub * r_min / radius
    return curvature_term / Q_drop
176520b43184e879bb25bcecc50e64e6dabaa6cc
10,754
def _decode_list(vals): """ List decoder """ return [val.decode() if hasattr(val, 'decode') else val for val in vals]
852629abaa25e5bc4f2388b273a0676d3afb8337
10,757
from pathlib import Path


def get_toplevel_dirpath(path):
    """
    Provide the top level directory for the given path.

    The top level directory contains the ``controls.json`` file.
    This function returns ``None`` if a top level path can not be found.

    :param path: absolute or relative path to a file or directory.
    :returns: the absolute path to the top level directory.
    """
    if path is None:
        return None
    resolved = Path(path).resolve()
    candidates = list(resolved.parents)
    if resolved.is_dir():
        candidates.insert(0, resolved)
    # Walk from the innermost directory outward, skipping the filesystem
    # root (the last parent), looking for controls.json.
    for candidate in candidates[:-1]:
        if Path(candidate, 'controls.json').is_file():
            return str(candidate)
    return None
324df1ceda2bf5813d65f86f6319dde8819f8fa3
10,759
import pathlib def _detect_home_location() -> pathlib.Path: """Detects the location of the root directory""" # path/to/home/backend/core/config_loader.py path_to_self = pathlib.Path(__file__).absolute() # path/to/home/backend/core/ path_to_core_module = path_to_self.parent # path/to/home/backend/ path_to_backend_module = path_to_core_module.parent # path/to/home/ path_to_home = path_to_backend_module.parent return path_to_home
c309656d5a56261fd96c86c179947981dc65dc58
10,763
def normalize_numpy(img_256_arr):
    """
    Normalizes an image NumPy array so its values lie in the range [0, 1]

    Args:
        img_256_arr: a NumPy array (intended to be 2D or 3D) whose values
            lie in the range [0, 255], representing an image

    Returns:
        A NumPy array, with the same dimensions as the input, whose values
        lie in [0, 1]
    """
    scale = 255.0
    return img_256_arr / scale
50fa2854c53a975487d501e6a6703b63e82def86
10,772
from pathlib import Path


def read_image_scipy2(input_filename: Path) -> np.array:  # type: ignore
    """
    Read an image file with scipy and return a numpy array.

    :param input_filename: Source image file path.
    :return: numpy array of shape (H, W), (H, W, 3).
    """
    # np.float was deprecated in NumPy 1.20 and removed in 1.24; float
    # (i.e. np.float64) is the documented drop-in replacement.
    numpy_array = imageio.imread(input_filename).astype(float)  # type: ignore
    return numpy_array
54d3aa7d8a3043e5a79e2668be1e971b543669d9
10,780
def format_secret(secret):
    """
    Format secret to compatible decrypt string.

    Args:
        secret (string): KMS secret hash

    Returns:
        formatted ef resolvable KMS decrypt string

    Raises:
        None
    """
    return f"{{{{aws:kms:decrypt,{secret}}}}}"
274a0686db07621d657ebc29eda21ec18c1d0afa
10,787
import torch


def le(a, b, square=False, **kwargs):
    """
    Encodes the loss function for "a <= b".

    If square is false d = |a - b| is used, else d = (a - b)^2.
    """
    diff = a - b
    if square:
        # sign(d) * d * d keeps the square signed, so violations (a > b)
        # stay positive while satisfied regions clamp to zero.
        return torch.clamp(diff.sign() * diff * diff, min=0)
    return torch.clamp(diff, min=0)
02c8a2f5255255f754be6335a7ed8c55a1910192
10,788
def process_stn_activation(df, side):
    """
    Calculates STN activation percentage.

    :param df: dataframe containing STN volume and active STN volume for
        patients
    :param side: side of the brain 'L' or 'R'
    :return: STN activation percentage for each side for all patients
    """
    ordered = df.sort_values(['Patient'])
    stn_volume = ordered['STN vol (' + side + ') [mm3]']
    active_volume = ordered['VTA inside STN (' + side + ') [mm3]']
    return (active_volume / stn_volume) * 100
6ed2a21e547a0ed9f35a77fed02e6f4fbf59cb6a
10,801
def greatest_common_divisor(larger_num, smaller_num):
    """This function uses Euclid's algorithm to calculate the Greatest
    Common Divisor of two non-negative integers.

    pre: larger_num & smaller_num are both non-negative integers, and
         larger_num > smaller_num
    post: returns the greatest common divisor of larger_num & smaller_num
    """
    a, b = larger_num, smaller_num
    while b:
        a, b = b, a % b
    return a
70db69b222f1a4a0d395e9bb1ff87ef031dbbbd6
10,803
def normalize(data):
    """
    Normalize data set to have zero mean and unit variance.

    Args:
        data: A numpy array of arrays containing input or target data.

    Returns:
        A normalized numpy array of arrays.
    """
    # Bug fix: unit variance requires dividing by the standard deviation;
    # the previous code divided by data.var(axis=0), which yields neither
    # unit variance nor unit std unless var == 1.
    return (data - data.mean(axis=0)) / data.std(axis=0)
d09f7bc81c5f6c5e1c593836bd758a3553f243ca
10,809
def coord(x_coordinate = 0, y_coordinate = 0):
    """Form a coordinate string '(x,y)' from x and y integers."""
    return "({},{})".format(x_coordinate, y_coordinate)
65bb18ffefdaaa13b30d5237b9061b053a9e8a9d
10,811
def score_filter(predictions, min_score):
    """Remove prediction bounding boxes with probability under a threshold.

    Parameters
    ----------
    predictions : dict
        all predictions
    min_score : int
        threshold score

    Returns
    -------
    dict
        filtered predictions
    """
    kept = [
        feature
        for feature in predictions['features']
        if feature['properties']['score'] >= min_score
    ]
    return {'type': predictions['type'], 'features': kept}
28f8c0604f3dabc76ffbda911357d0bdd5bd5331
10,815
import struct


def decode_option(packet, offset):
    """Decode a navdata option.

    Returns (option_id, payload_bytes, offset_of_next_option); the size
    field in the header covers the header itself plus the payload.
    """
    header = struct.Struct("HH")
    id_nr, size = header.unpack_from(packet, offset)
    end_offset = offset + size
    payload = packet[offset + header.size:end_offset]
    return id_nr, payload, end_offset
c0ada56ba1f227a9db74b6d090496b3cfdbb1041
10,817
import math


def getsteps(tspan, h):
    """
    Given a timespan and a timestep h, return the number of steps.
    """
    start, end = tspan
    return int(math.floor((end - start) / h))
a10e0a4302352be6eafb9fad8e00005b9c79dd27
10,818
from typing import List
import random
import string


def get_addresses(n=50) -> List[dict]:
    """
    Generate *n* random test addresses.
    """
    def _random_address() -> dict:
        # The RNG call order (street, postcode, country, phone) is kept
        # stable so seeded runs stay reproducible.
        street_no = random.randint(10, 100)
        return {
            "name": "John Doe",
            "companyName": "Test Co",
            "streetAddress": "{} Test St".format(street_no),
            "postCode": str((random.randrange(10**4, 10**5))),
            "city": "Test City",
            "state": "Test State",
            "country": "".join(random.choices(string.ascii_uppercase, k=2)),
            "phoneNumber": "+{}".format(random.randrange(10**9, 10**10)),
        }

    return [_random_address() for _ in range(n)]
e125abb419cfeb9e06c2c0930f5e3082baa01b72
10,823
import re def _prepare_date(from_date): """ Private function to prepare from_date by converting it to YYYY-MM-DD format. """ # check if from_date was provided and if it was provided in the right # format from_date_str = None if from_date is not None: if not isinstance(from_date, str): try: from_date_str = from_date.strftime('%Y-%m-%d') except AttributeError: raise ValueError( f"""from_date must be a string in the format YYYY-MM-DD or datetime. String provided: {from_date}" """ ) else: # regex template for YYYY-MM-DD pattern = re.compile("\\d{4}-\\d{2}-\\d{2}") match = pattern.match(from_date) if match is None: raise ValueError( f"""from_date must be a string in the format YYYY-MM-DD \ or datetime. String provided: {from_date}" """ ) from_date_str = from_date[0:10] return from_date_str
2766138027a1a2cc89e66370d792e2a317f6aa21
10,825
import copy
import importlib
import torch


def get_loss(cfg_loss):
    """
    Build the loss with the proper parameters and return it.

    Parameters
    ----------
    cfg_loss : dict
        Dictionary containing the name of the loss to use and it's
        specific configs.

    Returns
    -------
    loss_function : function
        The loss function.
    """
    loss_args = copy.deepcopy(cfg_loss)
    loss_name = loss_args.pop('name')

    # Resolve the loss class with importlib/getattr instead of the
    # original exec()/eval() on config strings: same lookup, but no
    # dynamic code execution driven by configuration data.
    if loss_name == 'BinaryFocalLoss':
        loss_module = importlib.import_module('utils.loss')
    else:
        loss_module = importlib.import_module('torch.nn')
    loss_class = getattr(loss_module, loss_name)

    # Convert to torch.tensor the arguments that require it.
    for arg_name in ('pos_weight', 'weight'):
        if arg_name in loss_args:
            loss_args[arg_name] = torch.tensor(loss_args[arg_name])

    return loss_class(**loss_args)
88bacc521eee93c2a8c8d73a829a57f5d02bfdbd
10,827
def split_nav_dataframe(nav_table, split_date):
    """
    Split NAV pandas DataFrame into a training and a testing DataFrame
    according to a split_date, such that split_date becomes the last date
    of the training DataFrame.

    Args:
        - split_date (datetime.datetime)

    Returns:
        - train: the training DataFrame
        - test: the testing DataFrame
    """
    dates = nav_table.index.tolist()
    assert split_date in dates
    cut = dates.index(split_date) + 1
    train = nav_table.iloc[:cut]
    test = nav_table.iloc[cut:]
    return train, test
78c6ae901641b3508d2a9f9c790be6d576cf8458
10,829
def find_groups_in_cluster(clustervs, elementgroupList):
    """A utility function to find vertices with the same cluster memberships.

    :param igraph.vertex clustervs: an igraph vertex instance
    :param list elementgroupList: a list containing the vertices to group
    :returns: a list-of-lists containing the groupings of the vertices
    """
    members = set(clustervs)
    grouped = []
    for group in elementgroupList:
        if set(group) & members:
            grouped.append(group)
    return grouped
7cc941d086d7be7c7395e21f6bde1cd4e5611851
10,831
def check_padding(query):
    """
    Check for missing padding in base64 encoding and fill it up with "=".

    :param query:
    :return: query
    """
    remainder = len(query) % 4
    if remainder:
        query = query + "=" * (4 - remainder)
    return query
d8ad3c96074d311dbd5ba17bb93d7ca7a8b5ccab
10,841
def align(l, alignto=4):
    """Round length *l* up to the nearest multiple of *alignto* (default 4)."""
    mask = alignto - 1
    return (l + mask) & ~mask
2bef7f1c3486c9e633138178d80216bf750ff2ed
10,845
def table_exists(db_conn, table_name):
    """
    Checks if a table matching table_name exists in the database.
    """
    cur = db_conn.cursor()
    cur.execute(
        "select name from sqlite_master where type='table' and name=?",
        (table_name,),
    )
    return cur.fetchone() is not None
ffe60c445a03530910084d01a7a488e7229bda0b
10,847
def calulate_loss_of_life(List_V, t):
    """
    For a list of V values, calculate loss of life in hours.

    t = Time Interval (min)

    (NOTE: the misspelled name is kept — it is the public interface.)
    """
    total_minutes = sum(V * t for V in List_V)
    return total_minutes / 60
ee2499af737cca764aad0a2f13794a925a172b9e
10,849
def to_hex(value, bit_count):
    """Converts an integer to its two's-complement hexadecimal string
    with bit_count bits."""
    modulus = 1 << bit_count
    return hex((value + modulus) % modulus)
e7eaf89f7b5b43e6814a3d7faa5b2ef26320ac7d
10,850
def assert_keys_in_dict(allowable_keys, d):
    """
    Checks that all keys in d are in allowable keys.

    Args:
        allowable_keys: Set or List of allowable keys
        d: Dict

    Returns:
        Boolean if satisfied, None if correct/key that is not in allowable
        keys
    """
    invalid = [key for key in d if key not in allowable_keys]
    if invalid:
        return False, invalid[0]
    return True, None
017fc447d22b755d8b8447f51f636e666ed72309
10,853
from pathlib import Path


def read(fname: str) -> str:
    """Read a file located relative to this file's directory."""
    target = Path(__file__).resolve().parent / fname
    with target.open() as handle:
        return handle.read()
340d814777f1f0ef6d5b97d430b3313db5e0a5ca
10,855
def _unique_names(item): """ Compute the unique key for the given (namespaceless) item within a single collection. """ return item.metadata.name
6da786ae1adae29a143b1e171a7206b25b8e9556
10,856
def normalize_title(title: str, body: str) -> str:
    """Re-join a title that was truncated into the PR's body with an ellipsis."""
    spilled = title.endswith("…") and body.startswith("…")
    if not spilled:
        return title
    # Take the continuation up to the first newline, dropping a stray "\r".
    continuation = body[1:].partition("\n")[0].rstrip("\r")
    return title[:-1] + continuation
ec3d560855abcd85afe35839a4af025a2b365b45
10,860
def find_datacenter(response):
    """Grab the X-Served-By header and return its last three characters
    as the datacenter identification code.

    Returns:
        string: the datacenter identification code
    """
    xsb = response.headers['X-Served-By']
    # Negative slicing grabs the trailing three characters directly,
    # replacing the manual len()-based index arithmetic.
    return xsb[-3:]
2c2556e5e9ed2044ed810c90fc9f4b65bbd7650e
10,862
def wcGeneralSettingLookup(gSettings, sid):
    """
    Find the WooCommerce general setting whose "id" equals *sid*.

    Returns the matching settings dict, or None if no entry matches.
    """
    assert gSettings is not None
    assert isinstance(sid, str)
    return next(
        (entry for entry in gSettings if "id" in entry and entry["id"] == sid),
        None,
    )
ab3ca8a7f8a15db8fa93eaabd3b0f377d31e3b0c
10,863
def homo_lumo_mix(C, nocc, beta):
    """
    Mix a fraction *beta* of the LUMO into the HOMO column of C.

    Used when generating a spin-unrestricted guess. Raises Exception
    if beta lies outside [0, 1].
    """
    if not 0. <= beta <= 1.:
        raise Exception("Mixing beta must be in [0, 1]")
    mixed = C.copy()
    homo, lumo = C[:, nocc - 1], C[:, nocc]
    mixed[:, nocc - 1] = (1. - beta) ** 0.5 * homo + beta ** 0.5 * lumo
    return mixed
081c9149ad2d5fe39a16796943fb4d2c53c7b3d7
10,866
import json


def loadJson(jsonfile):
    """
    Read a .json file into a python dictionary.

    :param jsonfile: path to the JSON file
    :return: parsed JSON content
    """
    with open(jsonfile, "r") as data:
        # json.load streams the file object directly instead of
        # read()-ing the whole file and calling json.loads.
        return json.load(data)
de059637ea0dc1b0ef729a5d2bbfa7c3e72bb5b1
10,869
def restrict_chains(data, k):
    """Restrict data to people with at least k rows.

    Parameters
    ----------
    data : pandas.DataFrame
        The `data` from US to be subsetted.
    k : int
        The minimum number of measurements needed for a person to be kept.

    Returns
    -------
    data : pandas.DataFrame
        The subsetted data of people with at least k entries.
    """
    # Count rows per person and keep only ids appearing at least k times.
    counts = data["pidp"].value_counts()
    keep_ids = list(counts.loc[counts >= k].index)
    return data.loc[data["pidp"].isin(keep_ids)]
106fc9c43d12085392a84a188dcc8d40a89ae817
10,871
def list_of_elem(elem, length):
    """Return a list containing *elem* repeated *length* times."""
    return [elem] * length
d72bdab16a541714b2a0a781a3077d40e309e9f7
10,872
def all_ones(vector):
    """Return True iff every entry of *vector* equals 1."""
    for entry in vector:
        if entry != 1:
            return False
    return True
5bd1509c72945de83f3e84e956efae39bd32fee0
10,874
def file_is_text(file):
    """
    Check whether a file is text rather than binary.

    :param file: path to the file
    :return: True if the first 1 KiB contains only text characters,
        False otherwise
    """
    # Bytes considered "texty": bell/backspace/tab/newlines/escape plus
    # everything from 0x20 upward.
    text_chars = bytearray([7, 8, 9, 10, 12, 13, 27]) + bytearray(range(0x20, 0x100))
    with open(file, "rb") as handle:
        sample = handle.read(1024)
    # translate() deletes all texty bytes; anything left means binary.
    return not bool(sample.translate(None, text_chars))
6a49486aa05e8627e7a0f6504e5c9b86c050df81
10,889
def number_normalization(value, fromvalue, tovalue):
    """Normalize a number into the half-open range [fromvalue, tovalue).

    value: the number to normalize.
    fromvalue: lower bound of the range (inclusive).
    tovalue: upper bound of the range (exclusive). If 0, value is
        returned unchanged.
    """
    if tovalue == 0:
        return value
    result = value
    if result >= tovalue or result < fromvalue:
        # Wrap into [0, tovalue) first, then shift up if still below the floor.
        result -= (result // tovalue) * tovalue
        if result < fromvalue:
            result += tovalue
    return result
912c515991246204ebc4d5eae8ffedb1c6d5823b
10,891
def validate_overlap(periods, datetime_range=False):
    """
    Receive a list of DateRange or DateTimeRange and return True if any
    periods overlap.

    Period ends are treated as exclusive: a period ending on 15/5 does not
    overlap one starting on 15/5, matching django-postgresql behaviour:
    https://docs.djangoproject.com/en/dev/ref/contrib/postgres/fields/#daterangefield
    """
    periods.sort()
    # Compare each adjacent pair after sorting; overlap exists when the
    # intersection has positive length.
    for first, second in zip(periods, periods[1:]):
        latest_start = max(first.lower, second.lower)
        earliest_end = min(first.upper, second.upper)
        delta = earliest_end - latest_start
        amount = delta.total_seconds() if datetime_range else delta.days
        if max(0, amount) != 0:
            return True
    return False
bae96eb890063e4d27af0914e7fcd1348d1340a7
10,898
def _GetStepLogViewUrl(build, full_step_name, log_name, partial_match=False): """Gets view url of the requested log. Args: build (buildbucket_proto.build_pb2.Build proto): Information about a build. full_step_name (str): Full name of the step. log_name (str): Type of the log. partial_match (bool): If the step_name is not found among the steps in the builder, allow the function to retrieve the step log for a step whose name contains step_name as a prefix. Returns: (str): view_url of the requested log. """ for step in build.steps or []: if step.name == full_step_name: for log in step.logs or []: if log.name.lower() == log_name: return log.view_url if partial_match: for step in build.steps or []: if step.name.startswith(full_step_name): for log in step.logs or []: if log.name.lower() == log_name: return log.view_url return None
7e240e0414c8d83620d701d348d3386cb5054226
10,903
import itertools


def pairwise(iterable):
    """
    Yield consecutive overlapping pairs from an iterable.

    pairwise([1,2,3,4]) -> (1,2),(2,3),(3,4)
    """
    first, second = itertools.tee(iterable)
    # Advance the second iterator by one so zip pairs item i with item i+1.
    next(second, None)
    return zip(first, second)
495fedbaf2046d66bd791dc78dea8525761e01b1
10,905
def create_mating_pool(population, fitnesses, norm=True):
    """
    Generate a mating pool.

    Creates a new population in which each agent appears with a frequency
    proportional to its fitness; the pool is the basis for the next
    generation.

    Parameters
    ----------
    population : list of MiniMaxAgent
        The current population
    fitnesses : list of int
        The fitness values for each member of the population
    norm : bool
        True applies basic normalization (scale to a 0-10 integer range)
        to the fitness values before creating the pool

    Returns
    -------
    list of MiniMaxAgent
        The mating pool with each agent's frequency proportional to fitness
    """
    if norm:
        best = max(fitnesses)
        fitnesses = [int((f / best) * 10.0) for f in fitnesses]
    pool = []
    for agent, fitness in zip(population, fitnesses):
        pool.extend([agent] * fitness)
    return pool
1ea329c334ffa54527aacf8fba7a33b55c927eb1
10,907
def get_target_name(label):
    """
    Try to extract the target_name from a given PDS label.

    Parameters
    ----------
    label : Object
        Any type of pds object that can be indexed

    Returns
    -------
    target_name : str
        The defined target_name from the label, or None if the label
        does not define one
    """
    try:
        return label['TARGET_NAME']
    except KeyError:
        return None
7aae704d4590fb44bfb650481a65af870c7c6570
10,910
from typing import List
from typing import Any


def flatten_list(x: List[Any]) -> List[Any]:
    """
    Convert a list of lists into a flat list.

    Args:
        x: list of lists

    Returns:
        flat list

    As per http://stackoverflow.com/questions/952914/making-a-flat-list-out-of-list-of-lists-in-python
    """  # noqa
    flat: List[Any] = []
    for sublist in x:
        flat.extend(sublist)
    return flat
b600cce1dc88869c60c80019f3be9ea5245cdda7
10,916
def return_subset_number(pedigree):
    """
    Count the founders in the given pedigree file.

    A founder is an individual whose father and mother columns
    (fields 3 and 4) are both "0".
    """
    founders = 0
    with open(pedigree, 'r') as filein:
        for raw in filein:
            fields = raw.rstrip().split()
            if fields[2] == "0" and fields[3] == "0":
                founders += 1
    return founders
090661dfbc367fe3974fe2dfa2069f97dd70ee75
10,921
def loop_until_choice_is_correct(msg_prompt: str, valid_choices: dict) -> str:
    """
    Prompt repeatedly until the user enters a key of *valid_choices*.

    Args:
        msg_prompt (str): Prompt to display to the user to enter a choice.
        valid_choices (dict): Keys are valid choices.

    Returns:
        str: Value mapped to the matching key in `valid_choices`.
    """
    while True:
        entered = input(f"{msg_prompt}: ")
        if entered in valid_choices:
            return valid_choices[entered]
        print("Please enter a valid choice.")
bb05f1d11b14611ffcada93629382bfb8a447fad
10,923
def repunctuate_character(letters, punctuation):
    """Apply the recorded punctuation to a character.

    letters must be an iterator of base characters. 'UPPER' and 'LOWER'
    consume the next letter and case it; any other value is returned
    verbatim (it is literal punctuation).
    """
    if punctuation == 'UPPER':
        return next(letters).upper()
    if punctuation == 'LOWER':
        return next(letters).lower()
    return punctuation
92cd8b30466e19a8ce1278f0b81440399c7f809c
10,931
def split_set_by_point(coord_array, point):
    """
    Partition the unique coords into those "left" of point and the rest.

    coord_array : iterable of Coord
    point : Coord

    A coord goes into the left list when its x or its y is strictly
    smaller than point's; otherwise it goes into the right list.
    Duplicates are removed via set() first.
    """
    left, right = [], []
    for coord in set(coord_array):
        bucket = left if (coord.x < point.x or coord.y < point.y) else right
        bucket.append(coord)
    return left, right
6a7ee4387f2bc74fcf06262bc4262351c66b3a1c
10,937
import re


def split_camel_cased(text):
    """
    Split camelCased elements with a space.

    :param text: the text to be processed.
    :type: str
    :return: text with all camelCased elements split into different elements
    :type: str
    """
    # (?!^) skips a capitalized word at the very start of the string.
    pattern = re.compile('(?!^)([A-Z][a-z]+)')
    return pattern.sub(r' \1', text)
6de55ed7d8fc4bc06e0d16d4b327999ad656ceac
10,943
def get_dict_from_namespace_object(args):
    """Unwrap the namespace object to pull out its dict of attribute values."""
    namespace_attrs = vars(args)
    return namespace_attrs
e1c34c1faff71ae330a44d91a399260fbdc454c6
10,947
def get_tuple(string, n, as_int=False):
    """
    Split a comma-separated string into an n-tuple, ignoring the rest if
    there are more values, and replicating if there is only one.

    @param string The string
    @param n The number of elements
    @param as_int If true, each element is cast to int
    @raises IndexError if the number of values is invalid (between 2 and n-1)
    """
    parts = string.split(",")
    if len(parts) >= n:
        # BUG FIX: previously truncated to t[:n - 1], producing an
        # (n-1)-tuple instead of the documented n elements.
        t = tuple(parts[:n])
    elif len(parts) == 1:
        t = tuple(parts * n)
    else:
        raise IndexError("Invalid number of values")
    if as_int:
        return tuple(int(i) for i in t)
    return t
59b339c561bccc428f4e2882c369a7dcda86aaff
10,948
def _filter_out(message: str) -> bool: """Return True when message should be ignored. Args: message (str): message to analyze Returns: bool: True when message should be ignored, False otherwise """ for authorized_function in ("input", "print", "pprint"): if f"Found wrong function call: {authorized_function}" in message: return True return False
461c0f36aed22d80384202093bc1cfc40b88242c
10,952
import re


def byteform_to_num(byte_format):
    """Converts a string expressing a size of a file in bytes into the
    corresponding number of bytes. Accepts commas and decimal points in nums.
    Allows 'b', 'mb', 'gb', and variants like 'bytes', but not 'tb', 'zb', etc.

    Note that format_bytes is a lossy function (it doesn't retain all sigfigs
    by default), so byteform_to_num(format_bytes(x)) does not always equal x.

    Raises ValueError for unrecognized suffixes.
    """
    # Raw string fixes the invalid escape sequences ("\d", "\s", "\.")
    # in the original non-raw pattern.
    num_str, suf = re.findall(r"([\d,\.]+)\s*([a-z]*)", byte_format, re.I)[0]
    num = float(num_str.replace(",", ""))
    suf = suf.lower()
    if suf == "" or suf[0] == "b":
        return num
    if suf[:2] == "kb":
        return num * 1e3
    if suf[:2] == "mb":
        return num * 1e6
    if suf[:2] == "gb":
        return num * 1e9
    raise ValueError(f"byteform_to_num couldn't recognize quantifier '{suf}'")
300ede4ef120b9e3a8db85effcb6611dd9299953
10,953
def test_identifier(recipe): """Test recipe identifier for proper construction. Args: recipe: Recipe object. Returns: Tuple of Bool: Failure or success, and a string describing the test and result. """ name = recipe["Input"].get("NAME") if name: # The identifier may not have spaces. name = name.replace(" ", "") description = ("Recipe identifier follows convention. " "('com.github.novaksam.jss.%s')" % name) result = False identifier = recipe.get("Identifier") if identifier and name: if (str(identifier).startswith("com.github.novaksam.jss.") and str(identifier).rsplit(".", 1)[1].startswith(name)): result = True return (result, description)
800b7a9d07ef7094d32a17033da519c9edb5039b
10,955
import six


def get_probs_for_labels(labels, prediction_results):
  """ Given ML Workbench prediction results, get probs of each label for each instance.

  The prediction results are like:
  [
    {'predicted': 'daisy', 'probability': 0.8, 'predicted_2': 'rose', 'probability_2': 0.1},
    {'predicted': 'sunflower', 'probability': 0.9, 'predicted_2': 'daisy', 'probability_2': 0.01},
    ...
  ]

  Each instance is ordered by prob. But in some cases probs are needed for fixed
  order of labels. For example, given labels = ['daisy', 'rose', 'sunflower'], the
  results of above is expected to be:
  [
    [0.8, 0.1, 0.0],
    [0.01, 0.0, 0.9],
    ...
  ]
  Note that the sum of each instance may not be always 1. If model's top_n is set to
  none-zero, and is less than number of labels, then prediction results may not contain
  probs for all labels.

  Args:
    labels: a list of labels specifying the order of the labels.
    prediction_results: a pandas DataFrame containing prediction results, usually returned
        by get_prediction_results() call.

  Returns:
    A list of list of probs for each class.
  """

  probs = []
  if 'probability' in prediction_results:
    # 'probability' exists so top-n is set to none zero, and results are like
    # "predicted, predicted_2,...,probability,probability_2,...
    for i, r in prediction_results.iterrows():
      # One prob per label, defaulting to 0.0 for labels the row omits.
      probs_one = [0.0] * len(labels)
      for k, v in six.iteritems(r):
        if v in labels and k.startswith('predicted'):
          # 'probability' + k[9:] maps 'predicted' -> 'probability' and
          # 'predicted_N' -> 'probability_N'.
          # NOTE(review): the k == 'predict' branch appears unreachable,
          # since keys here always start with 'predicted' — confirm.
          if k == 'predict':
            prob_name = 'probability'
          else:
            prob_name = 'probability' + k[9:]
          probs_one[labels.index(v)] = r[prob_name]
      probs.append(probs_one)
    return probs
  else:
    # 'probability' does not exist, so top-n is set to zero. Results are like
    # "predicted, class_name1, class_name2,...
    for i, r in prediction_results.iterrows():
      probs_one = [0.0] * len(labels)
      for k, v in six.iteritems(r):
        # Columns named after labels carry that label's prob directly.
        if k in labels:
          probs_one[labels.index(k)] = v
      probs.append(probs_one)
    return probs
f18580a5aba09df56fbb9b45b8ca5753eeba0d62
10,956
import math


def convert_local_to_global_vector(v: list, yaw: float):
    """
    Convert the given vector in vehicle coordinate system to the global
    one under the given vehicle yaw (degrees).
    """
    # Hoist the shared trig terms instead of recomputing radians/cos/sin
    # for each component.
    yaw_rad = math.radians(yaw)
    cos_yaw = math.cos(yaw_rad)
    sin_yaw = math.sin(yaw_rad)
    vx = cos_yaw * v[0] - sin_yaw * v[1]
    vy = sin_yaw * v[0] + cos_yaw * v[1]
    return [vx, vy]
62f96a22c85f22125165e387bfeea76d78e5c519
10,958
def humanReadable(size_in_bytes):
    """Returns sizes in human-readable units (KB through TB).

    Non-numeric input is treated as 0 bytes. Sizes above the TB limit are
    reported in TB (previously the function fell off the loop and
    implicitly returned None).
    """
    try:
        size_in_bytes = int(size_in_bytes)
    except ValueError:
        size_in_bytes = 0
    units = [(" KB", 10**6), (" MB", 10**9), (" GB", 10**12), (" TB", 10**15)]
    for suffix, limit in units:
        if size_in_bytes <= limit:
            return str(round(size_in_bytes / float(limit / 2**10), 1)) + suffix
    # BUG FIX: values beyond the last limit used to return None; clamp to
    # the largest unit instead.
    suffix, limit = units[-1]
    return str(round(size_in_bytes / float(limit / 2**10), 1)) + suffix
29dd9e46d535943e83cd65c34f4135af898f0bbc
10,961
def _agent_has_email_address(agent_obj): """Check if agent has email. Arguments: agent_obj (list/dict): The specified field from the research dataset. If publisher then dict else list Returns: bool: True if has emails, False if not. """ if agent_obj: if isinstance(agent_obj, list) and len(agent_obj) > 0: return 'email' in agent_obj[0] elif isinstance(agent_obj, dict): return 'email' in agent_obj return False
a662a9a874e607d90db644c93515632606ae83fc
10,966
def list_acquisition_dates(dc, product):
    """Get a list of acquisition dates for a given product."""
    dataset = dc.load(product, dask_chunks={})
    # An empty Dataset is falsy: no acquisitions for this product.
    if not dataset:
        return []
    times = dataset.time.values
    return times.astype('M8[ms]').tolist()
97012b354d835ad94e7037f9c68fcfa72d143930
10,967
import torch


def coord_map(shape, device, start=-1, end=1):
    """
    Given a 2d shape tuple (m, n), return a (2, m, n) tensor holding the x
    and y coordinate maps, each ranging start..end along its direction.
    """
    m, n = shape
    xs = torch.linspace(start, end, steps=n).to(device)
    ys = torch.linspace(start, end, steps=m).to(device)
    # Broadcast each 1-D ramp across the other axis, then stack as channels.
    x_map = xs.unsqueeze(0).expand(torch.Size((m, n))).unsqueeze(0)
    y_map = ys.unsqueeze(1).expand(torch.Size((m, n))).unsqueeze(0)
    return torch.cat([x_map, y_map], 0)
056dddd9442cef58dca7cc6cc30fd6b8cca9bc85
10,968
def upsample_kernel_size_solver(
    in_size, out_size, stride=1, padding=0, output_padding=0, dilation=1,
):
    """
    Return the kernel size (as a 1-tuple) needed for a transposed
    convolution to upsample a tensor from in_size to out_size.

    Solves for kernel size in the shape equation from the "Shape" section
    of the pytorch docs:
    https://pytorch.org/docs/stable/generated/torch.nn.ConvTranspose1d.html
    """
    numerator = (
        out_size - 1 - output_padding - (in_size - 1) * stride + 2 * padding
    )
    kernel = int(numerator / dilation + 1)
    return (kernel,)
58bcf58c3781b195801e1239b9fa688d19c89288
10,971
def get_key_from_dict(dictionary, key, group, required=True):
    """
    Grab a value from dictionary.

    :param dictionary: Dictionary to utilize
    :param key: Key to fetch
    :param group: Group this key belongs to, used for error reporting purposes
    :param required: Boolean indicating whether this key is necessary for a
        valid manifest
    :return: Value if found, None when absent and not required
    :raises KeyError: when the key is absent but required
    """
    if key in dictionary:
        return dictionary[key]
    if required:
        raise KeyError ("Failed to generate manifest: {0} missing {1}".format (group, key))
    return None
81245795296c2fe6d21bb84f0cccfa197a966b24
10,972