Dataset columns: content (string, 39 to 14.9k chars) · sha1 (string, 40 chars) · id (int64, 0 to 710k)
def extract_title_from_text(text: str) -> str:
    """Extract and return the title line from a text written in Markdown.

    Returns the first line of the original text, minus any header markup
    ('#') at the start of the line.
    """
    firstline = text.split('\n', 1)[0]
    return firstline.lstrip('# ')
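A quick usage sketch (the Markdown input is invented for illustration):

    >>> extract_title_from_text('# Release notes\nFirst change.')
    'Release notes'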
c51c7dd517b7d50a50df472d055618a092bb3518
9,131
def _all_pairs(i, contextsize, arrlen):
    """
    i: index in the array
    contextsize: size of the context around i
    arrlen: length of the array

    Returns a list of (l, r) index pairs near i in an array a of length
    arrlen with the given context size. A context of k around the i-th index
    means all substrings/subarrays of a[i-k:i+k+1] containing a[i].
    """
    return [
        (l, r)
        # start anywhere between i - contextsize and i
        for l in range(max(i - contextsize, 0), i + 1)
        # end anywhere between i + 1 and i + 1 + contextsize
        for r in range(i + 1, min(i + 1 + contextsize, arrlen))
    ]
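For example, with arbitrary values (i=2, context of 1, array length 5), the pairs cover every slice through index 2:

    >>> _all_pairs(2, 1, 5)
    [(1, 3), (2, 3)]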
7234e7b092e60c74d4f1c0af44a469c25cc34dc9
9,132
def bufferParser(readbuffer, burst=16):
    """Parse concatenated frames from a burst."""
    out = b''
    offset = 1
    while len(readbuffer) > 0:
        length = readbuffer[2]
        if readbuffer[4] == offset:
            out += readbuffer[5:3 + length]
            offset += 1
        readbuffer = readbuffer[4 + length:]
    return out
a3a7eb312f9e9c0e9a2183960074ebd1e9925025
9,136
def sec_title(default_str: str) -> str:
    """Reads in a section title."""
    name = input('What would you like to title this section? '
                 + '(default is ' + default_str + ')\n')
    if name:
        return name
    return default_str
3dfc0ddcdc9cb9beb22b02892959334516b2a90b
9,144
def e_timeToString(dateString):
    """
    input: string
    output: string
    description: format dateString to yyyymmddHHMM
    example: Wed Aug 29 07:23:03 CST 2018 ->> 201808290723
    """
    # month list used to map a month name to its number
    month = ["Jan", "Feb", "Mar", "Apr", "May", "Jun",
             "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"]
    year_string = dateString[-4:]
    month_string = str(month.index(dateString[4:7]) + 1)
    if len(month_string) == 1:
        month_string = "0" + month_string
    day_string = dateString[8:10]
    # time format HHMM
    time_string = dateString[11:16].replace(":", "")
    return year_string + month_string + day_string + time_string
1a0c3f014bbd95a9da0eb767e1ce219cb0c70195
9,145
def tachycardic_detector(patient_age, patient_heart_rate):
    """Determine if a patient is tachycardic based on their age and heart rate.

    Args:
        patient_age: integer extracted from patient database entry
        patient_heart_rate: integer posted to patient database entry

    Returns:
        tachycardic_status: string containing either "tachycardic" or
            "not tachycardic"
    """
    if patient_age < 3 and patient_heart_rate > 151:
        tachycardic_status = "tachycardic"
    elif patient_age < 5 and patient_heart_rate > 137:
        tachycardic_status = "tachycardic"
    elif patient_age < 8 and patient_heart_rate > 133:
        tachycardic_status = "tachycardic"
    elif patient_age < 12 and patient_heart_rate > 130:
        tachycardic_status = "tachycardic"
    elif patient_age <= 15 and patient_heart_rate > 119:
        tachycardic_status = "tachycardic"
    elif patient_heart_rate > 100:
        tachycardic_status = "tachycardic"
    else:
        tachycardic_status = "not tachycardic"
    # logging.info("Tachycardic status calculated: " + tachycardic_status)
    return tachycardic_status
595bf87d913cd94b9f4aa089a3f1cf32f342ccbf
9,146
def islist(item):
    """Check if an item is a list - not just a sequence.

    Args:
        item (mixed): The item to check as a list.

    Returns:
        result (bool): True if the item is a list, False if not.
    """
    return isinstance(item, list)
02c4157e1867e7b113e9695f2fa8fd4aaccc043d
9,156
from typing import Any, Dict
import logging


def build_log_config(level: str) -> Dict[str, Any]:
    """Build a log config from a level."""
    return {
        "version": 1,
        "disable_existing_loggers": False,
        "formatters": {
            "basic": {"format": "%(asctime)s %(name)s %(levelname)s %(message)s"}
        },
        "handlers": {
            "stream_handler": {
                "class": "logging.StreamHandler",
                "formatter": "basic",
                "level": getattr(logging, level),
            },
        },
        "loggers": {
            "": {
                "handlers": ["stream_handler"],
                "level": getattr(logging, level),
            },
        },
    }
e20a419ee6c69f6fa0eefbd51e5542349b1a1e8b
9,163
from typing import Iterable, Optional


def effective_sample_size(
    weights: Iterable[float],
    total_weight: Optional[float] = None,
) -> float:
    """Compute the "effective sample size" of the given weights.

    This value represents how "healthy" the underlying samples are: the lower
    it is, the fewer "real samples" are represented. The closer it comes to
    zero, the more degenerate the samples are.

    See https://en.wikipedia.org/wiki/Effective_sample_size

    :param weights: the weights of the samples
    :param total_weight: total weight of all samples, requires extra
        computation if not given
    :return: the effective sample size of ``weights``
    """
    if total_weight is None:
        total_weight = sum(weights)
    assert total_weight and total_weight > 0  # for mypy
    return pow(total_weight, 2) / sum(pow(w, 2) for w in weights)
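Two worked examples: uniform weights recover the full sample count, while a single dominant weight collapses the effective size toward 1:

    >>> effective_sample_size([1.0, 1.0, 1.0, 1.0])
    4.0
    >>> effective_sample_size([1.0, 0.0, 0.0, 0.0])
    1.0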
6915abd0484dc4b08b47c1c88b6e19e2af5dd1c4
9,168
def split_pair_occurrence(input_str):
    """
    Q9HD36.A79T (x11) → (Q9HD36.A79T, 11)
    """
    if '(x' not in input_str:
        return input_str, 1
    pair, occurrence = [item.strip() for item in input_str.split()]
    occurrence = int(occurrence[2:-1])
    return pair, occurrence
0812e907a97894ff6f2d94722874b3917ce30ad8
9,169
def find_joins(df, ids, downstream_col="downstream", upstream_col="upstream", expand=0):
    """Find the joins for a given segment id in a joins table.

    Parameters
    ----------
    df : DataFrame
        data frame containing the joins
    ids : list-like
        ids to lookup in upstream or downstream columns
    downstream_col : str, optional (default "downstream")
        name of downstream column
    upstream_col : str, optional (default "upstream")
        name of upstream column
    expand : positive int, optional (default 0)
        If > 0, will expand the search to "expand" degrees from the original
        ids. E.g., if expand is 2, this will return all nonzero joins that
        are within 2 joins of the original set of ids.

    Returns
    -------
    Joins that have the id as an upstream or downstream.
    """
    out = df.loc[(df[upstream_col].isin(ids)) | (df[downstream_col].isin(ids))]

    # find all upstream / downstream joins of ids returned at each iteration
    for i in range(expand):
        next_ids = (set(out[upstream_col]) | set(out[downstream_col])) - {0}
        out = df.loc[
            (df[upstream_col].isin(next_ids)) | (df[downstream_col].isin(next_ids))
        ]
    return out
39f16985ddd8e79338e520e56ba6ee793558d03f
9,170
from datetime import datetime


def now_str(time=False):
    """Return string to be used as time-stamp."""
    now = datetime.now()
    return now.strftime(f"%Y-%m-%d{('_%H:%M:%S' if time else '')}")
02b73bda5f27e7c25120d50d50244bd103661c90
9,174
def from_file(path: str) -> set:
    """
    Read conditions from a file. Each line contains a separate condition.

    :param path: Path to file.
    :return: Read conditions.
    """
    conditions = set()
    with open(path) as f:
        for line in f:
            conditions.add(line)
    return conditions
3780d540d6f300fe0a97d354ed33fa0aab803d56
9,175
import json


def parse_line(header, line):
    """Parse one line of data from the message file.

    Each line is expected to contain chunk key - comma - tile key (CSV style).

    Args:
        header (dict): Data to join with contents of line to construct a full message.
        line (string): Contents of the line.

    Returns:
        (string): JSON encoded data ready for enqueuing.

    Raises:
        (RuntimeError): if less than 2 columns found on a line.
    """
    msg = {}
    msg['job_id'] = header['job_id']
    msg['upload_queue_arn'] = header['upload_queue_url']
    msg['ingest_queue_arn'] = header['ingest_queue_url']

    tokens = line.split(',')
    if len(tokens) < 2:
        raise RuntimeError('Bad message line encountered.')

    msg['chunk_key'] = tokens[0].strip()
    msg['tile_key'] = tokens[1].strip()

    return json.dumps(msg)
452dd80f84a35f6e3532330155bade7f424c102a
9,179
import struct


def byte_to_float(b1, b2, b3, b4):
    """
    A function to get a 32 bit float from 4 bytes read in order [b1, b2, b3, b4]

    :param b1: first byte
    :param b2: second byte
    :param b3: third byte
    :param b4: fourth byte
    :return: the byte array from b1, b2, b3, b4 unpacked as a float using the
        'struct' module
    """
    arr = bytearray([b1, b2, b3, b4])
    # struct.unpack returns a 1-tuple; take its only element to return a float
    return struct.unpack('<f', arr)[0]
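A worked example: the little-endian IEEE 754 bytes 0x00 0x00 0x80 0x3f decode to 1.0:

    >>> byte_to_float(0x00, 0x00, 0x80, 0x3f)
    1.0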
962480d1b9d2c50e3196b5480e9c62bf696a8f0d
9,184
import re


def get_used_by_from_comments(lines: "list[str]") -> "tuple[int, list[str]]":
    """Read the module-used-by block comment from a module file.

    Args:
        lines (list[str]): The content of the module file as a list of strings.

    Returns:
        tuple[int, list[str]]: The integer indicates the last line number of
            the module-used-by block comment. The list indicates the assembly
            file names from the block comment.
    """
    line_count = len(lines)
    line_no = 0
    matches = []
    while line_no < line_count:
        line = lines[line_no].strip()
        match = re.match(r"^//\s*\*?(.+\.adoc)", line)
        if match:
            matches.append(match.group(1).strip())
        if not re.match(r"^//", line):
            break
        line_no += 1
    return line_no, matches
6ac30266524373d0de7cf7bb9ad9fd8dcd1933a2
9,185
from pathlib import Path


def construct_target_path(participant_name, model_name, roi):
    """Construct path to save results to."""
    project_root = Path(__file__).parents[1]
    return (project_root / "results" / participant_name
            / f"model_{model_name}" / f"roi_{roi}")
072681647a3362563829c25d4890aa13425cff2c
9,186
import codecs


def txidFromBroadcast(hexStr):
    """Extracts the hex txid from a broadcast in hex."""
    # The prevout txid is the first part of the broadcast data
    # in serialised form. But we need to reverse the bytes.
    hexRev = hexStr[:64]
    bytesRev = codecs.decode(hexRev, "hex")
    return bytesRev[::-1].hex()
96690f4fdef5f0cff857188045696e427914b887
9,187
def filter_properties(person, PERSON_PROPERTIES):
    """Extract specific properties of the given person into a new dictionary.

    Parameters:
        person (dict): the dictionary containing properties of a person.
        PERSON_PROPERTIES (tuple): a tuple containing the characteristics of
            a person

    Returns:
        record (dict): a dictionary containing filtered key-value pairs of
            characteristics of the person.
    """
    record = {}
    for key, val in person.items():
        if key in PERSON_PROPERTIES:
            record[key] = val
    return record
2a3ec4ab32c5d99d475ebffaefe0d8c40ce137af
9,190
def get_chain_hash(contract, s, u_i, s_i, a, b, bytes_30, dyn_bytes, bar_uint, arr) -> bytes:
    """Uses the contract to create and hash a Foo struct with the given parameters."""
    result = contract.functions.hashFooStructFromParams(
        s, u_i, s_i, a, b, bytes_30, dyn_bytes, bar_uint, arr
    ).call()
    return result
2faeb03eff5ee1a4e564a50f8bff78fb99cdd169
9,191
import torch


def load_snapshot(model_path):
    """Load snapshot.

    :param model_path: path to snapshot
    :type model_path: str
    :return: built state
    :rtype: dict
    """
    state = torch.load(model_path)
    return state
bdeba078302b8c8c6ac39f156877ef58e91341ec
9,198
def create_input_list(pdb_list_fname):
    """Create a list of tuples (pdb_id, chain) from a text file."""
    pdb_list = []
    with open(pdb_list_fname, 'r') as f:
        for record in f.read().splitlines():
            pdb_id, chain = record[:-1], record[-1]
            # check PDB ID and chain are valid
            if (not pdb_id.isalnum() or len(pdb_id) != 4
                    or not chain.isalpha() or len(chain) != 1):
                continue
            pdb_list.append((pdb_id, chain))
    return pdb_list
d02588ec1d2ff55454782b337ac15cf9e6f67a80
9,200
def dict_to_casl_rules(rules: dict):
    """Given a dict where the keys are the subject and the values are the
    actions, return a list of dicts ready to be serialized as JSON.

    :return: list of {'subject', 'actions'} dicts
    """
    perms = []
    for key, actions in rules.items():
        perms.append({
            'subject': key,
            'actions': actions
        })
    return perms
5d0f3dfd610a1cd7deb7f09a668e291997419b2a
9,202
def _pathjoin(a, b):
    """POSIX-like path join for Globus Transfer paths.

    As with _normpath above, this is meant to behave correctly even on
    Windows systems.
    """
    if not b:  # given "" as a file path
        return a
    elif b.startswith("/"):  # a path starting with / is absolute
        return b

    if a.endswith("/"):
        return a + b
    else:
        return a + "/" + b
20079d97be4e07499a9b0dfa80458a7e151826c3
9,205
def rob(nums):
    """You are a professional robber planning to rob houses along a street.
    Each house has a certain amount of money stashed; the only constraint
    stopping you from robbing each of them is that adjacent houses have
    connected security systems that automatically contact the police if two
    adjacent houses are broken into on the same night.

    Given a list of non-negative integers representing the amount of money
    of each house, determine the maximum amount of money you can rob tonight
    without alerting the police.

    Args:
        nums: list[int]

    Returns:
        int
    """
    # DP over two states: robbed the current house (r) or not (nr).
    r = nr = 0
    for x in nums:
        r_prev = r
        r = nr + x
        nr = max(r_prev, nr)
    return max(r, nr)

    # f(0): r = nums[0]; nr = 0
    # f(1): r = nums[1]; nr = f(0)
    # f(k) = max( f(k-2) + nums[k], f(k-1) )
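For instance, with the classic test case the optimal plan takes houses 0, 2, and 4 (2 + 9 + 1):

    >>> rob([2, 7, 9, 3, 1])
    12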
9bfb631b2781bbf95fa299a6474e0b1fe36ac19b
9,209
def linear_scale(input, in_low, in_high, out_low, out_high):
    """(number, number, number, number, number) -> float

    Linear scaling. Scales old_value in the range (in_high - in_low) to a
    value in the range (out_high - out_low). Returns the result.

    >>> linear_scale(0.5, 0.0, 1.0, 0, 127)
    63.5
    """
    in_range = (in_high - in_low)
    out_range = (out_high - out_low)
    result = (((input - in_low) * out_range) / in_range) + out_low
    return result
caeab8e992caca2dba96f48b0eb617fd361bb9eb
9,218
def _error_matches_criteria(error, criteria):
    """Check if an error matches a set of criteria.

    Args:
        error: The error to check.
        criteria: A list of key value pairs to check for in the error.

    Returns:
        A boolean indicating if the provided error matches the given
        criteria.
    """
    for key, value in criteria:
        if error.get(key) != value:
            return False
    return True
8f52f7288fdefa496084b4faf689ed269360050a
9,220
import re


def pad_punctuation_w_space(text: str) -> str:
    """Pad punctuation marks with space for separate tokenization."""
    result = re.sub(r'([:;"*.,!?()/\=-])', r" \1 ", text)
    result = re.sub(r"[^a-zA-Z]", " ", result)
    result = re.sub(r"\s{2,}", " ", result)
    # code for removing single characters
    result = re.sub(r"\b[a-zA-Z]\b", "", result)
    return result
8bdb82865d5e127e32d483f83246f4ad1b96b0be
9,222
def skippable_exons(exons):
    """Determine which exon(s) can be skipped.

    For each exon (except the first and the last, which cannot be skipped),
    we want to find the minimum number of exons which together have a size
    that can be divided by 3.

    >>> list(skippable_exons([30]))
    []
    >>> list(skippable_exons([30,30]))
    []
    >>> list(skippable_exons([30,30,30]))
    [[1]]
    >>> list(skippable_exons([30,30,30,30]))
    [[1], [2]]
    >>> list(skippable_exons([30,31,32,30]))
    [[1, 2]]
    >>> list(skippable_exons([30,32,32,30]))
    []
    """
    # If there are less than 3 exons, there is nothing to skip
    if len(exons) < 3:
        return []

    # We check every exon that isn't the first or the last
    for i in range(1, len(exons)):
        # Test every sub-sequence of exons, starting from the current exon
        for j in range(i + 1, len(exons)):
            # Determine the total length of the exons we are considering
            total_length = sum(exons[i:j])
            if total_length % 3 == 0:
                yield list(range(i, j))
                # Once we found the minimum number of exons to skip to stay
                # in frame (can be 1), we are not interested in skipping more
                break
f96ec0da6d72191d252cfe0ba5cdbeb21bc4388c
9,224
from typing import Any, Callable


def not_pf(predicate: Callable[[Any], bool]):
    """Negates the predicate.

    * **predicate**: predicate to be tested
    * **return**: a predicate that is the negation of the passed predicate

    >>> p = not_pf(true_p)
    >>> p(1)
    False

    >>> p = not_pf(false_p)
    >>> p(1)
    True
    """
    def internal(elm):
        return not predicate(elm)

    return internal
50d3993c4a83e5794a63134b65c732d1aa0ca1fa
9,225
def remove_last_range(some_list):
    """Return a given list with its last range removed.

    list -> list
    """
    return some_list[:-1]
ea2063c901d3aaf67caad97f1760f6fb6afb31c1
9,228
def speed_control(target, current, Kp=1.0):
    """Proportional control for the speed.

    :param target: target speed (m/s)
    :param current: current speed (m/s)
    :param Kp: speed proportional gain
    :return: controller output (m/ss)
    """
    return Kp * (target - current)
ce01369dc9445f65249a82cfb7882223ded38f36
9,235
def is_parsed_result_successful(parsed_result):
    """Returns True if a parsed result is successful."""
    return parsed_result['ResponseMetadata']['HTTPStatusCode'] < 300
717f8aa88b814405a5a008e9706338fd0f91a7ff
9,239
def sub_field(k, v):
    """Return a nested dictionary with field keys k and value v."""
    res = {}
    field_d = res
    fields = k.split('.')
    for f in fields[:-1]:
        field_d[f] = {}
        field_d = field_d[f]
    field_d[fields[-1]] = v
    return res
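A quick example showing how a dotted key expands into nesting:

    >>> sub_field('a.b.c', 1)
    {'a': {'b': {'c': 1}}}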
193869fdfaca84172c71ca935f5fdb312682b19e
9,247
import torch


def get_prediction(model, batch, device):
    """Get predicted labels for given input batch and model."""
    images = torch.tensor(batch, dtype=torch.float).to(device)
    outputs = model(images)
    _, predicted = torch.max(outputs.data, 1)
    return predicted
e8bb4257dc19f26fa206e26fa844ec9717974e52
9,250
def list_split(l, indices):
    """Split list at given indices.

    Closed lists have the same first and last elements. If the list is
    closed, splitting wraps around if the first or last index is not in the
    indices to split.

    Parameters
    ----------
    l : list
        A list.
    indices : list
        A list of indices to split.

    Returns
    -------
    split_lists : list
        Nested lists from splitting the list at the given indices.
    """
    n = len(l)

    if l[0] == l[-1]:
        closed = True
        if n - 1 in indices:
            indices.remove(n - 1)
            if 0 not in indices:
                indices.append(0)
    else:
        closed = False

    indices = list(sorted(set(indices)))

    split_lists = []
    current_list = []
    for index, item in enumerate(l):
        current_list.append(item)
        if (index in indices and index != 0) or index == n - 1:
            split_lists.append(current_list)
            current_list = [item]

    if closed:
        if 0 not in indices:
            start = split_lists.pop(0)[1:]
            split_lists[-1] += start

    return split_lists
a882842f6d51eeda010017dbdd2bfa722ebb363d
9,252
def no_op(ctx, node, name, args):
    """Skip node."""
    return None
1fede015a843657f3959bb8da4c2216a8674e60c
9,253
import functools
import warnings


def deprecated(func):
    """Decorator to be used to mark functions as deprecated.

    It will result in a warning being emitted when the function is used.

    Usage::

        @other_decorators_must_be_upper
        @deprecated
        def some_old_function(x, y):
            return x + y

        class SomeClass:
            @deprecated
            def some_old_method(self, x, y):
                return x + y
    """
    @functools.wraps(func)
    def new_func(*args, **kwargs):
        warnings.warn_explicit(
            "Call to deprecated function %(funcname)s." % {
                'funcname': func.__name__,
            },
            category=DeprecationWarning,
            # func.func_code is Python 2; __code__ is the Python 3 spelling
            filename=func.__code__.co_filename,
            lineno=func.__code__.co_firstlineno + 1,
        )
        return func(*args, **kwargs)
    return new_func
ef4ca24b5da4a4df2b3c2a11f2e6b71791233a85
9,255
import math


def divisors(n: int) -> list[int]:
    """Get the proper divisors of a number n."""
    limit = int(math.sqrt(n)) + 1
    proper_divisors = {1}
    for i in range(2, limit):
        if n % i == 0:
            proper_divisors.add(n // i)
            proper_divisors.add(i)
    return list(proper_divisors)
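For example, the proper divisors of the perfect number 28 sum back to 28 (the returned order depends on set iteration, hence the sort):

    >>> sorted(divisors(28))
    [1, 2, 4, 7, 14]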
0a71ecccbda802d3a3575f024073fac575355ffa
9,260
def foreign_key(
    table_name: str, schema: str, parent_name: str, parent_schema: str
) -> str:
    """Return column names (child and parent) of the foreign key."""
    return f"""
    SELECT att2.attname as child_column,
           att.attname as parent_column
    FROM (SELECT unnest(con1.conkey) AS parent,
                 unnest(con1.confkey) AS child,
                 con1.confrelid,
                 con1.conrelid,
                 con1.conname,
                 ns2.nspname
          FROM pg_class cl
          JOIN pg_namespace ns ON cl.relnamespace = ns.oid
          JOIN pg_constraint con1 ON con1.conrelid = cl.oid
          JOIN pg_class cl2 ON cl2.oid = con1.confrelid
          JOIN pg_namespace ns2 ON ns2.oid = cl2.relnamespace
          WHERE cl.relname = '{table_name}'
            AND ns.nspname = '{schema}'
            AND cl2.relname = '{parent_name}'
            AND ns2.nspname = '{parent_schema}'
            AND con1.contype = 'f'
         ) con
    JOIN pg_attribute att ON att.attrelid = con.confrelid AND att.attnum = con.child
    JOIN pg_class cl ON cl.oid = con.confrelid
    JOIN pg_attribute att2 ON att2.attrelid = con.conrelid AND att2.attnum = con.parent
    """
e1c7221fd308ee44f7b09718e66028351262334a
9,262
def lighten(color, scale=1.0):
    """Lighten a color.

    - color is a tuple (r, g, b, a)
    - scale can be any number; if < 1, the color will be darkened
    """
    return tuple(map(
        lambda x: int(min(max(x * scale, 0), 255)),
        color[:3]
    )) + color[3:]
4c520c00ca3509b3e09090b7d72790db2a80f63c
9,279
def AB2Jy(ABmag):
    """Convert AB magnitudes to Jansky."""
    return 10.**(-0.4 * (ABmag + 48.60)) / 1e-23
a55b70df44f56461d935c8e5aa8aff50df26a982
9,280
def all_valid(formsets):
    """Validate every formset and return True if all are valid."""
    # List comprehension ensures is_valid() is called for all formsets.
    return all([formset.is_valid() for formset in formsets])
3cffd9879143e4879794e86bbb65e49f4f2fd975
9,281
def horizontal_link_count(shape):
    """Number of horizontal links."""
    assert len(shape) == 2
    return shape[0] * (shape[1] - 1)
e4d997cd668a75410e3fb208e7a200cbba3fb6bf
9,282
def m(x0: float, x1: float, y0: float, y1: float) -> float:
    """Simple gradient function.

    Parameters
    ----------
    x0 : float
        x co-ordinate at time 0.
    x1 : float
        x co-ordinate at time 1.
    y0 : float
        y co-ordinate at time 0.
    y1 : float
        y co-ordinate at time 1.

    Returns
    -------
    grad : float
        Gradient value.
    """
    grad = (y1 - y0) / (x1 - x0)
    return grad
d138dfedd1e381a575ff6f5108b8841470febbd7
9,284
def _get_disposable_app_filename(clientInfo):
    """Get name of file used to store creds."""
    return clientInfo.get('file', clientInfo['name'] + '.client_data.json')
7d6a67443cd8815ddfde3f69aae450d59f59a437
9,291
def _timestamp_from_record_tuple(record):
    """Extract timestamp from HBase tuple record."""
    return record[0]['timestamp']
de0ff6f12e14093a236cab651d4baae2299d2124
9,292
def computeIoUs(preds, truths):
    """Compute intersection over union for the predicted masks vs
    ground-truth masks.

    @preds and @truths must have the same length and both are iterables of
    numpy matrices of same dimensions.
    """
    # List to collect IoU for each pair
    IoUs = []

    # Iterate over the collections and compute IoUs
    for predicted, truth in zip(preds, truths):
        intersection = predicted * truth
        union = predicted + truth
        # Re-adjust union back to [0, 1] scale and return the result.
        union[union == 2] = 1
        IoUs.append(float(sum(intersection.flat)) / (sum(union.flat) or 1))

    return IoUs
208606710c07878bccf8cae0f3b95ce65cb4180a
9,300
def dict_to_cvode_stats_file(file_dict: dict, log_path: str) -> bool:
    """Turn a dictionary into a delphin cvode stats file.

    :param file_dict: Dictionary holding the information for the cvode stats file
    :param log_path: Path to where the cvode stats file should be written
    :return: True
    """
    file_obj = open(log_path + '/integrator_cvode_stats.tsv', 'w')
    file_obj.write(' Time [s]\t Steps\t RhsEvals\t LinSetups\t NIters\t'
                   ' NConvFails\t NErrFails\t Order\t StepSize [s]\n')

    for line_index in range(0, len(file_dict['time'])):
        # right-align each column to its fixed width
        time_string = "{:.10f}".format(file_dict['time'][line_index]).rjust(25)
        steps_string = str(file_dict['steps'][line_index]).rjust(10)
        rhs_string = str(file_dict['rhs_evaluations'][line_index]).rjust(10)
        lin_string = str(file_dict['lin_setups'][line_index]).rjust(10)
        iterations_string = str(file_dict['number_iterations'][line_index]).rjust(8)
        conversion_fails_string = str(file_dict['number_conversion_fails'][line_index]).rjust(11)
        error_fails_string = str(file_dict['number_error_fails'][line_index]).rjust(11)
        order_string = str(file_dict['order'][line_index]).rjust(6)
        step_size_string = "{:.6f}".format(file_dict['step_size'][line_index]).rjust(14)

        file_obj.write(time_string + '\t' + steps_string + '\t' + rhs_string + '\t'
                       + lin_string + '\t' + iterations_string + '\t'
                       + conversion_fails_string + '\t' + error_fails_string + '\t'
                       + order_string + '\t' + step_size_string + '\n')

    file_obj.close()
    return True
4b6d92ad610c47eed5b2e593980a74f617ed44f4
9,310
import re


def isNumber(test):
    """
    Test if the string is a valid number.
    Return the converted number, or None if the string is not a number.
    """
    try:
        test = str(test)
        if re.search(r'\.', test):
            try:
                return float(test)
            except ValueError:
                return None
        else:
            try:
                return int(test)
            except ValueError:
                return None
    except Exception:
        return None
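Typical behaviour:

    >>> isNumber('3.14')
    3.14
    >>> isNumber('12')
    12
    >>> isNumber('abc')  # returns None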
93f3afd1c3e8cefc64b1ff738e3f8336a1b8ffd6
9,311
def has_func(obj, fun):
    """Check if a class has the specified function:
    https://stackoverflow.com/a/5268474

    Args:
        obj: the class to check
        fun: specified function to check

    Returns:
        A bool to indicate if obj has function "fun"
    """
    check_fun = getattr(obj, fun, None)
    return callable(check_fun)
3284c1a30c3b74c93c1c34c102632beb99bf5576
9,318
import math


def distance(p0, p1):
    """Calculate the distance between two joints/3D-tuples in the XZ plane (2D)."""
    return math.sqrt((p0[0] - p1[0])**2 + (p0[2] - p1[2])**2)
02e1a1488c32f465f2a1817adb8dfbdb4ea26431
9,322
def unique_chunks(lst, n):
    """Return unique chunks of length n from lst."""
    if n < 1:
        return set()
    return {tuple(lst[i:i + n]) for i in range(0, len(lst), n)}
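Note that a trailing partial chunk is kept and duplicate chunks collapse:

    >>> unique_chunks([1, 2, 1, 2, 1], 2) == {(1, 2), (1,)}
    True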
e06758a4cb13e42394560e3fe2b4889a8e321af9
9,327
from pathlib import Path


def fixture_vcf_dir(fixtures_dir: Path) -> Path:
    """Return the path to the vcf fixtures directory."""
    return fixtures_dir / "vcfs"
7a77d40a34fc05b7acb20cc60c0e7343ffd4bfa8
9,329
def get_game_range_row_names(game_begin, game_end):
    """Get the row range containing the given games.

    Sample row name: g_0000000001_m001

    To capture the range of all moves in the two given games, the end row
    will need to go up to g_00..(N+1).

    Args:
        game_begin: an integer of the beginning game number.
        game_end: an integer of the ending game number, inclusive.

    Returns:
        The two string row numbers to pass to Bigtable as the row range.
    """
    row_fmt = 'g_{:0>10}_'
    return row_fmt.format(game_begin), row_fmt.format(game_end + 1)
700740131dbd497af8b80832a7ad11960ccc710f
9,332
import math


def score_word_count(count: int) -> float:
    """Score word frequency as log of count with min of 1.0."""
    return max(1.0, math.log(count))
e49febcac36653a3a188c0ec3edd9cca0c18b81a
9,333
import math


def choose_team_levels(num_teams, hackathon_level):
    """Calculate the average experience level per team and distribute any
    remaining difference among some of the teams evenly.

    Returns a list of team_levels
    """
    avg_team_level = math.floor(hackathon_level / num_teams)
    team_levels = []
    remainder = hackathon_level % num_teams
    remainder_per_team = math.ceil(remainder / num_teams)

    if remainder > 0:
        while remainder > 0:
            team_levels.append(avg_team_level + remainder_per_team)
            remainder -= remainder_per_team

    num_team_with_avg_level = num_teams - len(team_levels)
    teams_at_normal_level = [avg_team_level
                             for team in range(num_team_with_avg_level)]
    return team_levels + teams_at_normal_level
aaf372f00969da62b966a2a09aff64a188fbce82
9,336
def infer_time_unit(time_seconds_arr):
    """Determine the most appropriate time unit for an array of time
    durations specified in seconds.

    e.g. 5400 seconds => 'minutes', 36000 seconds => 'hours'
    """
    if len(time_seconds_arr) == 0:
        return 'hours'
    max_time_seconds = max(time_seconds_arr)
    if max_time_seconds <= 60 * 2:
        return 'seconds'
    elif max_time_seconds <= 60 * 60 * 2:
        return 'minutes'
    elif max_time_seconds <= 24 * 60 * 60 * 2:
        return 'hours'
    else:
        return 'days'
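Each unit is kept up to twice its natural span, so:

    >>> infer_time_unit([90])
    'seconds'
    >>> infer_time_unit([5400])
    'minutes'
    >>> infer_time_unit([36000])
    'hours'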
11f25a712d8d66e8546fea2f7e36309dcebbcc74
9,338
def flip_y(im):
    """Mirrors an image over the x axis."""
    source_pix = im.load()
    im = im.copy()
    dest_pix = im.load()
    width, height = im.size
    for i in range(width):
        for j in range(height):
            dest_pix[i, j] = source_pix[i, height - j - 1]
    return im
9ad00b2de3e628cc6dd441884103b9d2e3492333
9,343
import torch


def lower_matrix_to_vector(lower: torch.Tensor) -> torch.Tensor:
    """Convert a lower triangular matrix to a vector.

    Parameters
    ----------
    lower : torch.Tensor
        lower triangular matrix (or batch of matrices)

    Returns
    -------
    torch.Tensor
    """
    shape = lower.shape
    assert shape[-1] == shape[-2]
    lower_idx = torch.tril_indices(shape[-1], shape[-1])
    lower_flat = lower[..., lower_idx[0], lower_idx[1]]
    return lower_flat
e4fe825caf5926ce3219c4dd7720d1b7f180b998
9,344
def map_coords_to_scaled_float(coords, orig_size, new_size):
    """Map coordinates relative to the original 3-D image to coordinates
    corresponding to the re-scaled 3-D image, given the coordinates and the
    shapes of the original and "new" scaled images.

    Returns a floating-point coordinate center where the pixel at array
    coordinates (0, 0) has its center at (0.5, 0.5). Take the floor of the
    return value from this function to get back to indices.
    """
    if not all(
        isinstance(arg, (tuple, list, set))
        for arg in (coords, orig_size, new_size)
    ):
        raise TypeError(
            "`coords`, `orig_size` and `new_size` must be tuples corresponding to the image shape."
        )
    if not all(len(arg) == len(coords) for arg in (orig_size, new_size)):
        raise TypeError(
            "Number of dimensions in `coords` ({}), `orig_size` ({}), and `new_size` ({}) did not match.".format(
                len(coords), len(orig_size), len(new_size)
            )
        )

    ratio = lambda dim: float(new_size[dim]) / orig_size[dim]
    center = lambda s, dim: s[dim] / 2.0
    offset = lambda dim: (coords[dim] + 0.5) - center(orig_size, dim)
    new_index = lambda dim: (center(new_size, dim) + ratio(dim) * offset(dim))

    return tuple([new_index(dim) for dim in range(len(orig_size))])
f5e1e1523366a9e1e37f9d1a304d9deea8d53e00
9,346
def _structure_summary(structure):
    """Extract messages from the structure.

    Args:
        structure: a Pymatgen Structure object

    Returns:
        dict of the following messages:
            nsites (int): number of sites in the structure.
            is_ordered (bool): whether the structure is ordered or not.
            ...to be continued
    """
    return {"n_sites": len(structure.sites), "is_ordered": structure.is_ordered}
65fe88a01d53df7ab487ae1d1ab24a4c2c746477
9,347
import torch

# Assumed import: the flattened snippet had lost the line that resolves the
# nms op; upstream mmdetection (v1.x) imports it as below.
from mmdet.ops.nms import nms_wrapper


def multiclass_nms(multi_bboxes,
                   multi_scores,
                   score_thr,
                   nms_cfg,
                   max_num=-1,
                   score_factors=None):
    """NMS for multi-class bboxes.

    Args:
        multi_bboxes (Tensor): shape (n, #class*4) or (n, 4)
        multi_scores (Tensor): shape (n, #class), where the 0th column
            contains scores of the background class, but this will be ignored.
        score_thr (float): bbox threshold, bboxes with scores lower than it
            will not be considered.
        nms_cfg (dict): NMS config, e.g. {'type': 'nms', 'iou_thr': 0.5}
        max_num (int): if there are more than max_num bboxes after NMS,
            only top max_num will be kept.
        score_factors (Tensor): The factors multiplied to scores before
            applying NMS

    Returns:
        tuple: (bboxes, labels), tensors of shape (k, 5) and (k, 1). Labels
            are 0-based.
    """
    num_classes = multi_scores.shape[1]
    bboxes, labels = [], []
    nms_cfg_ = nms_cfg.copy()
    nms_type = nms_cfg_.pop('type', 'nms')
    # Resolve the op name to a callable; the bare string is not callable.
    nms_op = getattr(nms_wrapper, nms_type)
    for i in range(1, num_classes):
        cls_inds = multi_scores[:, i] > score_thr
        if not cls_inds.any():
            continue
        # get bboxes and scores of this class
        if multi_bboxes.shape[1] == 4:
            _bboxes = multi_bboxes[cls_inds, :]
        else:
            _bboxes = multi_bboxes[cls_inds, i * 4:(i + 1) * 4]
        _scores = multi_scores[cls_inds, i]
        if score_factors is not None:
            _scores *= score_factors[cls_inds]
        cls_dets = torch.cat([_bboxes, _scores[:, None]], dim=1)
        cls_dets, _ = nms_op(cls_dets, **nms_cfg_)
        cls_labels = multi_bboxes.new_full((cls_dets.shape[0], ),
                                           i - 1,
                                           dtype=torch.long)
        bboxes.append(cls_dets)
        labels.append(cls_labels)
    if bboxes:
        bboxes = torch.cat(bboxes)
        labels = torch.cat(labels)
        if bboxes.shape[0] > max_num:
            _, inds = bboxes[:, -1].sort(descending=True)
            inds = inds[:max_num]
            bboxes = bboxes[inds]
            labels = labels[inds]
    else:
        bboxes = multi_bboxes.new_zeros((0, 5))
        labels = multi_bboxes.new_zeros((0, ), dtype=torch.long)

    return bboxes, labels
f3152e30eda4286ecfedc1b3fa3cf922470e0ada
9,350
def borders(det):
    """Calculate the borders for an image.

    Parameters
    ----------
    det : int array
        Detected face

    Returns
    -------
    l : int list
        Coordinates for left bound of border
    r : int list
        Coordinates for right bound of border
    t : int list
        Coordinates for top bound of border
    b : int list
        Coordinates for bottom bound of border
    """
    l, r, t, b = det.left(), det.right(), det.top(), det.bottom()
    return l, r, t, b
d6358c88ee26e64b7b209d2f5f9725a5b3fad9ba
9,351
def get_line_row(lines, row):
    """
    - lines: (Array string), array of lines
    - row: int, >=0, the row index to grab

    RETURN: string; if row is greater than or equal to the length of lines,
    returns ''
    """
    if row < len(lines):
        return lines[row]
    return ''
f03f230b677fabb3c488c496dad7e35f875023fe
9,352
def filter_dict_null(d):
    """Filter null values from a dictionary, recursively."""
    if isinstance(d, dict):
        return dict(
            (k, filter_dict_null(v))
            for k, v in list(d.items())
            if filter_dict_null(v) is not None
        )
    elif isinstance(d, list):
        if len(d) > 0:
            return list(map(filter_dict_null, d))
        return None
    return d
13b0288f2e032d0e6ca115d02d6540bb8f8739b5
9,353
from typing import Union


def _get_reception_time_from_scene_dir_second(scene_dir_second: str) -> Union[str, None]:
    """If there is a time datum inside the `scene_dir_second` string, then
    return it. Otherwise, return None."""
    # second part of scene dir, it can be: `13_53_00`,
    # `13_53_00_ETC2`, `14_35_23_CB11_SIR18`, etc.
    second_part = scene_dir_second.split('_')

    # I need at least three elements to create the time datum
    if len(second_part) < 3:
        return None

    # get the first three elements from the list
    second_part = second_part[0:3]

    # iterate over the list to check if all elements are numbers.
    # if there is an element that is not a number, then return None
    for part in second_part:
        if not part.isdigit():
            return None

    # if all parts are numbers, then join them to create the time datum
    return ':'.join(second_part)
89cf95ed1f110c6641de4eae6ac8230d78a7b802
9,370
import json

import requests


def get_arc_servicedict(url):
    """Return a dict of service information for an ArcGIS REST service URL.

    Arguments
        url (String): An ArcGIS REST service URL, e.g.
            'http://services.slip.wa.gov.au/arcgis/rest/services/QC/MRWA_Public_Services/MapServer'
    """
    res = json.loads(requests.get(url + "?f=pjson").content)
    d = dict()
    d["layer_ids"] = [str(x['id']) for x in res["layers"]]
    d["supportedExtensions"] = res["supportedExtensions"]
    return d
80a1775d809c63ea34729c02ddcf98b8488fd825
9,371
import re


def escaped_split(inp_str, split_char):
    """Split inp_str on character split_char, but ignore it if escaped.

    Since the return value is used to write back to the intermediate data
    file, any escape characters in the input are retained in the output.

    :param inp_str: String to split
    :param split_char: Split character
    :return: List of splits
    """
    if len(split_char) > 1:
        raise ValueError('Expected split character. Found string!')
    out = re.sub(r'(\\.)|' + split_char,
                 lambda m: m.group(1) or '\n', inp_str,
                 len(inp_str)).split('\n')
    out = [x for x in out if x]
    return out
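An escaped separator survives the split:

    >>> escaped_split(r'a,b\,c', ',')
    ['a', 'b\\,c']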
13eaf77ffff52fdd6cfaa83ee08fc773f241be17
9,376
import torch


def construct_edge_feature_gather(feature, knn_inds):
    """Construct edge feature for each point (or regarded as a node) using
    torch.gather.

    Args:
        feature (torch.Tensor): point features, (batch_size, channels, num_nodes),
        knn_inds (torch.Tensor): indices of k-nearest neighbour, (batch_size, num_nodes, k)

    Returns:
        edge_feature: (batch_size, 2*channels, num_nodes, k)

    Notes:
        Pytorch Gather is 50x faster than advanced indexing, but needs 2x
        more memory. It is because it will allocate a tensor as large as
        expanded features during backward.
    """
    batch_size, channels, num_nodes = feature.shape
    k = knn_inds.size(-1)

    # CAUTION: torch.expand
    feature_central = feature.unsqueeze(3).expand(batch_size, channels, num_nodes, k)
    feature_expand = feature.unsqueeze(2).expand(batch_size, channels, num_nodes, num_nodes)
    knn_inds_expand = knn_inds.unsqueeze(1).expand(batch_size, channels, num_nodes, k)
    feature_neighbour = torch.gather(feature_expand, 3, knn_inds_expand)
    # (batch_size, 2 * channels, num_nodes, k)
    edge_feature = torch.cat((feature_central, feature_neighbour - feature_central), 1)

    return edge_feature
b49d26e0e7cee13952ff85f8f1f8075658fc391a
9,382
import re


def normalize_summary(summary):
    """Return normalized docstring summary."""
    # Remove newlines
    summary = re.sub(r'\s*\n\s*', ' ', summary.rstrip())

    # Add period at end of sentence
    if (
        summary
        and (summary[-1].isalnum() or summary[-1] in ['"', "'"])
        and (not summary.startswith('#'))
    ):
        summary += '.'

    return summary
002e72668e87d668c2d6df678092ac57fc2b1d37
9,388
import re


def count_characters(text, whites=False):
    """Get character count of a text.

    Args:
        whites: If True, whitespace characters are included in the count
    """
    if whites:
        return len(text)
    else:
        return len(re.sub(r"\s", "", text))
e4db9e873e800282cf7f2398272a8b4546fe171e
9,395
import re


def remove_html(raw_text):
    """Remove html tags."""
    text = str(raw_text)
    cleaner = re.compile('<.*?>')
    text = re.sub(cleaner, '', text)
    return text
397b49c052e055a71876d9883ab259f871b5015e
9,397
def pull_words(words_file, word_length):
    """Compile set of words, converted to lower case and matching length of
    start and end words.

    Args:
        words_file: str, name of the file containing all words
        word_length: int, length of the start/end words

    Returns:
        words_set: set, all possible interesting words
    """
    words_set = set()
    with open(words_file) as words:
        for word in words:
            word_ = word.strip().lower()
            if len(word_) == word_length and word_ not in words_set:
                words_set.add(word_)
    return words_set
cbecb29bd93177cb14a208e7e3a7bcee14f7c010
9,398
def vertices_vector_to_matrix(vertices):
    """vertices_vector_to_matrix(vertices) -> List[List[float]]

    PyPRT outputs the GeneratedModel vertex coordinates as a list. The list
    contains the x, y, z coordinates of all the vertices. This function
    converts the vertex list into a list of N vertex coordinates lists
    (with N, the number of geometry vertices).

    Parameters:
        vertices: List[float]

    Returns:
        List[List[float]]

    Example:
        ``[[-10.0, 0.0, 10.0], [-10.0, 0.0, 0.0], [10.0, 0.0, 0.0],
        [10.0, 0.0, 10.0]] = vertices_vector_to_matrix([-10.0, 0.0, 10.0,
        -10.0, 0.0, 0.0, 10.0, 0.0, 0.0, 10.0, 0.0, 10.0])``
    """
    vertices_as_matrix = []
    for count in range(0, int(len(vertices) / 3)):
        vector_per_pt = [vertices[count * 3],
                         vertices[count * 3 + 1],
                         vertices[count * 3 + 2]]
        vertices_as_matrix.append(vector_per_pt)
    return vertices_as_matrix
0d03a60f32ed722d089500840e1a2a2e645c20b4
9,399
import torch


def random_well_conditioned_matrix(*shape, dtype, device, mean=1.0, sigma=0.001):
    """Return a random rectangular matrix (batch of matrices) with singular
    values sampled from a Gaussian with mean `mean` and standard deviation
    `sigma`. The smaller the `sigma`, the better conditioned the output
    matrix is.
    """
    primitive_dtype = {
        torch.float: torch.float,
        torch.double: torch.double,
        torch.cfloat: torch.float,
        torch.cdouble: torch.double,
    }
    x = torch.rand(shape, dtype=dtype, device=device)
    m = x.size(-2)
    n = x.size(-1)
    u, _, vh = torch.linalg.svd(x, full_matrices=False)
    s = (torch.randn(*(shape[:-2] + (min(m, n),)),
                     dtype=primitive_dtype[dtype], device=device) * sigma + mean) \
        .sort(-1, descending=True).values.to(dtype)
    return (u * s.unsqueeze(-2)) @ vh
bd2d7e232ffcd2848b836e9187d32a00339477de
9,400
def get_index_str(n, i):
    """Convert an int `i` to a string, left-padded with zeros to the width
    of `n`.

    Parameters
    ----------
    n : int
        Order to put 0 if necessary.
    i : int
        The number to convert.

    Returns
    -------
    res : str
        The number as a string.

    Examples
    --------
    ```python
    get_index_str(100, 15)
    ```
    Out:
    ```
    '015'
    ```
    """
    if i < 0 or i > n:
        raise ValueError("0 <= i <= n is required")
    lm = len(str(n))
    res = str(i)
    while lm > len(res):
        res = "0" + res
    return res
e7b3561a49b447d1edec22da8cc86d2a702ec039
9,401
def get_ipsec_udp_key_status(
    self,
) -> dict:
    """Get IPSEC UDP key status for all appliances

    .. list-table::
        :header-rows: 1

        * - Swagger Section
          - Method
          - Endpoint
        * - ikeless
          - GET
          - /ikeless/seedStatus

    :return: Returns dictionary ikeless key status \n
        * keyword **<ne_pk>** (`dict`): Appliance key status object \n
            * keyword **hasActiveSeed** (`bool`): If appliance has the
              active key material
            * keyword **hasNewSeed** (`bool`): If appliance has the new
              key material
            * keyword **detail** (`str`): Detail of appliance seed status,
              including date stamp ID of active and new seed
    :rtype: dict
    """
    return self._get("/ikeless/seedStatus")
db5ac6fee37574987a023183f8416d40234ac4e4
9,402
import typing


def describe_services(
    ecs, cluster: str, services: typing.Set[str]
) -> typing.List[typing.Dict[str, typing.Any]]:
    """Wrap `ECS.Client.describe_services` to allow more than 10 services in
    one call.
    """
    result: typing.List[typing.Dict[str, typing.Any]] = []
    services_list = list(services)

    for i in range(0, len(services_list), 10):
        response = ecs.describe_services(
            cluster=cluster, services=services_list[i : i + 10]
        )
        result.extend(response["services"])

    return result
f585610480aa7c657974b6f3163888fe7e9b6a32
9,403
import re


def extract_page_nr(some_string):
    """Extract the page number from a string like `Seite 21`.

    :param some_string: e.g. `Seite 21`
    :type some_string: str
    :return: The page number e.g. `21`
    :rtype: str
    """
    page_nr = re.findall(r'\d+', some_string)
    if len(page_nr) > 0:
        return "-".join(page_nr)
    else:
        return some_string
6d39314de89c8f4bf4d931f2dc329fe394a10091
9,404
def intDictToStringDict(dictionary):
    """Convert dictionary keys into strings.

    :param dictionary:
    :return:
    """
    result = {}
    for k in dictionary:
        result[str(k)] = dictionary[k]
    return result
65e519f04433a5dfcb4d7ace9bad91d8e06db4e5
9,408
def get_page(paginated_list):
    """Take a github.PaginatedList.PaginatedList and then iterate through the
    pages to get all of its entries.

    Args:
        paginated_list (github.PaginatedList.PaginatedList): PyGithub
            paginated list object

    Returns:
        `list`: All entries in the paginated list
    """
    idx = 0
    _page_entries = paginated_list.get_page(idx)
    page_entries = []
    while _page_entries:
        page_entries.extend(_page_entries)
        idx += 1
        _page_entries = paginated_list.get_page(idx)
    return page_entries
0510537b20c18b6b1be5b10ca014e13be7a19a1f
9,413
import math


def prime(n):
    """Primality test by trial division."""
    if n == 2:
        return True
    elif n < 2 or n % 2 == 0:
        return False
    else:
        return not any(n % x == 0
                       for x in range(3, math.ceil(math.sqrt(n)) + 1, 2))
3504217a7e8149867ec16ddf9c54f4fac736d592
9,414
def average(numbers):
    """
    :param list[float] numbers: a list of numbers
    :returns: the average of the given number sequence. an empty list returns 0.
    :rtype: float
    """
    return float(sum(numbers)) / max(len(numbers), 1)
d86c6f24733d3032b82cb6c64c02eba37cc34a04
9,415
def general_pool_fn(x):
    """
    x[0]: function to call
    x[1] to x[n]: arguments of the function
    """
    return x[0](*x[1:])
d398378d3d1671f0e58bff2bc8737ff07da0c3e3
9,417
def _EraseTombstone(device, tombstone_file):
    """Deletes a tombstone from the device.

    Args:
        device: An instance of DeviceUtils.
        tombstone_file: the tombstone to delete.
    """
    return device.RunShellCommand(
        'rm /data/tombstones/' + tombstone_file, root=True)
00e6f316062785d7465f501ea743a2dc94864aef
9,419
def shorten_class(class_name: str) -> str:
    """Returns a shortened version of the fully qualified class name."""
    return class_name.replace('org.chromium.', '.').replace('chrome.browser.', 'c.b.')
2064e6e0dc159bc130f84ce4a830857455d12ba4
9,421
def weight_function(run_params, displacement_norm):
    """Determine the motion-dependent prediction weight of a given supporter
    point.

    This method determines the weight to apply to each supporter point when
    using it for prediction of a target point based on the norm of its
    displacement vector. The larger the displacement, the higher weight the
    supporter point receives; the weight is a linear function of the
    displacement norm, with an offset of 1.

    Displacement-based weighting is used to give less importance to
    supporter points that are part of the "background" and have little
    correlation with the movement of the muscle fascia.

    Args:
        run_params (ParamValues): values of parameters used in tracking,
            including scalar alpha
        displacement_norm (float): L2 norm of relevant supporter point's
            displacement vector

    Returns:
        float weighting applied to supporter point when tracking target point
    """
    alpha = run_params.displacement_weight
    return 1 + (alpha * displacement_norm)
2fdea32511ae8b4cedd47e79d7f8517a08a6b457
9,422
def add_api_config_to_queries(generated_query_strings, search_engines):
    """Merge the two parameters and return a list of dicts that include the
    api config.

    If only 1 API key is provided, it is assumed this is valid for many
    searches and is used for all queries. If more than 1 is provided, then
    the number of keys provided needs to match the number of queries.

    Args:
        generated_query_strings: The output from the generate_query_strings
            function.
        search_engines: The search engines list that is found in the
            api_config file. See the documentation for usage guidelines
            (http://coast_search.readthedocs.io/).

    Returns:
        result_list: Updated list of query data now including search
            engine/api info
    """
    if len(search_engines) == 1:
        se = search_engines[0]
        for query_object in generated_query_strings:
            query_object["se_name"] = se["name"]
            query_object["api_key"] = se["api_key"]
            query_object["search_engine_id"] = se["search_engine_id"]
    elif len(search_engines) == len(generated_query_strings):
        for i in range(0, len(search_engines)):
            query_object = generated_query_strings[i]
            se = search_engines[i]
            query_object["se_name"] = se["name"]
            query_object["api_key"] = se["api_key"]
            query_object["search_engine_id"] = se["search_engine_id"]
    else:
        raise Exception("Invalid number of API keys.")
    return generated_query_strings
209b14e98c2cb339f958fc7dfe456a4a40876c8c
9,424
from typing import Any, Dict

import requests


def pull_astronaut_list(url: str = 'http://api.open-notify.org/astros.json') -> Dict[str, Any]:
    """Pull a list of astronauts via API. Defaults to open-notify's API.

    Args:
        url: the URL to pull data from.

    Returns:
        A dict containing the astronaut count and names.
    """
    data = requests.get(url).json()
    return data
d008cd1d62a435086dbd8dc08baaa5323298f11c
9,428
def get_widget_for_attr(traits_ui, attr_name):
    """Return the Qt widget in the UI which displays the attribute specified."""
    x_editor = traits_ui.get_editors(attr_name)[0]
    qt_widget = x_editor.control
    return qt_widget
2bb2959963734bee48d067f41425808412bd2421
9,429
def all(*args, span=None):
    """Create a new expression of the intersection of all conditions in the
    arguments

    Parameters
    ----------
    args : list
        List of symbolic boolean expressions

    span : Optional[Span]
        The location of this operator in the source code.

    Returns
    -------
    expr: Expr
        Expression
    """
    if not args:
        raise ValueError("all must take at least 1 argument")
    if len(args) == 1:
        return args[0]
    val = _ffi_api._OpAnd(args[0], args[1], span)  # type: ignore
    for i in range(2, len(args)):
        val = _ffi_api._OpAnd(val, args[i], span)  # type: ignore
    return val
f0cebfb241c10c2d53c58a8b4fb186e9d65a1b7a
9,430
def update_cache_bykey(cache_list, new_list, key='id'):
    """Given a cache list of dicts, update the cache with a 2nd list of
    dicts by a specific key in the dict.

    :param cache_list: List of dicts
    :param new_list: New list of dicts to update by
    :param key: Optional, key to use as the identifier to update new
        entries with
    :return: Updated list of dicts
    """
    # create a cache dict keyed by id.
    cache_bykey = {entry[key]: entry for entry in cache_list if entry.get(key)}
    # create a new dict keyed by id.
    new_bykey = {entry[key]: entry for entry in new_list if entry.get(key)}
    # combine and update cache into a 3rd dict
    combined_bykey = {**cache_bykey, **new_bykey}
    # return a list of the updated dict.
    return [value for key, value in combined_bykey.items()]
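A small merge example; the entry with id 2 is overwritten by the new list:

    >>> cache = [{'id': 1, 'v': 'a'}, {'id': 2, 'v': 'b'}]
    >>> new = [{'id': 2, 'v': 'B'}, {'id': 3, 'v': 'c'}]
    >>> update_cache_bykey(cache, new)
    [{'id': 1, 'v': 'a'}, {'id': 2, 'v': 'B'}, {'id': 3, 'v': 'c'}]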
b077a1c40cbf0a8848ff9e017a644c20e1d25199
9,432
def find_factors(n):
    """Find the factors of a number, returned as a sorted list."""
    factList = {1, n}
    for i in range(2, int(n ** 0.5) + 1):
        if n % i == 0:
            factList.add(i)
            factList.add(n // i)
    return sorted(factList)
0b8992bfe81bfd49c738b49380ceb0c8e7155b3f
9,437
def max_key(d):
    """Return the maximum key in an integer-keyed dictionary.

    Args:
        d (dict): The integer-keyed dictionary.

    Returns:
        int: The maximum key.
    """
    # Parameter renamed from `dict` to `d` to avoid shadowing the builtin.
    # Keys are assumed non-negative since the running maximum starts at 0.
    output = 0
    for key, value in d.items():
        output = max(output, int(key))
    return output
059a26fa690aaca2df2b0a7e251c206aa5e7276b
9,442
def polaritySanitizer(polarity):
    """Sanitize input polarity values.

    Renames the, case-insensitive, values 'positive', 'pos', or '+' to
    'positive' and 'negative', 'neg', or '-' to 'negative'. Errors on an
    unrecognized polarity value.

    Arguments:
        polarity (str): unsanitized polarity type
    """
    if polarity.lower() in ["positive", "pos", "+"]:
        polarity = "positive"
    elif polarity.lower() in ["negative", "neg", "-"]:
        polarity = "negative"
    else:
        raise ValueError(f"{polarity} is not recognized as a polarity type.")
    return polarity
e328345ea48a9441f9ab323fd6a3ff5ca06f07d5
9,447
def hash_table_size(item, tablesize):
    """A hashing technique that involves:

    1. Converting the characters in a string to a list of ordinal values
    2. Getting the sum of the list
    3. Getting the remainder by doing a modulo using tablesize

    item - string
    tablesize - int
    """
    ordinal_list = [ord(i) for i in item]
    return sum(ordinal_list) % tablesize
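A worked example: ord('c') + ord('a') + ord('t') = 99 + 97 + 116 = 312, and 312 % 11 = 4:

    >>> hash_table_size('cat', 11)
    4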
cf47a023c35693681485331878dfd3eb9164a7bf
9,448
def validate_testapps(apis, api_configs):
    """Ensures the chosen apis are valid, based on the config."""
    if "all" in apis:
        return [key for key in api_configs]

    for api in apis:
        if api not in api_configs:
            raise RuntimeError("Testapp given as flag not found in config: %s" % api)
    return apis
4726ae4e28bb57e2fa812cd0fa3721d38ba1103a
9,454
def _dol_to_lod(dol):
    """Convert a dict-of-lists into a list-of-dicts.

    Reverse transformation of :func:`_lod_to_dol()`.
    """
    keys = list(dol.keys())
    lod = []
    for i in range(len(dol[keys[0]])):
        lod.append({k: v[i] for k, v in dol.items()})
    return lod
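For example:

    >>> _dol_to_lod({'a': [1, 2], 'b': [3, 4]})
    [{'a': 1, 'b': 3}, {'a': 2, 'b': 4}]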
9e8a98d2502797ae27cae88ab2a0ec7fda4aff34
9,457
import uuid


def get_temp_entity_table_name() -> str:
    """Returns a random table name for uploading the entity dataframe."""
    return "feast_entity_df_" + uuid.uuid4().hex
62d8b0ca2d58fb813db88caa753f93e412c62ad0
9,462