content
stringlengths
39
14.9k
sha1
stringlengths
40
40
id
int64
0
710k
from typing import Counter


def remove_unpopulated_classes(_df, target_column, threshold):
    """Drop rows whose label occurs fewer than ``threshold`` times.

    :param _df: The dataframe to filter
    :param target_column: The target column with labels
    :param threshold: the number of appearances a label must respect
    :return: The filtered dataframe
    """
    label_counts = Counter(_df[target_column])
    popular = [label for label, seen in label_counts.items() if seen >= threshold]
    return _df[_df[target_column].isin(popular)]
2ed31cfd3883a3856501dabff935028824141181
706,713
def check_all_flash(matrix_2d):
    """
    Check if all octopuses flashed, i.e. every digit in the grid is 0.

    Bug fix: the original returned True as soon as a NON-zero digit was
    found (and False otherwise), which is the opposite of what the name
    and docstring promise.

    :param matrix_2d: 2D matrix of digits
    :return: True when every digit is 0, False otherwise
    """
    for line in matrix_2d:
        for digit in line:
            if digit != 0:
                # Found an octopus that did not flash.
                return False
    return True
9dca0174cd0272773e9b9330977bd3fac86f413a
706,717
def _BreadthFirstSearch(to_visit, children, visited_key=lambda x: x):
  """Runs breadth first search starting from the nodes in |to_visit|

  Args:
    to_visit: the starting nodes
    children: a function which takes a node and returns the nodes adjacent
      to it
    visited_key: a function for deduplicating node visits. Defaults to the
      identity function (lambda x: x)

  Returns:
    A list of nodes which are reachable from any node in |to_visit| by
    calling |children| any number of times.
  """
  queue = list(to_visit)
  seen = {visited_key(node) for node in queue}
  # The queue doubles as the result list; appending while iterating makes
  # this a classic BFS worklist.
  for node in queue:
    for child in children(node):
      child_key = visited_key(child)
      if child_key in seen:
        continue
      seen.add(child_key)
      queue.append(child)
  return queue
1c7153f61af81bb4bd9a06e0213bfcee4aab5cb8
706,719
def get_index_freq(freqs, fmin, fmax):
    """Get the indices of the freq between fmin and fmax in freqs """
    # Count how many entries fall at or below each bound, then clamp the
    # derived indices into the valid range of the list.
    below_min = sum(1 for freq in freqs if freq <= fmin)
    below_max = sum(1 for freq in freqs if freq <= fmax)
    f_index_min = max(0, below_min - 1)
    f_index_max = min(len(freqs) - 1, below_max)
    return f_index_min, f_index_max
f3e014626d763f18ce6b661cabeb244bfabe9782
706,727
import random


def superpixel_colors(
    num_pix: int = 1536,
    schema: str = 'rgb',
    interleave: int = 1,
    stroke: str = '',
) -> list:
    """
    Generate color (attribute) list for superpixel SVG paths

    Bug fix: in the 'random' schema with a stroke, the stroke text was
    formatted INSIDE the quoted fill value ('fill="#xxxxxx stroke..."'),
    unlike the 'rgb' branch which correctly appends it after the closing
    quote. The random branch now matches the rgb branch.

    Parameters
    ----------
    num_pix : int
        Number of super pixels to account for (default = 1536)
    schema : str
        Either of 'rgb' or 'random'
    interleave : int
        RGB interleave value (default = 1)
    stroke : str
        String that is inserted into ever attribute at the end, e.g.
        to account for a stroke, such as 'stroke="#808080"'. Please
        note that the entire tag=value (pairs) must be given!

    Returns
    -------
    colors : list
        List of attributes suitable for superpixel_outlines (SVG)
    """
    if schema not in ['random', 'rgb']:
        raise ValueError('invalid schema requested.')
    colors = [''] * num_pix
    if schema == 'rgb':
        # Encode the (interleaved) pixel index in the 24-bit color value,
        # low byte first.
        for idx in range(num_pix):
            val = interleave * idx
            fill = 'fill="#{0:02x}{1:02x}{2:02x}"'.format(
                val % 256, (val // 256) % 256, (val // 65536) % 256)
            colors[idx] = fill + ' ' + stroke if stroke else fill
    else:
        for idx in range(num_pix):
            fill = 'fill="#{0:06x}"'.format(random.randrange(16777216))
            colors[idx] = fill + ' ' + stroke if stroke else fill
    return colors
7a574b48dff30126052c2acd5d06e01a9f4a9af0
706,729
def load_sentences(filename):
    """give us a list of sentences where each sentence is a list of tokens.
    Assumes the input file is one sentence per line, pre-tokenized."""
    with open(filename) as infile:
        return [line.strip().split() for line in infile]
6a4c458f9a0d9b17eaa38c38570dacc4c40e86c0
706,735
def tar_cat(tar, path):
    """
    Reads file and returns content as bytes
    """
    member = tar.getmember(path)
    with tar.extractfile(member) as handle:
        return handle.read()
f07f00156c34bd60eea7fcae5d923ea9f1650f6f
706,738
def __get_base_name(input_path):
    """
    /foo/bar/test/folder/image_label.ext --> test/folder/image_label.ext
    """
    # Keep only the last three path segments.
    segments = input_path.split('/')
    return '/'.join(segments[-3:])
5df2ef909f4b570cf6b6224031ad705d16ffff42
706,739
def unf_gas_density_kgm3(t_K, p_MPaa, gamma_gas, z):
    """
    Real-gas equation of state solved for density.

    :param t_K: temperature
    :param p_MPaa: pressure
    :param gamma_gas: specific gas density by air
    :param z: z-factor
    :return: gas density
    """
    # Molar mass from specific gravity relative to air (0.029 kg/mol).
    molar_mass = gamma_gas * 0.029
    pressure_Pa = 10 ** 6 * p_MPaa
    return pressure_Pa * molar_mass / (z * 8.31 * t_K)
6e41802367bbe70ab505ae5db89ee3e9a32e7d7c
706,741
def scale(value, upper, lower, min_, max_):
    """Scales value between upper and lower values, depending on the given
    minimun and maximum value.
    """
    span = float(max_ - min_)
    offset = (lower - upper) * float(value - min_)
    return offset / span + upper
3e13c80b765cffb1e75a6856d343bd9a88c353e9
706,743
def Flatten(nmap_list):
  """Flattens every `.NestedMap` in nmap_list and concatenate them."""
  flattened = []
  for nmap in nmap_list:
    flattened.extend(nmap.Flatten())
  return flattened
c630869b725d69338830e1a14ef920d6d1e87ade
706,744
def left_index_iter(shape):
    """Iterator for the left boundary indices of a structured grid."""
    n_cells = shape[0] * shape[1]
    # Stepping by the row length yields the first index of each row.
    return range(0, n_cells, shape[1])
c7da6f5de48d0446cb0729593d3dc0eb95f5ab9a
706,745
def add_numbers(a, b):
    """Sums the given numbers.

    :param int a: The first number.
    :param int b: The second number.
    :return: The sum of the given numbers.

    >>> add_numbers(1, 2)
    3
    >>> add_numbers(50, -8)
    42
    """
    total = a + b
    return total
7d9a0c26618a2aee5a8bbff6a65e315c33594fde
706,746
def convert_acl_to_iam_policy(acl):
  """Converts the legacy ACL format to an IAM Policy proto."""
  readers = acl.get('readers', [])
  # Public readability is expressed as the special 'allUsers' member.
  if acl.get('all_users_can_read', False):
    readers.append('allUsers')
  role_members = [
      ('roles/owner', acl.get('owners', [])),
      ('roles/viewer', readers),
      ('roles/editor', acl.get('writers', [])),
  ]
  bindings = [{'role': role, 'members': members}
              for role, members in role_members if members]
  return {'bindings': bindings}
990cdb6a51a696cf2b7825af94cf4265b2229be9
706,748
import re


def convert_to_snake_case(string: str) -> str:
    """Helper function to convert column names into snake case. Takes a
    string of any sort and makes conversions to snake case, replacing
    double-underscores with single underscores."""
    # Break "XxxYyy" boundaries first, then "xX"/"0X" boundaries.
    step_one = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', string)
    step_two = re.sub('([a-z0-9])([A-Z])', r'\1_\2', step_one).lower()
    return step_two.replace('__', '_')
2a8de69a6915e87e46582a1af7a7897ff6fd97ce
706,750
def WHo_mt(dist, sigma):
    """
    Speed Accuracy model for generating finger movement time.

    Removes the unused locals ``x_min``/``x_max`` from the original;
    the computation is unchanged.

    :param dist: euclidian distance between points.
    :param sigma: speed-accuracy trade-off variance.
    :return: mt: movement time.
    """
    x0 = 0.092       # constant time offset (s)
    y0 = 0.0018      # baseline variance offset
    alpha = 0.6      # speed-accuracy trade-off exponent
    k_alpha = 0.12   # model scaling constant
    # Avoid division by zero for coincident points.
    if dist == 0:
        dist = 0.0000001
    mt = pow((k_alpha * pow(((sigma - y0) / dist), (alpha - 1))), 1 / alpha) + x0
    return mt
36d8b7e913df658b52f1f03617d0b9817091d0ef
706,755
def find_next_sibling_position(element, tag_type):
    """ Gets current elements next sibling's (chosen by provided tag_type) actual
    character position in html document
    :param element: Whose sibling to look for, type: An object of class bs4.Tag
    :param tag_type: sibling tag's type (e.g. p, h2, div, span etc. ), type: A string
    :return: An Integer specifying character pos. in html, infinite when no sibling is found
    """
    sibling = element.find_next_sibling(tag_type)
    if sibling is None:
        # No such sibling: report an "infinitely late" position.
        return float("inf")
    return sibling.sourcepos
9b912fd9b7d30e81d6b4c2fec0e0573017b51a83
706,756
def chunks(l, n):
    """ Split list in chunks - useful for controlling memory usage """
    size = max(1, n)
    return [l[start:start + size] for start in range(0, len(l), size)]
d878aeb50bd42c9f5a2060f4bb2747aecb1a3b58
706,758
def FilterKeptAttachments(
    is_description, kept_attachments, comments, approval_id):
  """Filter kept attachments to be a subset of last description's attachments.

  Args:
    is_description: bool, if the comment is a change to the issue description.
    kept_attachments: list of ints with the attachment ids for attachments
      kept from previous descriptions, if the comment is a change to the
      issue description.
    comments: list of IssueComment PBs for the issue we want to edit.
    approval_id: int id of the APPROVAL_TYPE fielddef, if we're editing an
      approval description, or None otherwise.

  Returns:
    A list of kept_attachment ids that are a subset of the last description.
  """
  if not is_description:
    return None

  # Walk the comments newest-first to find the most recent description
  # change for this approval (or the issue itself when approval_id is None).
  last_description_ids = set()
  for comment in reversed(comments):
    if comment.is_description and comment.approval_id == approval_id:
      last_description_ids = {a.attachment_id for a in comment.attachments}
      break

  return [aid for aid in kept_attachments if aid in last_description_ids]
89732832db557835a5dea1ef10229bfdd809d304
706,759
import logging


def lookup_cpe(vendor, product, cpe_type, cpe_table, remap):
    """Identify the correct vendor and product values for a CPE

    This function attempts to determine the correct CPE using vendor and
    product values supplied by the caller as well as a remapping dictionary
    for mapping these values to more correct values used by NIST.

    For example, the remapping might tell us that a value of 'alpine' for
    the vendor string should be 'aplinelinux' instead, or for product
    'solaris' should be 'sunos'.

    This function should only emit values seen in the official NIST CPE list
    which is provided to it in cpe_table.

    Lookup priority:
        1. Original vendor / product
        2. Original vendor / remap product
        3. Remap vendor / original product
        4. Remap vendor / remap product

    Args:
        vendor (str): vendor name
        product (str): product name
        cpe_type (str): CPE type - o, a, h, etc.
        cpe_table (dict): dict containing the official NIST CPE data
        remap (dict): dict containing the remapping values
    Returns:
        success, vendor, product
    """
    if (
        vendor in cpe_table[cpe_type]
        and product in cpe_table[cpe_type][vendor]
    ):
        # Hot path, success with original values
        return True, vendor, product

    # Everything else depends on a remap of some sort.
    # get the remappings for this one vendor string.
    vendor_remap = remap.get(vendor, None)

    if vendor_remap:
        # If we have product remappings, work that angle next
        possible_product = None
        if (
            vendor_remap.get('products', None)
            and product in vendor_remap['products']
        ):
            possible_product = vendor_remap['products'][product]

        if (vendor in cpe_table[cpe_type]
                and possible_product
                and possible_product in cpe_table[cpe_type][vendor]):
            # Found original vendor, remap product
            return True, vendor, possible_product

        # Start working the process to find a match with a remapped vendor name
        if vendor_remap.get('vendor', None):
            new_vendor = vendor_remap['vendor']

            if new_vendor in cpe_table[cpe_type]:
                if product in cpe_table[cpe_type][new_vendor]:
                    # Found remap vendor, original product
                    return True, new_vendor, product

                if possible_product and possible_product in cpe_table[cpe_type][new_vendor]:
                    # Found remap vendor, remap product
                    return True, new_vendor, possible_product

    # No combination of original/remapped names exists in the NIST table.
    logging.error("Product %s from vendor %s invalid for CPE %s and no mapping",
                  product, vendor, cpe_type)
    return False, None, None
5a6e2e735daa50d3d2a19022db002ebfc647335c
706,761
def formalize_rules(list_rules):
    """ Gives an list of rules where facts are separeted by coma.
        Returns string with rules in convinient form (such as 'If' and 'Then'
        words, etc.).
    """
    text = ''
    for rule in list_rules:
        # Drop empty fragments produced by stray commas.
        facts = [fact for fact in rule.split(',') if fact]
        text += 'If %s,\n' % facts[0]
        for middle_fact in facts[1:-1]:
            text += ' %s,\n' % middle_fact
        text += 'Then: %s.\n' % facts[-1]
    return text
d8fbb024f38ae097efa42f95efe6b5d3b5adbd71
706,764
def strip_spectral_type(series, return_mask=False):
    """
    Strip spectral type from series of string

    Args:
        series (pd.Series): series of object names (strings)
        return_mask (bool): returns boolean mask True where there is a type

    Returns:
        no_type (pd.Series): series without spectral types
        type_mask (pd.Series): boolean mask where type is given
    """
    # True for names beginning with a bracketed spectral class, e.g. "(G) ...".
    type_mask = series.str.match('\\([OBAFGKM]\\)')
    no_type = series.copy()
    # NOTE(review): slices off 4 characters although the regex matches only
    # 3 ("(X)") — presumably the prefix includes a trailing space, "(X) ".
    # TODO confirm against the data.
    no_type[type_mask] = series[type_mask].str.slice(start=4)
    return (no_type, type_mask) if return_mask else no_type
65b91749742b229637819582b1158554b1a457ea
706,777
def expand_groups(node_id, groups):
    """
    node_id: a node ID that may be a group
    groups: store group IDs and list of sub-ids
    return value: a list that contains all group IDs deconvoluted
    """
    # Leaf node: nothing to expand.
    if node_id not in groups:
        return [node_id]
    expanded = []
    for member_id in groups[node_id]:
        # Members may themselves be groups, so recurse.
        expanded.extend(expand_groups(member_id, groups))
    return expanded
4c4b9c569a85396f201c589635b6ecea3807ddc2
706,778
def sw_update_opts_w_name_db_model_to_dict(sw_update_opts, subcloud_name):
    """Convert sw update options db model plus subcloud name to dictionary."""
    return {
        "id": sw_update_opts.id,
        "name": subcloud_name,
        "subcloud-id": sw_update_opts.subcloud_id,
        "storage-apply-type": sw_update_opts.storage_apply_type,
        "compute-apply-type": sw_update_opts.compute_apply_type,
        "max-parallel-computes": sw_update_opts.max_parallel_computes,
        "alarm-restriction-type": sw_update_opts.alarm_restriction_type,
        "default-instance-action": sw_update_opts.default_instance_action,
        "created-at": sw_update_opts.created_at,
        "updated-at": sw_update_opts.updated_at,
    }
c9c1703d9e4d0b69920d3ab06e5bf19fbb622103
706,780
from bs4 import BeautifulSoup
import re


def scrape_urls(html_text, pattern):
    """Extract URLs from raw html based on regex pattern"""
    soup = BeautifulSoup(html_text, "html.parser")
    hrefs = [anchor.get("href") for anchor in soup.find_all("a")]
    # Keep only hrefs whose start matches the caller's pattern.
    return [href for href in hrefs if re.match(pattern, href) is not None]
dfba40df7894db91575b51a82d89fef0f824d362
706,786
def _gen_parabola(phase: float, start: float, mid: float, end: float) -> float:
  """Gets a point on a parabola y = a x^2 + b x + c.

  The Parabola is determined by three points (0, start), (0.5, mid), (1, end) in
  the plane.

  Args:
    phase: Normalized to [0, 1]. A point on the x-axis of the parabola.
    start: The y value at x == 0.
    mid: The y value at x == 0.5.
    end: The y value at x == 1.

  Returns:
    The y value at x == phase.
  """
  half = 0.5
  rise_to_mid = mid - start
  rise_to_end = end - start
  # Shared denominator from solving the 3-point system.
  denom = half ** 2 - half
  coef_a = (rise_to_mid - rise_to_end * half) / denom
  coef_b = (rise_to_end * half ** 2 - rise_to_mid) / denom
  coef_c = start
  return coef_a * phase ** 2 + coef_b * phase + coef_c
bdd808339e808a26dd1a4bf22552a1d32244bb02
706,789
def pkgdir(tmpdir, monkeypatch):
    """
    temp directory fixture containing a readable/writable ./debian/changelog.
    """
    # NOTE(review): reads like a pytest fixture (tmpdir/monkeypatch
    # parameters) — the @pytest.fixture decorator is not visible in this
    # chunk; confirm.
    cfile = tmpdir.mkdir('debian').join('changelog')
    # Two-entry Debian changelog used as test data.
    text = """
testpkg (1.1.0-1) stable; urgency=medium

  * update to 1.1.0
  * other rad packaging updates
  * even more cool packaging updates that take a lot of text to describe so
    the change wraps on multiple lines

 -- Ken Dreyer <[email protected]>  Tue, 06 Jun 2017 14:46:37 -0600

testpkg (1.0.0-2redhat1) stable; urgency=medium

  * update to 1.0.0 (rhbz#123)

 -- Ken Dreyer <[email protected]>  Mon, 05 Jun 2017 13:45:36 -0600
""".lstrip("\n")
    cfile.write(text)
    # Run the test from inside the temp dir so ./debian/changelog resolves.
    monkeypatch.chdir(tmpdir)
    return tmpdir
0717aba1d5181e48eb11fa1e91b72933cda1af14
706,790
import calendar


def validate_days(year, month, day):
    """validate no of days in given month and year

    >>> validate_days(2012, 8, 31)
    31
    >>> validate_days(2012, 8, 32)
    31
    """
    # monthrange returns (weekday of first day, number of days in month).
    days_in_month = calendar.monthrange(year, month)[1]
    return min(day, days_in_month)
7499dc9654ec9ffd7f534cf27444a3236dd82e81
706,793
def get_targets(args):
    """
    Gets the list of targets for cmake and kernel/build.sh

    :param args: The args variable generated by parse_parameters
    :return: A string of targets suitable for cmake or kernel/build.sh
    """
    # Explicit user-requested targets win; otherwise build everything for a
    # full toolchain, or fall back to the default architecture list.
    if args.targets:
        return args.targets
    if args.full_toolchain:
        return "all"
    return "AArch64;ARM;BPF;Hexagon;Mips;PowerPC;RISCV;SystemZ;X86"
81eb31fe416303bc7e881ec2c10cfeeea4fdab05
706,795
def _format_warning(message, category, filename, lineno, line=None):  # noqa: U100, E501
    """
    Minimal one-line formatter for warnings issued by ProPlot; the category,
    filename, and line arguments are accepted for signature compatibility
    with `warnings.showwarning` but deliberately ignored.
    """
    return f'(unknown):{lineno}: ProPlotWarning: {message}\n'
f5709df0a84d9479d6b895dccb3eae8292791f74
706,796
def buscaBinariaIterativa(alvo, array):
    """Iterative binary search.

    Returns the index at which the target element is stored in the sorted
    collection, or -1 when the element is not present.

    Parameters
    ----------
    alvo : ?
        Element whose index is being searched.
    array : list
        Sorted list in which the element's index must be identified.

    Return
    ------
    index : int
        Index where the target element is stored, or -1 if absent.
    """
    low, high = 0, len(array) - 1
    while low <= high:
        middle = (low + high) // 2
        if array[middle] == alvo:
            return middle
        if array[middle] < alvo:
            low = middle + 1
        else:
            high = middle - 1
    return -1
e74fed0781b3c1bed7f5f57713a06c58bcbde107
706,803
from pathlib import Path
import json


def get_reference_data(fname):
    """
    Load JSON reference data.

    :param fname: Filename without extension.
    :type fname: str
    """
    # Reference files live next to this module under reference/data/.
    base_dir = Path(__file__).resolve().parent
    data_path = base_dir / 'reference' / 'data' / (fname + '.json')
    with data_path.open() as handle:
        return json.load(handle)
73880586393ce9463a356d69880f2f285058637f
706,807
import struct


def little_endian_uint32(i):
    """Return the 32 bit unsigned integer little-endian representation of i"""
    # Pack explicitly little-endian, then reinterpret in native byte order.
    packed = struct.pack('<I', i)
    (value,) = struct.unpack('=I', packed)
    return value
07f72baaf8f7143c732fd5b9e56b0b7d02d531bd
706,808
def new_automation_jobs(issues): """ :param issues: issues object pulled from Redmine API :return: returns a new subset of issues that are Status: NEW and match a term in AUTOMATOR_KEYWORDS) """ new_jobs = {} for issue in issues: # Only new issues if issue.status.name == 'New': # Strip whitespace and make lowercase ('subject' is the job type i.e. Diversitree) subject = issue.subject.lower().replace(' ', '') # Check for presence of an automator keyword in subject line if subject == 'iridaretrieve': new_jobs[issue] = subject return new_jobs
74c9c96aeeea1d15384d617c266daa4d49f3a203
706,809
def _filter_out_variables_not_in_dataframe(X, variables): """Filter out variables that are not present in the dataframe. Function removes variables that the user defines in the argument `variables` but that are not present in the input dataframe. Useful when ussing several feature selection procedures in a row. The dataframe input to the first selection algorithm likely contains more variables than the input dataframe to subsequent selection algorithms, and it is not possible a priori, to say which variable will be dropped. Parameters ---------- X: pandas DataFrame variables: string, int or list of (strings or int). Returns ------- filtered_variables: List of variables present in `variables` and in the input dataframe. """ # When variables is not defined, keep it like this and return None. if variables is None: return None # If an integer or a string is provided, convert to a list. if not isinstance(variables, list): variables = [variables] # Filter out elements of variables that are not in the dataframe. filtered_variables = [var for var in variables if var in X.columns] # Raise an error if no column is left to work with. if len(filtered_variables) == 0: raise ValueError( "After filtering no variable remaining. At least 1 is required." ) return filtered_variables
63b4cce75741a5d246f40c5b88cfebaf818b3482
706,814
import re


def process_ref(paper_id):
    """Attempt to extract arxiv id from a string"""
    # Order matters: strip the URL prefix and .pdf suffix before the
    # version and the leading "arXiv:" decoration.
    cleaned = re.sub("https?://arxiv\.org/(abs|pdf|ps)/", "", paper_id)
    cleaned = re.sub("\.pdf$", "", cleaned)
    # strip version
    cleaned = re.sub("v[0-9]+$", "", cleaned)
    # remove leading arxiv, i.e., such that paper_id=' arXiv: 2001.1234' is
    # still valid
    cleaned = re.sub("^\s*arxiv[:\- ]", "", cleaned, flags=re.IGNORECASE)
    return cleaned
a1c817f1ae7b211973efd6c201b5c13e1a91b57b
706,817
def del_none(dictionary):
    """
    Recursively delete from the dictionary all entries which values are None.

    Args:
        dictionary (dict): input dictionary
    Returns:
        dict: output dictionary
    Note:
        This function changes the input parameter in place.
    """
    # Snapshot the keys so deletion during the walk is safe.
    for key in list(dictionary.keys()):
        value = dictionary[key]
        if value is None:
            del dictionary[key]
        elif isinstance(value, dict):
            del_none(value)
    return dictionary
48b76272ed20bbee38b5293ede9f5d824950aec5
706,820
def survival_df(data, t_col="t", e_col="e", label_col="Y", exclude_col=[]):
    """
    Transform original DataFrame to survival dataframe that would be used in
    model training or predicting.

    Parameters
    ----------
    data: DataFrame
        Survival data to be transformed.
    t_col: str
        Column name of data indicating time.
    e_col: str
        Column name of data indicating events or status.
    label_col: str
        Name of new label in transformed survival data.
    exclude_col: list
        Columns to be excluded.

    Returns
    -------
    DataFrame:
        Transformed survival data. Negtive values in label are taken as right
        censored.
    """
    x_cols = [c for c in data.columns if c not in [t_col, e_col] + exclude_col]

    # Negtive values are taken as right censored
    # NOTE(review): these assignments write label_col onto the caller's
    # DataFrame in place (and can raise SettingWithCopyWarning on a slice) —
    # confirm that mutating the input is intended.
    data.loc[:, label_col] = data.loc[:, t_col]
    data.loc[data[e_col] == 0, label_col] = - data.loc[data[e_col] == 0, label_col]

    return data[x_cols + [label_col]]
8d35c27a75340d5c6535727e0e419fc0548d6094
706,823
def _escape_pgpass(txt):
    """
    Escape a fragment of a PostgreSQL .pgpass file.
    """
    # Backslashes must be escaped first so the colon escapes stay intact.
    for special in ('\\', ':'):
        txt = txt.replace(special, '\\' + special)
    return txt
3926f683a2715ff1d41d8433b525793e8214f7a9
706,829
from typing import OrderedDict


def arr_to_dict(arr, ref_dict):
    """
    Transform an array of data into a dictionary keyed by the same keys in
    ref_dict, with data divided into chunks of the same length as in ref_dict.
    Requires that the length of the array is the sum of the lengths of the
    arrays in each entry of ref_dict. The other dimensions of the input array
    and reference dict can differ.

    Arguments
    ---------
    arr : array
        Input array to be transformed into dictionary.
    ref_dict : dict
        Reference dictionary containing the keys used to construct the output
        dictionary.

    Returns
    -------
    out : dict
        Dictionary of values from arr keyed with keys from ref_dict.
    """
    assert len(arr) == sum(len(chunk) for chunk in ref_dict.values())
    out = OrderedDict()
    cursor = 0
    for key, ref_chunk in ref_dict.items():
        # Consume a slice of the same length as the reference entry.
        out[key] = arr[cursor:cursor + len(ref_chunk)]
        cursor += len(ref_chunk)
    return out
55339447226cdd2adafe714fa12e144c6b38faa2
706,830
from typing import Any
from typing import Set
from typing import KeysView


def to_set(data: Any) -> Set[Any]:
    """Convert data to a set. A single None value will be converted to the
    empty set.

    ```python
    x = fe.util.to_set(None)  # set()
    x = fe.util.to_set([None])  # {None}
    x = fe.util.to_set(7)  # {7}
    x = fe.util.to_set([7, 8])  # {7,8}
    x = fe.util.to_set({7})  # {7}
    x = fe.util.to_set((7))  # {7}
    ```

    Args:
        data: Input data, within or without a python container. The `data`
            must be hashable.

    Returns:
        The input `data` but inside a set instead of whatever other container
        type used to hold it.
    """
    if data is None:
        return set()
    if isinstance(data, set):
        return data
    if isinstance(data, (tuple, list, KeysView)):
        return set(data)
    return {data}
df2649d0b7c7c2323984edd3eeea76eff0eab4d2
706,839
import mimetypes


def get_mimetype(path):
    """
    Get (guess) the mimetype of a file.
    """
    # guess_type returns (type, encoding); only the type is wanted.
    return mimetypes.guess_type(path)[0]
7677259fcdf052f9647fe41e4b4cb71d83ea50cd
706,840
def id_number_checksum(gd):
    """
    Calculates a Swedish ID number checksum, using the Luhn algorithm
    """
    total = 0
    digits = gd['year'] + gd['month'] + gd['day'] + gd['serial']
    for position, ch in enumerate(digits):
        # Letter? It's an interimspersonnummer and we substitute the letter
        # with 1.
        digit = 1 if ch.isalpha() else int(ch)
        # Luhn: double every other digit, starting with the first.
        product = digit * (2 if position % 2 == 0 else 1)
        # Summing the two digits of a product in 10..18 equals product - 9.
        if product > 9:
            product -= 9
        total += product
    if total % 10 == 0:
        return 0
    return ((total // 10) + 1) * 10 - total
bbf0a9fa7f6ed2c2bfc414173fd2ac9e9c1d8835
706,841
def del_none(d):
    """
    Delete dict keys with None values, and empty lists, recursively.

    Bug fix: the original iterated ``d.items()`` directly while deleting
    keys, which raises ``RuntimeError: dictionary changed size during
    iteration`` in Python 3. Iterating over a snapshot fixes that.

    Args:
        d (dict): the dictionary to clean; modified in place.
    Returns:
        dict: the same (mutated) dictionary, for convenience.
    """
    for key, value in list(d.items()):
        if value is None or (isinstance(value, list) and len(value) == 0):
            del d[key]
        elif isinstance(value, dict):
            del_none(value)
    return d
46cf9e331c633f5f69b980f3b10c96306d3478c2
706,842
def get_last_position(fit, warmup=False):
    """Parse last position from fit object

    Parameters
    ----------
    fit : StanFit4Model
    warmup : bool
        If True, returns the last warmup position, when warmup has been done.
        Otherwise function returns the first sample position.

    Returns
    -------
    list
        list contains a dictionary of last draw from each chain.
    """
    fit._verify_has_samples()
    positions = []
    extracted = fit.extract(permuted=False, pars=fit.model_pars, inc_warmup=warmup)
    # Index -1 selects the last draw of whichever span we extracted.
    draw_location = -1
    if warmup:
        # With warmup included, shift the index to the end of the warmup
        # span instead of the end of the whole chain.
        draw_location += max(1, fit.sim["warmup"])
    chains = fit.sim["chains"]
    for i in range(chains):
        # One {parameter: value} dict per chain at the chosen draw.
        extract_pos = {key : values[draw_location, i] for key, values in extracted.items()}
        positions.append(extract_pos)
    return positions
28ec10c4f90ac786053334f593ffd3ade27b1fc5
706,847
import importlib def _version(lib_name): """ Returns the version of a package. If version cannot be determined returns "available" """ lib = importlib.import_module(lib_name) if hasattr(lib, "__version__"): return lib.__version__ else: return "available"
cec49d2de66d2fc3a7ed3c89259711bdf40bbe8e
706,850
from pathlib import Path


def mkdir(path_str):
    """
    Method to create a new directory or directories recursively.
    """
    target = Path(path_str)
    # parents/exist_ok make this an idempotent `mkdir -p`; returns None.
    return target.mkdir(parents=True, exist_ok=True)
1621fd5f4d74b739de0b17933c1804faabf44a2f
706,851
def horner(n, c, x0):
    """Evaluate a polynomial and its derivative at ``x0`` with Horner's scheme.

    Parameters
    ----------
    n : integer
        degree of the polynomial.
    c : float
        coefficients of the polynomial.
    x0 : float
        where we are evaluating the polynomial.

    Returns
    -------
    y : float
        the value of the function evaluated at x0.
    z : float
        the value of the derivative evaluated at x0.
    """
    value = c[n]
    deriv = c[n]
    for idx in range(n - 1, 0, -1):
        value = x0 * value + c[idx]
        # Accumulating the partial values yields the derivative.
        deriv = x0 * deriv + value
    value = x0 * value + c[0]  # final step folds in b0
    return value, deriv
adf3f3772d12d5bed0158045ad480cee8454cb5c
706,852
import gzip def _compression_safe_opener(fname): """Determine whether to use *open* or *gzip.open* to read the input file, depending on whether or not the file is compressed. """ f = gzip.open(fname, "r") try: f.read(1) opener = gzip.open except IOError: opener = open finally: f.close() return opener
4c44da2ae15c63ccd6467e6e893a3c590c20a7e9
706,854
from typing import Iterable
from typing import Any
from typing import Iterator
import itertools


def prepend(
    iterable: Iterable[Any],
    value: Any,
    *,
    times: int = 1,
) -> Iterator[Any]:
    """Return an iterator with a specified value prepended.

    Arguments:
        iterable: the iterable to which the value is to be prepended
        value: the value to prepend to the iterable

    Keyword Arguments:
        times: number of times to prepend the value
            (optional; default is 1)

    Returns:
        iterator prepending the specified value(s) to the items of the iterable

    Examples:
        >>> list(prepend(range(5), -1))
        [-1, 0, 1, 2, 3, 4]

        >>> list(prepend(['off to work we go'], 'hi ho', times=2))
        ['hi ho', 'hi ho', 'off to work we go']
    """
    # repeat() avoids materialising a [value] * times list.
    return itertools.chain(itertools.repeat(value, times), iterable)
659bc3616238f5e40865505c006c1369f20e33d3
706,856
def _with_extension(base: str, extension: str) -> str:
    """ Adds an extension to a base name """
    # Names containing "sus" get the extension prepended instead.
    if "sus" in base:
        return extension + base
    return base + extension
5a1253763808127f296c3bcb04c07562346dea2d
706,857
def fake_execute_default_reply_handler(*ignore_args, **ignore_kwargs):
    """A reply handler for commands that haven't been added to the reply list.

    Returns empty strings for stdout and stderr.
    """
    return ('', '')
e73bd970030c4f78aebf2913b1540fc1b370d906
706,860
def empty_items(item_list, total):
    """
    Returns a list of null objects. Useful when you want to always show n
    results and you have a list of < n.
    """
    missing = int(total) - len(item_list)
    if missing != 0:
        return range(0, missing)
    # Historical behaviour: an empty string when nothing is missing.
    return ''
12848fe61457b2d138a2fcd074fb6ec6d09cbaf5
706,861
import struct def _read_string(fp): """Read the next sigproc-format string in the file. Parameters ---------- fp : file file object to read from. Returns ------- str read value from the file """ strlen = struct.unpack("I", fp.read(struct.calcsize("I")))[0] return fp.read(strlen).decode()
346a65e6be15f593c91dde34cb45c53cb5731877
706,862
def make_attrstring(attr):
    """Returns an attribute string in the form key="val" """
    pairs = ['%s="%s"' % (k, v) for k, v in attr.items()]
    attrstring = ' '.join(pairs)
    # A leading space separates the attributes from the tag name.
    return '%s%s' % (' ' if attrstring != '' else '', attrstring)
fbaf2b763b4b1f4399c45c3a19698d0602f0b224
706,863
from typing import Callable
from typing import Iterable
from typing import List


def get_index_where(condition: Callable[..., bool], iterable: Iterable) -> List[int]:
    """Return index values where `condition` is `True`."""
    indices = []
    for position, element in enumerate(iterable):
        if condition(element):
            indices.append(position)
    return indices
6f99086730dfc2ab1f87df90632bc637fc6f2b93
706,864
from typing import Sequence


def argmax(sequence: Sequence) -> int:
    """Find the argmax of a sequence."""
    # Indexing via __getitem__ keeps the "first maximum wins" semantics
    # and still raises ValueError on an empty sequence.
    return max(range(len(sequence)), key=sequence.__getitem__)
58cc1d0e952a7f15ff3fca721f43c4c658c41de1
706,866
def read_data_from_device(device, location):
    """ Reads text data from device and returns it as output

        Args:
            location ('str'): Path to the text file

        Raises:
            FileNotFoundError: File Does not Exist

        Returns:
            Data ('str'): Text data read from the device
    """
    # IMPORTANT
    # =========
    # This API does not require the device to have network connection
    # copy_from_device is the other API that behaves similar to this one,
    # but it requires network connection since it uses SCP
    command = "cat {}".format(location)
    try:
        return device.execute(command)
    except Exception:
        # Throw file not found error when encounter generic error
        raise FileNotFoundError("File {} does not exist.".format(location))
f6895d25f9f9e68ec33bb2d8f693999a7e3a2812
706,867
from typing import Set


def tagify(tail=u'', head=u'', sep=u'.'):
    """
    Returns namespaced event tag string.
    Tag generated by joining with sep the head and tail in that order
    head and tail may be a string or a list, tuple, or Set of strings
    If head is a list, tuple or Set Then
        join with sep all elements of head individually
    Else
        join in whole as string prefix
    If tail is a list, tuple or Set Then
        join with sep all elements of tail individually
    Else
        join in whole as string suffix
    If either head or tail is empty then do not exhibit in tag
    """
    # Expand list-like heads into individual parts; keep strings whole.
    parts = list(head) if isinstance(head, (list, tuple, Set)) else [head]
    if isinstance(tail, (list, tuple, Set)):
        parts.extend(tail)
    else:
        parts.append(tail)
    # Empty parts are dropped from the joined tag.
    return sep.join(part for part in parts if part)
ddebdc0c4224db428a4338fd1e4c61137ac2d5c5
706,869
def classify_design_space(action: str) -> int:
    """Classify an action into a design-space category index.

    The returned index corresponds to the list stored in "count":
    [sketching, 3D features, mating, visualizing, browsing, other organizing]

    :param action: the action to be classified
    :return: the index of the action type this action is accounted for;
             -1 if the action does not belong to any category;
             -10 for "Add or modify a sketch", which is special
             (+1 for sketching and -1 for 3D features)
    """
    # Creating a sketch is special as it affects both the sketching and the
    # 3D features counts
    if action == "Add or modify a sketch":
        return -10
    # Sketching
    elif action == "Copy paste sketch":
        return 0
    # 3D features
    elif action in ["Commit add or edit of part studio feature",
                    "Delete part studio feature"]:
        return 1
    # Mating
    # BUGFIX: a missing comma used to concatenate the last two strings into
    # "Add assembly instanceDelete assembly instance", so deleting an
    # assembly instance was never counted as mating.
    elif action in ["Add assembly feature", "Delete assembly feature",
                    "Add assembly instance", "Delete assembly instance"]:
        return 2
    # Visualizing
    elif action in ["Start assembly drag", "Animate action called"]:
        return 3
    # Browsing (opening / creating / deleting / renaming a tab)
    elif "Tab" in action and ("opened" in action or "created" in action or
                              "deleted" in action or "renamed" in action):
        return 4
    # Other organizing
    elif action in ["Create version", "Cancel Operation",
                    "Undo Redo Operation", "Merge branch",
                    "Branch workspace", "Update version"]:
        return 5
    # Not classified
    else:
        return -1
22dc68aa23258691b0d4b9f1b27a9e8451b275d9
706,870
import hashlib


def get_sha256_hash(plaintext):
    """Return the SHA-256 hex digest of an object's string form.

    Usually used to generate the hash of a chat ID for lookup.

    Parameters
    ----------
    plaintext: int or str
        Item to hash

    Returns
    -------
    str
        Hex digest of the item
    """
    encoded = str(plaintext).encode('utf-8')
    return hashlib.sha256(encoded).hexdigest()
79735973b8ad73823662cc428513ef393952b681
706,871
def jaccard(list1, list2):
    """Compute the Jaccard distance between two collections.

    | Arguments:
    | :-
    | list1 (list or networkx graph): objects to compare
    | list2 (list or networkx graph): objects to compare

    | Returns:
    | :-
    | Jaccard distance (1 - intersection/union) between list1 and list2
    """
    overlap = len(set(list1) & set(list2))
    combined = len(list1) + len(list2) - overlap
    return 1 - float(overlap) / combined
1056c3d5a592bea9a575c24e947a91968b931000
706,874
def default_argument_preprocessor(args):
    """Pass `args` through untouched alongside an empty extras mapping."""
    return args, {}
2031dde70dbe54beb933e744e711a0bf8ecaed99
706,875
def readFPs(filepath):
    """Read a list of fingerprints from a file.

    Each fingerprint is the first whitespace-separated token of a line.
    Lines starting with '#' are treated as comments; blank lines are
    skipped (the original crashed with IndexError on them).

    :param filepath: path to the fingerprint file
    :raises IOError: if the file cannot be opened
    :return: list of fingerprint strings
    """
    try:
        infile = open(filepath, "r")
    except OSError:
        raise IOError("file does not exist:", filepath)
    # `with` guarantees the handle is closed (the original leaked it).
    with infile:
        fps = []
        for line in infile:
            if line.startswith("#"):  # ignore comments
                continue
            tokens = line.split()
            if tokens:  # skip blank lines
                fps.append(tokens[0])
    return fps
96d483360c411a27a3b570875f61344ef4dae573
706,883
import math


def point_in_ellipse(origin, point, a, b, pa_rad, verbose=False):
    """
    Identify if the point is inside the ellipse.

    :param origin A SkyCoord defining the centre of the ellipse.
    :param point A SkyCoord defining the point to be checked.
    :param a The semi-major axis in arcsec of the ellipse
    :param b The semi-minor axis in arcsec of the ellipse
    :param pa_rad The position angle of the ellipse. This is the angle of the
        major axis measured in radians East of North (or CCW from the y axis).
    :param verbose If True, print diagnostic offsets and the final distance.
    :return True if the point lies inside or on the ellipse boundary
        (scaled distance rounded to 3 d.p. is <= 1.0), False otherwise.
    """
    # NOTE(review): origin/point appear to be astropy SkyCoord objects
    # (icrs.ra/icrs.dec attributes are used) -- confirm against callers.
    # Convert point to be in plane of the ellipse, accounting for distortions at high declinations
    p_ra_dist = (point.icrs.ra.degree - origin.icrs.ra.degree)* math.cos(origin.icrs.dec.rad)
    p_dec_dist = point.icrs.dec.degree - origin.icrs.dec.degree

    # Calculate the angle and radius of the test opoint relative to the centre of the ellipse
    # Note that we reverse the ra direction to reflect the CCW direction
    radius = math.sqrt(p_ra_dist**2 + p_dec_dist**2)
    # Special-case p_dec_dist == 0 to avoid division by zero in atan.
    diff_angle = (math.pi/2 + pa_rad) if p_dec_dist == 0 else math.atan(p_ra_dist / p_dec_dist) - pa_rad

    # Obtain the point position in terms of the ellipse major and minor axes
    minor = radius * math.sin(diff_angle)
    major = radius * math.cos(diff_angle)
    if verbose:
        print ('point relative to ellipse centre angle:{} deg radius:{:.4f}" maj:{:.2f}" min:{:.2f}"'.format(math.degrees(diff_angle), radius*3600, major*3600, minor*3600))

    # a and b are given in arcsec while the offsets are in degrees;
    # convert the axes to degrees so the ratio below is dimensionless.
    a_deg = a / 3600.0
    b_deg = b / 3600.0

    # Calc distance from origin relative to a and b
    dist = math.sqrt((major / a_deg) ** 2 + (minor / b_deg) ** 2)
    if verbose:
        print("Point %s is %f from ellipse %f, %f, %f at %s." % (point, dist, a, b, math.degrees(pa_rad), origin))
    # Rounding to 3 d.p. lets points marginally outside still count as inside.
    return round(dist,3) <= 1.0
9c4b056c205b8d25e80211adb0eeb1cdfaf4c11c
706,885
def isNumberString(value):
    """Check whether `value` is a non-empty digit string, optionally
    prefixed with a single '+' or '-' sign."""
    if not value:
        return False
    body = value[1:] if value[0] in ('+', '-') else value
    return len(body) > 0 and body.isdigit()
06feaab112e184e6a01c2b300d0e4f1a88f2250e
706,886
from typing import Union from typing import Dict from typing import Any from typing import List def _func_length(target_attr: Union[Dict[str, Any], List[Any]], *_: Any) -> int: """Function for returning the length of a dictionary or list.""" return len(target_attr)
b66a883c763c93d9a62a7c09324ab8671d325d05
706,887
def configuration_filename(feature_dir, proposed_splits, split, generalized):
    """Build the configuration-specific checkpoint filename.

    Args:
        feature_dir (`str`): directory of features wrt the dataset directory.
        proposed_splits (`bool`): whether proposed splits are used.
        split (`str`): train split.
        generalized (`bool`): whether the GZSL setting is used.

    Returns:
        `str` combining the arguments into a '.pt' filename.
    """
    splits_part = '_proposed_splits' if proposed_splits else ''
    gzsl_part = '_generalized' if generalized else ''
    return '{}{}_{}{}.pt'.format(feature_dir, splits_part, split, gzsl_part)
a3fc2c23746be7ed17f91820dd30a8156f91940c
706,888
def _fileobj_to_fd(fileobj): """Return a file descriptor from a file object. Parameters: fileobj -- file object or file descriptor Returns: corresponding file descriptor Raises: ValueError if the object is invalid """ if isinstance(fileobj, int): fd = fileobj else: try: fd = int(fileobj.fileno()) except (AttributeError, TypeError, ValueError): raise ValueError('Invalid file object: {!r}'.format(fileobj) ) from None if fd < 0: raise ValueError('Invalid file descriptor: {}'.format(fd)) return fd
8b1bea4083c0ecf481c712c8b06c76257cea43db
706,890
import base64


def hex_to_base64(hex_):
    """Decode a hex string into raw bytes and re-encode them as base64 bytes."""
    raw = bytes.fromhex(hex_)
    return base64.b64encode(raw)
26f42b25c9e804bc1b786aadab033db104882f4b
706,891
import math


def hard_negative_mining(loss, labels, neg_pos_ratio=3):
    """Select positives plus the hardest negatives at a fixed ratio.

    Keeps all positive anchors and, per row, only the `neg_pos_ratio`
    negatives with the highest loss (by default three negatives per
    positive). Note: `loss` is modified in place (positives set to -inf).

    Args:
        loss (N, num_priors): the loss for each example.
        labels (N, num_priors): the labels; values > 0 are positives.
        neg_pos_ratio: ratio of negatives to positives to keep.

    Returns:
        Boolean mask (N, num_priors) selecting the retained examples.
    """
    positive = labels > 0
    positives_per_row = positive.long().sum(dim=1, keepdim=True)
    negatives_allowed = positives_per_row * neg_pos_ratio

    # Exclude positives from the negative ranking below.
    loss[positive] = -math.inf
    # Double argsort yields each element's rank in descending-loss order.
    _, order_desc = loss.sort(dim=1, descending=True)
    _, rank = order_desc.sort(dim=1)
    negative = rank < negatives_allowed

    return positive | negative
3b2e38ab2b0bbd9732fceafdfd023ea220b3c5eb
706,894
def find_routes(paths) -> list:
    """Return the unique consecutive-node pairs (routes) found in `paths`.

    E.g. the path [1, 2, 3] yields the routes (1, 2) and (2, 3). Routes
    keep their order of first appearance; duplicates are dropped.

    :param paths: iterable of node sequences
    :return: list of (node, next_node) tuples
    """
    routes = []
    for path in paths:
        # zip pairs each node with its successor; the original relied on
        # catching IndexError past the last element, which is fragile and
        # would mask genuine indexing bugs.
        for route in zip(path, path[1:]):
            if route not in routes:
                routes.append(route)
    return routes
67fb8eb575dd45879f5e5b465a7886f2a2387b26
706,898
def deploy_tester_contract(
        web3,
        contracts_manager,
        deploy_contract,
        contract_deployer_address,
        get_random_address,
):
    """Build a deploy-by-name closure.

    The returned function looks up a contract's compiled ABI/bytecode via
    the contracts manager and deploys it with the supplied helper."""
    def deploy_named(contract_name, libs=None, args=None):
        compiled = contracts_manager.get_contract(contract_name)
        return deploy_contract(
            web3,
            contract_deployer_address,
            compiled['abi'],
            compiled['bin'],
            args,
        )

    return deploy_named
ee925e9632f3bfd66a843d336bd287c92543b2ed
706,899
def mil(val):
    """Convert a length in mils (thousandths of an inch) to millimetres."""
    MM_PER_MIL = 0.0254
    return float(val) * MM_PER_MIL
9071b0116a7062ef93d6bee56a08db2b9bec906a
706,901
def ask_number(question, low, high):
    """Prompt until the user enters an integer in ``range(low, high)``.

    Non-numeric input prints an error message and re-prompts. Out-of-range
    integers silently re-prompt.

    :param question: prompt text shown to the user
    :param low: inclusive lower bound
    :param high: exclusive upper bound
    :return: the accepted integer

    Bug fixed: the original could return a stale out-of-range value when a
    valid-but-out-of-range number was followed by non-numeric input (the
    outer ``type(response) != int`` loop exited because `response` was
    already an int).
    """
    while True:
        try:
            response = int(input(question))
        except ValueError:
            print("Value must be a number")
            continue
        if low <= response < high:
            return response
fdae37e6a0cd34d36b647a23f4a0f58cad46680a
706,902
from pathlib import Path


def input_file_path(directory: str, file_name: str) -> Path:
    """Resolve an input file against the result directory.

    1. if `file_name` is an absolute path to an existing file, return it;
    2. otherwise join it onto `directory` and return the resolved path.

    Raises FileNotFoundError when neither candidate is an existing file.
    """
    candidate = Path(file_name)
    if candidate.is_absolute() and candidate.is_file():
        return candidate

    candidate = Path(directory) / candidate
    if candidate.is_file():
        return candidate.resolve()

    raise FileNotFoundError(
        'did not find the input file using result_directory={directory}, input_file={input_file}'.format(
            directory=directory, input_file=file_name
        )
    )
dd866a5f8b6f776238269844d64686f7fb28347c
706,904
def ft32m3(ft3):
    """Convert a volume from cubic feet to cubic metres."""
    M3_PER_FT3 = 0.028316847
    return M3_PER_FT3 * ft3
74f55f722c7e90be3fa2fc1f79f506c44bc6e9bc
706,908
def _calculate_target_matrix_dimension(m, kernel, paddings, strides): """ Calculate the target matrix dimension. Parameters ---------- m: ndarray 2d Matrix k: ndarray 2d Convolution kernel paddings: tuple Number of padding in (row, height) on one side. If you put 2 padding on the left and 2 padding on the right, specify 2. strides: tuple Step size in (row, height) Returns ------- out: tuple Tuple containing (number of rows, number of columns) Raises ------ ValueError If kernel size is greater than m in any axis after padding """ source_height = m.shape[0] source_width = m.shape[1] padding_row = paddings[0] padding_column = paddings[1] kernel_height = kernel.shape[0] kernel_width = kernel.shape[1] if kernel_height > (source_height + padding_row) or kernel_width > (source_width + padding_column): raise ValueError("Kernel size is larger than the matrix") row_stride = strides[0] col_stride = strides[1] # (source_height - kernel_height)/strides[0] is how many steps you can go down. # + 1 to include the start position. target_height = int((source_height + padding_row - kernel_height) / row_stride) + 1 target_width = int((source_width + padding_column - kernel_width) / col_stride) + 1 return (target_height, target_width)
77b5cabd7101b957a27fc422d1ed1715525400a0
706,909
def pretty_duration(seconds):
    """Render `seconds` as a rough human-readable duration string."""
    if seconds >= 48 * 3600:
        return '%d days' % (seconds // (24 * 3600))
    if seconds >= 7200:
        return '%d hours' % (seconds // 3600)
    if seconds >= 120:
        return '%d minutes' % (seconds // 60)
    if seconds >= 2:
        return '%d seconds' % seconds
    return '%d second' % seconds
8e34addedeeb98e1e028fa9374fcc8c4f134a9f7
706,910
import ipaddress


def is_valid_ip(ip: str) -> bool:
    """
    Args:
        ip: IP address

    Returns:
        True if the string represents an IPv4 or an IPv6 address, false otherwise.
    """
    # ipaddress.ip_address tries IPv4 then IPv6 itself, replacing the
    # original's nested try/except over the two concrete classes.
    try:
        ipaddress.ip_address(ip)
        return True
    except ValueError:
        return False
aa1d3b19828dd8c3dceaaa8d9d1017cc16c1f73b
706,913
def tree_to_newick_rec(cur_node):
    """Recursively build the Newick fragment for `cur_node`'s subtree.

    Each child contributes '(<subtree>)<name>' (the parenthesised part is
    omitted for leaves); siblings are comma-joined.
    """
    pieces = []
    for child in cur_node.descendants:
        fragment = tree_to_newick_rec(child)
        if fragment:
            fragment = '(' + fragment + ')'
        pieces.append(fragment + child.name)
    return ','.join(pieces)
751d46dbb4e3a5204900601164410b5bf7f0578b
706,915
def indexData_x(x, ukn_words):
    """Map each word in the data to a unique integer (starting at 2).

    Index 1 is reserved for out-of-vocabulary words (`ukn_words`) and
    index 0 is kept free in both dictionaries.

    :param x: the data, an iterable of word sequences
    :param ukn_words: token representing out-of-vocabulary words
    :return: (word->index dict, index->word dict)
    """
    flat = [token for entry in x for token in entry]
    # dict preserves insertion order, giving a deterministic vocabulary.
    vocab = list(dict.fromkeys(flat))

    print("Number of entries: ", len(flat))
    print("Individual entries: ", len(vocab))

    word2ind = {word: index for index, word in enumerate(vocab, 2)}
    ind2word = {index: word for index, word in enumerate(vocab, 2)}

    word2ind[ukn_words] = 1
    ind2word[1] = ukn_words

    return word2ind, ind2word
3f6ffd97d33400c3418b78ad3b383766cc07bee3
706,917
def shimizu_mirioka(XYZ, t, a=0.75, b=0.45):
    """Right-hand side of the Shimizu-Morioka attractor ODE.

    Suggested initial condition: x0 = (0.1, 0, 0).
    """
    x, y, z = XYZ
    return (
        y,
        (1 - z) * x - a * y,
        x**2 - b * z,
    )
60e5b52e1755de8bcc966364d828d47b05af3723
706,918
def pack_bidirectional_lstm_state(state, num_layers):
    """
    Pack a BiLSTM hidden state of shape (2*num_layers, B, H) into
    (num_layers, B, 2*H) by concatenating the two directions per layer.
    """
    assert (len(state) == 2 * num_layers)
    _, batch_size, hidden_dim = state.size()
    stacked = state.view(num_layers, 2, batch_size, hidden_dim)
    merged = stacked.transpose(1, 2).contiguous()
    return merged.view(num_layers, batch_size, -1)
de102ce55deceb5ca7211def122dc2767c35cdd3
706,920
from typing import Dict def _build_request_url( base: str, params_dict: Dict[str, str]) -> str: """Returns an URL combined from base and parameters :param base: base url :type base: str :param params_dict: dictionary of parameter names and values :type params_dict: Dict[str, str] :return: a complete url :rtype: str """ parameters = "&".join([f"{k}={v}" for k, v in params_dict.items()]) url = base + "?" + parameters return url
30e27cf55692884be408218403c2f94279516ad2
706,922
def sub_vectors(a, b):
    """Subtracts two vectors element-wise.

    Args:
        a (tuple[int]): first position
        b (tuple[int]): second position

    Returns:
        tuple[int]: element-wise difference ``a - b``

    Examples:
        >>> sub_vectors((1,4,6), (1,3,7))
        (0, 1, -1)
    """
    # zip generalizes the original fixed range(3) loop to any equal-length
    # vectors, while behaving identically for 3-component inputs.
    return tuple(x - y for x, y in zip(a, b))
02c35bf46311142a3f3e90cd803d908c6ff63896
706,924
def merge_dictionaries(default_dictionary, user_input_dictionary, path=None):
    """Merge the user's input over the defaults; user values win on clashes.

    `path` is unused and kept only for interface compatibility."""
    merged = dict(default_dictionary)
    merged.update(user_input_dictionary)
    return merged
ea600efcd69e920ae536fa2f22a4c883a71d8ad3
706,929
def is_correlated(corr_matrix, feature_pairs, rho_threshold=0.8):
    """Flag which feature pairs are linearly correlated above a threshold.

    Keys of the result are the '__'-joined pair names; values are booleans
    indicating whether the (3-d.p. rounded) correlation meets the threshold.
    """
    def _passes(pair):
        first, second = pair.split("__")
        return round(corr_matrix[first][second], 3) >= rho_threshold

    return {pair: _passes(pair) for pair in feature_pairs}
18afa0cc24f5d9205cde3c8ad23f70d73b5c395b
706,932
def get_bin_values(base_dataset, bin_value):
    """Return the per-sample values used for bin sorting, per configuration.

    'results' uses the dataset outputs; 'all' maps every sample to bin 0.
    Any other configuration raises.
    """
    if bin_value == "results":
        return base_dataset.get_output()
    if bin_value == "all":
        # Single shared bin: every sample gets value 0.
        return [0] * base_dataset.get_number_of_samples()
    raise Exception(f"Invalid bin value configured: {bin_value}")
cf2419066d6e642e65d9a8747081ebfee417ed64
706,934
async def get_temperatures(obj):
    """Query the thermostat (via its 'madoka' handle) for temperature readings."""
    madoka = obj["madoka"]
    return await madoka.temperatures.query()
b4643d9c40f6aa8953c598dd572d291948ef34a4
706,935
import math def _meters_per_pixel(zoom, lat=0.0, tilesize=256): """ Return the pixel resolution for a given mercator tile zoom and lattitude. Parameters ---------- zoom: int Mercator zoom level lat: float, optional Latitude in decimal degree (default: 0) tilesize: int, optional Mercator tile size (default: 256). Returns ------- Pixel resolution in meters """ return (math.cos(lat * math.pi / 180.0) * 2 * math.pi * 6378137) / ( tilesize * 2 ** zoom )
467d23bd437f153345c67c8c1cab1a086fde4995
706,939
def manhattanDistance( xy1, xy2 ):
    """Returns the Manhattan (L1) distance between points xy1 and xy2"""
    dx = xy1[0] - xy2[0]
    dy = xy1[1] - xy2[1]
    return abs(dx) + abs(dy)
ce0ee21237f253b1af33fbf088292405fd046fe3
706,940
from datetime import datetime


def get_datetime_now(t=None, fmt='%Y_%m%d_%H%M_%S'):
    """Format `t` (default: current time) as a string; default format YYYY_MMDD_HHMM_SS."""
    moment = datetime.now() if t is None else t
    return moment.strftime(fmt)
c4fc830b7ede9d6f52ee81c014c03bb2ef5552dc
706,942
def is_firstline(text, medicine, disease):
    """Detect whether a sentence mentions a medicine as first-line treatment.

    Keyword matching (case-insensitive): the sentence must contain the
    medicine name, the disease name, and either "first-line treatment" or
    "first-or second-line treatment".

    Parameters
    ----------
    text : str
        A single sentence.
    medicine : str
        A medicine's name.
    disease : str
        A disease's name.

    Returns
    -------
    bool
        True if all three elements are mentioned, False otherwise.

    Examples
    --------
    >>> txt = "TECENTRIQ, in combination with carboplatin and etoposide, is indicated for the first-line treatment of adult patients with extensive-stage small cell lung cancer (ES-SCLC)."
    >>> is_firstline(text = txt, medicine = "TECENTRIQ", disease = "small cell lung cancer")
    True
    """
    lowered = text.lower()
    if medicine.lower() not in lowered:
        return False
    if disease.lower() not in lowered:
        return False
    return ('first-line treatment' in lowered
            or 'first-or second-line treatment' in lowered)
c9f8a31c6089c4f7545780028ccb1a033372c284
706,943
def get_output_attribute(out, attribute_name, cuda_device, reduction="sum"):
    """Reduce one output attribute across DataParallel shards.

    With a single device the attribute is returned as-is; with a list of
    devices the per-shard values are summed (or averaged).

    Parameters
    ---------------------
    :param out: Dictionary, output of model during forward pass
    :param attribute_name: str
    :param cuda_device: list or int
    :param reduction: (string, optional) 'sum' or 'mean'. Default: 'sum'.
    """
    value = out[attribute_name]
    if not isinstance(cuda_device, list):
        return value
    if reduction == "sum":
        return value.sum()
    if reduction == "mean":
        return value.sum() / float(len(value))
    raise ValueError("invalid reduction type argument")
c09ff6a3dd4ae2371b1bbec12d4617e9ed6c6e1e
706,948
import collections def _get_ordered_label_map(label_map): """Gets label_map as an OrderedDict instance with ids sorted.""" if not label_map: return label_map ordered_label_map = collections.OrderedDict() for idx in sorted(label_map.keys()): ordered_label_map[idx] = label_map[idx] return ordered_label_map
4c5e56789f57edda61409f0693c3bccb57ddc7cf
706,951
def split_to_sentences(data):
    """Split `data` on linebreaks, strip whitespace, and drop empty lines.

    Args:
        data: str

    Returns:
        A list of non-empty, stripped sentences
    """
    stripped = (chunk.strip() for chunk in data.split('\n'))
    return [chunk for chunk in stripped if chunk]
56540da88e982615e3874ab9f6fd22229a076565
706,956
import warnings


def get_integer(val=None, name="value", min_value=0, default_value=0):
    """Parse `val` as an integer with basic validation.

    Parameters
    ----------
    val : `float` or None, default None
        Value to convert to integer.
    name : `str`, default "value"
        What the value represents (used in messages).
    min_value : `float`, default 0
        Minimum allowed value.
    default_value : `float`, default 0
        Value used when `val` is None.

    Returns
    -------
    val : `int`
        Value parsed as an integer.
    """
    if val is None:
        val = default_value
    try:
        converted = int(val)
    except ValueError:
        raise ValueError(f"{name} must be an integer")
    # Warn on lossy conversions (e.g. "3" or 3.7 -> 3).
    if converted != val:
        warnings.warn(f"{name} converted to integer {converted} from {val}")
    if not converted >= min_value:
        raise ValueError(f"{name} must be >= {min_value}")
    return converted
9c967a415eaac58a4a4778239859d1f6d0a87820
706,960
import re def _get_variable_name(param_name): """Get the variable name from the tensor name.""" m = re.match("^(.*):\\d+$", param_name) if m is not None: param_name = m.group(1) return param_name
4f6258667383c80b584054af20ac9a61cf25381f
706,961
def display(choices, slug):
    """
    Get the display name for a form choice based on its slug.

    We need this function because we want to be able to store ACS data using
    the human-readable display name for each field, but in the code we want
    to reference the fields using their slugs, which are easier to change.

    :param choices: A list of tuples representing Django-style form choices.
    :param slug: The slug of the choice to select.
    :return: The display name for the given slug.
    :raises NameError: if no choice matches the slug.
    """
    for choice_slug, display_name in choices:
        if choice_slug == slug:
            return display_name
    # Fixed the doubled word ("for for") in the original error message.
    raise NameError('No choice for slug {} in {}'.format(slug, str(choices)))
e177fa4596de8a9921d05216d51344e95dce89ab
706,964