Dataset columns: content (string, 35 to 416k characters), sha1 (string, 40 characters), id (int64, 0 to 710k). Each record below lists the code content, its sha1 hash, and its id.
from itertools import product

def allowed_couplings(coupling, flow, free_id, symmetries):
    """Iterator over all the allowed Irreps for free_id in coupling if the
    other two couplings are fixed.
    """
    if len(coupling) != 3:
        raise ValueError(f'len(coupling) [{len(coupling)}] != 3')
    if len(flow) != 3:
        raise ValueError(f'len(flow) [{len(flow)}] != 3')

    other_ids = [0, 1, 2]
    other_ids.remove(free_id)
    other_c = [coupling[o] for o in other_ids]
    other_f = [flow[o] for o in other_ids]
    this_f = flow[free_id]

    def fermionic_constraint(oirr, oflow, tflow):
        yield sum(oirr) % 2

    def U1_constraint(oirr, oflow, tflow):
        sign = {True: 1, False: -1}
        yield sign[not tflow] * sum(sign[f] * x for x, f in zip(oirr, oflow))

    def pg_constraint(oirr, oflow, tflow):
        yield oirr[0] ^ oirr[1]

    def SU2_constraint(oirr, oflow, tflow):
        return range(abs(oirr[0] - oirr[1]), oirr[0] + oirr[1] + 1, 2)

    constraint = {
        'fermionic': fermionic_constraint,
        'U(1)': U1_constraint,
        'SU(2)': SU2_constraint,
        'seniority': U1_constraint,
        'C1': pg_constraint,
        'Ci': pg_constraint,
        'C2': pg_constraint,
        'Cs': pg_constraint,
        'D2': pg_constraint,
        'C2v': pg_constraint,
        'C2h': pg_constraint,
        'D2h': pg_constraint
    }

    for ncoupling in product(*[constraint[s](c, other_f, this_f)
                               for *c, s in zip(*other_c, symmetries)]):
        yield ncoupling
1e2d71edc68b8ecebfa3e09eae17e17a381d82b4
707,113
def arrToDict(arr):
    """
    Turn an array into a dictionary where each value maps to '1';
    used for membership testing.
    """
    return dict((x, 1) for x in arr)
3202aac9a6c091d7c98fd492489dbcf2300d3a02
707,118
def getPercentGC(img, nbpix):
    """Determines if a page is in grayscale or colour mode."""
    if img.mode != "RGB":
        img = img.convert("RGB")
    gray = 0
    for (r, g, b) in img.getdata():
        if not (r == g == b):
            # optimize: if a single pixel is not gray, the whole page is coloured.
            return {"G": 0.0, "C": 100.0}
    return {"G": 100.0, "C": 0.0}
e8ee682889e0f9284cecfcf57cf260b7056c1879
707,119
def convert_to_dtype(data, dtype):
    """
    A utility function converting xarray, pandas, or NumPy data to a given dtype.

    Parameters
    ----------
    data: xarray.Dataset, xarray.DataArray, pandas.Series, pandas.DataFrame,
          or numpy.ndarray
    dtype: str or numpy.dtype
        A string denoting a Python datatype name (e.g. int, float) or a NumPy dtype
        (e.g. np.int16, np.float32) to convert the data to.
    """
    if dtype is None:  # Don't convert the data type.
        return data
    return data.astype(dtype)
ec3130311fe9c136707d5afb8f564b4f89067f4e
707,120
def strip_headers(data):
    """Strips headers from data (deprecated)."""
    try:
        return data['items']
    except (TypeError, KeyError) as e:
        print(e)
        return data
2eb044e45043f103fff76bfa47007dbcd4aa49c7
707,121
import pytz

def str_to_timezone(tz):
    """Build a timezone from a string."""
    return pytz.timezone(tz) if tz else pytz.utc
02c004171f50ceb4b60272769036634f6778c791
707,122
def find_matching_format_function(word_with_formatting, format_functions):
    """
    Finds the formatter function from a list of formatter functions which
    transforms a word into itself. Returns an identity function if none exists.
    """
    for formatter in format_functions:
        formatted_word = formatter(word_with_formatting)
        if word_with_formatting == formatted_word:
            return formatter
    return lambda word: word
3d2ce0956de4c8ca0de6d0d21f8bbd718247caff
707,123
import csv
from datetime import datetime

def convert_to_csv(items):
    """
    Args:
        items: all arns in a region from the DynamoDB query as a list
    returns:
        csv_body: body of the csv file to write out
    """
    fieldnames = ["Package", "Package Version", "Status", "Expiry Date", "Arn"]
    # sort by package, and then created date (oldest to newest)
    sorted_items = sorted(items, key=lambda i: (i["pckg"].lower(), i["crtdDt"]))
    with open("/tmp/packages.csv", "w", newline="") as csvfile:
        writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
        writer.writeheader()
        for item in sorted_items:
            # convert datetime to human readable
            try:
                if item["exDt"]:
                    item["exDt"] = datetime.utcfromtimestamp(item["exDt"]).isoformat()
            except KeyError:
                item["exDt"] = ""
            csv_item = {
                "Package": item["pckg"],
                "Package Version": item["pckgVrsn"],
                "Arn": item["arn"],
                "Status": item["dplySts"],
                "Expiry Date": item["exDt"],
            }
            writer.writerow(csv_item)
    with open("/tmp/packages.csv", "r") as csvfile:
        csv_text = csvfile.read()
    return csv_text
6e651065f06595e9b964bee1b8dab2965e0076f6
707,124
import re

def match(text: str, pattern: str) -> bool:
    """
    Match a text against a given regular expression.

    :param text: string to examine.
    :param pattern: regular expression.
    :returns: ``True`` if pattern matches the string.
    """
    return re.match(pattern, text) is not None
a59d71283766c5079e8151e8be49501246218001
707,125
import platform

def get_os():
    """Get the current operating system.

    :returns: The OS platform (str).
    """
    return platform.system()
307c6c94573733d900b2e31cfc8bcf3db8b6e5b7
707,126
import time
import math

def time_since(since, m_padding=2, s_padding=2):
    """Elapsed time since last record point."""
    now = time.time()
    s = now - since
    m = math.floor(s / 60)
    s -= m * 60
    return '{}m:{}s'.format(str(int(m)).zfill(m_padding),
                            str(int(s)).zfill(s_padding))
62641b723bf286f54280bb5c6fb1d54c9753907c
707,127
from typing import Tuple
import math

def euler_to_quaternion(roll: float = 0, pitch: float = 0,
                        yaw: float = 0) -> Tuple[float, float, float, float]:
    """Convert Euler to Quaternion

    Args:
        roll (float): roll angle in radian (x-axis)
        pitch (float): pitch angle in radian (y-axis)
        yaw (float): yaw angle in radian (z-axis)

    Returns:
        Tuple[float, float, float, float]: x, y, z, w
    """
    # Abbreviations for the various angular functions
    cy = math.cos(yaw * 0.5)
    sy = math.sin(yaw * 0.5)
    cp = math.cos(pitch * 0.5)
    sp = math.sin(pitch * 0.5)
    cr = math.cos(roll * 0.5)
    sr = math.sin(roll * 0.5)

    # Quaternion
    w = cr * cp * cy + sr * sp * sy
    x = sr * cp * cy - cr * sp * sy
    y = cr * sp * cy + sr * cp * sy
    z = cr * cp * sy - sr * sp * cy

    return x, y, z, w
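A quick sanity check of the conversion, using hypothetical input values: a pure 90-degree yaw should produce a rotation purely about the z-axis, with x = y = 0 and z = w ≈ 0.7071.

import math

# 90-degree yaw, no roll/pitch: expect (0.0, 0.0, 0.7071, 0.7071)
x, y, z, w = euler_to_quaternion(roll=0.0, pitch=0.0, yaw=math.pi / 2)
print(round(x, 4), round(y, 4), round(z, 4), round(w, 4))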
e8346172f07510c377e14827842eb18f1631402e
707,128
import os

def get_dashboard_oauth_client_id():
    """Gets the client ID used to authenticate with Identity-Aware Proxy
    from the environment variable DASHBOARD_OAUTH_CLIENT_ID."""
    return os.environ.get('DASHBOARD_OAUTH_CLIENT_ID')
37a0ba23dab00d2e43fc1cbd124713d2231d91a9
707,129
import os

def is_running(process):
    """Returns True if the requested process looks like it's still running."""
    if not process[0]:
        return False  # The process doesn't exist
    if process[1]:
        return process[1].poll() is None
    try:
        # check if the process is active by sending a dummy signal
        os.kill(process[0]['pid'], 0)
    except ProcessLookupError:
        return False
    return True
7dc002da5bbd87c5d8d8745fd49e6723478186c4
707,131
def table_exists(conn, table_name, schema=False):
    """Checks if a table exists.

    Parameters
    ----------
    conn
        A Psycopg2 connection.
    table_name : str
        The table name.
    schema : str
        The schema to which the table belongs.
    """
    cur = conn.cursor()
    table_exists_sql = ('select * from information_schema.tables '
                        f'where table_name={table_name!r}')
    if schema:
        table_exists_sql += f' and table_schema={schema!r}'
    cur.execute(table_exists_sql)
    return bool(cur.rowcount)
c9b698afbe795a6a73ddfb87b2725c3c4205f35e
707,132
def aggregate_by_player_id(statistics, playerid, fields):
    """
    Inputs:
      statistics - List of batting statistics dictionaries
      playerid   - Player ID field name
      fields     - List of fields to aggregate
    Output:
      Returns a nested dictionary whose keys are player IDs and whose values
      are dictionaries of aggregated stats. Only the fields from the fields
      input will be aggregated in the aggregated stats dictionaries.
    """
    players = {}
    # create nested dict with outer keys of player ids and inner dict of fields
    for dic in statistics:
        if dic[playerid] not in players:
            players[dic[playerid]] = {playerid: dic[playerid]}
            for field in fields:
                players[dic[playerid]][field] = 0
    # loop through statistics again, incrementing field values
    for dic in statistics:
        for field in fields:
            players[dic[playerid]][field] += int(dic[field])
    return players
c137fc8820f8898ebc63c54de03be5b919fed97a
707,133
import pickle

def loadStatesFromFile(filename):
    """Loads a list of states from a file."""
    try:
        with open(filename, 'rb') as inputfile:
            result = pickle.load(inputfile)
    except:
        result = []
    return result
cc2f64a977ff030ec6af94d3601c094e14f5b584
707,134
import re

def is_mismatch_before_n_flank_of_read(md, n):
    """
    Returns True if there is a mismatch before the first n nucleotides
    of a read, or if there is a mismatch before the last n nucleotides
    of a read.

    :param md: string
    :param n: int
    :return is_mismatch: boolean
    """
    is_mismatch = False
    flank_mm_regex = r"^(\d+).*[ACGT](\d+)$"
    flank_mm = re.findall(flank_mm_regex, md)
    if flank_mm:
        flank_mm = flank_mm[0]
        if flank_mm[1]:
            if int(flank_mm[1]) < n:
                is_mismatch = True
        if flank_mm[0]:
            if int(flank_mm[0]) < n:
                is_mismatch = True
    return is_mismatch
1e41c67e29687d93855ed212e2d9f683ef8a88d7
707,135
import time

def duration(func):
    """Timing decorator."""
    def wrapper(*args, **kwargs):
        print('2')
        start = time.time()
        f = func(*args, **kwargs)
        print(str("Scan finished, took ") + str(int(time.time() - start)) + " seconds!")
        return f
    return wrapper
c55a941574a92cbe70c9b265eaa39563b91ab45a
707,136
def _read_txt(file_path: str) -> str:
    """
    Read specified file path's text.

    Parameters
    ----------
    file_path : str
        Target file path to read.

    Returns
    -------
    txt : str
        Read txt.
    """
    with open(file_path) as f:
        txt: str = f.read()
    return txt
5f0657ee223ca9f8d96bb612e35304a405d2339e
707,137
def load_data(data_map, config, log):
    """Collect data locally and write to CSV.

    :param data_map: transform DataFrame map
    :param config: configurations
    :param log: logger object
    :return: None
    """
    for key, df in data_map.items():
        (df
         .coalesce(1)
         .write
         .csv(f'{config["output"]}/{key}',
              mode='overwrite',
              header=True))
    return None
2b690c4f5970df7f9e98ce22970ce3eb892f15bc
707,139
import logging

def _filter_credential_warning(record) -> bool:
    """Rewrite out credential not found message."""
    if (
        not record.name.startswith("azure.identity")
        or record.levelno != logging.WARNING
    ):
        return True
    message = record.getMessage()
    if ".get_token" in message:
        if message.startswith("EnvironmentCredential"):
            print("Attempting to sign-in with environment variable credentials...")
        if message.startswith("AzureCliCredential"):
            print("Attempting to sign-in with Azure CLI credentials...")
        if message.startswith("ManagedIdentityCredential"):
            print("Attempting to sign-in with Managed Instance credentials...")
            print("Falling back to interactive logon.")
    return not message
bc9d2a96ccadfbdb297af86bbdf0f80ab8d2dafa
707,140
def extract_first_value_in_quotes(line, quote_mark):
    """
    Extracts first value in quotes (single or double) from a string.
    Line is left-stripped from whitespaces before extraction.

    :param line: string
    :param quote_mark: type of quotation mark: ' or "
    :return: Dict:
        'value': extracted value;
        'remainder': the remainder after extraction;
        'error': empty string if success or 'syntax' otherwise.
    """
    line = line.lstrip()
    result = {'value': '', 'remainder': line, 'error': 'syntax'}
    if len(line) < 2:
        return result
    if line[0] != quote_mark:
        return result
    next_qm_pos = line.find(quote_mark, 1)
    if next_qm_pos == -1:
        return result
    result['value'] = line[1:next_qm_pos]
    result['remainder'] = line[next_qm_pos + 1:]
    result['error'] = ''
    return result
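A small usage sketch with a hypothetical input line, showing the shape of the returned dictionary:

parsed = extract_first_value_in_quotes('  "hello" world', '"')
print(parsed)  # {'value': 'hello', 'remainder': ' world', 'error': ''}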
4f614cbbb3a1a04ece0b4da63ea18afb32c1c86b
707,141
import yaml

def read_login_file():
    """
    Parse the credentials file into username and password.

    Returns
    -------
    dict
    """
    with open('.robinhood_login', 'r') as login_file:
        credentials = yaml.safe_load(login_file)
    return credentials
16ef8a74c9523ac0809e80995069c3bbc0e8f8c0
707,142
def format_dependency(dependency: str) -> str:
    """Format the dependency for the table."""
    return "[coverage]" if dependency == "coverage" else f"[{dependency}]"
981a38074dbfb1f332cc49bce2c6d408aad3e9e2
707,143
import os
import csv

def map_pao1_genes(gene_list):
    """Takes a list of PAO1 genes and returns the corresponding PA14 names."""
    pa14_pao1_mapping = dict()
    mapping_path = os.path.join(os.getcwd(), 'data',
                                'ortholuge_pa14_to_pao1_20190708.tsv')
    with open(mapping_path) as mapping:
        reader = csv.reader(mapping, delimiter='\t')
        for row in reader:
            pa14_pao1_mapping[row[4]] = row[10]
    pa14_genes = [pa14_pao1_mapping[gene] for gene in gene_list
                  if gene in pa14_pao1_mapping.keys()]
    return pa14_genes
675cf26d259bee1f6ff148f1a4ad2a71b8253ef5
707,144
def get_counts_by_domain(df):
    """
    Parameters:
        df (pandas.Dataframe) - form of `get_counts_df` output

    Returns:
        pandas.Dataframe
    """
    columns = ['study', 'study_label', 'domain_code', 'domain_label']
    df2 = df.groupby(columns, as_index=False)[["count", "subjects"]].max()
    return df2
544aaa734858209c36c84d87bb6beb05761a5194
707,145
def plot(ax, x, y):
    """Plot """
    return ax._plot(x, y)
90cc2616d21e3c1239524437f653f85602c1984b
707,146
def horizontal_plate_natual_convection_2(Gr, Pr):
    """Hot side downward, or cold side upward. Valid for 1e5 < Ra < 1e10."""
    Ra = Gr * Pr
    return 0.27 * Ra**0.25
bc44118e871e977a7ecb6a877f7232b837d1bf0e
707,147
def _unpickle_injected_object(base_class, mixin_class, class_name=None):
    """
    Callable for the pickler to unpickle objects of a dynamically created class
    based on the InjectableMixin. It creates the base object from the original
    base class and re-injects the mixin class when unpickling an object.

    :param type base_class: The base class of the pickled object before adding
        the mixin via injection.
    :param type mixin_class: The :class:`InjectableMixin` subclass that was
        injected into the pickled object.
    :param str class_name: The class name of the pickled object's dynamically
        created class.
    :return: The initial unpickled object (before the pickler restores the
        object's state).
    """
    obj = base_class.__new__(base_class, ())
    return mixin_class.inject_into_object(obj, class_name)
1821509506ad31dcdb21f07a2b83c544ff3c3eb3
707,148
from pathlib import Path
import re

def parse_endfblib(libdir):
    """Parse ENDF/B library

    Parameters
    ----------
    libdir : str
        directory with ENDFB file structure
    """
    filepaths = []
    nuclidnames = []
    endf_dir = Path(libdir)
    neutron_files = tuple((endf_dir / "neutrons").glob("*endf"))
    for n in neutron_files:
        filepaths.append(n.absolute())
        nuclidnames.append(n.name.split('_')[1]
                           + re.split("^0*", n.name.split('_')[2][:-5])[-1])
    return nuclidnames, filepaths
3587b849132e4b2eeb6ad184bf58755340473bd9
707,149
import colorsys

def hsl_to_rgb(hsl):
    """Convert hsl colorspace values to RGB."""
    # Convert hsl to 0-1 ranges.
    h = hsl[0] / 359.
    s = hsl[1] / 100.
    l = hsl[2] / 100.
    hsl = (h, s, l)
    # colorsys expects (hue, lightness, saturation) and returns values between 0 and 1
    tmp = colorsys.hls_to_rgb(h, l, s)
    # convert to 0 to 255
    r = int(round(tmp[0] * 255))
    g = int(round(tmp[1] * 255))
    b = int(round(tmp[2] * 255))
    return (r, g, b)
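A quick check with a hypothetical input: pure red in HSL is (0, 100, 50), which should map to (255, 0, 0).

print(hsl_to_rgb((0, 100, 50)))  # expected: (255, 0, 0)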
4417ce8468e71b7139b57fe270809c7030b2c3df
707,151
def find_external_nodes(digraph):
    """Return a set of external nodes in a directed graph.

    External nodes are nodes that are referenced as a dependency but not
    defined as a key in the graph dictionary.
    """
    external_nodes = set()
    for ni in digraph:
        for nj in digraph[ni]:
            if nj not in digraph:
                external_nodes.add(nj)
    return external_nodes
de63af1b649e450214907dd704bde782820d393d
707,152
import six

def strip(val):
    """
    Strip val, which may be str or iterable of str.

    For str input, returns the stripped string, and for iterable input,
    returns a list of str values without empty str (after strip) values.
    """
    if isinstance(val, six.string_types):
        return val.strip()
    try:
        return list(filter(None, map(strip, val)))
    except TypeError:
        return val
893986e69f6d64167f45daf30dacb72f4b7f2bff
707,153
import math

def tau_polinomyal_coefficients(z):
    """
    Coefficients (z-dependent) for the log(tau) formula from
    Raiteri C.M., Villata M. & Navarro J.F., 1996, A&A 315, 105-115
    """
    log_z = math.log10(z)
    log_z_2 = log_z ** 2

    a0 = 10.13 + 0.07547 * log_z - 0.008084 * log_z_2
    a1 = -4.424 - 0.7939 * log_z - 0.1187 * log_z_2
    a2 = 1.262 + 0.3385 * log_z + 0.05417 * log_z_2

    return [a0, a1, a2]
ebef7d773eeb400ef87553fc5838ee2cb97d0669
707,154
def get_all_playlist_items(playlist_id, yt_client):
    """Get a list of video ids of videos currently in playlist."""
    return yt_client.get_playlist_items(playlist_id)
c7a8cc806b552b1853eba1d8223aa00225d5539e
707,155
def get_library_isotopes(acelib_path):
    """
    Returns the isotopes in the cross section library

    Parameters
    ----------
    acelib_path : str
        Path to the cross section library
        (i.e. '/home/luke/xsdata/endfb7/sss_endfb7u.xsdata')

    Returns
    -------
    iso_array: array
        array of isotopes in cross section library
    """
    lib_isos_list = []
    with open(acelib_path, 'r') as f:
        lines = f.readlines()
    for line in lines:
        iso = line.split()[0]
        lib_isos_list.append(iso)
    return lib_isos_list
d93d319b84c02b8156c5bad0998f5943a5bbe8ae
707,156
import json

def odict_to_json(odict):
    """Dump an OrderedDict into JSON series."""
    json_series = json.dumps(odict)
    return json_series
d18a4e0f0d11a2c529edb395671052f15ad8071d
707,157
def encode_data(data):
    """
    Helper that converts :class:`str` or :class:`bytes` to :class:`bytes`.
    :class:`str` are encoded with UTF-8.
    """
    # Expect str or bytes, return bytes.
    if isinstance(data, str):
        return data.encode('utf-8')
    elif isinstance(data, bytes):
        return data
    else:
        raise TypeError("data must be bytes or str")
3cd54389719439e8f18cf02b110af07799c946b5
707,158
def _get_bit(h, i):
    """Return specified bit from string for subsequent testing."""
    h1 = int.from_bytes(h, 'little')
    return (h1 >> i) & 0x01
b9b672c87b35369dc86abec7005dfeed3e99eb67
707,159
def go_down_right_reward(nobs, high_pos, agent_num, act):
    """
    Return a reward for going to the low or right side of the board

    :param nobs: The current observation
    :param high_pos: Tuple of lowest and most-right position
    :param agent_num: The id of the agent to check (0-3)
    :param act: The actions taken by the agents
    :return: The reward for going down or right
    """
    # only give rewards if a new highest point is reached
    bomb_bonus = 0
    if act[agent_num] == 5:
        bomb_bonus = 0.00
    if nobs[agent_num]['position'][0] > high_pos[0]:
        return 1 + bomb_bonus, (nobs[agent_num]['position'][0], high_pos[1])
    elif nobs[agent_num]['position'][1] > high_pos[1]:
        return 1 + bomb_bonus, (high_pos[0], nobs[agent_num]['position'][1])
    else:
        return 0 + bomb_bonus, high_pos
bd8c6f01b55e14cc498cc251b1c0cc92340506c7
707,160
def make_sid_cookie(sid, uri):
    """Given a sid (from a set-cookie) figure out how to send it back"""
    # sometime near 0.92, port got dropped...
    # uritype, uribody = urllib.splittype(uri)
    # host, path = urllib.splithost(uribody)
    # host, port = urllib.splitnport(host)
    # if port == -1:
    #     port = dict(http=80, https=443)[uritype]  # we want to throw here
    cookiename = "JIFTY_SID_HIVEMINDER"
    return "%s=%s" % (cookiename, sid)
d194bcb8f47acfbbab9d7405ff9a23069b74f077
707,161
def identity_filter(element_tuple):
    """
    element_tuple consists of the (name, attrs) of each XML element
    retrieved by the startElement method.
    """
    return element_tuple
c50208f345f40acce58df86cdae4432aae24cf4b
707,162
from typing import Any

def safe_string(value: Any) -> str:
    """
    Consistently converts a value to a string.

    :param value: The value to stringify.
    """
    if isinstance(value, bytes):
        return value.decode()
    return str(value)
0ba8dcfe028ac6c45e0c17f9ba02014c2f746c4d
707,163
from typing import Union

def tp(selector: Union[str, tuple] = "@s",
       selector2: Union[str, tuple] = ("~", "~", "~")):
    """
    selector:Union[str, tuple]  -> The position to be moved from
    selector2:Union[str, tuple] -> The position to be moved to
    """
    if not ((isinstance(selector, str) or isinstance(selector, tuple))
            and (isinstance(selector2, str) or isinstance(selector2, tuple))):
        return "## Tp command hasn't been configured properly ##"
    if isinstance(selector, tuple):
        if len(selector) < 3:
            selector = ("~", "~", "~")
        return f"tp {selector[0]} {selector[1]} {selector[2]}\n"
    else:
        if isinstance(selector2, tuple):
            if len(selector2) < 3:
                selector2 = ("~", "~", "~")
            return f"tp {selector} {selector2[0]} {selector2[1]} {selector2[2]}\n"
        else:
            return f"tp {selector} {selector2}\n"
81b3baf308f412bae3718fe165028a970fe56bda
707,164
from typing import List

def count_short_tail_keywords(keywords: List[str]) -> int:
    """
    Returns the count of short tail keywords in a list of keywords.

    Parameters:
        keywords (List[str]): list with all keywords as strings.

    Returns:
        total (int): count of short tail keywords (1 or 2 words per keyword).
    """
    total = 0
    for keyword in keywords:
        keyword_list = keyword.split()
        # short tail keyword: one or two words
        if 1 <= len(keyword_list) <= 2:
            total += 1
    return total
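A short usage sketch with a hypothetical keyword list; only the one- and two-word keywords are counted:

keywords = ["shoes", "running shoes", "best running shoes for flat feet"]
print(count_short_tail_keywords(keywords))  # 2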
1af42d71be75d9279584a8c3edc090a39ec6cf77
707,165
def is_odd(number):
    """Determine if a number is odd."""
    if number % 2 == 0:
        return False
    else:
        return True
4efe5114f2e25431808492c768abc0f750e63225
707,166
def fmt_quil_str(raw_str):
    """Format a raw Quil program string

    Args:
        raw_str (str): Quil program typed in by user.

    Returns:
        str: The Quil program with leading/trailing whitespace trimmed.
    """
    raw_quil_str = str(raw_str)
    raw_quil_str_arr = raw_quil_str.split('\n')
    trimmed_quil_str_arr = [qs.strip() for qs in raw_quil_str_arr]
    trimmed_quil_str = '\n'.join([x for x in trimmed_quil_str_arr])
    return trimmed_quil_str
e95c26f3de32702d6e44dc09ebbd707da702d964
707,167
from typing import Optional
from typing import Callable
from typing import Literal

def _not_json_encodable(message: str,
                        failure_callback: Optional[Callable[[str], None]]) -> Literal[False]:
    """
    Utility to fail (return `False`) by first calling an optional
    failure callback.
    """
    if failure_callback:
        failure_callback(message)
    return False
6979261a5f14a32c1ae34d01bad346344f38ed14
707,168
def bitwise_dot(x, y):
    """Compute the dot product of two integers bitwise."""
    def bit_parity(i):
        n = bin(i).count("1")
        return int(n % 2)
    return bit_parity(x & y)
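A worked example with hypothetical operands: the bitwise dot product is the parity of the set bits of x AND y, i.e. a dot product over GF(2).

print(bitwise_dot(0b1011, 0b1110))  # 0b1010 has two 1-bits  -> 0
print(bitwise_dot(0b1011, 0b0100))  # 0b0000 has zero 1-bits -> 0
print(bitwise_dot(0b1011, 0b0010))  # 0b0010 has one 1-bit   -> 1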
074b09a92e3e697eb08b8aaefa6ffd05d58698f4
707,169
import os

def get_env(env_name: str) -> str:
    """
    Safely read an environment variable.
    Raises errors if it is not defined or it is empty.

    :param env_name: the name of the environment variable
    :return: the value of the environment variable
    """
    if env_name not in os.environ:
        raise KeyError(f"{env_name} not defined")
    env_value: str = os.environ[env_name]
    if not env_value:
        raise ValueError(f"{env_name} has yet to be configured")
    return env_value
742a251561e02f59da667d8ebc586d5e0b399103
707,170
def validate_mash(seq_list, metadata_reports, expected_species):
    """
    Takes a species name as a string (i.e. 'Salmonella enterica') and creates
    a dictionary with keys for each Seq ID and boolean values indicating whether
    the value pulled from MASH_ReferenceGenome matches the string or not

    :param seq_list: List of OLC Seq IDs
    :param metadata_reports: Dictionary retrieved from get_combined_metadata()
    :param expected_species: String containing expected species
    :return: Dictionary with Seq IDs as keys and True/False as values
    """
    seq_status = {}
    for seqid in seq_list:
        print('Validating MASH reference genome for {} '.format(seqid))
        df = metadata_reports[seqid]
        observed_species = df.loc[df['SeqID'] == seqid]['MASH_ReferenceGenome'].values[0]
        if observed_species == expected_species:
            seq_status[seqid] = True
        else:
            seq_status[seqid] = False
    return seq_status
9eb4fd6e1f156a4fed3cc0be0c5b7153a05b038b
707,171
def redirect_to_url(url):
    """Return a bcm dictionary with a command to redirect to 'url'."""
    return {'mode': 'redirect', 'url': url}
01e4deb80bbd8f8e119c99d64001866c6cd644d9
707,172
def sqrt(number):
    """
    Calculate the floored square root of a number

    Args:
       number(int): Number to find the floored squared root

    Returns:
       (int): Floored Square Root
    """
    assert number >= 0, 'Only square root of positive numbers are valid'
    start = 0
    end = number
    res = None
    while start <= end:
        middle = (start + end) // 2
        square = middle ** 2
        next_square = (middle + 1) ** 2
        if square <= number and next_square > number:
            res = middle
            break
        if square > number:
            end = middle - 1
        else:
            start = middle + 1
    return res
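Because the binary search runs over integers, the result is the floor of the true square root. A quick sketch of the expected behaviour on hypothetical inputs:

print(sqrt(16))  # 4
print(sqrt(27))  # 5, since 5*5 = 25 <= 27 < 36 = 6*6
print(sqrt(0))   # 0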
7ed4d547e0dbabebff7ffdf1e368817a415cbb9e
707,173
from typing import Mapping
from typing import OrderedDict

def walk_json(d, func):
    """Walk over a parsed JSON nested structure `d`, apply `func` to each leaf
    element and replace it with the result.
    """
    if isinstance(d, Mapping):
        return OrderedDict((k, walk_json(v, func)) for k, v in d.items())
    elif isinstance(d, list):
        return [walk_json(v, func) for v in d]
    else:
        return func(d)
cc977f4cf3eaec03bd591fa4cd1e44ab5717caee
707,174
def sum_fn(xnum, ynum):
    """A function which performs a sum."""
    return xnum + ynum
61a1ae2e4b54348b9e3839f7f2779edd03f181df
707,176
def matlabize(s):
    """Make string s suitable for use as a MATLAB function/script name."""
    s = s.replace(' ', '_')
    s = s.replace('.', '_')
    s = s.replace('-', '_')
    assert len(s) <= 63  # MATLAB function/script name length limitation
    return s
5dccb9497a3ee28dae5fb7de6e15a1fa02f144cf
707,177
def get_spec_res(z=2.2, spec_res=2.06, pix_size=1.8):
    """
    Calculates the pixel size (pix_size) and spectral resolution (spec_res)
    in km/s for the MOCK SPECTRA.

    arguments: z, redshift. spec_res, spectral resolution in Angstrom.
               pix_size, pixel size in Angstrom.
    returns: (pixel_size, spec_res) in km/s
    """
    # conversion factor from Angstrom to km/s at any redshift
    conv_fac = 3e5 * 0.000823 / (1 + z)
    return (pix_size * conv_fac, spec_res * conv_fac)
597db8ce00c071624b0877fe211ab9b01ec889de
707,178
def user_enter_state_change_response():
    """
    Prompts the user to enter a key event response.

    nothing -> str
    """
    return input('>> ')
22da5cb99fa603c3dff04e8afd03cb9fae8210cd
707,179
def overlap(a, b):
    """Check if two intervals overlap.

    Positional arguments:
    a -- First interval.
    b -- Second interval.
    """
    return a[1] > b[0] and a[0] < b[1]
88860f46a94eb53f1d6f636211916dd828c83550
707,180
def decipher(string, key, a2i_dict, i2a_dict):
    """
    This function is BASED on https://github.com/jameslyons/pycipher
    """
    key = [k.upper() for k in key]
    ret = ''
    for (i, c) in enumerate(string):
        i = i % len(key)
        ret += i2a_dict[(a2i_dict[c] - a2i_dict[key[i]]) % len(a2i_dict)]
    return ret
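A usage sketch with hypothetical alphabet maps over A-Z, deciphering a Vigenère-style ciphertext by subtracting the repeating key:

alphabet = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
a2i = {c: i for i, c in enumerate(alphabet)}
i2a = {i: c for i, c in enumerate(alphabet)}

print(decipher("LXFOPVEFRNHR", "LEMON", a2i, i2a))  # ATTACKATDAWN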
a414892f8ccf18ab5d3189b662b284939c931382
707,181
import platform

def get_default_command() -> str:
    """get_default_command returns a command to execute the default output of
    g++ or clang++.

    The value is basically `./a.out`, but `.\\a.exe` on Windows.

    The type of return values must be `str` and must not be `pathlib.Path`,
    because the strings `./a.out` and `a.out` are different as commands but
    same as a path.
    """
    if platform.system() == 'Windows':
        return r'.\a.exe'
    return './a.out'
d06abdefab189f9c69cba70d9dab25ce83bebc75
707,182
def object_type(r_name):
    """
    Derives an object type (i.e. ``user``) from a resource name (i.e. ``users``)

    :param r_name:
        Resource name, i.e. would be ``users`` for the resource index URL
        ``https://api.pagerduty.com/users``
    :returns: The object type name; usually the ``type`` property of an
        instance of the given resource.
    :rtype: str
    """
    if r_name.endswith('ies'):  # Because English
        return r_name[:-3] + 'y'
    else:
        return r_name.rstrip('s')
b74e373691edf8a8b78c2a3ff5d7b9666504330a
707,183
def convert_to_roman_numeral(number_to_convert):
    """
    Converts Hindi/Arabic (decimal) integers to Roman Numerals.

    Args:
        param1: Hindi/Arabic (decimal) integer.

    Returns:
        Roman Numeral, or an empty string for zero.
    """
    arabic_numbers = (1000, 900, 500, 400, 100, 90, 50, 40, 10, 9, 5, 4, 1)
    roman_numerals = ('M', 'CM', 'D', 'CD', 'C', 'XC', 'L', 'XL', 'X', 'IX',
                      'V', 'IV', 'I')
    result = ""
    for index, arabic_number in enumerate(arabic_numbers):
        count = int(number_to_convert / arabic_number)
        result += roman_numerals[index] * count
        number_to_convert -= arabic_numbers[index] * count
    return result
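A worked example of the greedy subtraction with a hypothetical input: 1994 consumes 1000 (M), then 900 (CM), then 90 (XC), then 4 (IV).

print(convert_to_roman_numeral(1994))  # MCMXCIV
print(convert_to_roman_numeral(0))     # "" (empty string for zero)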
f970517a7c2d1ceb13ec025d6d446499ce5c21ff
707,184
def _from_atoms_and_bonds(atm_dct, bnd_dct):
    """Construct a molecular graph from atom and bond dictionaries.

    format:
        gra = (atm_dct, bnd_dct)

    :param atm_dct: atom dictionary
    :type atm_dct: dict
    :param bnd_dct: bond dictionary
    :type bnd_dct: dict
    :rtype: (dict, dict)
    """
    atm_dct = dict(atm_dct)
    bnd_dct = dict(bnd_dct)
    atm_keys = set(atm_dct.keys())
    bnd_keys = set(bnd_dct.keys())
    assert all(bnd_key <= atm_keys for bnd_key in bnd_keys)
    return (atm_dct, bnd_dct)
0bd2d37442e2a141d9a0f81f77b6b45c5b82c06a
707,185
import textwrap

def public_key():
    """ returns public key """
    return textwrap.dedent('''
    -----BEGIN RSA PUBLIC KEY-----
    MIIBCgKCAQEAwBLTc+75h13ZyLWlvup0OmbhZWxohLMMFCUBClSMxZxZdMvyzBnW
    +JpOQuvnasAeTLLtEDWSID0AB/EG68Sesr58Js88ORUw3VrjObiG15/iLtAm6hiN
    BboTqd8jgWr1yC3LfNSKJk82qQzHJPlCO9Gc5HcqvWrIrqrJL2kwjOU66U/iRxJu
    dyOrz0sBkVgfwDBqNS96L0zFQCqk70w9KyOJqe4JNJUtBas6lbwgChDU4/B3BDW5
    PYJy2Pp8MSs2n1lhrUkXxRnj+Vl5wLQLdwog1XAGu2J8pIckPg/aB7mB/fSlFihU
    bnFlRlgHrlh8gyNYztbGWKMrQ4Bz2831PQIDAQAB
    -----END RSA PUBLIC KEY-----
    ''')
4d27c3e72714bccd885178c05598f0f1d8d7914d
707,186
import os

def ReadKeywordValueInFile(filename, keyword):
    """
    Get the value in an expression of the form keyword=value in a file.

    :param str filename: file name
    :param str keyword: keyword string
    :return: value(str) - value string
    """
    value = None
    lenkey = len(keyword)
    if not os.path.exists(filename):
        return value
    found = False
    f = open(filename)
    for s in f.readlines():
        cm = s.find('#')
        if cm > 0:
            s = s[:cm]
        s = s.strip()
        if len(s) == 0:
            continue
        items = s.split()
        for item in items:
            if item[:lenkey] == keyword:
                found = True
                value = item.split('=')[1]
                value = value.strip()
                break
    f.close()
    if not found:
        value = None
    return value
cea315129a39384a80de227d17f647d24fcd27c7
707,187
from typing import List
import os

def add_abspath(dirs: List):
    """Recursively append the absolute path to the paths in a nested list

    If not a list, returns the string with absolute path.
    """
    if isinstance(dirs, list):
        for i, elem in enumerate(dirs):
            if isinstance(elem, str):
                dirs[i] = os.path.abspath(elem)
            else:
                dirs[i] = add_abspath(elem)
        return dirs
    else:
        return os.path.abspath(dirs)
544fb5bb680b6a7874c7364090109ee3cdc75632
707,188
import re

def navigation_target(m) -> re.Pattern:
    """A target to navigate to. Returns a regular expression."""
    if hasattr(m, 'any_alphanumeric_key'):
        return re.compile(re.escape(m.any_alphanumeric_key), re.IGNORECASE)
    if hasattr(m, 'navigation_target_name'):
        return re.compile(m.navigation_target_name)
    return re.compile(re.escape(m.text), re.IGNORECASE)
62cc847f5454e76afb128fd752b7fa83fd2e167e
707,189
def get_table_names(self, connection, schema=None, **kw):
    """Get table names

    Args:
        connection ():
        schema ():
        **kw:

    Returns:
    """
    return self._get_table_or_view_names(["r", "e"], connection, schema, **kw)
e66ae9eb284e10785c7172ab36c79b25a48dce47
707,190
import re

def wikify(value):
    """Converts value to wikipedia "style" of URLs: removes non-word
    characters, converts whitespace to underscores and leaves the case of
    value unchanged.
    """
    value = re.sub(r'[^\w\s-]', '', value).strip()
    return re.sub(r'[-\s]+', '_', value)
dc4504ea6eb7905b5e18a1d1f473a4f337697b26
707,192
def _tolist(arg):
    """
    Assure that *arg* is a list, e.g. if string or None are given.

    Parameters
    ----------
    arg :
        Argument to make list

    Returns
    -------
    list
        list(arg)

    Examples
    --------
    >>> _tolist('string')
    ['string']
    >>> _tolist([1,2,3])
    [1, 2, 3]
    >>> _tolist(None)
    [None]
    """
    if isinstance(arg, str):
        return [arg]
    try:
        return list(arg)
    except TypeError:
        return [arg]
e4293991eeb6d15470511281680af44353232c37
707,193
def ConvertToFloat(line, colnam_list):
    """
    Convert some columns (in colnam_list) to float, rounded to 3 decimals.

    :param line: a dictionary from DictReader.
    :param colnam_list: float columns
    :return: a new dictionary
    """
    for name in colnam_list:
        line[name] = round(float(line[name]), 3)
    return line
e95fd6cfa9bb57060fdd835eea139fd9c67bc211
707,194
from typing import List
import json

def transform_application_assigned_users(json_app_data: str) -> List[str]:
    """
    Transform application users data for graph consumption

    :param json_app_data: raw json application data
    :return: individual user id
    """
    users: List[str] = []
    app_data = json.loads(json_app_data)
    for user in app_data:
        users.append(user["id"])
    return users
625c8f662b364bb3fe63bb26b06eaca57ae8be79
707,195
def get_day_suffix(day):
    """
    Returns the suffix of the day, such as in 1st, 2nd, ...
    """
    if day in (1, 21, 31):
        return 'st'
    elif day in (2, 22):
        return 'nd'
    elif day in (3, 23):
        return 'rd'
    else:
        return 'th'
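A quick sketch of the expected output, including the teens, which always take 'th':

for day in (1, 2, 3, 4, 11, 12, 13, 21, 22, 23):
    print(day, get_day_suffix(day))
# 1 st, 2 nd, 3 rd, 4 th, 11 th, 12 th, 13 th, 21 st, 22 nd, 23 rd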
7d9277303357de5405b3f6894cda24726d60ad47
707,196
def depends_on(*args):
    """Caches a `Model` parameter based on its dependencies.

    Example
    -------
    >>> @property
    >>> @depends_on('x', 'y')
    >>> def param(self):
    >>>     return self.x * self.y

    Parameters
    ----------
    args : list of str
        List of parameters this parameter depends on.
    """
    cache = {}

    def _wrapper(fn):
        def _fn(self):
            key = tuple(getattr(self, arg) for arg in args)
            if key not in cache:
                cache[key] = fn(self)
            return cache[key]
        return _fn
    return _wrapper
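A runnable sketch of the decorator on a hypothetical Model class; the wrapped property is recomputed only when the tuple of dependency values changes.

class Model:
    def __init__(self, x, y):
        self.x = x
        self.y = y

    @property
    @depends_on('x', 'y')
    def area(self):
        print('computing...')
        return self.x * self.y

m = Model(3, 4)
print(m.area)  # prints 'computing...' then 12
print(m.area)  # cached for key (3, 4): prints 12 only
m.x = 5
print(m.area)  # new key (5, 4): prints 'computing...' then 20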
09cdb0ad7601a953eafd01e3e19c0bdfb10dccb2
707,197
import os

def devlocation() -> str:
    """
    :return: 'local' or 'github'
    """
    return os.getenv('DEVLOCATION') or 'local'
f4ab9af75258f6c72786a4cfbbe2d7a7661873c0
707,199
import os

def repo_path():
    """
    Little function to help resolve the location of doctest_files back in
    the repository.

    :return: the absolute path to the root of the repository.
    """
    return os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
2fc1263d0c6c68e6f2e11c97d55fe9f539fb1b70
707,200
def _parse_continuous_records(prepared_page, section_dict):
    """Handle parsing a continuous list of records."""
    columns = 6
    start = prepared_page.index('Date and time')
    for i, column in enumerate(prepared_page[start:start + columns]):
        column_index = start + i
        values = prepared_page[column_index + columns::columns]
        if column in section_dict:
            section_dict[column] = section_dict[column] + values
        else:
            section_dict[column] = values
    return section_dict
7ddcb52433828d37ce6e0cac5d51d8fcfb249296
707,201
def power_law_at_2500(x, amp, slope, z):
    """Power law model anchored at 2500 AA

    This model is defined for a spectral dispersion axis in Angstroem.

    :param x: Dispersion of the power law
    :type x: np.ndarray
    :param amp: Amplitude of the power law (at 2500 A)
    :type amp: float
    :param slope: Slope of the power law
    :type slope: float
    :param z: Redshift
    :type z: float
    :return: Power law model
    :rtype: np.ndarray
    """
    return amp * (x / (2500. * (z + 1.))) ** slope
508227f332f652d00c785074c20f9acefbce9258
707,202
def extract_vuln_id(input_string):
    """Function to extract a vulnerability ID from a message."""
    if 'fp' in input_string.lower():
        wordlist = input_string.split()
        vuln_id = wordlist[-1]
        return vuln_id
    else:
        return None
06673f2b401472185c8a3e6fc373d39c171791db
707,203
import os

def ensure_paths_for_args(args):
    """
    Ensure all arguments with paths are absolute & have simplification removed.
    Just apply os.path.abspath & os.path.expanduser

    :param args: the arguments given from argparse
    :returns: an updated args
    """
    args.seqs_of_interest = os.path.abspath(
        os.path.expanduser(args.seqs_of_interest))
    args.assembly_dir = os.path.abspath(os.path.expanduser(args.assembly_dir))
    if args.output is not None:
        args.output = os.path.abspath(os.path.expanduser(args.output))
    if args.cons is not None:
        args.cons = os.path.abspath(os.path.expanduser(args.cons))
    if args.index_file is not None:
        args.index_file = os.path.abspath(os.path.expanduser(args.index_file))
    if args.existing_data is not None:
        args.existing_data = os.path.abspath(os.path.expanduser(args.existing_data))
    return args
e15b64f2856954bbb7f61d44084d01bb8cdc53ba
707,204
from typing import Any

def _element(
    html_element: str,
    html_class: str,
    value: Any,
    is_visible: bool,
    **kwargs,
) -> dict:
    """
    Template to return container with information for a <td></td> or
    <th></th> element.
    """
    if "display_value" not in kwargs:
        kwargs["display_value"] = value
    return {
        "type": html_element,
        "value": value,
        "class": html_class,
        "is_visible": is_visible,
        **kwargs,
    }
4ce4d2ff9f547470d4a875508c40d3ae2a927ba0
707,205
def get_gene_summary(gene):
    """Gets gene summary from a model's gene."""
    return {
        gene.id: {
            "name": gene.name,
            "is_functional": gene.functional,
            "reactions": [{rxn.id: rxn.name} for rxn in gene.reactions],
            "annotation": gene.annotation,
            "notes": gene.notes,
        }
    }
dd9cb3f8e9841a558898c67a16a02da1b39479d2
707,206
def tle_fmt_float(num, width=10):
    """Return a left-aligned signed float string, with no leading zero left
    of the decimal."""
    digits = width - 2
    ret = "{:<.{DIGITS}f}".format(num, DIGITS=digits)
    if ret.startswith("0."):
        return " " + ret[1:]
    if ret.startswith("-0."):
        return "-" + ret[2:]
    return ret
686cb4061e5cf2ad620b85b0e66b96a8cd1c3abf
707,207
import importlib
import re

def load_class_by_path(taskpath):
    """Given a taskpath, returns the main task class."""
    return getattr(importlib.import_module(re.sub(r"\.[^.]+$", "", taskpath)),
                   re.sub(r"^.*\.", "", taskpath))
a9601dafbc73635d81732a0f3747fd450e393d76
707,208
def is_vulgar(words, sentence):
    """Checks if a given line has any of the bad words from the bad words list."""
    for word in words:
        if word in sentence:
            return 1
    return 0
f8ff64f1d29313c145ebbff8fef01961e14cfd1f
707,209
import os

def _norm_path(path):
    """
    Decorator function intended for normalizing the output of a path
    retrieval function. Useful for fixing the slash/backslash windows cases.
    """
    def normalize_path(*args):
        return os.path.normpath(path(*args))
    return normalize_path
5d86cc9fdab4ed9643398e2741bcf5f90d8b97e5
707,210
import re

def matchNoSpaces(value):
    """Match strings with no spaces."""
    if re.search(r'\s', value):
        return False
    return True
6b33c6b500f78664c04ef8c507e9b25fa19c760d
707,211
def cli_list(apic, args):
    """Implement CLI command `list`."""
    # pylint: disable=unused-argument
    instances = apic.get_instances()
    if instances:
        print('\n'.join(apic.get_instances()))
    return 0
7b96b1a7cf85c86627382143e1e0786956546ec1
707,212
def get_number(line, position):
    """Searches for the end of a number.

    Args:
        line (str): The line in which the number was found.
        position (int): The starting position of the number.

    Returns:
        str: The number found.
        int: The position after the number found.
    """
    word = ""
    for pos, char in enumerate(line[position:]):
        if char.isdigit() or char == ".":
            word += char
        else:
            return word, position + pos
    return word, len(line)
df41a1b53953b912e5ce5d6d9b3d69c4133460f1
707,213
def levelize_smooth_or_improve_candidates(to_levelize, max_levels):
    """Turn parameter into a list per level.

    Helper function to preprocess the smooth and improve_candidates
    parameters passed to smoothed_aggregation_solver and rootnode_solver.

    Parameters
    ----------
    to_levelize : {string, tuple, list}
        Parameter to preprocess, i.e., levelize and convert to a level-by-level
        list such that entry i specifies the parameter at level i
    max_levels : int
        Defines the maximum number of levels considered

    Returns
    -------
    to_levelize : list
        The parameter list such that entry i specifies the parameter choice
        at level i.

    Notes
    -----
    This routine is needed because the user will pass in a parameter option
    such as smooth='jacobi', or smooth=['jacobi', None], and this option must
    be "levelized", or converted to a list of length max_levels such that
    entry [i] in that list is the parameter choice for level i.

    The parameter choice in to_levelize can be a string, tuple or list. If
    it is a string or tuple, then that option is assumed to be the parameter
    setting at every level. If to_levelize is initially a list, and the length
    of the list is less than max_levels, the last entry in the list defines
    that parameter for all subsequent levels.

    Examples
    --------
    >>> from pyamg.util.utils import levelize_smooth_or_improve_candidates
    >>> improve_candidates = ['gauss_seidel', None]
    >>> levelize_smooth_or_improve_candidates(improve_candidates, 4)
    ['gauss_seidel', None, None, None]
    """
    # handle default value (mutable)
    # improve_candidates=(('block_gauss_seidel',
    #                      {'sweep': 'symmetric', 'iterations': 4}),
    #                     None)
    # -> make it a list
    if isinstance(to_levelize, tuple):
        if isinstance(to_levelize[0], tuple):
            to_levelize = list(to_levelize)

    if isinstance(to_levelize, (str, tuple)):
        to_levelize = [to_levelize for i in range(max_levels)]
    elif isinstance(to_levelize, list):
        if len(to_levelize) < max_levels:
            mlz = max_levels - len(to_levelize)
            toext = [to_levelize[-1] for i in range(mlz)]
            to_levelize.extend(toext)
    elif to_levelize is None:
        to_levelize = [(None, {}) for i in range(max_levels)]

    return to_levelize
8b302b8cae04adae010607c394c2e5059aa46eeb
707,214
def get_max_num_context_features(model_config):
    """Returns maximum number of context features from a given config.

    Args:
        model_config: A model config file.

    Returns:
        An integer specifying the max number of context features if the model
        config contains context_config, None otherwise
    """
    meta_architecture = model_config.WhichOneof("model")
    meta_architecture_config = getattr(model_config, meta_architecture)
    if hasattr(meta_architecture_config, "context_config"):
        return meta_architecture_config.context_config.max_num_context_features
1df5d220e30cfa5b440c0063149e2ebaf896352a
707,215
import hashlib

def hashname(name, secsalt):
    """Obtain a sha256 hash from a name."""
    m = hashlib.sha256()
    m.update((name + secsalt).encode("utf-8"))
    return m.hexdigest()
0db5fbf39eed899162535b6647a047f49e39fa34
707,216
def parse_encoding_header(header):
    """
    Break up the `HTTP_ACCEPT_ENCODING` header into a dict of the form,
    {'encoding-name': qvalue}.
    """
    encodings = {'identity': 1.0}

    for encoding in header.split(","):
        if encoding.find(";") > -1:
            encoding, qvalue = encoding.split(";")
            encoding = encoding.strip()
            qvalue = qvalue.split('=', 1)[1]
            if qvalue != "":
                encodings[encoding] = float(qvalue)
            else:
                encodings[encoding] = 1
        else:
            encodings[encoding] = 1

    return encodings
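A usage sketch on a hypothetical Accept-Encoding value; entries with an explicit q= get that value, everything else (and 'identity') defaults to 1:

print(parse_encoding_header("gzip;q=1.0, identity;q=0.5, *;q=0"))
# {'identity': 0.5, 'gzip': 1.0, '*': 0.0}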
0d423ad51ff14589b5858681cf32a0f318e6dbfa
707,217
def get_data_meta_path(either_file_path: str) -> tuple:
    """Take either a meta or binary file path and return both as a tuple.

    Arguments:
        either_file_path {str} -- path of a meta/binary file

    Returns:
        [type] -- (binary_path, meta_path)
    """
    file_stripped = '.'.join(either_file_path.split('.')[:-1])
    return tuple([file_stripped + ext for ext in ['.bin', '.meta']])
0456186cd99d5899e2433ac9e44ba0424077bcc0
707,218
def cmd(func, *args, **kwargs):
    """Takes a function followed by its arguments."""
    def command(*a, **ka):
        return func(*args, **kwargs)
    return command
9ace378335461080b51dce4936c9a8e0965b3454
707,219
from typing import List

def find_domain_field(fields: List[str]):
    """Find and return domain field value."""
    field_index = 0
    for field in fields:
        if field == "query:":
            field_value = fields[field_index + 1]
            return field_value
        field_index += 1
    return None
fac45f0bd7cead3ad1ec01307c6c623c8d39dbd4
707,220
from typing import List
import glob
import csv

def get_result(dir_path: str) -> List[float]:
    """Extract win/loss data from match logs (csv).

    Args:
        dir_path (str): path to the directory containing the match logs

    Returns:
        List[float]: win-rate data
    """
    files = glob.glob(dir_path + "*.csv")
    result = []
    for file in files:
        csv_file = open(file, "r")
        csv_data = csv.reader(csv_file, delimiter=",", doublequote=True,
                              lineterminator="\r\n", quotechar='"',
                              skipinitialspace=True)
        win = 0
        lose = 0
        for data in csv_data:
            if int(data[1]) >= int(data[2]):
                win += 1
            else:
                lose += 1
        result.append(win / (win + lose))
    return result
52f6e1d5e432ec1d56524654cba2ddae9c60426c
707,221
def get_local_info(hass):
    """Get HA's local location config."""
    latitude = hass.config.latitude
    longitude = hass.config.longitude
    timezone = str(hass.config.time_zone)
    elevation = hass.config.elevation

    return latitude, longitude, timezone, elevation
1fdefbad46c7cdb58abdc36f7d8799aa1e4af87c
707,222