content : string (35 to 416k chars)
sha1 : string (40 chars)
id : int64 (0 to 710k)
def get_pairs(scores):
    """
    Returns pairs of indexes where the first value in the pair has a higher
    score than the second value in the pair.

    Parameters
    ----------
    scores : list of int
        Contains a list of numbers

    Returns
    -------
    query_pair : list of pairs
        This contains, per query, a list of pairs of indexes into that
        query's scores sorted in descending order.
    """
    query_pair = []
    for query_scores in scores:
        temp = sorted(query_scores, reverse=True)
        pairs = []
        for i in range(len(temp)):
            for j in range(len(temp)):
                if temp[i] > temp[j]:
                    pairs.append((i, j))
        query_pair.append(pairs)
    return query_pair
1d4bf17dffb7ec8b934701254448e5a7dfe41cf9
2,816
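A quick usage sketch for get_pairs above (hypothetical input; one query's scores): note that the returned pairs index into the descending-sorted scores.

# assumes get_pairs from the record above
scores = [[3, 1, 2]]
print(get_pairs(scores))
# -> [[(0, 1), (0, 2), (1, 2)]]  (indexes into the sorted list [3, 2, 1])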
import random


def random_choice(gene):
    """
    Randomly select an object, such as a string, from a list. Gene must have
    a defined `choices` list.

    Args:
        gene (Gene): A gene with a set `choices` list.

    Returns:
        object: Selected choice.
    """
    if 'choices' not in gene.__dict__:
        raise KeyError("'choices' not defined in this gene, please include a list of values!")
    return random.choice(gene.choices)
8a01a2039a04262aa4fc076bdd87dbf760f45253
2,817
import string


def check_if_punctuations(word: str) -> bool:
    """Returns ``True`` if ``word`` is just a sequence of punctuations."""
    for c in word:
        if c not in string.punctuation:
            return False
    return True
64ba5f9dc69c59490a2ea69e7c2d938151d71b37
2,818
from random import shuffle


def shuffle_list(*ls):
    """
    Shuffle multiple lists at the same time, preserving their pairing.

    :param ls: the lists to shuffle in parallel
    :return: the shuffled lists, as zipped tuples
    """
    l = list(zip(*ls))
    shuffle(l)
    return zip(*l)
ec46e4a8da2c04cf62da2866d2d685fc796887e5
2,820
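A minimal demonstration of shuffle_list above (hypothetical data); the pairing between the two lists is preserved.

# assumes shuffle_list from the record above
xs, ys = shuffle_list([1, 2, 3], ['a', 'b', 'c'])
print(xs, ys)
# e.g. (2, 3, 1) ('b', 'c', 'a') -- order is random, pairing is kept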
def nodes(G):
    """Returns an iterator over the graph nodes."""
    return G.nodes()
3a1a543f1af4d43c79fd0083eb77fedd696547ec
2,821
def process_input(input_string, max_depth):
    """
    Clean up the input, convert it to an array and compute the longest
    array, per feature type.
    """
    # remove the quotes and extra spaces from the input string
    input_string = input_string.replace('"', '').replace(', ', ',').strip()

    # convert the string to an array and also track the longest array, so
    # we know how many levels for the feature type.
    tmp = []
    if input_string:
        tmp = input_string.split(',')
        if max_depth < len(tmp):
            max_depth = len(tmp)

    # return the array and the depth
    return tmp, max_depth
ca0fddd0b3bf145c7fc0654212ae43f02799b466
2,822
def construct_tablepath(fmdict, prefix=''):
    """
    Construct a suitable pathname for a CASA table made from fmdict,
    starting with prefix.  prefix can contain a /.

    If prefix is not given, it will be set to
    "ephem_JPL-Horizons_%s" % fmdict['NAME']
    """
    if not prefix:
        prefix = "ephem_JPL-Horizons_%s" % fmdict['NAME']
    return prefix + "_%.0f-%.0f%s%s.tab" % (fmdict['earliest']['m0']['value'],
                                            fmdict['latest']['m0']['value'],
                                            fmdict['latest']['m0']['unit'],
                                            fmdict['latest']['refer'])
95041aab91ac9994ef2068d5e05f6cd63969d94e
2,823
def get_text(im):
    """
    Extract the text portion of the image.
    """
    return im[3:24, 116:288]
86db2a16372aacb6cde29a2bf16c84f14f65d715
2,824
def recursiveUpdate(target, source):
    """
    Recursively update the target dictionary with the source dictionary,
    leaving unfound keys in place. This is different than dict.update,
    which removes target keys not in the source.

    :param dict target: The dictionary to be updated
    :param dict source: The dictionary to be integrated
    :return: target dict is returned as a convenience. This function updates
        the target dict in place.
    :rtype: dict
    """
    for k, v in source.items():
        if isinstance(v, dict):
            target[k] = recursiveUpdate(target.get(k, {}), v)
        else:
            target[k] = v
    return target
e1c11d0801be9526e8e73145b1dfc7be204fc7d0
2,825
import argparse


def parse_arguments():
    """Parse command line arguments."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--output', type=str, required=False,
        help='GCS URL where results will be saved as a CSV.')
    parser.add_argument(
        '--query', type=str, required=True,
        help='The SQL query to be run in BigQuery')
    parser.add_argument(
        '--dataset_id', type=str, required=True,
        help='Dataset of the destination table.')
    parser.add_argument(
        '--table_id', type=str, required=True,
        help='Name of the destination table.')
    parser.add_argument(
        '--project', type=str, required=True,
        help='The GCP project to run the query.')
    args = parser.parse_args()
    return args
9bbf5d16e94b5cac8ff230592d2cbe544e771e7a
2,826
def get_module_id_from_event(event):
    """
    Helper function to get the module_id from an EventHub message
    """
    # annotation keys are bytes; the membership test must use the same
    # (hyphenated, encoded) key that the lookup below uses, otherwise it
    # can never match
    if "iothub-connection-module-id".encode() in event.message.annotations:
        return event.message.annotations["iothub-connection-module-id".encode()].decode()
    else:
        return None
e183824fff183e3f95ef35c623b13245eb68a8b7
2,828
from typing import Collection


def A006577(start: int = 0, limit: int = 20) -> Collection[int]:
    """Number of halving and tripling steps to reach 1 in the '3x+1' problem,
    or -1 if 1 is never reached.
    """
    def steps(n: int) -> int:
        # the sequence is only defined for positive n; return -1 otherwise,
        # as documented (n = 0 would loop forever, since 0 // 2 == 0)
        if n < 1:
            return -1
        if n == 1:
            return 0
        x = 0
        while True:
            if n % 2 == 0:
                n //= 2
            else:
                n = 3 * n + 1
            x += 1
            if n < 2:
                break
        return x

    return [steps(n) for n in range(start, start + limit)]
47829838af8e2fdb191fdefa755e728db9c09559
2,831
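A small check of A006577 (assuming the guarded version above): the first few Collatz step counts.

# assumes A006577 from the record above
print(A006577(start=1, limit=5))
# -> [0, 1, 7, 2, 5]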
def rss(x, y, w, b):
    """Residual sum of squares for linear regression."""
    return sum((yi - (xi * wi + b)) ** 2 for xi, yi, wi in zip(x, y, w))
955e0b5e3dcf8373fe5ef1b95244d06abe512084
2,835
def deserialize(member, class_indexing):
    """Deserialize one bounding-box annotation element."""
    class_name = member[0].text
    if class_name in class_indexing:
        class_num = class_indexing[class_name]
    else:
        return None
    bnx = member.find('bndbox')
    box_x_min = float(bnx.find('xmin').text)
    box_y_min = float(bnx.find('ymin').text)
    box_x_max = float(bnx.find('xmax').text)
    box_y_max = float(bnx.find('ymax').text)
    width = float(box_x_max - box_x_min + 1)
    height = float(box_y_max - box_y_min + 1)
    # try:
    #     ignore = float(member.find('ignore').text)
    # except ValueError:
    ignore = 0.0
    return [class_num, box_x_min, box_y_min, width, height, ignore]
087102acec79ec5d0ecad91453885579c2395895
2,838
def parabolic(f, x):
    """
    Quadratic interpolation in order to estimate the location of a maximum
    https://gist.github.com/endolith/255291

    Args:
        f (ndarray): a vector of samples
        x (int): an index on the vector

    Returns:
        (vx, vy): the vertex coordinates of a parabola passing through x
            and its neighbors
    """
    xv = 1/2. * (f[x-1] - f[x+1]) / (f[x-1] - 2 * f[x] + f[x+1]) + x
    yv = f[x] - 1/4. * (f[x-1] - f[x+1]) * (xv - x)
    return (xv, yv)
4373ee6390f3523d0fd69487c27e05522bd8c230
2,839
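A worked example for parabolic above: interpolating the peak of three samples.

# assumes parabolic from the record above
f = [1.0, 3.0, 2.0]
print(parabolic(f, 1))
# -> (1.1666..., 3.0416...): the vertex lies slightly right of index 1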
import re


def get_better_loci(filename, cutoff):
    """
    Returns a subset of loci such that each locus includes at least "cutoff"
    different species.

    :param filename: the loci file to read
    :param cutoff: minimum number of species per locus
    :return: the loci meeting the cutoff, as a list
    """
    f = open(filename)
    content = f.read()
    f.close()
    # loci are separated by "//...|" lines; the trailing '|' must be escaped,
    # otherwise the pattern also matches the empty string
    loci = re.split(r'//.*\|', content)
    better_loci = []
    for locus in loci:
        found_species = set()
        for line in locus.strip().split("\n"):
            if line == "":
                continue
            (individual, sequence) = line[1:].split()
            found_species.add(individual.split("_")[-1])
        if len(found_species) >= cutoff:
            better_loci.append(locus)
    return better_loci
e2d563c9d0568cef59ea0280aae61a78bf4a6e7b
2,840
import math


def paginate_data(data_list, page=1, per_page=10):
    """Paginate the data and return one page as a dict."""
    page = int(page)
    per_page = int(per_page)
    # convert before computing the page count, in case the
    # arguments arrive as strings
    pages = int(math.ceil(len(data_list) / per_page))
    has_next = True if pages > page else False
    has_prev = True if 1 < page <= pages else False
    items = data_list[(page - 1) * per_page: page * per_page]
    return {
        "item_list": items,
        "page": page,
        "total": len(data_list),
        "pages": pages,
        "has_next": has_next,
        "next_num": page + 1 if has_next else None,
        "per_page": per_page,
        "has_prev": has_prev,
        "prev_num": page - 1 if has_prev else None
    }
63a4602462e0c2e38329107b10b5d72b63c3108d
2,841
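A usage sketch for paginate_data above, with hypothetical data:

# assumes paginate_data from the record above
page = paginate_data(list(range(25)), page=2, per_page=10)
print(page["item_list"], page["pages"], page["has_next"])
# -> [10, 11, ..., 19] 3 True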
import six


def canonicalize_monotonicity(monotonicity, allow_decreasing=True):
    """Converts string constants representing monotonicity into integers.

    Args:
      monotonicity: The monotonicities hyperparameter of a `tfl.layers` Layer
        (e.g. `tfl.layers.PWLCalibration`).
      allow_decreasing: If decreasing monotonicity is considered a valid
        monotonicity.

    Returns:
      monotonicity represented as -1, 0, 1, or None.

    Raises:
      ValueError: If monotonicity is not in the set
        {-1, 0, 1, 'decreasing', 'none', 'increasing'} and allow_decreasing
        is True.
      ValueError: If monotonicity is not in the set
        {0, 1, 'none', 'increasing'} and allow_decreasing is False.
    """
    if monotonicity is None:
        return None

    if monotonicity in [-1, 0, 1]:
        if not allow_decreasing and monotonicity == -1:
            raise ValueError(
                "'monotonicities' must be from: [0, 1, 'none', 'increasing']. "
                "Given: {}".format(monotonicity))
        return monotonicity
    elif isinstance(monotonicity, six.string_types):
        if monotonicity.lower() == "decreasing":
            if not allow_decreasing:
                raise ValueError(
                    "'monotonicities' must be from: [0, 1, 'none', 'increasing']. "
                    "Given: {}".format(monotonicity))
            return -1
        if monotonicity.lower() == "none":
            return 0
        if monotonicity.lower() == "increasing":
            return 1
    raise ValueError("'monotonicities' must be from: [-1, 0, 1, 'decreasing', "
                     "'none', 'increasing']. Given: {}".format(monotonicity))
a9d0870d03f11d7bdff4c8f673cd78d072fa8478
2,843
def add_gdp(df, gdp, input_type="raw", drop=True):
    """Adds the `GDP` to the dataset.

    Assuming that both passed dataframes have a column named `country`.

    Parameters
    ----------
    df : pd.DataFrame
        Training or test dataframe including the `country` column.
    gdp : pd.DataFrame
        Mapping between `country` and `GDP`
    input_type : {"raw", "aggregated"}
        Whether the operation should run on the raw, or the aggregated dataset.
    drop : bool
        Whether the old country columns should be dropped.

    Returns
    -------
    pd.DataFrame
        The passed `df` with a new column corresponding to the mapped GDP.
    """

    def stringify(maybe_string):
        # Handles Unicode country names like "Côte d'Ivoire", "Réunion" etc,
        # as well as countries only existing in one of the two dataframes.
        try:
            return str(maybe_string)
        except UnicodeEncodeError:
            return "Unknown"

    if input_type == "aggregated":
        country_cols = [col for col in df.columns
                        if col.startswith("country") and col != "country"]

        def inverse_ohe(row):
            for c in country_cols:
                if row[c] == 1:
                    return c.split("_")[1]

        df["country"] = df.apply(inverse_ohe, axis=1)
        if drop:
            df = df.drop(country_cols, axis=1)
    elif input_type != "raw":
        msg = "Only 'raw' and 'aggregated' are supported.\n" + \
              "\tThe former assumes the original form where only the JSON has been flattened.\n" + \
              "\tThe latter assumes that OHE has already occurred on top."
        raise ValueError(msg)

    df["country"] = df["country"].fillna("Unknown").apply(stringify)
    result = df.merge(gdp, on="country", how='left')
    if drop:
        result.drop("country", axis=1, inplace=True)
    return result
72e2b5fe839f3dbc71ca2def4be442535a0adb84
2,844
import argparse


def get_options(cmd_args):
    """Argument Parser."""
    parser = argparse.ArgumentParser(
        prog='activitygen.py',
        usage='%(prog)s -c configuration.json',
        description='SUMO Activity-Based Mobility Generator')
    parser.add_argument(
        '-c', type=str, dest='config', required=True,
        help='JSON configuration file.')
    parser.add_argument(
        '--profiling', dest='profiling', action='store_true',
        help='Enable Python3 cProfile feature.')
    parser.add_argument(
        '--no-profiling', dest='profiling', action='store_false',
        help='Disable Python3 cProfile feature.')
    parser.set_defaults(profiling=False)
    return parser.parse_args(cmd_args)
e8ddde36e83df2ca46652e0f104c718e8f747715
2,845
import doctest


def _test():
    """
    >>> solve("axyb", "abyxb")
    axb
    """
    global chr

    def chr(x):
        return x

    doctest.testmod()
1ba052fbf066cee92ad2088b9562443c727292df
2,846
def ravel_group_params(parameters_group):
    """Take a dict(group -> {k->p}) and return a dict('group:k'-> p)"""
    return {f'{group_name}:{k}': p
            for group_name, group_params in parameters_group.items()
            for k, p in group_params.items()}
4a768e89cd70b39bea4f658600690dcb3992a710
2,847
def index():
    """Returns a 200, that's about it!!!!!!!"""
    return 'Wow!!!!!'
f6d8a765556d2d6a1c343bb0ab1a9d4a6c5fd6ba
2,848
import os


def file_sort_key(file):
    """Calculate the sort key for ``file``.

    :param file: The file to calculate the sort key for
    :type file: :class:`~digi_edit.models.file.File`
    :return: The sort key
    :rtype: ``tuple``
    """
    path = file.attributes['filename'].split(os.path.sep)
    path_len = len(path)
    key = []
    for idx, element in enumerate(path):
        if idx < path_len - 1:
            key.append((1, element))
        else:
            key.append((0, element))
    return tuple(key)
1997e48c2355816d88e930e9fb3369096a227b63
2,849
def _parse_path(**kw):
    """
    Parse leaflet `Path` options.
    http://leafletjs.com/reference-1.2.0.html#path
    """
    color = kw.pop('color', '#3388ff')
    return {
        'stroke': kw.pop('stroke', True),
        'color': color,
        'weight': kw.pop('weight', 3),
        'opacity': kw.pop('opacity', 1.0),
        'lineCap': kw.pop('line_cap', 'round'),
        'lineJoin': kw.pop('line_join', 'round'),
        'dashArray': kw.pop('dash_array', None),
        'dashOffset': kw.pop('dash_offset', None),
        'fill': kw.pop('fill', False),
        'fillColor': kw.pop('fill_color', color),
        'fillOpacity': kw.pop('fill_opacity', 0.2),
        'fillRule': kw.pop('fill_rule', 'evenodd'),
        'bubblingMouseEvents': kw.pop('bubbling_mouse_events', True),
    }
02d3810ad69a1a0b8f16d61e661e246aea5c09cc
2,851
from typing import Optional
import time
from datetime import datetime


def time_struct_2_datetime(
    time_struct: Optional[time.struct_time],
) -> Optional[datetime]:
    """Convert struct_time to datetime.

    Args:
        time_struct (Optional[time.struct_time]): A time struct to convert.

    Returns:
        Optional[datetime]: A converted value.
    """
    return (
        datetime.fromtimestamp(time.mktime(time_struct))
        if time_struct is not None
        else None
    )
705b09428d218e8a47961e247b62b9dfd631a41f
2,853
import argparse


def _parse_input():
    """
    A function for handling terminal commands.

    :return: The path to the experiment configuration file.
    """
    parser = argparse.ArgumentParser(
        description='Performs CNN analysis according to the input config.')
    parser.add_argument('-i', '--experiments_file', default='experiments_config.json',
                        type=str, help='A path to the experiments config file.')
    args = parser.parse_args()
    experiments_config_path = args.experiments_file
    return experiments_config_path
5486a1fee5eeb6b69f857d45f9e3e1a7f924ae5b
2,854
import time


def wait_for_compute_jobs(nevermined, account, jobs):
    """Monitor and wait for compute jobs to finish.

    Args:
        nevermined (:py:class:`nevermined_sdk_py.Nevermined`): A nevermined instance.
        account (:py:class:`contracts_lib_py.account.Account`): Account that
            published the compute jobs.
        jobs (:obj:`list` of :obj:`tuple`): A list of tuples with each tuple
            containing (service_agreement_id, compute_job_id).

    Returns:
        :obj:`list` of :obj:`str`: Returns a list of dids produced by the jobs

    Raises:
        ValueError: If any of the jobs fail
    """
    failed = False
    dids = set()

    while True:
        finished = 0
        for i, (sa_id, job_id) in enumerate(jobs):
            status = nevermined.assets.compute_status(sa_id, job_id, account)
            print(f"{job_id}: {status['status']}")

            if status["status"] == "Failed":
                failed = True
            if status["status"] == "Succeeded":
                finished += 1
                dids.add(status["did"])

        if failed:
            for i, (sa_id, job_id) in enumerate(jobs):
                logs = nevermined.assets.compute_logs(sa_id, job_id, account)
                for line in logs:
                    print(f"[{line['podName']}]: {line['content']}")
            raise ValueError("Some jobs failed")

        if finished == len(jobs):
            break

        # move up 4 lines
        print("\u001B[4A")
        time.sleep(5)

    return list(dids)
98370b8d596f304630199578a360a639507ae3c3
2,855
import struct


def parse_tcp_packet(tcp_packet):
    """Read TCP data. HTTP is built only on TCP, so we do not need to
    support other protocols."""
    tcp_base_header_len = 20
    # tcp header
    tcp_header = tcp_packet[0:tcp_base_header_len]
    source_port, dest_port, seq, ack_seq, t_f, flags = struct.unpack(b'!HHIIBB6x', tcp_header)
    # real tcp header len
    tcp_header_len = ((t_f >> 4) & 0xF) * 4
    # skip extension headers
    if tcp_header_len > tcp_base_header_len:
        pass
    # body
    body = tcp_packet[tcp_header_len:]
    return source_port, dest_port, flags, seq, ack_seq, body
fa1b1050609cce8ca23ca5bac6276a681f560659
2,857
def find_balanced(text, start=0, start_sep='(', end_sep=')'):
    """
    Finds balanced ``start_sep`` with ``end_sep`` assuming
    that ``start`` is pointing to ``start_sep`` in ``text``.
    """
    if start >= len(text) or start_sep != text[start]:
        return start

    balanced = 1
    pos = start + 1
    while pos < len(text):
        token = text[pos]
        pos += 1
        if token == end_sep:
            if balanced == 1:
                return pos
            balanced -= 1
        elif token == start_sep:
            balanced += 1
    return start
15c17a216405028b480efa9d12846905a1eb56d4
2,858
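A short demonstration of find_balanced above: it returns the index just past the matching closing separator, or start unchanged if nothing balances.

# assumes find_balanced from the record above
text = "f(a(b)c)d"
print(find_balanced(text, 1))   # -> 8, just past the outer ')'
print(find_balanced(text, 0))   # -> 0, text[0] is not '('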
def gen_urdf_material(color_rgba):
    """
    :param color_rgba: Four element sequence (0 to 1) encoding an rgba
        colour tuple, ``seq(float)``
    :returns: urdf element sequence for an anonymous material definition
        containing just a color element, ``str``
    """
    return '<material name=""><color rgba="{0} {1} {2} {3}"/></material>'.format(*color_rgba)
d0fe1a706c932ad1a6f14aa3a9d9471de70650b9
2,860
def gather_along_dim_with_dim_single(x, target_dim, source_dim, indices):
    """
    This function indexes out a target dimension of a tensor in a structured
    way, by allowing a different value to be selected for each member of a
    flat index tensor (@indices) corresponding to a source dimension. This
    can be interpreted as moving along the source dimension, using the
    corresponding index value in @indices to select values for all other
    dimensions outside of the source and target dimensions. A common use
    case is to gather values in target dimension 1 for each batch member
    (target dimension 0).

    Args:
        x (torch.Tensor): tensor to gather values for
        target_dim (int): dimension to gather values along
        source_dim (int): dimension to hold constant and use for gathering
            values from the other dimensions
        indices (torch.Tensor): flat index tensor with same shape as tensor
            @x along @source_dim

    Returns:
        y (torch.Tensor): gathered tensor, with dimension @target_dim indexed out
    """
    assert len(indices.shape) == 1
    assert x.shape[source_dim] == indices.shape[0]

    # unsqueeze in all dimensions except the source dimension
    new_shape = [1] * x.ndimension()
    new_shape[source_dim] = -1
    indices = indices.reshape(*new_shape)

    # repeat in all dimensions - but preserve shape of source dimension,
    # and make sure target_dimension has singleton dimension
    expand_shape = list(x.shape)
    expand_shape[source_dim] = -1
    expand_shape[target_dim] = 1
    indices = indices.expand(*expand_shape)

    out = x.gather(dim=target_dim, index=indices)
    return out.squeeze(target_dim)
06fbba5478ddb21cda9a555c41c94c809244537c
2,861
def extract_p(path, dict_obj, default):
    """
    try to extract dict value in key path, if key error provide default

    :param path: the nested dict key path, separated by '.'
        (therefore no dots in key names allowed)
    :param dict_obj: the dictionary object from which to extract
    :param default: a default return value if key error
    :return: extracted value
    """
    if dict_obj is None:
        return default
    keys = path.split('.')
    tmp_iter = dict_obj
    for key in keys:
        try:
            # dict.get() never raises KeyError; guard instead against
            # intermediate values that are not dicts
            tmp_iter = tmp_iter.get(key, default)
        except AttributeError:
            return default
    return tmp_iter
1a563212e229e67751584885c5db5ac19157c37f
2,863
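A usage sketch for extract_p above, on a hypothetical nested dict:

# assumes extract_p from the record above
cfg = {"db": {"host": "localhost", "port": 5432}}
print(extract_p("db.port", cfg, None))      # -> 5432
print(extract_p("db.user.name", cfg, "?"))  # -> '?'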
def group_bars(note_list):
    """
    Returns a list of bars, where each bar is a list of notes. The start and
    end times of each note are rescaled to units of bars, and expressed
    relative to the beginning of the current bar.

    Parameters
    ----------
    note_list : list of tuples
        List of notes to group into bars.
    """
    bar_list = []
    current_bar = []
    current_bar_start_time = 0

    for raw_note in note_list:
        if raw_note[0] != -1:
            current_bar.append(raw_note)
        elif raw_note[0] == -1:
            quarter_notes_per_bar = raw_note[2] - current_bar_start_time
            current_bar_scaled = []
            for note in current_bar:
                current_bar_scaled.append(
                    (note[0],
                     note[1],
                     min([(note[2] - current_bar_start_time) / quarter_notes_per_bar, 1]),
                     min([(note[3] - current_bar_start_time) / quarter_notes_per_bar, 1])))
            bar_list.append(current_bar_scaled)
            current_bar = []
            current_bar_start_time = raw_note[2]

    return bar_list
3b12a7c7e2395caa3648abf152915ece4b325599
2,865
import re


def sample(s, n):
    """Show a sample of string s centered at position n"""
    start = max(n - 8, 0)
    finish = min(n + 24, len(s))
    return re.escape(s[start:finish])
565f69224269ed7f5faa538d40ce277714144577
2,868
import math


def encode_integer_compact(value: int) -> bytes:
    """Encode an integer with signed VLQ encoding.

    :param int value: The value to encode.
    :return: The encoded integer.
    :rtype: bytes
    """
    if value == 0:
        return b"\0"

    if value < 0:
        sign_bit = 0x40
        value = -value
    else:
        sign_bit = 0

    n_bits = value.bit_length()
    n_bytes = 1 + int(math.ceil((n_bits - 6) / 7))

    buf = bytearray(n_bytes)
    for i in range(n_bytes - 1, 0, -1):
        buf[i] = 0x80 | (value & 0x7F)
        value >>= 7
    buf[0] = 0x80 | sign_bit | (value & 0x3F)
    buf[-1] &= 0x7F

    return bytes(buf)
daf9ed4a794754a3cd402e8cc4c3e614857941fe
2,869
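A few sanity checks for encode_integer_compact above, with values worked out by hand from the signed-VLQ layout (6 payload bits plus a sign bit in the first byte, 7 payload bits in each later byte):

# assumes encode_integer_compact from the record above
print(encode_integer_compact(0))    # -> b'\x00'
print(encode_integer_compact(1))    # -> b'\x01'
print(encode_integer_compact(-1))   # -> b'A' (0x41: sign bit 0x40 | 1)
print(encode_integer_compact(100))  # -> b'\x80d' (0x80 0x64)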
def max_surplus(redemptions, costs, traders):
    """Calculates the maximum possible surplus."""
    surplus = 0
    transactions = 0.5 * traders
    for redemption, cost in zip(redemptions, costs):
        if redemption >= cost:
            surplus += (redemption - cost) * transactions
    return surplus
6dd452de1b8726c475c9b95d8c24a2f57fe71516
2,870
import re


def parse_dblife(file):
    """Parse a DBLife file, returning a tuple:

    positions: list of (x,y) co-ordinates
    comments: all comments in file, as a list of strings, one per line.
    """
    lines = file.split("\n")
    comments = []
    positions = []
    x = 0
    y = 0

    dblife_pattern = r"((\d*)(\.|O|o|\*))*"

    for line in lines:
        line = line.strip()
        if line.startswith("!"):
            comments.append(line[2:])
        # check if this is part of the pattern
        if re.match(dblife_pattern, line):
            count = 0
            for char in line:
                # repeat counts
                if char.isdigit():
                    count *= 10
                    count += int(char)
                # blanks
                if char in ".":
                    if count != 0:
                        x += int(count)
                    else:
                        x += 1
                    count = 0
                # ons
                if char in "oO*":
                    if count != 0:
                        for i in range(count):
                            positions.append((x, y))
                            x += 1
                    else:
                        positions.append((x, y))
                        x += 1
                        count = 0
                    count = 0
            # newlines
            y += 1
            x = 0
            count = 0

    return positions, comments
b2d54240280b657c82d8a70da9e9f0ce47a92c7a
2,871
def get_html_subsection(name):
    """
    Return a subsection as HTML, with the given name

    :param name: subsection name
    :type name: str
    :rtype: str
    """
    return "<h2>{}</h2>".format(name)
2e0f37a7bb9815eda24eba210d8518e64595b9b7
2,872
import ast


def filter_funcs(node) -> bool:
    """Keep function definitions, filtering out names that start or end
    with double underscores (dunder and similar special names)."""
    if not isinstance(node, ast.FunctionDef):
        return False
    elif node.name.startswith('__') or node.name.endswith('__'):
        return False
    else:
        return True
022181afa887965af0f2d4c5ec33de07b8a3c089
2,874
import random


def shuffle(answers):
    """
    Returns mixed answers and the index of the correct one,
    assuming the first answer is the correct one.
    """
    indices = list(range(len(answers)))
    random.shuffle(indices)
    correct = indices.index(0)
    answers = [answers[i] for i in indices]
    return answers, correct
e597b4aeb65fecf47f4564f2fddb4d76d484707a
2,875
from typing import List
from pathlib import Path


def get_requirements(req_file: str) -> List[str]:
    """Extract requirements from provided file."""
    req_path = Path(req_file)
    requirements = req_path.read_text().split("\n") if req_path.exists() else []
    return requirements
3433cd117bbb0ced7ee8238e36f20c69e15c5260
2,876
def func(*listItems):
    """
    1. Iterate over all of the arguments.
    2. Collect every element of each list argument into one flat list.
    3. Sort that list and return its largest element.
    """
    tmp_list = []
    for item in listItems:
        if isinstance(item, list):
            for i in item:
                tmp_list.append(i)
    tmp_list = list(filter(lambda k: isinstance(k, int), tmp_list))
    tmp_list.sort(reverse=True)
    max_value = tmp_list[0]
    return max_value
adbef2744871f1d8f714cbf2a71d4321e3fb72f5
2,877
def align_dataframes(framea, frameb, fill_value=0.0):
    """Use pandas DataFrame structure to align two-dimensional data.

    :param framea: First pandas dataframe to align
    :param frameb: Other pandas dataframe to align
    :param fill_value: default fill value (0.0 float)
    :return: tuple of aligned frames
    """
    zeroframe = frameb.copy()
    zeroframe[:] = fill_value
    aligneda = framea.add(zeroframe, fill_value=fill_value)

    zeroframe = framea.copy()
    zeroframe[:] = fill_value
    alignedb = frameb.add(zeroframe, fill_value=fill_value)

    return aligneda, alignedb
86a5e8c399ab47a10715af6c90d0901c2207597c
2,878
import os


def get_snippet(path):
    """Get snippet source string."""
    current_file_dir = os.path.dirname(__file__)
    absolute_path = os.path.join(current_file_dir, path)
    with open(absolute_path) as src:
        return src.read()
e101a25c61313d0531e0c38e27b120d56fcd8a47
2,881
def dm2skin_normalizeWeightsConstraint(x):
    """Constraint used in optimization that ensures
    the weights in the solution sum to 1"""
    return sum(x) - 1.0
79024cb70fd6cbc3c31b0821baa1bcfb29317043
2,884
def methodInDB(method_name, dict_link, interface_db_cursor):
    """
    Checks the database to see if a method exists already.

    Returns a list: [boolean of whether the method exists in the db,
    dictionary link/ID].
    """
    crsr = interface_db_cursor
    # splitting method into parts
    if "::" in method_name:
        method = method_name.split('::')
        cn = method[0].strip()
        mn = '::'.join(method[1:]).strip()
    else:
        cn = "Unknown"
        mn = method_name

    if dict_link == '':  # dict link should only be empty on the initial call
        # search for any method with the same name and class
        crsr.execute("SELECT class_name, method_name, method_text, dict_link "
                     "FROM methods WHERE class_name = ? AND method_name = ?", (cn, mn))
        res = crsr.fetchall()
        if len(res) == 0:  # method not in table
            return [False, '']
        else:  # found something, verify it is right
            if len(res) == 1:
                print('Method found in database.')
                if res[0][0] == 'Unknown':
                    print(res[0][1])
                else:
                    print('::'.join(res[0][0:2]))
                print(res[0][2])
                print('Is this the correct method? (Y/N)')
                # prompt the user to confirm that this is the right method
                k = input().strip()
                while k not in ['N', 'n', 'Y', 'y']:
                    print('Invalid input, try again')
                    k = input()
                if k in ('Y', 'y'):
                    return [True, res[0][3]]
                elif k in ('N', 'n'):
                    return [False, '']
            elif len(res) > 1:
                print("\nMethod found in database")
                count = 1
                for r in res:
                    print(str(count) + ': ')
                    if r[0] == 'Unknown':
                        print(r[1])
                    else:
                        print('::'.join(r[0:2]))
                    print(r[2], '\n')
                    count += 1
                # if there are multiple versions of the method in the db,
                # prompt the user to select which one is the right method
                print('Which one of these is the correct method?\nPut 0 for none of them.')
                k = input()
                try:
                    k = int(k)
                except ValueError:
                    k = -1
                while k > len(res) or k < 0:
                    print("Invalid input: try again please")
                    k = input()
                    try:
                        k = int(k)
                    except ValueError:
                        k = -1
                if k == 0:
                    return [False, '']
                elif 0 < k <= len(res):
                    return [True, res[k - 1][3]]
    else:  # there is a dict_link, can check for an exact match (the usual case)
        crsr.execute("SELECT class_name, method_name FROM methods "
                     "WHERE class_name = ? AND method_name = ? AND dict_link = ?",
                     (cn, mn, dict_link))  # simple sql select
        res = crsr.fetchall()
        if len(res) == 0:  # method not in table
            return [False, dict_link]
        elif len(res) > 0:  # we found something
            return [True, dict_link]
8dc3ecc256b696a06906e63a461c241ff429e8ae
2,885
import glob


def find_pkg(pkg):
    """Find the package file in the repository."""
    candidates = glob.glob('/repo/' + pkg + '*.rpm')
    if len(candidates) == 0:
        print("No candidates for: '{0}'".format(pkg))
    assert len(candidates) == 1
    return candidates[0]
ac91f34ed7accd2c81e1c68e143319998de9cdf3
2,888
import os
import re


def get_shares(depth):
    """
    This is pretty janky, again, but simply grab the list of directories
    under /mnt/user0, as an unraid-specific shortcut to access shares.
    """
    rootdir = "/mnt/user0/"
    shares = []
    pattern = r"('\w+')"
    with os.scandir(rootdir) as p:
        depth -= 1
        for entry in p:
            # yield entry.path
            if entry.is_dir() and depth > 0:
                sharematch = re.search(pattern, str(entry))
                if sharematch:
                    # extract share name utilizing the grouping regex
                    # and remove single quotes
                    share_name = sharematch.group(1)
                    share_name = str(share_name.replace("'", ""))
                    shares.append(share_name)
    shares.sort()
    return shares
0079618cc30a4c02dec2441a64ae7aa6207c765a
2,889
def _check_eq(value):
    """Returns a function that checks whether the value
    equals a particular integer.
    """
    return lambda x: int(x) == int(value)
4d2a02727afd90dbc012d252b01ed72f745dc564
2,891
def calcPhase(star, time):
    """
    Calculate the phase of an orbit, very simple calculation
    but used quite a lot
    """
    period = star.period
    phase = time / period
    return phase
4b282d9e4fdb76a4358d895ba30b902328ce030c
2,893
from datetime import datetime


def secBetweenDates(dateTime0, dateTime1):
    """
    :param dateTime0: the earlier date string
    :param dateTime1: the later date string
    :return: The number of seconds between two dates.
    """
    dt0 = datetime.strptime(dateTime0, '%Y/%m/%d %H:%M:%S')
    dt1 = datetime.strptime(dateTime1, '%Y/%m/%d %H:%M:%S')
    timeDiff = dt1.timestamp() - dt0.timestamp()
    return timeDiff
d9e2f839d8a7c10fbde8009ea1f69db56a222426
2,894
def inv(n: int, n_bits: int) -> int:
    """Compute the bitwise inverse.

    Args:
        n: An integer.
        n_bits: The bit-width of the integers used.

    Returns:
        The binary inverse of the input.
    """
    # We should only invert the bits that are within the bit-width of the
    # integers we use. We set this mask to set the other bits to zero.
    bit_mask = (1 << n_bits) - 1  # e.g. 0b111 for n_bits = 3
    return ~n & bit_mask
5be1eaf13490091096b8cd13fdbcdbbbe43760da
2,895
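A quick check of inv above: with a 3-bit width, inverting 0b101 yields 0b010.

# assumes inv from the record above
print(inv(0b101, 3))  # -> 2 (0b010)
print(inv(0, 3))      # -> 7 (0b111)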
import requests
from bs4 import BeautifulSoup


def get_image_links_from_imgur(imgur_url):
    """
    Given an imgur URL, return a list of image URLs from it.
    """
    if 'imgur.com' not in imgur_url:
        raise ValueError('given URL does not appear to be an imgur URL')

    urls = []
    response = requests.get(imgur_url)
    if response.status_code != 200:
        raise ValueError('there was something wrong with the given URL')

    soup = BeautifulSoup(response.text, 'html5lib')

    # this is an album
    if '/a/' in imgur_url:
        matches = soup.select('.album-view-image-link a')
        urls += [x['href'] for x in matches]
    # directly linked image
    elif 'i.imgur.com' in imgur_url:
        urls.append(imgur_url)
    # single-image page
    else:
        try:
            urls.append(soup.select('.image a')[0]['href'])
        except IndexError:
            pass

    # clean up image URLs
    urls = [url.strip('/') for url in urls]
    urls = ['http://{}'.format(url) if not url.startswith('http') else url
            for url in urls]

    return urls
19d8f994cd1730c23fdf5d6105e8db916da67d15
2,898
def check_for_features(cmph5_file, feature_list):
    """Check that all required features are present in the cmph5_file.
    Return a list of features that are missing.
    """
    aln_group_path = cmph5_file['AlnGroup/Path'][0]
    missing_features = []
    for feature in feature_list:
        if feature not in cmph5_file[aln_group_path].keys():
            missing_features.append(feature)
    return missing_features
2d51e1389e6519607001ad2b0006581e6a876ddd
2,899
def is_three(x):
    """Return whether x is three.

    >>> search(is_three)
    3
    """
    return x == 3
a57266892eebf684945d0d841ede67965c751f1a
2,900
import re


def compress_sparql(text: str, prefix: str, uri: str) -> str:
    """
    Compress given SPARQL query by replacing all instances of the given uri
    with the given prefix.

    :param text: SPARQL query to be compressed.
    :param prefix: prefix to use as replace.
    :param uri: uri instance to be replaced.
    :return: compressed SPARQL query.
    """
    bordersremv = lambda matchobj: prefix + ":" + re.sub(f"[<>]|({uri})", "", matchobj.group(0))
    return re.sub(f"<?({uri}).*>?", bordersremv, text)
b86ceebadb262730fb4dec90b43e04a09d9c9541
2,901
def createNewClasses(df, sc, colLabel):
    """
    Divide the data into classes.

    Parameters
    ----------
    df: Dataframe
        Spark Dataframe
    sc: SparkContext object
        SparkContext object
    colLabel: List
        Items that are considered labels

    Return
    ----------
    colCat: List
        Items that are considered categorical
    colNum: List
        Items that are considered numerical values
    """
    rdd = sc.parallelize(df.dtypes)
    # parenthesize the type test explicitly: 'and' binds tighter than 'or',
    # so without the parentheses string-typed label columns would slip through
    colCat = rdd.map(lambda i: i[0] if ((i[1] == 'string' or i[1] == 'boolean')
                                        and i[0] not in colLabel) else None) \
                .filter(lambda i: i is not None).collect()
    colNum = rdd.map(lambda i: i[0] if (i[1] == 'double' and i[0] not in colLabel) else None) \
                .filter(lambda i: i is not None).collect()
    print(f"Label: {colLabel} \nCategories: {colCat}\nNumerical: {colNum}")
    return colCat, colNum
e28e5240bca65bd602234b6560b58d934012f530
2,902
def remove_characters(text, characters_to_remove=None):
    """
    Remove various auxiliary characters from a string.

    This function uses a hard-coded string of 'undesirable' characters
    (if no such string is provided), and removes them from the text provided.

    Parameters:
    -----------
    text : str
        A piece of text to remove characters from.
    characters_to_remove : str
        A string of 'undesirable' characters to remove from the text.

    Returns:
    --------
    text : str
        A piece of text with undesired characters removed.
    """
    # chars = "\\`*_{}[]()<>#+-.!$%@"
    if characters_to_remove is None:
        characters_to_remove = "\\`*_{}[]()<>#+!$%@"

    for c in characters_to_remove:
        if c in text:
            text = text.replace(c, '')

    return text
d2864983bfa3d58c631ff91a8719d45392f4bf42
2,903
def use(*authenticator_classes):
    """A decorator to attach one or more :class:`Authenticator`'s to the
    decorated class.

    Usage:
        from thorium import auth

        @auth.use(BasicAuth, CustomAuth)
        class MyEngine(Endpoint):
            ...

        OR

        @auth.use(BasicAuth)
        @auth.use(CustomAuth)
        class MyEngine(Endpoint):
            ...

    :param authenticator_classes: One or more :class:`Authenticator` class
        definitions.
    """
    def wrapped(cls):
        if not cls._authenticator_classes:
            cls._authenticator_classes = []
        cls._authenticator_classes.extend(authenticator_classes)
        return cls
    return wrapped
27aeb7711c842540a1ed77a76cebeb61e0342f1e
2,905
import math


def mutual_information(co_oc, oi, oj, n):
    """
    :param co_oc: Number of co-occurrences of the terms oi and oj in the corpus
    :param oi: Number of occurrences of the term oi in the corpus
    :param oj: Number of occurrences of the term oj in the corpus
    :param n: Total number of words in the corpus
    :return: the mutual information of the two terms
    """
    e = (oi * oj) / n
    return math.log2(co_oc / e)
76c27295c7e757282573eab71f2bb7cfd3df74cb
2,906
def is_dark(color: str) -> bool:
    """
    Whether the given color is dark or bright

    Taken from https://github.com/ozh/github-colors
    """
    l = 0.2126 * int(color[0:2], 16) \
        + 0.7152 * int(color[2:4], 16) \
        + 0.0722 * int(color[4:6], 16)
    return l / 255 <= 0.65
80fe2c4bd42b20fedff11ef200ae5ca246d4489d
2,907
from datetime import datetime


def get_date_input_examples(FieldClass) -> list:
    """
    Generate examples for a valid input value.

    :param FieldClass: InputField
    :return: List of input examples.
    """
    r = []
    for f in FieldClass.input_formats:
        now = datetime.now()
        r.append(now.strftime(f))
    return r
e0b73aac49ac2bbd6423faa3e5e5ebfb81c2d7b7
2,908
def merge_options(custom_options, **default_options):
    """
    Utility function to merge some default options with a dictionary
    of custom_options.

    Example:
        custom_options = dict(a=5, b=3)
        merge_options(custom_options, a=1, c=4)
        --> results in {a: 5, b: 3, c: 4}
    """
    merged_options = default_options
    merged_options.update(custom_options)
    return merged_options
a1676c9304f3c231aefaeb107c8fb6f5a8251b26
2,909
def _filter_nones(centers_list):
    """
    Filters out `None` from input list

    Parameters
    ----------
    centers_list : list
        List potentially containing `None` elements

    Returns
    -------
    new_list : list
        List without any `None` elements
    """
    return [c for c in centers_list if c is not None]
031e878ebc8028deea238f5ac902ca55dba72a6d
2,910
def isolate_integers(string):
    """Isolate positive integers from a string, returns as a list of integers."""
    return [int(s) for s in string.split() if s.isdigit()]
cc95f7a37e3ae258ffaa54ec59f4630c600e84e1
2,911
import json


def store_barbican_secret_for_coriolis(
        barbican, secret_info, name='Coriolis Secret'):
    """Stores secret connection info in Barbican for Coriolis.

    :param barbican: barbican_client.Client instance
    :param secret_info: secret info to store
    :param name: name to give the new Barbican secret
    :return: the HREF (URL) of the newly-created Barbican secret
    """
    payload = json.dumps(secret_info)

    secret = barbican.secrets.create(
        name=name,
        payload=payload,
        payload_content_type='application/json')
    secret_ref = secret.store()

    return secret_ref
218bf941203dd12bc78fc7a87d6a2f9f21761d57
2,912
def get_children_templates(pvc_enabled=False):
    """
    Define a list of all resources that should be created.
    """
    children_templates = {
        "service": "service.yaml",
        "ingress": "ingress.yaml",
        "statefulset": "statefulset.yaml",
        "configmap": "configmap.yaml",
        "secret": "secret.yaml",
    }
    if pvc_enabled:
        children_templates["pvc"] = "pvc.yaml"

    return children_templates
25db24b03542b1365529bbf1814e2fb801337022
2,913
def update_hirsch_index(depth_node_dict, minimum_hirsch_value, maximum_hirsch_value):
    """
    Calculates the Hirsch index for a radial tree.

    Note that we have a slightly different definition of the Hirsch index
    to the one found in: Gómez, V., Kaltenbrunner, A., & López, V. (2008,
    April). Statistical analysis of the social network and discussion
    threads in slashdot. In Proceedings of the 17th international conference
    on World Wide Web (pp. 645-654). ACM.

    Inputs:  - depth_node_dict: A map from node depth to node ids as a python dictionary.
             - minimum_hirsch_value: This is the previous Hirsch value.
             - maximum_hirsch_value: This is the depth of the latest node added to the tree.

    Output:  - hirsch: The Hirsch index.
    """
    # This is the previous hirsch index value.
    hirsch_index = minimum_hirsch_value

    if maximum_hirsch_value > minimum_hirsch_value:
        adopters = depth_node_dict[maximum_hirsch_value]
        width = len(adopters)

        if width >= maximum_hirsch_value:
            hirsch_index = maximum_hirsch_value

    return hirsch_index
2fdf5ca6aa216eacb3f18cd2f91875d02e0740ea
2,914
def normalize(features):
    """
    Scale data in provided series into [0,1] range.

    :param features: the series to rescale
    :return: the rescaled series
    """
    return (features - features.min()) / (features.max() - features.min())
a85d77e37e71c732471d7dcd42ae1aef2181f6dc
2,915
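A usage sketch for normalize above; it works on anything with element-wise min/max, e.g. a pandas Series (hypothetical data):

# assumes normalize from the record above
import pandas as pd

s = pd.Series([10.0, 15.0, 20.0])
print(normalize(s).tolist())  # -> [0.0, 0.5, 1.0]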
from datetime import datetime
import time


def dateToUsecs(datestring):
    """Convert Date String to Unix Epoch Microseconds."""
    dt = datetime.strptime(datestring, "%Y-%m-%d %H:%M:%S")
    return int(time.mktime(dt.timetuple())) * 1000000
cba081ae63523c86572463249b4324f2183fcaaa
2,917
import os


def glob(loader, node):
    """Construct glob expressions."""
    value = loader.construct_scalar(node)[len('~+/'):]
    return os.path.join(
        os.path.dirname(loader.name),
        value
    )
e8976fdac21f8decb85bb05a23bacc929d1d56eb
2,918
import argparse


def parse_args():
    """Parse command-line arguments."""
    parser = argparse.ArgumentParser(description='DSNT human pose model info')
    parser.add_argument(
        '--model', type=str, metavar='PATH', required=True,
        help='model state file')
    parser.add_argument(
        '--gpu', type=int, metavar='N', default=0,
        help='index of the GPU to use')
    args = parser.parse_args()
    return args
1365cf3b60004baa8fa6f07ae755d79f6d952e95
2,919
def find_core(read, core, core_position_sum, core_position_count, start=-1):
    """
    Find the core sequence, trying the "average" position first for efficiency.
    """
    if start < 0 and core_position_count > 0:
        core_position = round(core_position_sum / core_position_count)
        if len(read) > core_position + len(core):
            if read[core_position:core_position + len(core)] == core:
                return core_position
    return read.find(core, start + 1)
3a0de472194db00fac4e65a2b0e15cfa351eb70f
2,920
def find_vertical_bounds(hp, T):
    """
    Finds the upper and lower bounds of the characters' zone on the plate
    based on threshold value T.

    :param hp: horizontal projection (axis=1) of the plate image pixel
        intensities, as a numpy array (the bitwise ~ and & below rely on
        numpy's element-wise boolean semantics)
    :param T: Threshold value for bound detection
    :return: upper and lower bounds
    """
    N = len(hp)

    # Find lower bound
    i = 0
    while ~((hp[i] <= T) & (hp[i+1] > T)) & (i < int(N/2)):
        i += 1
    lower_bound = 0 if i == int(N/2) else i

    # Find upper bound
    i = N - 1
    while ~((hp[i-1] > T) & (hp[i] <= T)) & (i > int(N/2)):
        i -= 1
    upper_bound = i

    return [lower_bound, upper_bound]
8520c3b638cafe1cfb2d86cc7ce8c3f28d132512
2,921
def get_dosage_ann():
    """Convenience function for getting the dosage and snp annotation."""
    dos = {}
    s_ann = {}
    dos_path = (
        "/export/home/barnarj/CCF_1000G_Aug2013_DatABEL/CCF_1000G_Aug2013_Chr"
        "{0}.dose.double.ATB.RNASeq_MEQTL.txt")
    SNP_ANNOT = (
        "/proj/genetics/Projects/shared/Studies/Impute_CCF_Arrythmia/"
        "Projects/CCF/Projects/ATB/Projects/ATB_RNASeq/OutputData/"
        "ATB.RNASeq_Variant_Ann.bed.gz")
    return (dos, s_ann)
792caa3c9b6326178ca5a706b694c52cf1bddccc
2,922
import types
import typing
import re


def function_arguments(function_name: str, services_module: types.ModuleType) -> typing.List[str]:
    """Get function arguments for stan::services `function_name`.

    This function parses a function's docstring to get argument names. This
    is an inferior method to using
    `inspect.Signature.from_callable(function)`. Unfortunately, pybind11
    does not support this use of `inspect`.

    A compiled `services_module` is required for the lookup. Only simple
    function arguments are returned. For example, callback writers and
    var_context arguments are dropped.

    Arguments:
        function_name: Name of the function.
        services_module (module): Compiled model-specific services extension module.

    Returns:
        Argument names for `function_name`.
    """
    function = getattr(services_module, f"{function_name}_wrapper")
    docstring = function.__doc__
    # first line looks something like this:
    # function_name(arg1: int, arg2: int, ...) -> int
    function_name_with_arguments = docstring.split(" -> ", 1).pop(0)
    parameters = re.findall(r"(\w+): \w+", function_name_with_arguments)
    # remove arguments which are specific to the wrapper
    arguments_exclude = {"socket_filename"}
    return list(filter(lambda arg: arg not in arguments_exclude, parameters))
01a12d97c6b154159c4ba2d142e1374a008befe3
2,923
def cost_n_moves(prev_cost: int, weight: int = 1) -> int:
    """'g(n)' cost function that adds a 'weight' to each move."""
    return prev_cost + weight
77a737d68f2c74eaba484b36191b95064b05e1a9
2,924
import codecs
import re
import glob
import os


def get_email_dict(txt_dir):
    """
    :param txt_dir: the input directory containing all text files.
    :return: a dictionary where the key is the publication ID and the value
        is the list of authors' email addresses.
    """
    def chunk(text_file, page_limit=2000):
        fin = codecs.open(text_file, encoding='utf-8')
        doc = []
        n = 0
        for line in fin:
            line = line.strip().lower()
            if line:
                doc.append(line)
                n += len(line)
                if n > page_limit:
                    break
        return ' '.join(doc)

    # raw string avoids invalid-escape warnings in newer Pythons
    re_email = re.compile(
        r'[({\[]?\s*([a-z0-9\.\-_]+(?:\s*[,;|]\s*[a-z0-9\.\-_]+)*)\s*[\]})]?'
        r'\s*@\s*([a-z0-9\.\-_]+\.[a-z]{2,})')
    email_dict = {}

    for txt_file in glob.glob(os.path.join(txt_dir, '*.txt')):
        # print(txt_file)
        try:
            doc = chunk(txt_file)
        except UnicodeDecodeError:
            continue

        emails = []
        for m in re_email.findall(doc):
            ids = m[0].replace(';', ',').replace('|', ',')
            domain = m[1]
            if ',' in ids:
                emails.extend([ID.strip() + '@' + domain
                               for ID in ids.split(',') if ID.strip()])
            else:
                emails.append(ids + '@' + domain)

        if emails:
            key = os.path.basename(txt_file)[:-4]
            email_dict[key] = emails

    return email_dict
b7d70c8ec13bc2350e7291f8bf68026de4638bbc
2,925
def validate_listable_type(*atype):
    """Validate a list of atype.

    @validate_listable_type(str)
    def example_func(a_list):
        return a_list

    @validate_listable_type(int)
    def example_int_func(a_list):
        return a_list
    """
    if len(atype) != 1:
        raise ValueError("Expected one arg. Got {n} args.".format(n=len(atype)))
    type_ = atype[0]

    def wrap(f):
        def wrapped_f(*args, **kw):
            for arg in args[0]:
                if not isinstance(arg, type_):
                    raise TypeError("Expected type {t}. Got type {x} for {v}.".format(
                        t=type_, x=type(arg), v=args))
            # pass keyword arguments through instead of silently dropping them
            return f(*args, **kw)
        return wrapped_f
    return wrap
691737184fca8bdcc7f4c3779af86b9a041b71dc
2,926
def meh(captcha):
    """Returns the sum of the digits which match the next one in the captcha
    input string.

    >>> meh('1122')
    3
    >>> meh('1111')
    4
    >>> meh('1234')
    0
    >>> meh('91212129')
    9
    """
    result = 0
    for n in range(len(captcha)):
        if captcha[n] == captcha[(n + 1) % len(captcha)]:
            result += int(captcha[n])
    return result
2ff68455b7bb826a81392dba3bc8899374cbcc3e
2,927
def goodput_for_range(endpoint, first_packet, last_packet):
    """Computes the goodput (in bps) achieved between observing two specific packets."""
    if first_packet == last_packet or \
       first_packet.timestamp_us == last_packet.timestamp_us:
        return 0

    byte_count = 0
    seen_first = False
    for packet in endpoint.packets:
        if packet == last_packet:
            break
        if packet == first_packet:
            seen_first = True
        if not seen_first:
            continue

        # Packet contributes to goodput if it was not retransmitted
        if not packet.is_lost():
            byte_count += packet.data_len

    time_us = last_packet.timestamp_us - first_packet.timestamp_us
    return byte_count * 8 * 1E6 / time_us
aea56993771c1a250dacdfccf8328c7a0d3ce50b
2,929
import numpy


def agg_double_list(l):
    """
    @param l: a list of lists, one inner list of step results per episode
    @type l: list of lists
    @return: per-dimension mean and standard deviation of the episode sums
    @rtype: tuple of numpy arrays
    """
    # l: [ [...], [...], [...] ]
    # l_i: result of each step in the i-th episode
    s = [numpy.sum(numpy.array(l_i), 0) for l_i in l]
    s_mu = numpy.mean(numpy.array(s), 0)
    s_std = numpy.std(numpy.array(s), 0)
    return s_mu, s_std
82b67e70caccb1f5d430e8e9f0a9c75348d3bc7a
2,930
def get_string_from_bytes(byte_data, encoding="ascii"):
    """Decodes a string from DAT file byte data.

    Note that in byte form these strings are 0 terminated and this 0 is removed.

    Args:
        byte_data (bytes): the binary data to convert to a string
        encoding (string): optional, the encoding type to use when converting
    """
    string_bytes = byte_data[0:(len(byte_data) - 1)]  # strip off the 0 at the end of the string
    string = string_bytes.decode(encoding)
    return string
c07523139e2509fcc19b2ce1d9a933fcb648abfd
2,931
def default_component():
    """Return a default component."""
    return {
        'host': '192.168.0.1',
        'port': 8090,
        'name': 'soundtouch'
    }
780dd84ff613f2bccb56f560e5de77e9d57d9d5a
2,932
def is_free(board: list, pos: int) -> bool:
    """Checks if pos is free or filled."""
    return board[pos] == " "
64b75aa5d5b22887495e631e235632e080646422
2,933
import argparse


def _create_parser():
    """
    Creates argparser for SISPO which can be used for CLI and options.
    """
    parser = argparse.ArgumentParser(
        usage="%(prog)s [OPTION] ...",
        description=__file__.__doc__)
    parser.add_argument("-i", "--inputdir",
                        action="store", default=None, type=str,
                        help="Path to 'definition.json' file")
    parser.add_argument("-o", "--outputdir",
                        action="store", default=None, type=str,
                        help="Path to results directory")
    parser.add_argument("-n", "--name",
                        action="store", default=None, type=str,
                        help="Name of simulation scenario")
    parser.add_argument("--verbose", action="store_true",
                        help="Verbose output, displays log also on STDOUT")
    parser.add_argument("--with-sim", action="store_true", dest="with_sim",
                        help="If set, SISPO will simulate the scenario")
    parser.add_argument("--with-render", action="store_true", dest="with_render",
                        help="If set, SISPO will render the scenario")
    parser.add_argument("--with-compression", action="store_true",
                        dest="with_compression",
                        help="If set, SISPO will compress images")
    parser.add_argument("--with-reconstruction", action="store_true",
                        dest="with_reconstruction",
                        help="If set, SISPO will attempt reconstruction.")
    parser.add_argument("--restart", action="store_true",
                        help="Use cProfiler and write results to log.")
    parser.add_argument("--profile", action="store_true",
                        help="Use cProfiler and write results to log.")
    parser.add_argument("-v", "--version", action="store_true",
                        help="Prints version number.")
    parser.add_argument("--with-plugins", action="store_true",
                        dest="with_plugins",
                        help="Plugins that are run before rendering.")
    return parser
f1f62b8be37139c8c73293b376e0b9bd0540e5c5
2,934
def serialize_measurement(measurement):
    """Serializes a `openff.evaluator.unit.Measurement` into a dictionary
    of the form `{'value', 'error'}`.

    Parameters
    ----------
    measurement : openff.evaluator.unit.Measurement
        The measurement to serialize

    Returns
    -------
    dict of str and str
        A dictionary representation of a openff.evaluator.unit.Measurement
        with keys of {"value", "error"}
    """
    return {"value": measurement.value, "error": measurement.error}
69eedd9006c63f5734c762d6113495a913d5a8c4
2,935
def rename_record_columns(records, columns_to_rename):
    """
    Renames columns for better descriptions and to match Socrata column names.

    :param records: list - List of record dicts
    :param columns_to_rename: dict - Dict of Hasura columns and matching Socrata columns
    """
    for record in records:
        for column, rename_value in columns_to_rename.items():
            if column in record.keys():
                record[rename_value] = record.pop(column)
    return records
41d5cc90a368f61e8ce138c54e9f5026bacd62b9
2,936
def total(initial, *positionals, **keywords):
    """Simply sums up all the passed numbers."""
    count = initial
    for n in positionals:
        count += n
    for n in keywords:
        count += keywords[n]
    return count
2df0b37ddec7e4bcdd30d302d1b7297cec0ef3cc
2,937
def get_unsigned_js_val(abs_val: int, max_unit: int, abs_limit: int) -> int:
    """Get unsigned remapped joystick value in reverse range.

    For example, if the limit is 2000 and the input value is also 2000, the
    value returned will be 1. With the same limit, if the input value is 1,
    the output value will be near 2000. The same applies to the values in
    between. This evenly divides the value so that the maximum js range is
    remapped to a value in the range of the specified limit.

    abs_val - The current joystick value
    max_unit - The maximum value to remap the joystick value
    abs_limit - The maximum range of the joystick
    """
    inc = abs_limit / max_unit
    # ignoring signs to keep results positive
    if abs_val > 0:
        abs_val *= -1

    val = int((abs_val / inc) + max_unit)
    # if the value is zero, return 1 (maximum range)
    if val == 0:
        val = 1

    return val
6e77d76423ffeef756291924d00cbdbb2c03cc07
2,938
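The docstring's own example, replayed against get_unsigned_js_val above (its round figures are approximate; the exact remap of 1 is 1999):

# assumes get_unsigned_js_val from the record above
print(get_unsigned_js_val(2000, 2000, 2000))  # -> 1
print(get_unsigned_js_val(1, 2000, 2000))     # -> 1999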
def remove_list_by_name(listslist, name):
    """
    Finds a list in a list of lists by its name, removes and returns it.

    :param listslist: A list of Twitter lists.
    :param name: The name of the list to be found.
    :return: The list with the name, if it was found. None otherwise.
    """
    for i in range(len(listslist)):
        if listslist[i].name == name:
            return listslist.pop(i)
356a7d12f3b2af9951327984ac6d55ccb844bf72
2,939
import math

import numpy as np


def song_clicks_metric(ranking):
    """
    Spotify p
    :param ranking:
    :return:
    """
    if 1 in ranking:
        first_idx = ranking.index(1)
        return math.floor(first_idx / 10)
    return 51


# The two helpers below were written as @staticmethod members of a class;
# they are reproduced as module-level functions so the snippet runs on its
# own (note they require numpy).
def print_subtest_results(sub_test_names, metric_names, results):
    (num_subtest, num_metrics) = results.shape
    print('{0: <15}'.format("Subtest"), "\t", end="")
    for i in range(num_metrics):
        print(metric_names[i], "\t", end="")
    print()
    for st in range(num_subtest):
        print('{0: <15}'.format(sub_test_names[st]), "\t", end="")
        for m in range(num_metrics):
            print(np.round(results[st][m], decimals=3), "\t", end="")
        print()


def print_overall_results(metric_names, results):
    print('{0: <15}'.format(""), "\t", end="")
    for i in range(len(metric_names)):
        print(metric_names[i], "\t", end="")
    print()
    print('{0: <15}'.format("Overall"), "\t", end="")
    for m in range(len(metric_names)):
        print(np.round(results[m], decimals=3), "\t", end="")
    print()
ec6400e7929a2ab0f7f691fffa0ecb3be039b012
2,940
import copy


def merge_reports(master: dict, report: dict):
    """Merge classification reports into a master list."""
    keys = master.keys()
    ret = copy.deepcopy(master)
    for key in keys:
        scores = report[key]
        for score, value in scores.items():
            ret[key][score] += [value]
    return ret
3ac633c38a8bb73a57841138cba8cbb80091cf04
2,941
def summarize_single_OLS(regression, col_dict, name, is_regularized=False):
    """Return dataframe aggregating over-all stats from a dictionary-like
    object containing OLS result objects."""
    reg = regression
    try:
        col_dict['rsquared'][name] = reg.rsquared
    except AttributeError:
        col_dict['rsquared'][name] = 'NA'

    try:
        col_dict['rsquared_adj'][name] = reg.rsquared_adj
    except AttributeError:
        col_dict['rsquared_adj'][name] = 'NA'

    col_dict['f_pvalue'][name] = reg.f_pvalue
    col_dict['condition_number'][name] = reg.condition_number
    col_dict['regularized'][name] = is_regularized

    if not is_regularized:
        outliers = reg.outlier_test(method='fdr_bh')['fdr_bh(p)'] <= 0.05
        col_dict['n_outliers'][name] = outliers.sum()
        col_dict['outliers'][name] = ','.join(outliers.index[outliers].values)
    else:
        col_dict['n_outliers'][name] = "NA"
        col_dict['outliers'][name] = "NA"

    col_dict['aic'][name] = reg.aic
    return col_dict
b7dd8dfac6cf1b743491ae4e1abfc20fb73e8f31
2,943
def is_int(var):
    """Is this an integer (i.e., not a float)?"""
    return isinstance(var, int)
09924c6ea036fc7ee1add6ccbefc3fb0c9696345
2,944
def returnstringpacket(pkt):
    """Returns a packet as a hex string."""
    myString = ""
    for c in pkt:
        myString += "%02x" % c
    return myString
866ef7c69f522d4a2332798bdf97a966740ea0e4
2,945
def _markfoundfiles(arg, initargs, foundflags):
    """Mark file flags as found."""
    try:
        pos = initargs.index(arg) - 1
    except ValueError:
        pos = initargs.index("../" + arg) - 1

    # In cases where there is a single input file as the first parameter.
    # This should cover cases such as:
    # exec input.file
    # exec input.file > output.file
    if arg == initargs[0]:
        foundflags.append("<")

    # Other cases should pretty much be formats like:
    # exec -flag file -flag file -flag file
    elif (len(initargs) > 1 and initargs[pos][0] == "-"
            and initargs[pos] not in foundflags):
        foundflags.append(initargs[pos])

    # Or cases like exec -flag file -flag file inputfile > outputfile
    elif (len(initargs) > 1 and initargs[pos][0] != "-"
            and initargs[pos] not in foundflags):
        foundflags.append("<")

    return foundflags
e27ca91de403a6364cbebc8ee4ee835a9335dccc
2,946
def get_conversion_option(shape_records):
    """Prompts user for conversion options."""
    print("1 - Convert to a single zone")
    print("2 - Convert to one zone per shape (%d zones) (this can take a while)"
          % (len(shape_records)))
    import_option = int(input("Enter your conversion selection: "))
    return import_option
7608c588960eb3678970e0d4467c67ff9f17a331
2,952
from functools import reduce
import operator


def product_consec_digits(number, consecutive):
    """
    Returns the list of products of each run of `consecutive` consecutive
    digits of `number`; take max() of the result for the largest product.
    """
    digits = [int(dig) for dig in str(number)]
    max_start = len(digits) - consecutive
    return [reduce(operator.mul, digits[i:i + consecutive])
            for i in range(max_start + 1)]
2df16f7445e6d579b632e86904b77ec93e52a1f3
2,953
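A final usage sketch for product_consec_digits above:

# assumes product_consec_digits from the record above
print(product_consec_digits(12345, 2))       # -> [2, 6, 12, 20]
print(max(product_consec_digits(12345, 2)))  # -> 20, the largest product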