Columns: content (string, lengths 39–14.9k), sha1 (string, length 40), id (int64, 0–710k)
def file_size(value, fmt="{value:.1f} {suffix}", si=False): """ Takes a raw number of bytes and returns a humanized filesize. """ if si: base = 1000 suffixes = ("B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB") else: base = 1024 suffixes = ("B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB") max_suffix_index = len(suffixes) - 1 for i, suffix in enumerate(suffixes): unit = base ** (i + 1) if value < unit or i == max_suffix_index: return fmt.format(value=(base * value / unit), suffix=suffix)
272250966c0d301a86a136a7e84af6049e9fe47f
705,464
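A brief usage sketch for the file_size helper above (this assumes the function from the preceding entry is in scope; the expected outputs follow directly from the code):

# assumes file_size from the entry above is in scope
print(file_size(0))              # -> '0.0 B'
print(file_size(2048))           # -> '2.0 KiB'
print(file_size(1500, si=True))  # -> '1.5 KB' (decimal units when si=True)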
def OpenCredentials(cred_path: str): """ Opens and parses an AWS credentials file. :param cred_path: Path to the file containing the credentials :return: A dict containing the credentials """ with open(cred_path) as file: keys, values = map(lambda s: s.strip().split(','), file) credentials = dict(zip(keys, values)) return credentials
2f224a92b6c3999a45f6d73bb90504663614a1ac
705,465
def _remove_trailing_string(content, trailing): """ Strip trailing component `trailing` from `content` if it exists. Used when generating names from view classes. """ if content.endswith(trailing) and content != trailing: return content[:-len(trailing)] return content
775bafba5ea518e03499c9351b74ac472c265c9a
705,467
def find_loop_size( public_key, subject=7 ): """ To transform a subject number, start with the value 1. Then, a number of times called the loop size, perform the following steps: - Set the value to itself multiplied by the subject number. - Set the value to the remainder after dividing the value by 20201227 After the desired loop size, the subject number 7 is transformed into the public key itself. """ loops = 0 value = 1 while value != public_key: loops += 1 value *= subject value = value % 20201227 return loops
831f5f3e9867b06640493226fa35a89251f5aad5
705,468
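A small check of find_loop_size above against the published Advent of Code 2020 day-25 example keys (assumes the function from the preceding entry is in scope; the modular arithmetic in the comments can be verified by hand):

# assumes find_loop_size from the entry above is in scope
assert find_loop_size(5764801) == 8      # 7**8 % 20201227 == 5764801
assert find_loop_size(17807724) == 11    # 7**11 % 20201227 == 17807724
print("loop sizes check out")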
import re def get_skip_report_step_by_index(skip_report_list): """Parse the missed step from a skip report. Based on the index within the skip report file (each line a report), the missed step for this entry gets extracted. In case no step could be found, the whole entry could not be parsed, or no report exists for this index, the step is 'None'. """ def extract_step(index): skip_report_entry = ( skip_report_list[index] if index < len(skip_report_list) else "" ) step_findings = re.findall( "^([0-9]+),0x[0-9,a-f]+,[0-9,-]+ [0-9,:]+$", skip_report_entry.strip() ) step = int(step_findings[0]) if len(step_findings) == 1 else None return step return extract_step
7aa46050702aba07902ceec586175fce2226e1e3
705,470
def _prefix_with_swift_module(path, resource_info): """Prepends a path with the resource info's Swift module, if set. Args: path: The path to prepend. resource_info: The resource info struct. Returns: The path with the Swift module name prepended if it was set, or just the path itself if there was no module name. """ swift_module = resource_info.swift_module if swift_module: return swift_module + "-" + path return path
f2a12f59a3c30c09fa20d65b806779ad47f49b90
705,471
def extract_kernel_version(kernel_img_path): """ Extracts the kernel version out of the given image path. The extraction logic is designed to closely mimic the way the Zipl-configuration-to-BLS conversion script works, so that it is possible to identify possible issues with kernel images. :param str kernel_img_path: The path to the kernel image. :returns: Extracted kernel version from the given path :rtype: str """ # Mimic the bash substitution used in the conversion script, see: # https://github.com/ibm-s390-linux/s390-tools/blob/b5604850ab66f862850568a37404faa647b5c098/scripts/zipl-switch-to-blscfg#L168 if 'vmlinuz-' in kernel_img_path: fragments = kernel_img_path.rsplit('/vmlinuz-', 1) return fragments[1] if len(fragments) > 1 else fragments[0] fragments = kernel_img_path.rsplit('/', 1) return fragments[1] if len(fragments) > 1 else fragments[0]
2f75b220ff3e68b8c2ae2a046b7c604a786b05b8
705,477
def match(input_character, final_answer): """ :param input_character: str, the character entered by the user, checked for matches against the final answer. :param final_answer: str, the final answer. :return: str, the matching result, consisting of '-' and matched letters. """ result = "" for f in final_answer: if f == input_character: result += input_character else: result += '-' if final_answer.find(input_character) != -1: print('You are correct!') else: print('There is no ' + input_character + '\'s in the word.') return result
4323cd2eefa00126baad11576cdc9a29fe94ec0b
705,479
import calendar def get_month_day_range(date): """ For a date 'date' returns the start and end date for the month of 'date'. Month with 31 days: >>> date = datetime.date(2011, 7, 27) >>> get_month_day_range(date) (datetime.date(2011, 7, 1), datetime.date(2011, 7, 31)) Month with 28 days: >>> date = datetime.date(2011, 2, 15) >>> get_month_day_range(date) (datetime.date(2011, 2, 1), datetime.date(2011, 2, 28)) """ first_day = date.replace(day = 1) last_day = date.replace(day = calendar.monthrange(date.year, date.month)[1]) return first_day, last_day
610ff43b0e637afba780119c76181c6ff033a299
705,481
def leia_dinheiro(msg): """ -> Reads a value typed by the user and checks whether it is a valid numeric value :param msg: Message to be shown to the user :return: Returns the value typed by the user if it is valid """ while True: num = input(msg).strip().replace(',', '.') # Replace commas with dots if num.replace('.', '').isdigit(): # Drop the dots for the digit check num = float(num) break else: print(f'\033[1;31mERROR! \"{num}\" is not a valid price.\033[m') return num
aa8e21243009af1fde6d6c5e9cb611acff36369e
705,482
def _octet_bits(o): """ Get the bits of an octet. :param o: The octets. :return: The bits as a list in LSB-to-MSB order. :rtype: list """ if not isinstance(o, int): raise TypeError("o should be an int") if not (0 <= o <= 255): raise ValueError("o should be between 0 and 255 inclusive") bits = [0] * 8 for i in range(8): if 1 == o & 1: bits[i] = 1 o = o >> 1 return bits
f472a2ab65702e59439b7693260abf040d4e7742
705,483
import resource def __limit_less(lim1, lim2): """Helper function for comparing two rlimit values, handling "unlimited" correctly. Params: lim1 (integer): first rlimit lim2 (integer): second rlimit Returns: true if lim1 <= lim2 """ if lim2 == resource.RLIM_INFINITY: return True if lim1 == resource.RLIM_INFINITY: return False return lim1 <= lim2
8c8faebd4cc1eecfbd8e0a73b16b2bee0a433572
705,484
def val_err_str(val: float, err: float) -> str: """ Get a float representation of a value/error pair and create a string representation 12.345 +/- 1.23 --> 12.3(12) 12.345 +/- 0.012 -> 12.345(12) 12345 +/- 654 ---> 12340(650) :param val: float representing the value :param err: float representing the error in the value :return: a string representation of the value/error pair """ err_sig_figs = 2 # future upgrade path is to allow user to set this dps = 2 - err_sig_figs if err < 10: while err < 10.: err *= 10 dps += 1 err = round(err, 0) else: # err >= 10 while err > 100.: err /= 10 dps -= 1 err = round(err, 0) * 10 ** (-dps) val = round(val, dps) return f"{val:.{max(0, dps)}f}({err:.0f})"
5b759ff8e6996704edb7f6b68f6cb7e307593c9e
705,487
def port_name(name, nr=0): """Map node output number to name.""" return name + ":" + str(nr)
a82e0b9940fa6b7f11f1a11fbd8a1b9b1a57c07b
705,488
def _stringify(values): """internal method: used to convert values to a string suitable for an xml attribute""" if type(values) == list or type(values) == tuple: return " ".join([str(x) for x in values]) elif type(values) == type(True): return "1" if values else "0" else: return str(values)
a8f3c290ef949a254ca5dca9744ff3f4c602c4d2
705,489
def xml_safe(s): """Returns the XML-safe version of a given string. """ new_string = s.replace("&", "&amp;").replace("<", "&lt;") new_string = new_string.replace("\r", "").replace("\n", "<br/>") return new_string
166bf2b78441b4f22bf3a89f8be56efb756fe72f
705,490
from typing import List from typing import Dict def group_by_author(commits: List[dict]) -> Dict[str, List[dict]]: """Group GitHub commit objects by their author.""" grouped: Dict[str, List[dict]] = {} for commit in commits: name = commit["author"]["login"] if name not in grouped: grouped[name] = [] grouped[name].append(commit) return grouped
239c523317dc8876017d4b61bc2ad8887444085e
705,491
def non_empty_string(value): """Must be a non-empty non-blank string""" return bool(value) and bool(value.strip())
707d6c39a52b1ec0e317d156e74fef78170739d9
705,493
import pwd def get_uid_from_user(user): """Return UID from user name Looks up UID matching the supplied user name; returns None if no matching name can be found. NB returned UID will be an integer. """ try: return pwd.getpwnam(str(user)).pw_uid except KeyError: return None
dd4f6f839f985b923199b438216c567e1e84327d
705,494
from typing import Dict from typing import Any def get_context() -> Dict[str, Any]: """ Retrieve the current Server Context. Returns: - Dict[str, Any]: the current context """ ctx = _context.get() # type: ignore if ctx is not None: assert isinstance(ctx, dict) return ctx.copy() else: return {}
dad971abb645fa7c194db5cd9ce45e7c38166f31
705,498
def optional_observation_map(env, inner_obs): """ If the env implements the `observation` function (i.e. if one of the wrappers is an ObservationWrapper), call that `observation` transformation on the observation produced by the inner environment """ if hasattr(env, 'observation'): return env.observation(inner_obs) else: return inner_obs
b1b57e74e498e520df80a310f95d1c79799a517d
705,500
def RunMetadataLabels(run_metadata): """Returns all labels in run_metadata.""" labels = [] for dev_stats in run_metadata.step_stats.dev_stats: for node_stats in dev_stats.node_stats: labels.append(node_stats.timeline_label) return labels
277745263c75c4c6037f8b7a26b9421699bec3a5
705,501
def generate_output_file_name(input_file_name): """ Generates an output file name from input file name. :type input_file_name: str """ assert isinstance(input_file_name, str) output_file_name = input_file_name + ".gen.ipynb" return output_file_name
e638d676048e062711ca1a09d88a12d76fb9239d
705,505
import ast from typing import Set def all_statements(tree: ast.AST) -> Set[ast.stmt]: """ Return the set of all ast.stmt nodes in a tree. """ return {node for node in ast.walk(tree) if isinstance(node, ast.stmt)}
9f7cc367f01ec3bb90869879e79eb9cbe6636820
705,511
def key(i): """ Helper method to generate a meaningful key. """ return 'key{}'.format(i)
04658ebead9581ff97406111c9b85e361ee49ff8
705,514
import re def find_first_in_register_stop(seq): """ Find first stop codon on lowercase seq that starts at an index that is divisible by three """ # Compile regexes for stop codons regex_stop = re.compile('(taa|tag|tga)') # Stop codon iterator stop_iterator = regex_stop.finditer(seq) # Find next stop codon that is in register for stop in stop_iterator: if stop.end() % 3 == 0: return stop.end() # Return -1 if we failed to find a stop codon return -1
56741828c42ecf0cb96044d03c8d1b6bc4994e01
705,519
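A quick demonstration of find_first_in_register_stop above (assumes the function from the preceding entry is in scope): 'taa' in 'atgtaa' ends at index 6, which is divisible by three, while the only stop in 'atgaaa' ends at index 4 and is skipped.

# assumes find_first_in_register_stop from the entry above is in scope
print(find_first_in_register_stop('atgtaa'))  # -> 6 (in-register 'taa')
print(find_first_in_register_stop('atgaaa'))  # -> -1 ('tga' ends at 4, out of register)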
def _table_row(line): """ Return all elements of a data line. Return all elements of a data line. Simply splits it. Parameters ---------- line: string A stats line. Returns ------- list of strings A list of strings, containing the data on the line, split at white space. """ return line.split()
dc5d76db80059b0da257b45f12513d75c2765d55
705,522
def range2d(range_x, range_y): """Creates a 2D range.""" range_x = list(range_x) return [ (x, y) for y in range_y for x in range_x ]
ca33799a277f0f72e99836e81a7ffc98b191fc37
705,523
import json def is_json(payload): """Check if a payload is valid JSON.""" try: json.loads(payload) except (TypeError, ValueError): return False else: return True
a02499ffd0a890fa4697f1002c5deb0fc894cac0
705,525
def _seconds_to_hours(time): """Convert time: seconds to hours""" return time / 3600.0
d6abd9144882587833601e64d5c2226446f1bbdc
705,526
import click def generate_list_display(object, attrs): """Generate a display string for an object based on some attributes. Args: object: An object which has specific attributes. attrs: An iterable of strings containing attributes to get from the above object. Returns: A string containing a list display of the object with respect to the passed in attributes. """ return "\n".join( click.style(attr, bold=True) + ": %s" % getattr(object, attr) for attr in attrs )
17c876261bede0c38d91b4bd3e7b0048616f8cbf
705,528
def collocations_table_exist(con): """Return True if the collocations table exists""" query = con.query( "select 1 from information_schema.tables " "where table_name='collocations'") return bool(list(query.dictresult()))
9ffa05f698056d9fab6bb9651427b6bc64f414ea
705,530
from bs4 import BeautifulSoup import re def ftp_profile(publish_settings): """Takes PublishSettings, extracts ftp user, password, and host""" soup = BeautifulSoup(publish_settings, 'html.parser') profiles = soup.find_all('publishprofile') ftp_profile = [profile for profile in profiles if profile['publishmethod'] == 'FTP'][0] matches = re.search('ftp://(.+)/site/wwwroot', ftp_profile['publishurl']) host = matches.group(1) if matches else '' username = ftp_profile['username'].replace("\\$", "%5C%24") password = ftp_profile['userpwd'] return host, username, password, ftp_profile['publishurl']
003218e6d58d01afcbf062a14e68294d0033b8af
705,531
def get_list_from_file(filename): """ Returns a list of containers stored in a file (one on each line) """ with open(filename) as fh: return [_ for _ in fh.read().splitlines() if _]
8d9a271aa4adea81f62bf74bb1d3c308870f1baf
705,533
def accumulator(init, update): """ Generic accumulator function. .. code-block:: python # Simplest Form >>> c = functools.reduce(accumulator, ['this', ' ', 'that']) >>> c 'this that' # The type of the initial value determines output type. >>> c = functools.reduce(accumulator, ['Hello'], 5) >>> c 10 :param init: Initial Value :param update: Value to accumulate :return: Combined Values """ return ( init + len(update) if isinstance(init, int) else init + update )
6a4962932c8dba4d5c01aa8936787b1332a6323f
705,534
def forestvar(z_in): """ Return intrinsic variance of LyaF variance for weighting. This estimate is roughly from McDonald et al 2006 Parameters ---------- z_in : float or ndarray Returns ------- fvar : float or ndarray Variance """ fvar = 0.065 * ((1.+z_in)/(1.+2.25))**3.8 # Return return fvar
d3523510ee29b0cc12138da93001635f5ffe6a11
705,540
def adjust_age_groups(age_labels): """ for each pair of cols to aggregate, takes the first number of the first element, and the last number for the last element for instance: ["0-4",'5-10'] -> ['0-10'] """ i=0 new_age_labels=[] label="" for element in age_labels: if i%2==0: label+=element.split('-')[0] i+=1 elif i%2==1: label=label+'-'+element.split('-')[-1] new_age_labels.append(label) label="" i+=1 #making the last agegroup based on the first number + new_age_labels[-1]= new_age_labels[-1].split("-")[0]+"+" return(new_age_labels)
521a2f6779ae8fa3f3a53801e0f935844245cffc
705,541
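An illustrative call to adjust_age_groups above (assumes it is in scope); note that the last aggregated group is rewritten as an open-ended bracket:

# assumes adjust_age_groups from the entry above is in scope
print(adjust_age_groups(["0-4", "5-9", "10-14", "15-19"]))
# -> ['0-9', '10+']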
import sqlite3 def user_has_registered(userID): """Checks if a particular user has been registered in database""" database = sqlite3.connect("users.db") cursor = database.cursor() # Parameterized query avoids SQL injection through userID cursor.execute("SELECT user_id FROM profile WHERE user_id = ?", (userID,)) result = cursor.fetchone() database.close() if result is None: return False return True
e98f83b272a52828638f276575596489bebe1fcf
705,543
def key_gen(**kwargs): """ Key generator for linux. Determines key based on parameters supplied in kwargs. Keyword Parameters: @keyword geounit1: portable_id of a geounit @keyword geounit2: portable_id of a geounit @keyword region: region abbreviation """ if 'geounit1' in kwargs and 'geounit2' in kwargs: return 'adj:geounit1:%s:geounit2:%s' % (kwargs['geounit1'], kwargs['geounit2']) if 'region' in kwargs: return 'adj:region:%s' % kwargs['region']
02426fbf49e7a4d85094896546980828e2c6bc20
705,544
import math def lafferty_wyatt_point(lowedge, highedge, expo_slope): """calculates the l-w point for a bin where the true distribution is an exponential characterized by expo_slope. """ rhs = (math.exp(expo_slope*highedge) - math.exp(expo_slope*lowedge)) rhs /= expo_slope rhs /= (highedge - lowedge) return math.log(rhs) / expo_slope
326acddc1926f1a142f34e8cff9109554ec850d3
705,546
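A minimal sanity check for lafferty_wyatt_point above (assumes it is in scope): by construction, the exponential evaluated at the returned point equals the bin-averaged value of the exponential over the bin.

import math

# assumes lafferty_wyatt_point from the entry above is in scope
low, high, slope = 0.0, 1.0, -1.0
x_lw = lafferty_wyatt_point(low, high, slope)
bin_average = (math.exp(slope * high) - math.exp(slope * low)) / (slope * (high - low))
assert abs(math.exp(slope * x_lw) - bin_average) < 1e-12
print(round(x_lw, 4))  # ~0.4587, below the bin centre for a falling exponential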
def check_list(data): """check if data is a list, if it is not a list, it will return a list as [data]""" if type(data) is not list: return [data] else: return data
00ae7a857c3f969ca435928edf98ed5bb36c1c34
705,548
import base64 def encode_base64(filename): """Encode an image file to a base64 string. Args: filename: image file path. Returns: a base64-encoded string. """ with open(filename, "rb") as f: bs64 = base64.b64encode(f.read()).decode() return bs64
9eab28ec1cb9619411ea28a9640a2fa8b02e61a3
705,549
def _clean_annotated_text(text): """Cleans text from the format that it was presented to annotators in the S.M.A.R.T data annotation tool. Splits the title from the abstract text and strips any trailing whitespace. Returns: title (str): The project title text (str): The project abstract """ text = text.split('=====') title = text[1].strip() abstract = text[-1].strip() return title, abstract
356cdf893225c41d303e83f1cf2f3418544c76ae
705,551
from typing import Dict def swim_for_a_day(life_counts: Dict[int, int]): """Process the shoal, decrement the life_counts: any that get to -1 have procreated in the last day, their offspring are created with 8 day life_counts, whilst they get reset to 6 days… and are added to the count of any fish that moved down from 7 days. """ new_counts = {d - 1: p for d, p in life_counts.items()} if -1 in new_counts.keys(): new_counts[8] = new_counts[-1] new_counts[6] = new_counts[-1] + new_counts.get(6, 0) del new_counts[-1] return new_counts
3d5d3f48942a5a1f4eba3100e903df592d933e23
705,552
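A short worked call of swim_for_a_day above (assumes it is in scope): the two fish at timer 0 spawn two offspring at timer 8 and themselves reset to timer 6.

# assumes swim_for_a_day from the entry above is in scope
print(swim_for_a_day({0: 2, 1: 5, 6: 1}))
# -> {0: 5, 5: 1, 8: 2, 6: 2}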
import base64 def basic_token(username, password): """Generate the Authorization token for Resource Orchestrator (SO-ub container). Args: username (str): the SO-ub username password (str): the SO-ub password Returns: str: the Basic token """ if not isinstance(username, str): raise TypeError("The given type of username is `{}`. Expected str.".format(type(username))) if not isinstance(password, str): raise TypeError("The given type of password is `{}`. Expected str.".format(type(password))) credentials = str.encode(username + ":" + password) return bytes.decode(base64.b64encode(credentials))
054fccad28d1c18a34d630a664742f77e15ee4fe
705,561
import csv def read_alias(alias_csv_path): """Reads alias.csv at the specified path. Then returns a dict mapping from alias to monster id. """ with open(alias_csv_path) as alias_csv: return { alias: int(monster_id) for alias, monster_id in csv.reader(alias_csv)}
3a3818b81a916b4dd18ca7cab5fbcbe1b4050d03
705,562
def transform_resource_name(ctx, param, value): """Callback to transform resource_name into title case.""" if value is not None: return value.title() return value
b708c3318b731d652a7acad216093c96bc18fe2e
705,567
def extrema (im): """ Return the minimum and maximum of an image. Arguments: im image whose extrema are to be found """ return [im.min(), im.max()]
303d9c50cca91c3e73341d7b40195aceb02aef7a
705,568
def _create_statement(name, colnames): """Create table if not exists foo (...). Note: Every type is numeric. Table name and column names are all lowercased """ # every col is numeric, this may not be so elegant but simple to handle. # If you want to change this, Think again schema = ', '.join([col + ' ' + 'numeric' for col in colnames]) return "create table if not exists %s (%s)" % (name, schema)
53c7fc9486274645c5dc7dea2257fda3cf496f9e
705,569
def binary_or(a: int, b: int): """ Take in 2 integers, convert them to binary, and return a binary number that is the result of a binary or operation on the integers provided. >>> binary_or(25, 32) '0b111001' >>> binary_or(37, 50) '0b110111' >>> binary_or(21, 30) '0b11111' >>> binary_or(58, 73) '0b1111011' >>> binary_or(0, 255) '0b11111111' >>> binary_or(0, 256) '0b100000000' >>> binary_or(0, -1) Traceback (most recent call last): ... ValueError: the value of both input must be positive >>> binary_or(0, 1.1) Traceback (most recent call last): ... TypeError: 'float' object cannot be interpreted as an integer >>> binary_or("0", "1") Traceback (most recent call last): ... TypeError: '<' not supported between instances of 'str' and 'int' """ if a < 0 or b < 0: raise ValueError("the value of both input must be positive") a_binary = str(bin(a))[2:] # remove the leading "0b" b_binary = str(bin(b))[2:] max_len = max(len(a_binary), len(b_binary)) return "0b" + "".join( str(int("1" in (char_a, char_b))) for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len)) )
514fa4a02b778dfa91c4097bb8916522339cda33
705,570
import fnmatch def allowed_file(filename, allowed_exts): """ The validator for blueimp that limits which file extensions are allowed. Args: filename (str): a filepath allowed_exts (str): set of allowed file extensions Returns: bool: True if extension is an allowed file type, False otherwise """ allowed_extensions = ["*."+str(e) for e in list(allowed_exts)] for ext in allowed_extensions: if fnmatch.fnmatch(filename.lower(), ext): return True return False
af23f6017ffa76e5402800a77cf794a2c1bce330
705,572
def get_nb_build_nodes_and_entities(city, print_out=False): """ Returns number of building nodes and building entities in city Parameters ---------- city : object City object of pycity_calc print_out : bool, optional Print out results (default: False) Returns ------- res_tuple : tuple Results tuple with number of building nodes (int) and number of building entities (nb_b_nodes, nb_buildings) Annotations ----------- building node might also be PV- or wind-farm (not only building entity) """ nb_b_nodes = 0 nb_buildings = 0 for n in city.nodes(): if 'node_type' in city.nodes[n]: if city.nodes[n]['node_type'] == 'building': if 'entity' in city.nodes[n]: if city.nodes[n]['entity']._kind == 'building': nb_buildings += 1 if (city.nodes[n]['entity']._kind == 'building' or city.nodes[n][ 'entity']._kind == 'windenergyconverter' or city.nodes[n]['entity']._kind == 'pv'): nb_b_nodes += 1 if print_out: # pragma: no cover print('Number of building nodes (Buildings, Wind- and PV-Farms):') print(nb_b_nodes) print() print('Number of buildings: ', nb_buildings) print() return (nb_b_nodes, nb_buildings)
ff3b36dcd2ca7cd0be316b573f20a6dd16bd1c1d
705,587
def lin_exploit(version): """ The title says it all :) """ kernel = version startno = 119 exploits_2_0 = { 'Segment Limit Privilege Escalation': {'min': '2.0.37', 'max': '2.0.38', 'cve': ' CVE-1999-1166', 'src': 'https://www.exploit-db.com/exploits/19419/'} } exploits_2_2 = { 'ptrace kmod Privilege Escalation': {'min': '2.2.0', 'max': '2.2.25', 'cve': 'CVE-2003-0127', 'src': 'https://www.exploit-db.com/exploits/3/'}, 'mremap Privilege Escalation': {'min': '2.2.0', 'max': '2.2.26', 'cve': 'CVE-2004-0077', 'src': 'https://www.exploit-db.com/exploits/160/'}, 'ptrace setuid Privilege Escalation': {'min': '2.2.0', 'max': '2.2.20', 'cve': 'CVE-2001-1384', 'src': 'https://www.exploit-db.com/exploits/21124/'}, 'procfs Stream redirection to Process Memory Privilege Escalation': {'min': '2.2.0', 'max': '2.2.20', 'cve': 'N/A', 'src': 'https://www.exploit-db.com/exploits/20979/'}, 'Privileged Process Hijacking Privilege Escalation': {'min': '2.2.0', 'max': '2.2.25', 'cve': 'CVE-2003-0127', 'src': 'https://www.exploit-db.com/exploits/22362/'}, 'Sendmail Capabilities Privilege Escalation': {'min': '2.2.0', 'max': '2.2.16', 'cve': 'CVE-2000-0506', 'src': 'https://www.exploit-db.com/exploits/20001/'} } exploits_2_4 = { 'ptrace kmod Privilege Escalation': {'min': '2.4.0', 'max': '2.4.21', 'cve': 'CVE-2003-0127', 'src': 'https://www.exploit-db.com/exploits/3/'}, 'do_brk Privilege Escalation': {'min': '2.4.0', 'max': '2.4.23', 'cve': 'CVE-2003-0961', 'src': 'https://www.exploit-db.com/exploits/131/'}, 'do_mremap Privilege Escalation': {'min': '2.4.0', 'max': '2.4.24', 'cve': ' CVE-2003-0985', 'src': 'https://www.exploit-db.com/exploits/145/'}, 'mremap Privilege Escalation': {'min': '2.4.0', 'max': '2.4.25', 'cve': 'CVE-2004-0077', 'src': 'https://www.exploit-db.com/exploits/160/'}, 'uselib Privilege Escalation': {'min': '2.4.0', 'max': '2.4.29-rc2', 'cve': 'CVE-2004-1235', 'src': 'https://www.exploit-db.com/exploits/895/'}, 'bluez Privilege Escalation': {'min': '2.4.6', 'max': '2.4.30-rc2', 'cve': 'CVE-2005-0750', 'src': 'https://www.exploit-db.com/exploits/926/'}, 'System Call Emulation Privilege Escalation': {'min': '2.4.0', 'max': '2.4.37.10', 'cve': 'CVE-2007-4573', 'src': 'https://www.exploit-db.com/exploits/4460/'}, 'ptrace setuid Privilege Escalation': {'min': '2.4.0', 'max': '2.4.10', 'cve': 'CVE-2001-1384', 'src': 'https://www.exploit-db.com/exploits/21124/'}, 'procfs Stream redirection to Process Memory Privilege Escalation': {'min': '2.4.0', 'max': '2.4.4', 'cve': 'N/A', 'src': 'https://www.exploit-db.com/exploits/20979/'}, 'Privileged Process Hijacking Privilege Escalation': {'min': '2.4.0', 'max': '2.4.21', 'cve': 'CVE-2003-0127', 'src': 'https://www.exploit-db.com/exploits/22362/'}, 'sock_sendpage Privilege Escalation': {'min': '2.4.4', 'max': '2.4.37.4', 'cve': ' CVE-2009-2692', 'src': 'https://www.exploit-db.com/exploits/9641/'}, 'pipe.c Privilege Escalation': {'min': '2.4.1', 'max': '2.4.37', 'cve': 'CVE-2009-3547', 'src': 'https://www.exploit-db.com/exploits/9844/'}, 'Ptrace Privilege Escalation': {'min': '2.4.0', 'max': '2.4.35.3', 'cve': 'CVE-2007-4573', 'src': 'https://www.exploit-db.com/exploits/30604/'} } exploits_2_6 = { 'mremap Privilege Escalation': {'min': '2.6.0', 'max': '2.6.2', 'cve': 'CVE-2004-0077', 'src': 'https://www.exploit-db.com/exploits/160/'}, 'uselib Privilege Escalation': {'min': '2.6.0', 'max': '2.6.11', 'cve': 'CVE-2004-1235', 'src': 'https://www.exploit-db.com/exploits/895/'}, 'bluez Privilege Escalation': {'min': '2.6.0', 'max': '2.6.11.5', 'cve': 'CVE-2005-0750', 
'src': 'https://www.exploit-db.com/exploits/926/'}, 'SYS_EPoll_Wait Privilege Escalation': {'min': '2.6.0', 'max': '2.6.12', 'cve': 'CVE-2005-0736', 'src': 'https://www.exploit-db.com/exploits/1397/'}, 'logrotate prctl Privilege Escalation': {'min': '2.6.13', 'max': '2.6.17.4', 'cve': ' CVE-2006-2451', 'src': 'https://www.exploit-db.com/exploits/2031/'}, 'proc Privilege Escalation': {'min': '2.6.13', 'max': '2.6.17.4', 'cve': ' CVE-2006-2451', 'src': 'https://www.exploit-db.com/exploits/2013/'}, 'System Call Emulation Privilege Escalation': {'min': '2.6.0', 'max': '2.6.22.7', 'cve': 'CVE-2007-4573', 'src': 'https://www.exploit-db.com/exploits/4460/'}, 'BlueTooth Stack Privilege Escalation': {'min': '2.6.0', 'max': '2.6.11.5', 'cve': 'N/A', 'src': 'https://www.exploit-db.com/exploits/4756/'}, 'vmsplice Privilege Escalation': {'min': '2.6.17', 'max': '2.6.24.1', 'cve': 'CVE-2008-0600', 'src': 'https://www.exploit-db.com/exploits/5092/'}, 'ftruncate()/open() Privilege Escalation': {'min': '2.6.0', 'max': '2.6.22', 'cve': 'CVE-2008-4210', 'src': 'https://www.exploit-db.com/exploits/6851/'}, 'exit_notify() Privilege Escalation': {'min': '2.6.0', 'max': '2.6.30-rc1', 'cve': 'CVE-2009-1337', 'src': 'https://www.exploit-db.com/exploits/8369/'}, 'UDEV Privilege Escalation': {'min': '2.6.0', 'max': '2.6.40', 'cve': 'CVE-2009-1185', 'src': 'https://www.exploit-db.com/exploits/8478/'}, 'ptrace_attach() Race Condition': {'min': '2.6.0', 'max': '2.6.30-rc4', 'cve': 'CVE-2009-1527', 'src': 'https://www.exploit-db.com/exploits/8673/'}, 'Samba Share Privilege Escalation': {'min': '2.6.0', 'max': '2.6.39', 'cve': 'CVE-2004-0186', 'src': 'https://www.exploit-db.com/exploits/23674/'}, 'ReiserFS xattr Privilege Escalation': {'min': '2.6.0', 'max': '2.6.35', 'cve': 'CVE-2010-1146', 'src': 'https://www.exploit-db.com/exploits/12130/'}, 'sock_sendpage Privilege Escalation': {'min': '2.6.6', 'max': '2.6.30.5', 'cve': ' CVE-2009-2692', 'src': 'https://www.exploit-db.com/exploits/9641/'}, 'pipe.c Privilege Escalation': {'min': '2.6.0', 'max': '2.6.32-rc6', 'cve': 'CVE-2009-3547', 'src': 'https://www.exploit-db.com/exploits/33322/'}, 'Sys_Tee Privilege Escalation': {'min': '2.6.0', 'max': '2.6.17.6', 'cve': 'N/A', 'src': 'https://www.exploit-db.com/exploits/29714/'}, 'Linux Kernel Privilege Escalation': {'min': '2.6.18', 'max': '2.6.18-20', 'cve': 'N/A', 'src': 'https://www.exploit-db.com/exploits/10613/'}, 'Dirty COW': {'min': '2.6.22', 'max': '4.8.3', 'cve': 'CVE-2016-5195', 'src': 'https://www.exploit-db.com/exploits/40616/'}, 'compat Privilege Escalation': {'min': '2.6.0', 'max': '2.6.36', 'cve': 'CVE-2010-3081', 'src': 'https://www.exploit-db.com/exploits/15024/'}, 'DEC Alpha Linux - Privilege Escalation': {'min': '2.6.28', 'max': '3.0', 'cve': 'N/A', 'src': 'https://www.exploit-db.com/exploits/17391/'}, 'SELinux (RHEL 5) - Privilege Escalation': {'min': '2.6.30', 'max': '2.6.31', 'cve': 'CVE-2009-1897', 'src': 'https://www.exploit-db.com/exploits/9191/'}, 'proc Handling SUID Privilege Escalation': {'min': '2.6.0', 'max': '2.6.38', 'cve': 'CVE-2011-1020', 'src': 'https://www.exploit-db.com/exploits/41770/'}, 'PERF_EVENTS Privilege Escalation': {'min': '2.6.32', 'max': '3.8.9', 'cve': 'CVE-2013-2094', 'src': 'https://www.exploit-db.com/exploits/25444/'}, 'RDS Protocol Privilege Escalation': {'min': '2.6.0', 'max': '2.6.36-rc8', 'cve': 'CVE-2010-3904', 'src': 'https://www.exploit-db.com/exploits/15285/'}, 'Full-Nelson.c Privilege Escalation': {'min': '2.6.0', 'max': '2.6.37', 'cve': 'CVE-2010-4258', 'src': 
'https://www.exploit-db.com/exploits/15704/'}, 'Mempodipper Privilege Escalation': {'min': '2.6.39', 'max': '3.2.2', 'cve': 'CVE-2012-0056', 'src': 'https://www.exploit-db.com/exploits/35161/'}, 'Ext4 move extents ioctl Privilege Escalation': {'min': '2.6.0', 'max': '2.6.32-git6', 'cve': 'CVE-2009-4131', 'src': 'https://www.exploit-db.com/exploits/33395/'}, 'Ptrace Privilege Escalation': {'min': '2.6.0', 'max': '2.6.22.7', 'cve': 'CVE-2007-4573', 'src': 'https://www.exploit-db.com/exploits/30604/'}, 'udp_sendmsg Privilege Escalation': {'min': '2.6.0', 'max': '2.6.19', 'cve': 'CVE-2009-2698', 'src': 'https://www.exploit-db.com/exploits/9575/'}, 'fasync_helper() Privilege Escalation': {'min': '2.6.28', 'max': '2.6.33-rc4-git1', 'cve': 'CVE-2009-4141', 'src': 'https://www.exploit-db.com/exploits/33523/'}, 'CAP_SYS_ADMIN Privilege Escalation': {'min': '2.6.34', 'max': '2.6.40', 'cve': 'N/A', 'src': 'https://www.exploit-db.com/exploits/15916/'}, 'CAN BCM Privilege Escalation': {'min': '2.6.0', 'max': '2.6.36-rc1', 'cve': 'CVE-2010-2959', 'src': 'https://www.exploit-db.com/exploits/14814/'}, 'ia32syscall Emulation Privilege Escalation': {'min': '2.6.0', 'max': '2.6.36-rc4-git2', 'cve': 'CVE-2010-3301', 'src': 'https://www.exploit-db.com/exploits/15023/'}, 'Half-Nelson.c Econet Privilege Escalation': {'min': '2.6.0', 'max': '2.6.36.2', 'cve': 'CVE-2010-3848', 'src': 'https://www.exploit-db.com/exploits/17787/'}, 'ACPI custom_method Privilege Escalation': {'min': '2.6.0', 'max': '2.6.37-rc2', 'cve': 'CVE-2010-4347', 'src': 'https://www.exploit-db.com/exploits/15774/'}, 'SGID Privilege Escalation': {'min': '2.6.32.62', 'max': '3.14.8', 'cve': 'CVE-2014-4014', 'src': 'https://www.exploit-db.com/exploits/33824/'}, 'libfutex Privilege Escalation': {'min': '2.6.4', 'max': '3.14.6', 'cve': 'CVE-2014-3153', 'src': 'https://www.exploit-db.com/exploits/35370/'}, 'perf_swevent_init Privilege Escalation': {'min': '2.6.37', 'max': '3.8.9', 'cve': 'CVE-2013-2094', 'src': 'https://www.exploit-db.com/exploits/26131/'}, 'MSR Driver Privilege Escalation': {'min': '2.6', 'max': '3.7.6', 'cve': 'CVE-2013-0268', 'src': 'https://www.exploit-db.com/exploits/27297/'} } exploits_3 = { 'overlayfs Privilege Escalation': {'min': '3.0.0', 'max': '3.19.0', 'cve': 'CVE-2015-1328', 'src': 'https://www.exploit-db.com/exploits/37292/'}, 'CLONE_NEWUSER|CLONE_FS Privilege Escalation': {'min': '3.0', 'max': '3.3.6', 'cve': 'N/A', 'src': 'https://www.exploit-db.com/exploits/38390/'}, 'SO_SNDBUFFORCE & SO_RCVBUFFORCE Local Privilege Escalation': {'min': '3.5', 'max': '4.8.14', 'cve': 'CVE-2016-9793', 'src': 'https://www.exploit-db.com/exploits/41995/'}, 'Raw Mode PTY Echo Race Condition Privilege Escalation': {'min': '3.14-rc1', 'max': '3.16', 'cve': 'CVE-2014-0196', 'src': 'https://www.exploit-db.com/exploits/33516/'}, 'sock_diag_handlers() Privilege Escalation': {'min': '3.3.0', 'max': '3.7.10', 'cve': 'CVE-2013-1763', 'src': 'https://www.exploit-db.com/exploits/24555/'}, 'b43 Wireless Driver Privilege Escalation': {'min': '3.0', 'max': '3.9.4', 'cve': 'CVE-2013-2852', 'src': 'https://www.exploit-db.com/exploits/38559/'}, 'CONFIG_X86_X32=y Privilege Escalation': {'min': '3.4', 'max': '3.13.2', 'cve': 'CVE-2014-0038', 'src': 'https://www.exploit-db.com/exploits/31347/'}, 'Double-free usb-midi SMEP Local Privilege Escalation': {'min': '3.0', 'max': '4.5', 'cve': 'CVE-2016-2384', 'src': 'https://www.exploit-db.com/exploits/41999/'}, 'Remount FUSE Privilege Escalation': {'min': '3.2', 'max': '3.16.1', 'cve': 'CVE-2014-5207', 'src': 
'https://www.exploit-db.com/exploits/34923/'}, 'ptrace/sysret Privilege Escalation': {'min': '3.0', 'max': '3.15.4', 'cve': 'CVE-2014-4699', 'src': 'https://www.exploit-db.com/exploits/34134/'}, 'open-time Capability file_ns_capable() Privilege Escalation': {'min': '3.0', 'max': '3.8.9', 'cve': 'CVE-2013-1959', 'src': 'https://www.exploit-db.com/exploits/25450/'}, 'REFCOUNT Overflow/Use-After-Free in Keyrings Privilege Escalation': {'min': '3.8.0', 'max': '4.4.1', 'cve': 'CVE-2016-0728', 'src': 'https://www.exploit-db.com/exploits/39277/'} } exploits_4 = { 'overlayfs Privilege Escalation': {'min': '4.0', 'max': '4.3.3', 'cve': 'CVE-2015-8660', 'src': 'https://www.exploit-db.com/exploits/39166/'}, 'BPF Privilege Escalation': {'min': '4.4.0', 'max': '4.5.5', 'cve': 'CVE-2016-4557', 'src': 'https://www.exploit-db.com/exploits/39772/'}, 'AF_PACKET Race Condition Privilege Escalation': {'min': '4.2.0', 'max': '4.9.0-2', 'cve': 'CVE-2016-8655', 'src': 'https://www.exploit-db.com/exploits/40871/'}, 'DCCP Double-Free Privilege Escalation': {'min': '4.4.0', 'max': '4.9.11', 'cve': 'CVE-2017-6074', 'src': 'https://www.exploit-db.com/exploits/41458/'}, 'Netfilter target_offset Out-of-Bounds Privilege Escalation': {'min': '4.4.0-21-generic', 'max': '4.4.0-31-generic', 'cve': 'N/A', 'src': 'https://www.exploit-db.com/exploits/40049/'}, 'IP6T_SO_SET_REPLACE Privilege Escalation': {'min': '4.6.2', 'max': '4.6.3', 'cve': 'CVE-2016-4997', 'src': 'https://www.exploit-db.com/exploits/40489/'}, 'Packet Socket Local Privilege Escalation': {'min': '4.8.0', 'max': '4.10.6', 'cve': 'CVE-2017-7308', 'src': 'https://www.exploit-db.com/exploits/41994/'}, 'UDEV < 232 - Privilege Escalation': {'min': '4.8.0', 'max': '4.9.0', 'cve': 'N/A', 'src': 'https://www.exploit-db.com/exploits/41886/'} } if kernel.startswith('2.2'): for name, exploit in exploits_2_2.items(): # iterate over exploits dict if kernel >= exploit['min'] and kernel < exploit['max']: return name, exploit['cve'], exploit['src'] else: continue elif kernel.startswith('2.4'): for name, exploit in exploits_2_4.items(): if kernel >= exploit['min'] and kernel < exploit['max']: return name, exploit['cve'], exploit['src'] else: continue elif kernel.startswith('2.6'): for name, exploit in exploits_2_6.items(): if kernel >= exploit['min'] and kernel < exploit['max']: return name, exploit['cve'], exploit['src'] else: continue elif kernel.startswith('2.0'): for name, exploit in exploits_2_0.items(): if kernel >= exploit['min'] and kernel < exploit['max']: return name, exploit['cve'], exploit['src'] else: continue elif kernel.startswith('3'): for name, exploit in exploits_3.items(): if kernel >= exploit['min'] and kernel < exploit['max']: return name, exploit['cve'], exploit['src'] else: continue elif kernel.startswith('4'): for name, exploit in exploits_4.items(): if kernel >= exploit['min'] and kernel < exploit['max']: return name, exploit['cve'], exploit['src'] else: continue else: return 'No exploits found for this kernel version'
499e21091fb508b26564d06ad119d8b8ea783443
705,590
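The kernel checks in lin_exploit above compare version strings lexicographically, which mis-orders versions such as '2.6.9' vs '2.6.30'. Below is a hedged sketch of a tuple-based comparison helper that could be swapped in; the helper name and its use here are hypothetical, not part of the original.

import re

def _version_key(version):
    """Split a kernel version string into a tuple of ints for sane comparison."""
    return tuple(int(part) for part in re.findall(r"\d+", version))

# hypothetical usage: would replace the raw string comparisons inside lin_exploit
assert _version_key("2.6.9") < _version_key("2.6.30")
assert not ("2.6.9" < "2.6.30")  # the plain string comparison gets this wrong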
def cartesian2complex(real, imag): """ Calculate the complex number from the cartesian form: z = z' + i * z". Args: real (float|np.ndarray): The real part z' of the complex number. imag (float|np.ndarray): The imaginary part z" of the complex number. Returns: z (complex|np.ndarray): The complex number: z = z' + i * z". """ return real + 1j * imag
1fd44bc0accff8c9f26edfa84f4fcfafb2323728
705,591
def upper_case(string): """ Returns its argument in upper case. :param string: str :return: str """ return string.upper()
bbf3fc8b856d466ec73229145443566d85a3457a
705,592
def add_metadata(infile, outfile, sample_metadata): """Add sample-level metadata to a biom file. Sample-level metadata should be in a format akin to http://qiime.org/tutorials/tutorial.html#mapping-file-tab-delimited-txt :param infile: String; name of the biom file to which metadata shall be added :param outfile: String; name of the resulting metadata-enriched biom file :param sample_metadata: String; name of the sample-level metadata tab-delimited text file. Sample attributes are taken from this file. Note: the sample names in the `sample_metadata` file must match the sample names in the biom file. External dependencies - biom-format: http://biom-format.org/ """ return { "name": "biom_add_metadata: " + infile, "actions": [("biom add-metadata" " -i "+infile+ " -o "+outfile+ " -m "+sample_metadata)], "file_dep": [infile], "targets": [outfile] }
e779f876159741de60e99002a90906b151dc7530
705,599
def sql_sanitize(sql_name): """ Return a SQL name (table or column) cleaned of problematic characters. ex. punctuation )(][; whitespace Don't use with values, which can be properly escaped with parameterization. Ideally retaining only alphanumeric char. Credits: Donald Miner, Source: StackOverflow, DateAccessed: 2020-02-20 """ sanitize_name = "".join(char for char in sql_name if char.isalnum()) return sanitize_name
9ce9e0e8bed2348079fb23f2d27c53880fa1c795
705,601
def midpoint(rooms): """ Helper function to help find the midpoint between the two rooms. Args: rooms: list of rooms Returns: int: Midpoint """ return rooms[0] + (rooms[0] + rooms[2]) // 2, rooms[1] + (rooms[1] + rooms[3]) // 2
60b3ba53fb15154ff97ab9c6fa3cf1b726bc2df1
705,603
def int_or_float(x): """Convert `x` to either `int` or `float`, preferring `int`. Raises: ValueError : If `x` is not convertible to either `int` or `float` """ try: return int(x) except ValueError: return float(x)
d0a4def320f88655e494f89b7239e47e1ee70d0d
705,606
def nohighlight(nick): """add a ZWNJ to nick to prevent highlight""" return nick[0] + "\u200c" + nick[1:]
1b8d0cafc5df4a442daafdece59af1675ab1de33
705,607
def d_enter_waste_cooler(W_mass, rho_waste, w_drift): """ Calculates the diameter of the tube feeding waste into the waste cooler. Parameters ---------- W_mass : float The mass flow rate of waste, [kg/s] rho_waste : float The density of liquid at boiling temperature, [kg/m**3] w_drift : float The speed of steam at the tube, [m/s] Returns ------- d_enter_waste_cooler : float The diameter of the tube feeding waste into the waste cooler, [m] References ---------- &&& """ return W_mass/(0.785*rho_waste*w_drift)
651c1adc0b90a286c2c8685c389268bc8834ad73
705,615
def _get_option_of_highest_precedence(config, option_name): """looks in the config and returns the option of the highest precedence This assumes that there are options and flags that are equivalent Args: config (_pytest.config.Config): The pytest config object option_name (str): The name of the option Returns: str: The value of the option that is of highest precedence None: no value is present """ # Try to get configs from CLI and ini try: cli_option = config.getoption("--{}".format(option_name)) except ValueError: cli_option = None try: ini_option = config.getini(option_name) except ValueError: ini_option = None highest_precedence = cli_option or ini_option return highest_precedence
4f3bca4ff5b0a1eb04fbdc7a5d22bc09dbc95df6
705,618
def transform(data, transformer): """This hook defines how DataRobot will use the trained object from fit() to transform new data. DataRobot runs this hook when the task is used for scoring inside a blueprint. As an output, this hook is expected to return the transformed data. The input parameters are passed by DataRobot based on dataset and blueprint configuration. Parameters ------- data: pd.DataFrame Data that DataRobot passes for transformation. transformer: Any Trained object, extracted by DataRobot from the artifact created inside fit(). In this example, it's a function Returns ------- pd.DataFrame Returns a dataframe with transformed data. """ return data.apply(transformer)
b52577c0b2a3f3edb1297dcf9c567f9845f04bd5
705,624
from typing import List def get_scale(notes: List[str]) -> int: """Convert a list of notes to a scale constant. # Args - *notes*: list of notes in the scale. This should be a list of string where each string is a note ABC notation. Sharps should be represented with a pound sign preceding the note e.g. '#A' and flats should be represented with a lower case b preceding the note e.g. 'bB'. # Returns An integer mask used to represent a musical key or scale as an argument to any of the MusicalHash methods. # Raises A ValueError if an invalid string is included in the input list. """ note_map = {'A': 0x1, '#A': 0x2, 'bB': 0x2, 'B': 0x4, 'C': 0x8, '#C': 0x10, 'bD': 0x10, 'D': 0x20, '#D': 0x40, 'bE': 0x40, 'E': 0x80, 'F': 0x100, '#F': 0x200, 'bG': 0x200, 'G': 0x400, '#G': 0x800, 'bA': 0x800} scale = 0x0 for note in notes: try: scale |= note_map[note] except KeyError: raise ValueError( 'The string {} is not a valid musical note'.format(note)) return scale
91cbcc7bfa05df52adf741b85f78beeabf819966
705,629
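A small usage sketch of get_scale above (assumes it is in scope): an A major triad sets the bits for A, C#, and E, and an invalid note name raises ValueError.

# assumes get_scale from the entry above is in scope
mask = get_scale(['A', '#C', 'E'])
print(hex(mask))  # -> 0x91 (0x1 | 0x10 | 0x80)
try:
    get_scale(['H'])
except ValueError as exc:
    print(exc)  # -> The string H is not a valid musical note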
import math def slurm_format_bytes_ceil(n): """ Format bytes as text. SLURM expects KiB, MiB or Gib, but names it KB, MB, GB. SLURM does not handle Bytes, only starts at KB. >>> slurm_format_bytes_ceil(1) '1K' >>> slurm_format_bytes_ceil(1234) '2K' >>> slurm_format_bytes_ceil(12345678) '13M' >>> slurm_format_bytes_ceil(1234567890) '2G' >>> slurm_format_bytes_ceil(15000000000) '14G' """ if n >= (1024 ** 3): return "%dG" % math.ceil(n / (1024 ** 3)) if n >= (1024 ** 2): return "%dM" % math.ceil(n / (1024 ** 2)) if n >= 1024: return "%dK" % math.ceil(n / 1024) return "1K"
ce48c778b9605105ed9b66a55d27796fb90499cc
705,630
def rowwidth(view, row): """Returns the number of characters of ``row`` in ``view``. """ return view.rowcol(view.line(view.text_point(row, 0)).end())[1]
f8db1bf6e3d512d1a2bd5eeb059af93e8ac3bc5f
705,633
def sec2msec(sec): """Convert `sec` to milliseconds.""" return int(sec * 1000)
f1b3c0bf60ab56615ed93f295e7716e56c6a1117
705,635
def on_segment(p, r, q, epsilon): """ Given three collinear points p, q, r, and a threshold epsilon, determine if point q lies on line segment pr """ # Taken from http://stackoverflow.com/questions/328107/how-can-you-determine-a-point-is-between-two-other-points-on-a-line-segment crossproduct = (q.y - p.y) * (r.x - p.x) - (q.x - p.x) * (r.y - p.y) if abs(crossproduct) > epsilon: return False # (or != 0 if using integers) dotproduct = (q.x - p.x) * (r.x - p.x) + (q.y - p.y)*(r.y - p.y) if dotproduct < 0: return False squaredlengthba = (r.x - p.x)*(r.x - p.x) + (r.y - p.y)*(r.y - p.y) if dotproduct > squaredlengthba: return False return True
b8517fc9d3c6d916cac698913c35ba4e5d873697
705,646
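A quick check of on_segment above with a simple point type; the Point namedtuple here is an illustrative stand-in, since the original only assumes .x/.y attributes.

from collections import namedtuple

Point = namedtuple("Point", ["x", "y"])  # stand-in; on_segment only needs .x and .y

p, r = Point(0, 0), Point(4, 4)
print(on_segment(p, r, Point(2, 2), epsilon=1e-9))  # -> True, inside the segment
print(on_segment(p, r, Point(5, 5), epsilon=1e-9))  # -> False, beyond point r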
def transform(func, geom): """Applies `func` to all coordinates of `geom` and returns a new geometry of the same type from the transformed coordinates. `func` maps x, y, and optionally z to output xp, yp, zp. The input parameters may iterable types like lists or arrays or single values. The output shall be of the same type. Scalars in, scalars out. Lists in, lists out. For example, here is an identity function applicable to both types of input. def id_func(x, y, z=None): return tuple(filter(None, [x, y, z])) g2 = transform(id_func, g1) Using pyproj >= 2.1, this example will accurately project Shapely geometries: import pyproj wgs84 = pyproj.CRS('EPSG:4326') utm = pyproj.CRS('EPSG:32618') project = pyproj.Transformer.from_crs(wgs84, utm, always_xy=True).transform g2 = transform(project, g1) Note that the always_xy kwarg is required here as Shapely geometries only support X,Y coordinate ordering. Lambda expressions such as the one in g2 = transform(lambda x, y, z=None: (x+1.0, y+1.0), g1) also satisfy the requirements for `func`. """ if geom.is_empty: return geom if geom.type in ('Point', 'LineString', 'LinearRing', 'Polygon'): # First we try to apply func to x, y, z sequences. When func is # optimized for sequences, this is the fastest, though zipping # the results up to go back into the geometry constructors adds # extra cost. try: if geom.type in ('Point', 'LineString', 'LinearRing'): return type(geom)(zip(*func(*zip(*geom.coords)))) elif geom.type == 'Polygon': shell = type(geom.exterior)( zip(*func(*zip(*geom.exterior.coords)))) holes = list(type(ring)(zip(*func(*zip(*ring.coords)))) for ring in geom.interiors) return type(geom)(shell, holes) # A func that assumes x, y, z are single values will likely raise a # TypeError, in which case we'll try again. except TypeError: if geom.type in ('Point', 'LineString', 'LinearRing'): return type(geom)([func(*c) for c in geom.coords]) elif geom.type == 'Polygon': shell = type(geom.exterior)( [func(*c) for c in geom.exterior.coords]) holes = list(type(ring)([func(*c) for c in ring.coords]) for ring in geom.interiors) return type(geom)(shell, holes) elif geom.type.startswith('Multi') or geom.type == 'GeometryCollection': return type(geom)([transform(func, part) for part in geom.geoms]) else: raise ValueError('Type %r not recognized' % geom.type)
71bde1500ec8370a7718542ee26181d2aad6591f
705,650
def check_flush(hand): """Check whether the hand has a flush; returns a boolean.""" return len(hand) == len(hand.by_suit(hand[0].suit))
de11f50f11b477e61f284063c7f0da0dda2dd87e
705,655
import torch def binary_accuracy(preds, y): """ Returns accuracy per batch :param preds: prediction logits :param y: target labels :return: accuracy = percentage of correct predictions """ # round predictions to the closest integer rounded_predictions = torch.round(torch.sigmoid(preds)) correct = (rounded_predictions == y).float() acc = correct.sum() / len(correct) return acc
2a321bb9e60a937a879619c2fa3baf1cbe968a33
705,656
import re def _to_numeric_range(cell): """ Translate an Excel cell (eg 'A1') into a (col, row) tuple indexed from zero. e.g. 'A1' returns (0, 0) """ match = re.match(r"^\$?([A-Z]+)\$?(\d+)$", cell.upper()) if not match: raise RuntimeError("'%s' is not a valid excel cell address" % cell) col, row = match.groups() # A = 1 col_digits = map(lambda c: ord(c) - ord("A") + 1, col) col = 0 for digit in col_digits: col = (col * 26) + digit row = int(row) - 1 col = col - 1 return col, row
468f452a7e4d4b045ecbb1a1fc261712fb25f3fc
705,658
import re def parse_regex(ctx, param, values): """Compile a regex if given. :param click.Context ctx: click command context. :param click.Parameter param: click command parameter (in this case, ``ignore_regex`` from ``-r|--ignore-regex``). :param list(str) values: list of regular expressions to be compiled. :return: a list of compiled regular expressions. .. versionchanged:: 1.1.3 parameter value (``values``) must be a ``list`` of ``str``s. """ if not values: return return [re.compile(v) for v in values]
b920d5a406ac3b7a8f28bb9125313c90eec5e212
705,661
def get_query_string(**kwargs): """ Concatenates the non-None keyword arguments to create a query string for ElasticSearch. :return: concatenated query string or None if not arguments were given """ q = ['%s:%s' % (key, value) for key, value in kwargs.items() if value not in (None, '')] return ' AND '.join(q) or None
cc73c157a8975e5df9c98efcd5b10396e5175486
705,663
def add_quotes(path): """Return quotes if needed for spaces on path.""" quotes = '"' if ' ' in path and '"' not in path else '' return '{quotes}{path}{quotes}'.format(quotes=quotes, path=path)
6e65da4512183ef62a0ac22b4c3c74f9e5273fbd
705,664
def bostock_cat_colors(color_sets = ["set3"]): """ Get almost as many categorical colors as you please. Get more than one of the color brewer sets with ['set1' , 'set2'] Parameters ---------- sets : list list of color sets to return valid options are (set1, set2, set3, pastel1, pastel2, paired, dark, accent, category10) Returns ------- categorical_colors : list list of strings (e.g. ["#e41a1c",...]) Examples -------- >>> bostock_cat_colors(['set3'])[:5] ['#8dd3c7', '#ffffb3', '#bebada', '#fb8072', '#80b1d3'] >>> bostock_cat_colors(['category10'])[:5] ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd'] Notes ----- list of hex colors can be found here: https://observablehq.com/@d3/color-schemes """ bostock = \ {"set1" : ["#e41a1c","#377eb8","#4daf4a","#984ea3", "#ff7f00","#ffff33","#a65628","#f781bf", "#999999"], "set2" : ["#66c2a5","#fc8d62","#8da0cb","#e78ac3", "#a6d854","#ffd92f","#e5c494","#b3b3b3"], "set3" : ["#8dd3c7","#ffffb3","#bebada","#fb8072", "#80b1d3","#fdb462","#b3de69","#fccde5", "#d9d9d9","#bc80bd","#ccebc5","#ffed6f"], "pastel1" : ["#fbb4ae","#b3cde3","#ccebc5","#decbe4", "#fed9a6","#ffffcc","#e5d8bd","#fddaec", "#f2f2f2"], "pastel2" : ["#b3e2cd","#fdcdac","#cbd5e8","#f4cae4", "#e6f5c9","#fff2ae","#f1e2cc","#cccccc"], "paired" : ["#a6cee3","#1f78b4","#b2df8a","#33a02c", "#fb9a99","#e31a1c","#fdbf6f","#ff7f00", "#cab2d6","#6a3d9a","#ffff99","#b15928"], "dark" : ["#1b9e77","#d95f02","#7570b3","#e7298a", "#66a61e","#e6ab02","#a6761d","#666666"], "accent" : ["#7fc97f","#beaed4","#fdc086","#ffff99", "#386cb0","#f0027f","#bf5b17","#666666"], "category10":["#1f77b4","#ff7f0e","#2ca02c","#d62728", "#9467bd","#8c564b","#e377c2","#7f7f7f", "#bcbd22","#17becf"] } l = [bostock[k] for k in color_sets] categorical_colors = [item for sublist in l for item in sublist] return categorical_colors
d01a2c833c3ee4ab1a196184ec4aecdb6cfc97a0
705,666
def _quote_embedded_quotes(text): """ Replace any embedded quotes with two quotes. :param text: the text to quote :return: the quoted text """ result = text if '\'' in text: result = result.replace('\'', '\'\'') if '"' in text: result = result.replace('"', '""') return result
71231e590e025c2ceb7b2dd4fde4465a9ff61a4c
705,668
def exp2(x): """Calculate 2**x""" return 2 ** x
d76d1e344e79ebb05d38a2e7e6ef36b6f367e85b
705,669
def archived_minute(dataSet, year, month, day, hour, minute): """ Input: a dataset and specific minute Output: a list of ride details at that minute or -1 if no ride during that minute """ year = str(year) month = str(month) day = str(day) #Converts hour and minute into 2 digit integers (that are strings) hour = "%02d" % hour minute = "%02d" % minute timeStamp = month+'/'+day+'/'+year+' '+hour+':'+minute+':'+'00' if timeStamp in dataSet: return dataSet[timeStamp] else: return -1
e550cb8ae5fbcfcc2a0b718dc2e4f3372f100015
705,670
def inputRead(c, inps): """ Reads the tokens in the input channels (Queues) given by the list inps using the token rates defined by the list c. It outputs a list where each element is a list of the read tokens. Parameters ---------- c : [int] List of token consumption rates. inps : [Queue] List of channels. Returns ---------- inputs: [List] List of token lists. """ if len(c) != len(inps): raise Exception("Token consumption list and Queue list have different sizes") inputs = [] for i in range(len(c)): aux = [] for j in range(c[i]): aux.append(inps[i].get()) inputs.append(aux) return inputs
ea70548f7da4fae66fe5196734bbf39deb255537
705,671
def wall_filter(points, img): """ Filters away points that are inside walls. Works by checking where the refractive index is not 1. """ deletion_mask = img[points[:, 0], points[:, 1]] != 1 filtered_points = points[~deletion_mask] return filtered_points
05a34602e8a555eb1f1739f5de910a71514a92ae
705,674
def roq_transform(pressure, loading): """Rouquerol transform function.""" return loading * (1 - pressure)
b69d83579cdb904cc7e3625a371e1f6c0573e44b
705,675
def descope_queue_name(scoped_name): """Descope Queue name with '.'. Returns the queue name from the scoped name which is of the form project-id.queue-name """ return scoped_name.split('.')[1]
24de78d12399e0894f495cd5c472b10c2315e4af
705,679
def make_auth_header(auth_token): """Make the authorization headers to communicate with endpoints which implement Auth0 authentication API. Args: auth_token (dict): a dict obtained from the Auth0 domain oauth endpoint, containing the signed JWT (JSON Web Token), its expiry, the scopes granted, and the token type. Returns: headers (dict): A dict representing the headers with necessary token information to talk to Auth0 authentication required endpoints. """ token_type = auth_token['token_type'] access_token = auth_token['access_token'] headers = { "Content-type": "application/json", "Authorization": "{token_type} {access_token}".format( token_type=token_type, access_token=access_token ), } return headers
e7c9b93cfbda876668068fb871d3abaf06157204
705,683
def dscp_class(bits_0_2, bit_3, bit_4): """ Takes values of DSCP bits and computes dscp class Bits 0-2 decide major class Bit 3-4 decide drop precedence :param bits_0_2: int: decimal value of bits 0-2 :param bit_3: int: value of bit 3 :param bit_4: int: value of bit 4 :return: DSCP class name """ bits_3_4 = (bit_3 << 1) + bit_4 if bits_3_4 == 0: dscp_cl = "cs{}".format(bits_0_2) elif (bits_0_2, bits_3_4) == (5, 3): dscp_cl = "ef" else: dscp_cl = "af{}{}".format(bits_0_2, bits_3_4) return dscp_cl
79e9881e413a5fcbbbaab110e7b3346a2dbcaa53
705,684
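A few example calls for dscp_class above (assumes it is in scope), covering the class-selector, assured-forwarding, and expedited-forwarding branches:

# assumes dscp_class from the entry above is in scope
print(dscp_class(3, 0, 0))  # -> 'cs3'  (drop-precedence bits are 0)
print(dscp_class(1, 1, 0))  # -> 'af12' (bits 3-4 give drop precedence 2)
print(dscp_class(5, 1, 1))  # -> 'ef'   (101 110 is expedited forwarding)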
def scale_to_one(iterable): """ Scale an iterable of numbers proportionally such that the highest number equals 1 Example: >>> scale_to_one([5, 4, 3, 2, 1]) [1.0, 0.8, 0.6, 0.4, 0.2] """ m = max(iterable) return [v / m for v in iterable]
92cfc7ef586ecfea4300aeedabe2410a247610f7
705,686
def get_total_count(data): """ Retrieves the total count from a Salesforce SOQL query. :param dict data: data from the Salesforce API :rtype: int """ return data['totalSize']
7cb8696c36449425fbcfa944f1f057d063972888
705,688
def substring_in_list(substr_to_find, list_to_search): """ Returns a boolean value to indicate whether or not a given substring is located within the strings of a list. """ result = [s for s in list_to_search if substr_to_find in s] return len(result) > 0
77521a1c5d487fa110d5adecb884dd298d2515e5
705,693
def axes_to_list(axes_data: dict) -> list: """helper method to convert a dict of sensor axis graphs to a 2d array for graphing """ axes_tuples = axes_data.items() axes_list = [axes[1].tolist() for axes in axes_tuples] return axes_list
fb2e5ef1f2283e2f31e5c8828a3ec7ef94869c5c
705,697
from typing import Iterable import functools import operator def prod(values: Iterable[int]) -> int: """Compute the product of the integers.""" return functools.reduce(operator.mul, values)
3f03200078daf1b0b27f777e7744144ab72ec7af
705,698
def get_stars_dict(stars): """ Transform list of stars into dictionary where keys are their names Parameters ---------- stars : list, iterable Star objects Return ------ dict Stars dictionary """ x = {} for st in stars: try: x[st.name] = st except: pass return x
6d627be48a96d8ba93bd13511a05c251f3a3f169
705,699
def clean_packages_list(packages): """ Remove comments from the package list """ lines = [] for line in packages: if not line.startswith("#"): lines.append(line) return lines
a6c942f9b90c8f6c610ba0b57728f3da48f35ded
705,700
def add_kwds(dictionary, key, value): """ A simple helper function to initialize our dictionary if it is None and then add in a single keyword if the value is not None. It doesn't add any keywords at all if passed value==None. Parameters ---------- dictionary: dict (or None) A dictionary to copy and update. If none it will instantiate a new dictionary. key: str A the key to add to the dictionary value: object (or None) A value to add to the dictionary. If None then no key, value pair will be added to the dictionary. Returns ------- dictionary A copy of dictionary with (key,value) added into it or a new dictionary with (key,value) in it. """ if dictionary is None: kwds = {} else: kwds = dictionary.copy() if (value is not None) and (key is not None): kwds.update({key: value}) return kwds
96aa104f86e521e419d51096b6c1f86e4b506c57
705,707
def _get_cindex(circ, name, index): """ Find the classical bit index. Args: circ: The Qiskit QuantumCircuit in question name: The name of the classical register index: The qubit's relative index inside the register Returns: The classical bit's absolute index if all registers are concatenated. """ ret = 0 for reg in circ.cregs: if name != reg.name: ret += reg.size else: return ret + index return ret + index
340105a2ddfe5fb2527171a7592390c9dd2937e5
705,708
def get_bin(pdf: str) -> str: """ Get the bin of the pdf, e.g. './00/02/Br_J_Cancer_1977_Jan_35(1)_78-86.tar.gz' returns '00/02/'. """ parts = pdf.split('/') return parts[-3] + '/' + parts[-2] + '/'
a1e25162b8a353f508667ccb4fc750e51fcf611d
705,709
def burkert_density(r, r_s, rho_o): """ Burkert dark matter density profile """ x = r / r_s density = rho_o / ( (x) * (1.0 + x)**2) return density.to('g/cm**3')
8293a62b6c52c65e7c5fe7c676fd3807f301e40b
705,711
def parse_username_password_hostname(remote_url): """ Parse a command line string and return username, password, remote hostname and remote path. :param remote_url: A command line string. :return: A tuple, containing username, password, remote hostname and remote path. """ assert remote_url assert ':' in remote_url if '@' in remote_url: username, hostname = remote_url.rsplit('@', 1) else: username, hostname = None, remote_url hostname, remote_path = hostname.split(':', 1) password = None if username and ':' in username: username, password = username.split(':', 1) assert hostname assert remote_path return username, password, hostname, remote_path
50410ad87865559af84b83ab6bdfae19e536791d
705,713
import ast def skip_node(node): """Whether to skip a step in the traceback based on ast node type.""" return isinstance(node, (ast.If, ast.While, ast.For))
2406d02190a4dccb3d1f5d743a742f82c97f6541
705,714
def lc_reverse_integer(n): """ Given a 32-bit signed integer, reverse digits of an integer. Assume we are dealing with an environment which could only hold integers within the 32-bit signed integer range. For the purpose of this problem, assume that your function returns 0 when the reversed integer overflows. Examples: >>> lc_reverse_integer(123) 321 >>> lc_reverse_integer(-123) -321 >>> lc_reverse_integer(120) 21 """ class Solution(object): @staticmethod def reverse(x): neg = x < 0 if neg: x = -x result = 0 while x: result = result * 10 + x % 10 x //= 10 # floor division so x stays an int under Python 3 if result > 2 ** 31: return 0 return -result if neg else result return Solution.reverse(n)
eff1054873ef0e77a82e34b7cf7af51d42f27d6c
705,716
def tsallis(ion_temp, avg_temp, n): """ Non-normalized probability of an ion at ion-temp using a Tsallis distribution :param ion_temp: temperature of ion (K) :param avg_temp: average temperature of ions (K) :param n: average harmonic oscillator level :return: value """ kb = 1.38e-23 energy = ion_temp * kb top = (n - 3) * (n - 2) * (n - 1) * energy ** 2 bot = 2 * (n * kb * avg_temp) ** 3 * (1 + energy / (n * kb * avg_temp)) ** n output = top / bot return output
4598c5241fc06219938beced4c9d5a4473cf8363
705,722