content
stringlengths 39
14.9k
| sha1
stringlengths 40
40
| id
int64 0
710k
|
---|---|---|
import socket
import struct
def int_to_ip(addr):
    """Convert the numeric (unsigned 32-bit integer) representation of an
    IPv4 address to its dotted-quad string.

    Example:
        >>> int_to_ip(2130706433)
        '127.0.0.1'
    """
    # "!I" = network byte order, unsigned 32-bit integer.
    return socket.inet_ntoa(struct.pack("!I", addr))
def aa_seq_doc(aa_sequence):
    """Return the amino-acid sequence with a space between consecutive residues.

    Each character of *aa_sequence* is one residue, so this is a plain join.

    >>> aa_seq_doc('ACDE')
    'A C D E'
    """
    # Equivalent to the original one-character-slice loop, but idiomatic.
    return ' '.join(aa_sequence)
def v70_from_params_asdict(v70_from_params):
    """Convert a sparse 2-D `v70_from_params` array to a dict.

    Only nonzero entries are kept, keyed by their (row, col) index and
    stored as strings.
    """
    num_rows, num_cols = v70_from_params.shape
    as_dict = {}
    for row in range(num_rows):
        for col in range(num_cols):
            value = v70_from_params[row, col]
            if value:
                as_dict[(row, col)] = str(value)
    return as_dict
def open_r(filename):
    """Open *filename* for reading as UTF-8 text and return the file object."""
    return open(filename, mode='r', encoding='utf-8')
def prepend_protocol(url: str) -> str:
    """Return *url* unchanged when it already carries a scheme, otherwise
    prefix it with ``https://``.

    Args:
        url (str)
    Returns:
        str
    """
    has_scheme = '://' in url
    return url if has_scheme else 'https://' + url
def chunk_by_image(boxes):
    """
    Turn a flat list of boxes into a hierarchy of:
        image
            category
                [boxes]
    :param boxes: list of box detections (dicts with 'image_id' and 'category_id')
    :return: dictionary of boxes chunked by image/category
    """
    chunks = {}
    for box in boxes:
        by_category = chunks.setdefault(box['image_id'], {})
        by_category.setdefault(box['category_id'], []).append(box)
    return chunks
from typing import Tuple
from typing import List
def split_by_commas(maybe_s: str) -> Tuple[str, ...]:
    """Split a string by commas, honouring backslash-escaped commas.

    - If maybe_s is falsey, returns an empty tuple
    - A literal ``\\,`` keeps the comma inside the fragment
    """
    if not maybe_s:
        return ()
    parts: List[str] = []
    for chunk in maybe_s.split(r'\,'):
        pieces = chunk.split(',')
        if parts:
            # Re-attach the escaped comma to the previous fragment.
            parts[-1] += ',' + pieces[0]
        else:
            parts.append(pieces[0])
        parts.extend(pieces[1:])
    return tuple(parts)
def kalman_predict(m, P, A, Q):
    """Kalman filter prediction step.

    Args:
        m: state mean
        P: state covariance
        A: state-transition matrix
        Q: process-noise covariance
    Returns:
        (predicted mean, predicted covariance)
    """
    predicted_mean = A @ m
    predicted_cov = A @ P @ A.T + Q
    return predicted_mean, predicted_cov
def is_probably_gzip(response):
    """
    Determine if a urllib response is likely gzip'd.

    True when the URL ends in '.gz', or the Content-Encoding /
    Content-Type headers indicate gzip.

    :param response: the urllib response
    """
    if response.url.endswith('.gz'):
        return True
    encoding = response.getheader('Content-Encoding')
    content_type = response.getheader('Content-Type')
    return encoding == 'gzip' or content_type == 'application/x-gzip'
import random
import math
def buffon(needlesNbr, groovesLen, needlesLen):
    """Simulate Buffon's needle experiment.

    Drops *needlesNbr* needles of length *needlesLen* on a floor ruled with
    parallel lines spaced *groovesLen* apart (short-needle case assumed:
    needlesLen <= groovesLen).

    Returns (intersections, experimental frequency, theoretical frequency),
    where the theoretical crossing probability is 2*l / (pi*d).

    BUG FIX: the original sampled the centre distance on [0, needlesLen/2]
    and the needle half-extent from groovesLen — the two lengths were
    swapped, so the simulation did not converge to thFreq.
    """
    intersects = 0
    for _ in range(needlesNbr):
        # Distance from the needle's centre to the nearest line:
        # uniform on [0, groovesLen / 2].
        y = random.random() * groovesLen / 2
        angle = random.random() * math.pi
        # Vertical half-extent of the needle at this angle.
        z = needlesLen / 2 * math.sin(angle)
        if y <= z:
            intersects += 1
    expFreq = intersects / needlesNbr
    thFreq = 2 * needlesLen / (math.pi * groovesLen)
    return (intersects, expFreq, thFreq)
def get_new_snp(vcf_file):
    """
    Get the loci of the new SNPs in a VCF file.

    A locus counts as new when "gff3_notarget" appears in its FILTER field.

    :param vcf_file: iterable of py_vcf loci
    :return: list of new SNP loci
    """
    return [locus for locus in vcf_file if "gff3_notarget" in locus.FILTER]
def most_freq(neighbors):
    """
    Return the dominant element of *neighbors* (the one with the greatest
    frequency).

    Example: [paper, paper, paper, spock, spock, spock, spock, spock]
    Returns: spock
    """
    counts = {}
    for neighbor in neighbors:
        counts[neighbor] = counts.get(neighbor, 0) + 1
    return max(counts, key=counts.get)
from datetime import datetime
def greater_than_days_cutoff(timestamp, cutoff):
    """Return True when *timestamp* lies more than *cutoff* days in the past.

    Only the first 22 characters of the ISO-like timestamp are parsed
    (i.e. fractional seconds are truncated; any timezone suffix is ignored).
    """
    last_update = datetime.strptime(timestamp[:22], '%Y-%m-%dT%H:%M:%S.%f')
    age_in_days = (datetime.now() - last_update).days
    return age_in_days > cutoff
def reportnulls(df):
    """
    Count the null values in each column of *df* and return the counts
    as a Series sorted from highest to lowest.
    """
    per_column_nulls = df.isnull().sum()
    return per_column_nulls.sort_values(ascending=False)
def getConstrainedTargets(driver, constraint_type='parentConstraint'):
    """
    Gets all the transforms the given driver is driving through the given
    constraint type.

    Args:
        driver (PyNode): The transform driving other transform(s) through a constraint.
        constraint_type (string): The type of constraint to look for.
            Normally "parentConstraint", "pointConstraint", "orientConstraint",
            or "scaleConstraint".

    Returns:
        (set): Transform(s) being driven by given driver through given constraint type.
    """
    # Sets dedupe the duplicate connections Maya reports.
    constraints = set(driver.listConnections(type=constraint_type))
    targets = set()
    for constraint in constraints:
        driven = constraint.connections(
            source=False, destination=True, et=True, type='transform')
        targets.update(set(driven))
    return targets
from typing import List
from pathlib import Path
def get_isbr2_nii_file_paths(dir_paths: List[Path], file_selector: str) -> List[Path]:
    """Return all the .nii.gz file paths for a given file_selector type.

    Arguments:
        dir_paths: list of sample dir paths; each directory holds a full scan
        file_selector: which file type to choose, e.g. 'ana' for IBSR_02_ana.nii.gz
    """
    return [
        dir_path / f'{dir_path.name}_{file_selector}.nii.gz'
        for dir_path in dir_paths
    ]
def get_num_gophers(blades, remainders, M):
    """Find the number of gophers given blades and remainders.

    Returns the smallest i in [1, M] with i % b == r for every (b, r)
    pair in zip(blades, remainders), or None if no such i exists.
    """
    for candidate in range(1, M + 1):
        if all(candidate % blade == rem for blade, rem in zip(blades, remainders)):
            return candidate
    return None
import math
def __calc_entropy_passphrase(word_count, word_bank_size, pad_length, pad_bank_size):
    """
    Approximate the minimum entropy of a passphrase and its possible deviation.

    :param word_count: Number of words in passphrase
    :param word_bank_size: Total number of words in the word bank
    :param pad_length: Number of characters used in padding
    :param pad_bank_size: Size of the character pool used to generate padding
    :return: (minimum entropy, deviation) in bits
    """
    # The word bank is doubled because words may be upper- or lower-case.
    entropy = math.log(math.pow(word_bank_size * 2, word_count), 2)
    deviation = math.log(math.pow(pad_bank_size, pad_length), 2)
    return entropy, deviation
def join_string(list_string, join_string):
    """
    Concatenate the strings of *list_string*, separated by *join_string*.

    Parameters
    ----------
    list_string : list of str
        Strings to be joined.
    join_string : str
        Separator placed between consecutive elements.

    Returns
    -------
    str
        The joined string.

    Examples
    --------
    >>> join_string(["a", "b", "c", "d"], "/")
    'a/b/c/d'
    """
    # NOTE: the second parameter shadows the function name; aliased locally.
    separator = join_string
    return separator.join(list_string)
def parse_segments(segments_str):
    """
    Parse segments stored as a string.

    :param segments_str: "v1,v2,v3,v4,..."
    :return: [(v1, v2), (v3, v4), ...]

    FIX: materialize the pairs into a list — the original returned a
    one-shot ``zip`` iterator although the docstring promised a list.
    """
    vertices = [int(tok) for tok in segments_str.split(',')]
    # Pair up consecutive values: evens with odds.
    return list(zip(vertices[::2], vertices[1::2]))
def GetFirstWord(buff, sep=None):  # {{{
    """
    Get the first word delimited by the supplied separator ("" when the
    split yields no token, e.g. an empty/whitespace-only buffer with the
    default separator).
    """
    tokens = buff.split(sep, 1)
    return tokens[0] if tokens else ""
import torch
def BPR_Loss(positive : torch.Tensor, negative : torch.Tensor) -> torch.Tensor:
    """
    Compute the Bayesian Personalized Ranking loss from positive and
    negative example scores: -sum(log(sigmoid(pos - neg))) over dim 0.
    """
    margin = positive - negative
    log_likelihood = torch.log(torch.sigmoid(margin))
    return -torch.sum(log_likelihood, 0, keepdim=True)
def find_fxn(tu, fxn, call_graph):
    """
    Look up the dictionary associated with a function.

    Globals take precedence; otherwise the function is searched among the
    locals of the given translation unit.

    :param tu: translation unit in which to look for local functions
    :param fxn: function name
    :param call_graph: object storing information about each function
    :return: the dictionary for the given function, or None
    """
    globals_map = call_graph['globals']
    if fxn in globals_map:
        return globals_map[fxn]
    try:
        return call_graph['locals'][fxn][tu]
    except KeyError:
        return None
def checksum(message):
    """
    Calculate the GDB server protocol checksum of the message:
    a simple modulo-256 sum of the character ordinals.
    """
    return sum(ord(ch) for ch in message) % 256
def parse_report_filter_values(request, reports):
    """Map report names to {filter name: value} dicts built from GET params.

    Filter parameters carry a ``|`` in their name ("report|filter"); e.g.
    "crash_report|operating_system=Linux" lands in
    {"crash_report": {"operating_system": "Linux"}}. Every report in
    *reports* is then guaranteed an entry, with defaults filled in for
    filters absent from the query and each raw value run through the
    filter's process_GET_value hook.
    """
    report_name_to_filter_values = {}
    # request.GET.items() yields the last value for repeated parameters,
    # which is what we want for checkbox + hidden-field pairs.
    for qualified_name, value in request.GET.items():
        if "|" not in qualified_name:
            continue
        report_name, parm_name = qualified_name.split("|", 1)
        report_name_to_filter_values.setdefault(report_name, {})[parm_name] = value
    for report in reports:
        filter_values = report_name_to_filter_values.setdefault(report.name(), {})
        for filt in report.get_filters():
            if filt.name not in filter_values:
                filter_values[filt.name] = filt.default_value()
            # Let the filter convert the raw GET value into whatever the
            # report actually expects.
            filter_values[filt.name] = filt.process_GET_value(filter_values[filt.name])
    return report_name_to_filter_values
import itertools
def iter_extend(iterable, length, obj=None):
    """Yield exactly *length* items: the iterable's items, padded with *obj*
    (truncates when the iterable is longer than *length*)."""
    padded = itertools.chain(iterable, itertools.repeat(obj))
    return itertools.islice(padded, length)
def keyfunc(line):
    """Return the key from a TAB-delimited key-value pair (the whole line
    when it contains no TAB)."""
    return line.split("\t", 1)[0]
def html_wrap(html_string):
    """Add an html-head-body wrapper around an HTML fragment string."""
    prefix = """<html>
<head>
<title>HTML CSS TESTS</title>
<link rel="stylesheet" type="text/css" href="tests/manual html-css tests/html-css.css">
</head>
<body>"""
    postfix = """
</body></html>
"""
    return f"{prefix}{html_string}{postfix}"
def split(ary, n):
    """Split *ary* into consecutive chunks of length *n*.

    The last chunk may be shorter when len(ary) is not a multiple of n.
    (The original docstring claimed "n parts", but the code — preserved
    here — produces chunks of SIZE n.)

    >>> split([1, 2, 3, 4, 5], 2)
    [[1, 2], [3, 4], [5]]
    """
    return [ary[i:i + n] for i in range(0, len(ary), n)]
def apply_bounds(grid, lVec):
    """
    Wrap x- and y-coordinates back into the periodic box defined by lVec.

    x bounds are [lVec[0,0], lVec[1,0]); y bounds are [lVec[0,1], lVec[2,1]).
    Out-of-range entries are shifted by one box length. *grid* is modified
    in place and returned.
    """
    bounds_per_axis = (
        (0, lVec[0, 0], lVec[1, 0]),  # x axis
        (1, lVec[0, 1], lVec[2, 1]),  # y axis
    )
    for axis, low, high in bounds_per_axis:
        span = high - low
        grid[axis][grid[axis] >= high] -= span
        grid[axis][grid[axis] < low] += span
    return grid
def matches_filter(graph, props):
    """
    Return True when the graph carries every property in *props*; the
    pseudo-property 'all' matches unconditionally.
    """
    return all(
        prop == 'all' or prop in graph['properties']
        for prop in props
    )
from typing import List
from typing import Any
from typing import Union
import re
def human_sort_key(key: str) -> List[Any]:
    """
    Key function for natural sorting: digit runs compare numerically, so
    "PB2" sorts before "PB10" and after "PA3".
    """
    tokens = re.split(r'(\d+)', key)
    return [int(tok) if tok.isdigit() else tok for tok in tokens if tok]
def get_class(class_string):
    """Return the class object specified by a dotted string.

    Arguments:
        class_string -- The string representing a class, e.g. 'pkg.mod.Cls'.

    Raises:
        ValueError if the module part of the class is not specified.

    FIX: the original called ``__import__(..., level=-1)``, which is only
    valid on Python 2 and raises ValueError on Python 3; use importlib.
    """
    import importlib

    module_name, _, class_name = class_string.rpartition('.')
    if module_name == '':
        raise ValueError('Class name must contain module part.')
    return getattr(importlib.import_module(module_name), class_name)
import hashlib
def flip_bloom_filter(string: str, bf_len: int, num_hash_funct: int):
    """
    Hash *string* and return the set of Bloom-filter bit indices flipped to 1.

    Uses double hashing with SHA-1 and MD5: g_i = (h1 + i*h2) mod bf_len.

    :param string: string to be hashed into the bloom filter
    :param bf_len: int: length of bloom filter
    :param num_hash_funct: int: number of hash functions
    :return: set of integer indices that have been flipped to 1
    """
    encoded = string.encode('utf-8')
    h1 = int.from_bytes(hashlib.sha1(encoded).digest(), 'big') % bf_len
    h2 = int.from_bytes(hashlib.md5(encoded).digest(), 'big') % bf_len
    return {(h1 + i * h2) % bf_len for i in range(num_hash_funct)}
def largest_product(product_list):
    """Return the largest value found across the sub-lists of *product_list*.

    NOTE(review): the running maximum starts at 1, so inputs whose values
    are all below 1 yield 1 — preserved as-is from the original.
    """
    largest = 1
    for products in product_list:
        largest = max(largest, max(products))
    return largest
import random
def shuffle_string(s: str) -> str:
    """
    Return *s* with its characters randomly reordered (via random.shuffle,
    so the result is reproducible under random.seed).

    Parameters:
        s (str): the string to shuffle
    Returns:
        str: string with randomly reordered letters
    """
    letters = list(s)
    random.shuffle(letters)
    return ''.join(letters)
def diff_lists(old, new):
    """Return (added, removed) as sorted lists; None/empty inputs count as empty."""
    old_set = set(old) if old else set()
    new_set = set(new) if new else set()
    added = sorted(new_set - old_set)
    removed = sorted(old_set - new_set)
    return added, removed
def build_zooma_query(trait_name: str, filters: dict, zooma_host: str) -> str:
    """
    Build the URL with which to query Zooma for a trait name.

    :param trait_name: A string containing a trait name from a ClinVar record.
    :param filters: A dictionary with 'required', 'ontologies' and 'preferred' filters.
    :param zooma_host: Hostname of a Zooma instance to query.
    :return: String of a url which can be requested
    """
    base = "{}/spot/zooma/v2/api/services/annotate?propertyValue={}".format(
        zooma_host, trait_name)
    filter_spec = ",".join([
        "required:[{}]".format(filters["required"]),
        "ontologies:[{}]".format(filters["ontologies"]),
        "preferred:[{}]".format(filters["preferred"]),
    ])
    return base + "&filter={}".format(filter_spec)
from typing import Any
def ret(d: dict, key: str) -> Any:
    """Unwrap nested dictionaries recursively.

    Parameters
    ----------
    d: dict
        Python dictionary
    key: str
        Key or colon-separated chain of keys for nested access.

    Returns
    ----------
    out: Any
        Value for the key, or for the last key of the chain.

    Examples
    ----------
    >>> a = {"toto": {"titi": 1}, "tutu": 2}
    >>> ret(a, "tutu")
    2
    >>> ret(a, "toto:titi")
    1
    """
    if ":" not in key:
        return d[key]
    # Split at the first ":" only; the tail may itself be a chain.
    first, rest = key.split(":", maxsplit=1)
    value = d[first]
    if not isinstance(value, dict):
        raise AssertionError("""
            Nested level must be dictionaries!
            d[{}] is not a dict!
            """.format(first))
    return ret(value, rest)
def is_identity(u):
    """Check whether the unit *u* is equivalent to 1 (SI factor 1, all
    dimension exponents zero)."""
    if u.unitSI != 1:
        return False
    return u.unitDimension == (0, 0, 0, 0, 0, 0, 0)
def common(first_list, second_list):
    """Return True when the two containers share at least one object.

    Works with any container supporting the ``in`` operator; short-circuits
    on the first common element found.
    """
    return any(item in second_list for item in first_list)
def calc_linear_crossing(m, left_v, right_v):
    """
    Compute the intersection of two line segments defined over the same
    x-interval [m[0], m[1]] by their endpoint values.

    Parameters
    ----------
    m : list or np.array, length 2
        The two common x coordinates. m[0] < m[1] is assumed.
    left_v : list or np.array, length 2
        y values of the two segments at m[0].
    right_v : list or np.array, length 2
        y values of the two segments at m[1].

    Returns
    -------
    (m_int, v_int): coordinates of the intercept, or (None, None) when
    there is no intercept within [m[0], m[1]].

    FIX: the original fell off the end (returning bare None instead of the
    documented (None, None)) when the crossing lay outside the interval.
    """
    # Slopes of both segments over the shared x-interval.
    delta_m = m[1] - m[0]
    s0 = (right_v[0] - left_v[0]) / delta_m
    s1 = (right_v[1] - left_v[1]) / delta_m
    if s1 == s0:
        # Parallel: they either coincide at the left edge or never meet.
        if left_v[0] == left_v[1]:
            return (m[0], left_v[0])
        return (None, None)
    # Offset h from m[0] where the two lines meet.
    h = (left_v[0] - left_v[1]) / (s1 - s0)
    if 0 <= h <= (m[1] - m[0]):
        return (m[0] + h, left_v[0] + h * s0)
    return (None, None)
def octave_to_frequency(octave: float) -> float:
    """Convert an octave to its corresponding frequency value (in Hz).

    By convention, the 0th octave is 125 Hz; each octave doubles it.

    Parameters
    ----------
    octave : float
        The octave to put on a frequency scale.

    Returns
    -------
    float
        The frequency value corresponding to the octave.
    """
    return 125 * (2 ** octave)
def is_bitcode_file(path):
    """
    Return True iff the file at *path* starts with the LLVM bitcode magic
    bytes b'BC\\xc0\\xde', False otherwise.
    """
    magic = b'BC\xc0\xde'
    with open(path, 'rb') as stream:
        return stream.read(len(magic)) == magic
from typing import List
def get_file_pattern() -> List[str]:
    """
    Return a list with all possible file patterns.
    """
    patterns = ("*.pb", "*.data", "*.index")
    return list(patterns)
def get_param_cols(columns):
    """Select the FPDS query columns actually present in the provided file.

    Args:
        columns: The columns in the header of the provided file

    Returns:
        A dict of {file column: FPDS column} restricted to columns that exist,
        so we don't try to query non-existent ones.
    """
    possible_cols = {'agency_id': 'AGENCY_CODE', 'referenced_idv_agency_iden': 'REF_IDV_AGENCY_ID',
                     'piid': 'PIID', 'award_modification_amendme': 'MODIFICATION_NUMBER',
                     'parent_award_id': 'REF_IDV_PIID', 'transaction_number': 'TRANSACTION_NUMBER'}
    return {key: fpds_name for key, fpds_name in possible_cols.items() if key in columns}
import re
def depluralize(word):
    """Return the depluralized version of the word, along with a status flag.

    Parameters
    ----------
    word : str
        The word which is to be depluralized.

    Returns
    -------
    str
        The original word, if it is detected to be non-plural, or the
        depluralized version of the word.
    str
        A status flag representing the detected pluralization status of the
        word: non_plural (e.g., BRAF), plural_oes (e.g., mosquitoes),
        plural_ies (e.g., antibodies), plural_es (e.g., switches),
        plural_caps_s (e.g., MAPKs), or plural_s (e.g., receptors).

    FIX: the all-caps branch used ``re.match(r'^\\p{Lu}+$', ...)``, but the
    stdlib ``re`` module does not support ``\\p{...}`` and raises re.error;
    replaced with str.isalpha/str.isupper.
    """
    # If the word doesn't end in s, we assume it's not plural
    if not word.endswith('s'):
        return word, 'non_plural'
    # Words ending in -sis (e.g., apoptosis) are almost exclusively
    # non-plural, so return here too
    elif word.endswith('sis'):
        return word, 'non_plural'
    # Words ending in o pluralized as -oes, e.g., mosquitoes
    elif word.endswith('oes'):
        return word[:-2], 'plural_oes'
    # Words ending in y pluralized as -ies, e.g., antibodies
    elif word.endswith('ies'):
        return word[:-3] + 'y', 'plural_ies'
    # Words forming plurals by adding -es; strip it off
    elif word.endswith(('xes', 'ses', 'ches', 'shes')):
        return word[:-2], 'plural_es'
    # An all-caps stem ending in s (e.g. MAPKs) is a very strong signal of
    # pluralization, with its own flag
    elif word[:-1].isalpha() and word[:-1].isupper():
        return word[:-1], 'plural_caps_s'
    # Otherwise assume the trailing s is the plural marker
    else:
        return word[:-1], 'plural_s'
    # Note: there don't seem to be any compelling examples of -f or -fe -> ves
    # so it is not implemented
import ipaddress
def _get_network_address(ip, mask=24):
    """
    Return the IPv4 network containing single address *ip* for the given
    prefix length *mask*.
    """
    addr = ipaddress.ip_address(ip)
    # Zero out the host bits to get the network base address.
    host_mask = (2 ** 32 - 1) << (32 - mask)
    base = ipaddress.ip_address(int(addr) & host_mask)
    return ipaddress.ip_network('{}/{}'.format(base, mask))
def get_admin_ids(bot, chat_id):
    """
    Return the list of admin user IDs for a given chat.

    Private chats and groups with the all_members_are_administrators flag
    are handled as an empty admin list.
    """
    chat = bot.getChat(chat_id)
    # No explicit admin list exists for these chat kinds.
    if chat.type == "private" or chat.all_members_are_administrators:
        return []
    admins = bot.get_chat_administrators(chat_id)
    return [admin.user.id for admin in admins]
import math
def num_tiles_not_in_position(state):
    """
    Count the tiles of an n x n sliding-puzzle *state* that are not in
    their final positions.

    The blank tile is any entry float() cannot parse (it raises ValueError
    and is skipped). NOTE(review): row.index/state.index assume tiles and
    rows are unique, as in a well-formed puzzle.
    """
    n = len(state)
    misplaced = 0
    for row in state:
        for tile in row:
            try:
                # Goal coordinates (row goal_y, column goal_x) of this tile.
                goal_y = int(math.floor(float(tile) / n - (float(1) / n)))
                goal_x = (tile - 1) % n
            except ValueError:  # blank tile
                continue
            if row.index(tile) != goal_x or state.index(row) != goal_y:
                misplaced += 1
    return misplaced
def get_bool_symbols(a_boolean):
    """
    :param a_boolean: Any boolean representation
    :return: "✅" for truthy values, "❌" for falsy ones

    Motivation: this is reused on multiple occasions
    """
    return "✅" if a_boolean else "❌"
def returner(x):
    """
    Build a constant function: it accepts any arguments and always
    returns *x*.
    """
    def constant(*args, **kwargs):
        return x
    return constant
from typing import Counter
def get_unique_characters(rows, min_count=2):
    """Given a bunch of text rows, list the lower-cased characters that
    occur at least *min_count* times across rows.values."""
    counts = Counter(
        str(char).lower()
        for row in rows.values
        for char in row
    )
    return [char for char, count in counts.items() if count >= min_count]
def build_data_type_name(sources, properties, statistic, subtype=None):
    """
    Build a type name of the form
    {sources}_{properties}_{statistic}[_{subtype}].

    Parameters
    ----------
    sources: str or list[str]
        type(s) of astrophysical sources to which this applies
    properties: str or list[str]
        feature(s)/characteristic(s) of those sources/fields to
        which the statistic applies
    statistic: str
        mathematical type of the statistic
    subtype: str or None
        optional additional specifier. Default is None

    Returns
    -------
    name: str
    """
    def _camel_join(parts):
        # First element kept verbatim; the rest lower-cased then capitalized.
        return "".join([parts[0]] + [p.lower().capitalize() for p in parts[1:]])

    if not isinstance(sources, str):
        sources = _camel_join(sources)
    if not isinstance(properties, str):
        properties = _camel_join(properties)
    name = f"{sources}_{properties}_{statistic}"
    return f"{name}_{subtype}" if subtype else name
def get_limit_from_tag(tag_parts):
    """Pop and return the (key, value) pair at the front of a notebook
    limit tag.

    Note: mutates *tag_parts* in place, removing its first two items.

    Args:
        tag_parts: annotation or label notebook tag
    Returns (tuple): key (limit name), value
    """
    key = tag_parts.pop(0)
    value = tag_parts.pop(0)
    return key, value
def Percent(numerator, denominator):
    """Convert two integers into a display-friendly percentage string.

    Percent(5, 10)  -> ' 50%'
    Percent(5, 5)   -> '100%'
    Percent(1, 100) -> '  1%'
    Percent(1, 1000)-> '  0%'

    Args:
        numerator: Integer.
        denominator: Integer.
    Returns:
        string formatted result, right-aligned to 3 digits.
    """
    ratio = numerator / float(denominator)
    return f'{int(100 * ratio):3d}%'
import json
def read_data(f_name):
    """
    Read the JSON data inside file *f_name* and return it as a Python
    object.

    FIX: use a context manager so the file handle is closed even when
    json parsing raises (the original leaked the handle on error), and
    json.load instead of loads(read()).
    """
    with open(f_name, 'r') as f:
        return json.load(f)
def reverse_string_recursive(s):
    """
    Return the reverse of the input string, computed recursively.

    Time complexity: O(n^2) — O(n) slicing per O(n) recursive calls.
    Space complexity: O(n) call stack.
    """
    if len(s) < 2:
        return s
    head, tail = s[0], s[1:]  # slicing is O(n)
    return reverse_string_recursive(tail) + head
def get_closest_multiple_of_16(num):
    """
    Round *num* down to the nearest multiple of 16. Needed because
    pulse-enabled devices require durations that are multiples of
    16 samples.
    """
    return (int(num) // 16) * 16
def split_container(path):
    """Split a path into (container, remainder).

    >>> split_container('/bigdata/path/to/file')
    ('bigdata', 'path/to/file')

    Raises ValueError for an empty path.

    FIX: the original returned tuples on two branches but a list on the
    third; now every path returns a 2-tuple of strings.
    """
    path = str(path)  # Might be pathlib.Path
    if not path:
        raise ValueError('empty path')
    if path == '/':
        return '', ''
    if path.startswith('/'):
        path = path[1:]
    # partition yields ('container', '') when there is no '/' left.
    container, _, rest = path.partition('/')
    return container, rest
def parse_sph_header(fh):
    """Read the file-format header for an SPH file.

    The SPH header is exactly 1024 bytes at the head of the file, in a
    simple textual format decoded here as latin-1. Each line has a type
    declaration per field; only known fields are kept ('-i' values are
    parsed as integers).

    Returns a dictionary describing the format, pre-populated with
    defaults, such as::

        {
            'sample_rate': 8000,
            'channel_count': 1,
            'sample_byte_format': '01',  # little-endian
            'sample_n_bytes': 2,
            'sample_sig_bits': 16,
            'sample_coding': 'pcm',
        }
    """
    file_format = {
        'sample_rate': 8000,
        'channel_count': 1,
        'sample_byte_format': '01',  # little-endian
        'sample_n_bytes': 2,
        'sample_sig_bits': 16,
        'sample_coding': 'pcm',
    }
    for raw_line in fh.read(1024).splitlines():
        if raw_line.startswith(b'end_head'):
            break
        line = raw_line.decode('latin-1')
        for key in file_format.keys():
            if line.startswith(key):
                _, type_flag, value = line.split(None, 3)
                if type_flag == '-i':
                    value = int(value, 10)
                file_format[key] = value
    return file_format
def column(matrix, i):
    """
    Return all the values in column *i* of a row-major matrix.

    Parameters:
        matrix: the input matrix (list of rows)
        i: the column index
    Return value: a list with the desired column
    """
    col = []
    for row in matrix:
        col.append(row[i])
    return col
def cubicinout(x):
    """Return the value at x of the 'cubic in out' easing function
    between 0 and 1 (accelerates until x=0.5, then decelerates)."""
    if x >= 0.5:
        return ((2 * x - 2) ** 3 + 2) / 2
    return 4 * x ** 3
def remove_duplicates(string: str) -> str:
    """
    Remove duplicate characters from a string, keeping only the first
    occurrence of each (order preserved).

    :param string:
    :return:
    """
    # dict.fromkeys preserves first-seen order and dedupes in one pass.
    return "".join(dict.fromkeys(string))
import json
def dumps(dict_):
    """
    Convert a dictionary to indented JSON for readability.

    Args:
        dict_ (dict): The dictionary to be JSON encoded.

    Returns:
        str: JSON encoded dictionary (4-space indent, sorted keys).
    """
    return json.dumps(dict_, indent=4, sort_keys=True)
def jpeg_linqual(quality):
    """
    Convert a quality rating to the percentage scaling factor used to
    scale the quantization table.

    See int jpeg_quality_scaling(int quality) in the libjpeg-turbo
    source (file ijg/jcparam.c).
    """
    # Clamp to libjpeg's accepted range [1, 100].
    quality = min(max(quality, 1), 100)
    if quality < 50:
        return 5000 // quality
    return 200 - 2 * quality
def pytest_funcarg__content(request):
    """
    The content for the test document as string.

    By default the content is taken from the argument of the
    ``with_content`` marker. If no such marker exists, the content is
    built from the id returned by the ``issue_id`` funcarg by prepending
    a dash before the id: issue id ``'10'`` produces ``'#10'``. If
    ``issue_id`` returns ``None``, :exc:`~exceptions.ValueError` is
    raised. Test modules may override this funcarg to add their own
    content.
    """
    marker = request.keywords.get('with_content')
    if marker:
        return marker.args[0]
    issue_id = request.getfuncargvalue('issue_id')
    if issue_id:
        return '#{0}'.format(issue_id)
    raise ValueError('no content provided')
def split_s3_path(s3_path):
    """
    Split a complete S3 path into a bucket name and a key name — useful
    where an API requires the two as separate entries.

    Arguments:
        s3_path {string} -- An S3 uri path ('s3://bucket/key...')

    Returns:
        bucket {string} - The bucket name
        key {string} - The key name
    """
    parts = s3_path.split("/")
    bucket = parts[2]
    key = "/".join(parts[3:])
    return bucket, key
def dash(*txts):
    """Join the non-empty arguments with ' -- '."""
    non_empty = [t for t in txts if t != ""]
    return " -- ".join(non_empty)
import asyncio
import functools
def call_later(delay, fn, *args, **kwargs) -> asyncio.Handle:
    """
    Schedule ``fn(*args, **kwargs)`` on the current event loop after
    *delay* seconds; returns the handle so the call can be cancelled.
    """
    bound = functools.partial(fn, *args, **kwargs)
    return asyncio.get_event_loop().call_later(delay, bound)
from typing import Tuple
def build_blacklist_status(blacklist_status: str) -> Tuple[str, str, str]:
    """Build the blacklist status tuple.

    The command-line form is "status[:action[=param]]"; the action
    defaults to 'stop' when no ':' is present and is lower-cased.

    @type blacklist_status: str
    @param blacklist_status: The blacklist status from command line
    @returns Tuple[str, str, str]: (status, action, action param)
    """
    status = blacklist_status
    action = ''
    param = ''
    if ':' in status:
        status, action = status.split(':', 1)
        action = action.lower()
        if '=' in action:
            action, param = action.split('=')
    else:
        action = 'stop'
    return (status, action, param)
def _check_substituted_domains(patchset, search_regex):
"""Returns True if the patchset contains substituted domains; False otherwise"""
for patchedfile in patchset:
for hunk in patchedfile:
if not search_regex.search(str(hunk)) is None:
return True
return False | e3fdeaa1ede7f041bb6080d4647c8d969b41f73d | 9,962 |
def delete_segment(seq, start, end):
    """Return a copy of ``seq`` with the segment [``start``, ``end``) removed."""
    head, tail = seq[:start], seq[end:]
    return head + tail
import re
def detect_arxiv(txt):
    """
    Extract an arXiv ID from text data.

    Matches new-style identifiers ``arXiv:YYMM.NNNNN`` with an optional
    version suffix (``vN``). Sequence numbers may be 4 digits (IDs issued
    before 2015) or 5 digits (2015 onwards).

    Returns the matched identifier string, or None when no ID is found.
    """
    # {4} year/month block; {4,5} generalizes the old {5,5} so pre-2015
    # 4-digit sequence numbers are detected too.
    regex = r'arXiv:[0-9]{4}\.[0-9]{4,5}(v[0-9]+)?'
    m = re.search(regex, txt)
    return m.group(0) if m is not None else None
def powmod(a, b, m):
    """ Returns the power a**b % m """
    # Iterative square-and-multiply: square the base while halving the
    # exponent, folding the base into the result on each odd bit.
    if b == 0:
        return 1
    result = 1
    base = a % m
    while b > 0:
        if b % 2 == 1:
            result = result * base % m
        base = base * base % m
        b //= 2
    return result
def ToHex(data):
    """Return a string representing data in hexadecimal format.

    ``data`` is a string; each character is rendered as its two-digit
    lowercase hex code point.
    """
    # Single join instead of repeated string +=, which is quadratic.
    return "".join("%02x" % ord(c) for c in data)
def _parse_common(xml, the_dict):
"""
Parse things in common for both variables and functions. This should
be run after a more specific function like _parse_func or
_parse_variable because it needs a member dictionary as an input.
Parameters
----------
xml : etree.Element
The xml representation for the member you would like to parse
the_dict : dict
The dictionary that has already been filled with more specific
data. This dictionary is modified in-place and an updated
version is returned.
Returns
-------
the_dict : dict
The member dictionary that has been updated with the
briefdescription and definition keys.
"""
# Find brief description
mem_bd = xml.find('briefdescription')
try:
mem_bdstr = mem_bd.find('para').text
mem_bdstr = mem_bdstr if mem_bdstr is not None else ''
except AttributeError:
mem_bdstr = ''
the_dict['briefdescription'] = mem_bdstr
# add member definition
the_dict['definition'] = xml.find('definition').text
return the_dict | a6606366ce9b0e4d2c848b16bc868532684b4abe | 9,979 |
def elementWise(A, B, operation):
    """
    Apply ``operation`` element-wise over A and B and return the result.
    A and B are lists of lists (all inner lists of the same length);
    ``operation`` is a function of two arguments with one return value.
    """
    result = []
    for rowA, rowB in zip(A, B):
        result.append([operation(x, y) for x, y in zip(rowA, rowB)])
    return result
def find_spaces(string_to_check):
    """Returns a list of string indexes for each space this finds.
    Args:
        string_to_check; string: The string to scan.
    Returns:
        A list of string indexes.
    """
    # Comprehension replaces the manual append loop.
    return [index for index, character in enumerate(string_to_check)
            if character == ' ']
def addr_generator(start_ip, port, count):
    """Generate a list of ``count`` consecutive (ip, port) tuples starting
    at ``start_ip``.

    Octet overflow now carries into the next octet (e.g. ``x.y.z.255`` is
    followed by ``x.y.z+1.0``) instead of producing invalid addresses like
    ``x.y.z.256``.
    """
    octets = [int(part) for part in start_ip.split('.')]
    addr_list = []
    for _ in range(count):
        addr_list.append(('.'.join(str(o) for o in octets), port))
        # Increment with carry so every octet stays within 0-255.
        for i in range(3, -1, -1):
            octets[i] += 1
            if octets[i] <= 255:
                break
            octets[i] = 0
    return addr_list
import random
import time
def retry(func, *args, **kwargs):
    """Repeats a function until it completes successfully or fails too often.
    Args:
        func:
            The function call to repeat.
        args:
            The arguments which are passed to the function.
        kwargs:
            Key-word arguments which are passed to the function.
    Returns:
        What func returns.
    Exceptions:
        RuntimeError when number of retries has been exceeded.
    """
    # config: exponential backoff with a small random initial jitter
    max_backoff = 32
    max_retries = 5
    backoff = 1. + random.random() * 0.1
    for _ in range(max_retries):
        try:
            # return on success
            return func(*args, **kwargs)
        except Exception:
            # sleep on failure, then double the backoff up to the cap
            time.sleep(backoff)
            if backoff < max_backoff:
                backoff *= 2
    # max retries exceeded
    raise RuntimeError('The connection to the server timed out.')
def _ScatterAddNdimShape(unused_op):
"""Shape function for ScatterAddNdim Op."""
return [] | 7c59fb40e177fea1bf1cd3970c364f4583fc37f9 | 10,000 |
def find_delimiter_in(value):
    """Find a good delimiter to split the value by"""
    # First candidate present in the value wins; fall back to ';'.
    return next((d for d in (';', ':', ',') if d in value), ';')
import functools
def with_color(color_code: str):
    """Decorator factory that wraps the decorated function's single argument
    in the given ANSI color code (reset appended).

    Arguments:
        color_code {str} -- e.g.: '\033[91m'
    """
    def decorator(func):
        @functools.wraps(func)
        def wrapped(args):
            colored = '{}{}\033[0m'.format(color_code, args)
            return func(colored)
        return wrapped
    return decorator
def over(funcs):
    """Creates a function that invokes all functions in `funcs` with the
    arguments it receives and returns their results.
    Args:
        funcs (list): List of functions to be invoked.
    Returns:
        function: Returns the new pass-thru function.
    Example:
        >>> func = over([max, min])
        >>> func(1, 2, 3, 4)
        [4, 1]
    .. versionadded:: 4.0.0
    """
    def _pass_thru(*args):
        results = []
        for func in funcs:
            results.append(func(*args))
        return results
    return _pass_thru
def load_labels_map(labels_map_path):
    """Loads the labels map from the given path.
    The labels map must be in the following plain text format::
        1:label1
        2:label2
        3:label3
        ...
    The indexes are irrelevant to this function, they can be in any order and
    can start from zero, one, or another number. Blank lines are skipped, and
    labels may themselves contain ``:`` characters.
    Args:
        labels_map_path: the path to a labels map file
    Returns:
        a dictionary mapping indexes to label strings
    """
    labels_map = {}
    with open(labels_map_path, "r") as f:
        for line in f:
            line = line.strip()
            if not line:
                # skip blank lines (e.g. a trailing newline at EOF)
                continue
            # split on the first ":" only, so labels may contain colons
            idx, label = line.split(":", 1)
            labels_map[int(idx)] = label.strip()
    return labels_map
import math
def bbox_to_integer_coords(t, l, b, r, image_h, image_w):
    """
    t, l, b, r:
        float
        Bbox coordinates in a space where image takes [0; 1] x [0; 1].
    image_h, image_w:
        int
    return: t, l, b, r
        int
        Bbox coordinates in given image's pixel space.
        C-style indices (i.e. `b` and `r` are exclusive).
    """
    # Vertical coordinates scale by the image height, horizontal ones by
    # its width (the previous code scaled all four by image_h, leaving
    # image_w unused).
    t *= image_h
    b *= image_h
    l *= image_w
    r *= image_w
    l, t = map(math.floor, (l, t))
    r, b = map(math.ceil, (r, b))
    # After rounding, make *exactly* square again
    b += (r - l) - (b - t)
    assert b - t == r - l
    # Make `r` and `b` C-style (=exclusive) indices
    r += 1
    b += 1
    return t, l, b, r
def get_attrib_recursive(element, *attribs):
    """Find the first attribute in attribs in element or its closest ancestor
    that has any of the attributes in attribs.
    Usage examples:
        get_attrib_recursive(el, "fallback-langs")
        get_attrib_recursive(el, "xml:lang", "lang")
    Args:
        element: an etree element where to search for attributes in attribs
        attribs: one or more attribute label(s) to search for
    Returns:
        the value of the first attribute in attribs found in element or the
        closest ancestor that has any of the attributes in attribs, or None
    """
    for attrib in attribs:
        # xpath (rather than element.attrib[attrib]) also resolves
        # namespaced attribute names such as "xml:lang".
        hits = element.xpath("./@" + attrib)
        if hits:
            return hits[0]
    parent = element.getparent()
    if parent is None:
        return None
    return get_attrib_recursive(parent, *attribs)
def find_extra_inferred_properties(spec_dict: dict) -> list:
    """Finds if there are any inferred properties which are used.
    Args:
        spec_dict: Dict obj containing configurations for the import.
    Returns:
        List of properties that appear in inferredSpec but are not part of
        'pvs' section.
    """
    if 'inferredSpec' not in spec_dict:
        return []
    # Comprehension replaces the manual append loop.
    return [prop for prop in spec_dict['inferredSpec']
            if prop not in spec_dict['pvs']]
def find_corresponding_basins(pfaf_id_level6, gdf_level7):
    """
    Using a pfaf_id from level 6, find all hydrobasins in level 7 that
    make up the hydrobasin level 6 polygon.

    Level-7 ids append one digit to the level-6 id, so the children of
    level-6 id N are exactly the ids in the inclusive range [N*10, N*10+9].
    """
    lower = pfaf_id_level6 * 10
    upper = lower + 9
    in_range = gdf_level7["PFAF_ID"].between(lower, upper)
    return gdf_level7.loc[in_range]
def get_content_function_ratio(content, function):
    """ Calculate the content-function word ratio, rounded to 4 places.
    Returns 0 when there are no function words. """
    n_function = len(function)
    if n_function == 0:
        return 0
    return round(len(content) / n_function, 4)
def test_decorator(f):
    """Decorator that does nothing: returns the callable *f* unchanged.

    Useful as a placeholder wherever a decorator argument is required.
    """
    return f
def in_range(target, bounds):
    """
    Check whether target integer x lies within the closed interval [a,b]
    where bounds (a,b) are given as a tuple of integers.
    Returns boolean value of the expression a <= x <= b
    """
    lower, upper = bounds
    if target < lower:
        return False
    return target <= upper
def flatten_array(grid):
    """
    Takes a multi-dimensional array and returns a 1 dimensional array with
    the same contents.

    An empty ``grid`` (or one that flattens to empty, e.g. ``[[]]``) yields
    ``[]`` instead of raising IndexError.
    """
    # Flatten one level, then keep recursing while nesting remains.
    flat = [item for row in grid for item in row]
    while flat and type(flat[0]) is list:
        flat = flatten_array(flat)
    return flat
import torch
def cosine_sim(x1, x2, dim=1, eps=1e-8):
    """Returns cosine similarity between x1 and x2, computed along dim.

    Args:
        x1, x2: array-likes (or tensors) of matching shape.
        dim: dimension along which the similarity is computed.
        eps: lower bound on the norm product, avoiding division by zero.
    """
    # as_tensor avoids copying (and torch's UserWarning) when the inputs
    # are already tensors; plain sequences are converted as before.
    x1 = torch.as_tensor(x1)
    x2 = torch.as_tensor(x2)
    w12 = torch.sum(x1 * x2, dim)
    w1 = torch.norm(x1, 2, dim)
    w2 = torch.norm(x2, 2, dim)
    return (w12 / (w1 * w2).clamp(min=eps)).squeeze()
def astype(value, types=None):
    """Return argument as one of types if possible."""
    # Quoted strings are returned with the quotes stripped, unconverted.
    if value[0] in '\'"':
        return value[1:-1]
    candidates = (int, float, str) if types is None else types
    for typ in candidates:
        try:
            return typ(value)
        except (ValueError, TypeError, UnicodeEncodeError):
            continue
    return value
def _minmax(*args):
""" Return the min and max of the input arguments """
min_ = min(*args)
max_ = max(*args)
return(min_, max_) | 9985ebbffd3ee0b03dc751a3c90db00e922ab489 | 10,034 |
def distance0(cell1, cell2):
    """Zero heuristic: makes A* degenerate to Dijkstra's algorithm."""
    return 0
def adjacent(g, node, n):
    """
    find all adjacent nodes of input node in g
    g: 2D array of numbers, the adjacency matrix
    node: int, the node whose neighbors you wanna find
    n: int, the number of nodes in the graph
    return: a list of ints
    """
    # Hoist the row lookup; a comprehension replaces the append loop.
    row = g[node]
    return [i for i in range(n) if row[i] != 0]
def complete_sulci_name(sulci_list, side):
    """Function gathering sulci and side to obtain full name of sulci.
    Reads sulci prefixes from a list and adds a suffix depending on a given
    side.
    Args:
        sulci_list: a list of sulci
        side: a string corresponding to the hemisphere, whether 'L' or 'R'
    Returns:
        full_sulci_list: a list with full sulci names, ie with side included
    """
    # Names already carrying a side are returned untouched.
    already_sided = (any('right' in s for s in sulci_list)
                     or any('left' in s for s in sulci_list))
    if already_sided:
        return sulci_list
    suffix = '_right' if side == 'R' else '_left'
    if isinstance(sulci_list, list):
        return [sulcus + suffix for sulcus in sulci_list]
    return sulci_list + suffix
Subsets and Splits
No saved queries yet
Save your SQL queries to embed, download, and access them later. Queries will appear here once saved.