content
stringlengths 35
416k
| sha1
stringlengths 40
40
| id
int64 0
710k
|
---|---|---|
import math
def longitude_to_utm_epsg(longitude):
    """Return a Proj4 init string for the northern-hemisphere UTM EPSG code
    covering the given longitude.

    Parameters
    ----------
    longitude : float
        Longitude in degrees; values are wrapped into [-180, 180).

    Returns
    -------
    str
        Proj4 string of the form '+init=EPSG:326NN' where NN is the UTM
        zone (01-60).
        NOTE(review): 326xx codes are northern hemisphere only — confirm
        callers never need the southern-hemisphere 327xx series.
    """
    # Wrap into [0, 360) so longitude == 180 maps to zone 1 instead of the
    # invalid zone 61 that the naive floor((lon + 180) / 6) + 1 produces.
    zone = int(math.floor(((longitude + 180.0) % 360.0) / 6)) + 1
    return '+init=EPSG:326%02d' % zone
def compute_node_depths(tree):
    """Return a dict mapping each leaf's taxon label to the number of its
    labelled ancestors (its "depth" counted in labelled nodes)."""
    depths = {}
    for leaf in tree.leaf_node_iter():
        labelled = sum(1 for ancestor in leaf.ancestor_iter() if ancestor.label)
        depths[leaf.taxon.label] = labelled
    return depths
import hashlib
def calc_sign(string):
    """Return the MD5 hex digest of str(string).

    Any object is accepted; it is stringified and UTF-8 encoded first.
    From: Biligrab, https://github.com/cnbeining/Biligrab
    MIT License
    """
    digest = hashlib.md5(str(string).encode('utf-8'))
    return digest.hexdigest()
def remove_prefix(utt, prefix):
    """
    Check that utt begins with prefix+" ", and then remove.
    Inputs:
      utt: string
      prefix: string
    Returns:
      new utt: utt with the prefix+" " removed.
    Raises:
      AssertionError: if utt does not start with prefix followed by a space
      (diagnostics are printed before re-raising).
    """
    expected = prefix + " "
    head = utt[: len(expected)]
    try:
        assert head == expected
    except AssertionError as e:
        print("ERROR: utterance '%s' does not start with '%s '" % (utt, prefix))
        print(repr(head))
        print(repr(expected))
        raise e
    return utt[len(expected):]
def format_ucx(name, idx):
    """Format a collider name as "UCX_<name>_<idx>".

    The index is zero-padded to at least two digits (e.g. 3 -> "03").
    """
    padded_idx = str(idx).zfill(2)
    return "UCX_{}_{}".format(name, padded_idx)
from typing import List
def _k_hot_from_label_names(labels: List[str], symbols: List[str]) -> List[int]:
    """Converts text labels into symbol list index as k-hot.

    Args:
      labels: label names to mark.
      symbols: ordered list of all known symbols.

    Returns:
      A k-hot list the length of `symbols`, 1 at each labelled index.

    Raises:
      ValueError: if a label does not appear in `symbols`.
    """
    k_hot = [0] * len(symbols)
    for label in labels:
        try:
            k_hot[symbols.index(label)] = 1
        except ValueError:
            # list.index raises ValueError (not IndexError) for a missing
            # element, so the original `except IndexError` could never fire
            # and the descriptive error below was never produced.
            raise ValueError(
                'Label %s did not appear in the list of defined symbols %r' %
                (label, symbols))
    return k_hot
def eval_add(lst):
    """Evaluate a parsed addition/subtraction expression.

    The parser returns [number, [[op, number], [op, number], ...]]; the
    leading number is the seed and each (op, number) pair is folded onto
    it left-to-right, adding for '+' and subtracting otherwise.
    """
    total = lst[0]
    for pair in lst[1]:
        operator, operand = pair[0], pair[1]
        if operator == '+':
            total = total + operand
        else:
            total = total - operand
    return total
def validate_target_types(target_type):
    """
    Target types validation rule.
    Property: SecretTargetAttachment.TargetType

    Returns the value unchanged when valid; raises ValueError otherwise.
    """
    VALID_TARGET_TYPES = (
        "AWS::RDS::DBInstance",
        "AWS::RDS::DBCluster",
        "AWS::Redshift::Cluster",
        "AWS::DocDB::DBInstance",
        "AWS::DocDB::DBCluster",
    )
    if target_type in VALID_TARGET_TYPES:
        return target_type
    raise ValueError(
        "Target type must be one of : %s" % ", ".join(VALID_TARGET_TYPES)
    )
def cprint(*objects, **kwargs):
    """Apply Color formatting to output in terminal.
    Same as builtin print function with added 'color' keyword argument.
    eg: cprint("data to print", color="red", sep="|")
    available colors:
        black, red, green, yellow, blue, pink, cyan, white,
        no-color (the default)
    """
    ansi_codes = {
        "black": "\033[0;30m",
        "red": "\033[0;31m",
        "green": "\033[0;92m",
        "yellow": "\033[0;93m",
        "blue": "\033[0;34m",
        "pink": "\033[0;95m",
        "cyan": "\033[0;36m",
        "white": "\033[0;37m",
        "no-color": "\033[0m"
    }
    chosen = kwargs.pop('color', 'no-color')
    # Color code is emitted before the objects and a reset code after, so
    # remaining kwargs (sep, end, file, ...) behave as in builtin print.
    return print(ansi_codes[chosen], *objects, ansi_codes['no-color'], **kwargs)
def auth_code():
    """
    Prompt the user for a two-factor authentication code on stdin.

    :return: the code entered by the user, and True — presumably a
        "remember this device" flag expected by the caller (TODO confirm
        against the authentication API this feeds).
    :rtype: tuple(str, bool)
    """
    tmp = input('Введи код: ')  # prompt text is Russian: "Enter the code: "
    return tmp, True
def decConvert(dec):
    """
    Convert the first two decimal places of a number to English words.

    Parameters
    ----------
    dec : str
        String of decimal digits; only the first two characters are read
        (first and second decimal place).

    Returns
    -------
    str
        ' and <words>' for the two decimal places, e.g. '25' ->
        ' and twenty five', '20' -> ' and twenty', '05' -> ' and zero five'.

    NOTE(review): teens ('1X') come out digit-wise ('ten five'), not as
    'fifteen' — the lookup tables cannot express teens; confirm whether
    callers need true teen handling.
    """
    numEngA = {
        0: 'zero', 1: 'one', 2: 'two', 3: 'three', 4: 'four',
        5: 'five', 6: 'six', 7: 'seven', 8: 'eight', 9: 'nine',
    }
    numEngB = {
        1: 'ten', 2: 'twenty', 3: 'thirty', 4: 'fourty',
        5: 'fifty', 6: 'sixty', 7: 'seventy', 8: 'eighty', 9: 'ninety',
    }
    frstDP = int(dec[0])
    scndDP = int(dec[1])
    if frstDP == 0:
        # .0X: there is no tens word for 0 (numEngB has no key 0, which
        # made the original raise KeyError here); spell both digits.
        return ' and ' + numEngA[frstDP] + ' ' + numEngA[scndDP]
    if scndDP == 0:
        # .X0: tens word alone ('twenty', not 'twenty zero').
        return ' and ' + numEngB[frstDP]
    # .XY: tens word plus units digit. The original single-line ternary
    # bound the wrong way and silently dropped the second digit
    # ('25' -> ' and twenty').
    return ' and ' + numEngB[frstDP] + ' ' + numEngA[scndDP]
import json
def json_get(cid, item):
    """Look up `item` in the per-user settings file data/<cid>.json."""
    with open('data/{}.json'.format(cid)) as settings_file:
        settings = json.load(settings_file)
    return settings[item]
def fov_geometry(release='sva1',size=[530,454]):
    """
    Return positions of each CCD in PNG image for
    a given data release.
    Parameters:
    release : Data release name (currently ['sva1','y1a1']
    size : Image dimensions in pixels [width,height]
    Returns:
    list : A list of [id, xmin, ymin, xmax, ymax] for each CCD,
        sorted by CCD id.
    """
    # NOTE(review): the mutable default `size=[530,454]` is shared across
    # calls; it is only read here, but worth confirming no caller mutates it.
    SIZE=size
    WIDTH=SIZE[0]
    HEIGHT=SIZE[1]
    # CCDs belonging to each row (DECam focal-plane layout, top to bottom).
    ROWS = [ [3,2,1], #range(3,0,-1),
             [7,6,5,4], #range(7,3,-1),
             [12,11,10,9,8], #range(12,7,-1),
             [18,17,16,15,14,13], #range(18,12,-1),
             [24,23,22,21,20,19], #range(24,18,-1),
             [31,30,29,28,27,26,25], #range(31,24,-1),
             [38,37,36,35,34,33,32], #range(38,31,-1),
             [44,43,42,41,40,39], #range(44,38,-1),
             [50,49,48,47,46,45], #range(50,44,-1),
             [55,54,53,52,51], #range(55,50,-1),
             [59,58,57,56], #range(59,55,-1),
             [62,61,60], #range(62,59,-1)
             ]
    if release.lower() == 'sva1':
        # These are the old SV pngs, not the ones made for Y2A1
        # Border padding in x,y; assumed symmetric
        PAD = [0,0]
        # SV images are flipped in both axes relative to the layout above.
        ROWS = [r[::-1] for r in ROWS[::-1]]
    else:
        # 2% border padding on each side for the newer pngs.
        PAD = [0.02*WIDTH,0.02*HEIGHT]
        ROWS = ROWS
    NROWS = len(ROWS) # Number of rows
    NCCDS = [len(row) for row in ROWS]  # CCDs per row
    # CCD dimension (assumed to span image): the widest row fixes the CCD
    # width, and the row count fixes the CCD height.
    CCD_SIZE = [float(WIDTH-2*PAD[0])/max(NCCDS),
                float(HEIGHT-2*PAD[1])/NROWS]
    ret = []
    for i,ccds in enumerate(ROWS):
        for j,ccd in enumerate(ccds):
            # Rows narrower than the widest are centered horizontally.
            xpad = (SIZE[0] - len(ccds)*CCD_SIZE[0])/2.
            ypad = PAD[1]
            xmin = xpad + j*CCD_SIZE[0]
            xmax = xmin + CCD_SIZE[0]
            ymin = ypad + i*CCD_SIZE[1]
            ymax = ymin + CCD_SIZE[1]
            # These are output as ints now
            ret += [[int(ccd), int(xmin), int(ymin), int(xmax), int(ymax)]]
    return sorted(ret)
def HHMMSS_to_seconds(string):
    """Convert a colon-separated time string (HH:MM:SS) to seconds since
    midnight."""
    hours, minutes, seconds = (int(part) for part in string.split(':'))
    return hours * 3600 + minutes * 60 + seconds
def _replace_token_range(tokens, start, end, replacement):
    """Return a new sequence with tokens[start:end] swapped for `replacement`."""
    prefix = tokens[:start]
    suffix = tokens[end:]
    return prefix + replacement + suffix
def center_crop(im, size, is_color=True):
    """
    Crop the center of image with size.
    Example usage:
    .. code-block:: python
        im = center_crop(im, 224)
    :param im: the input image with HWC layout.
    :type im: ndarray
    :param size: the cropping size.
    :type size: int
    :param is_color: whether the image is color or not.
    :type is_color: bool
    """
    h, w = im.shape[:2]
    # Floor division is required: plain '/' yields floats in Python 3 and
    # float slice indices raise TypeError on ndarrays.
    h_start = (h - size) // 2
    w_start = (w - size) // 2
    h_end, w_end = h_start + size, w_start + size
    if is_color:
        im = im[h_start:h_end, w_start:w_end, :]
    else:
        im = im[h_start:h_end, w_start:w_end]
    return im
def num(value):
    """Parse number as float or int.

    Returns an int only when the text parses as an int and equals its float
    parse (so '3' -> 3 but '3.0' -> 3.0, since int('3.0') fails).
    """
    as_float = float(value)
    try:
        as_int = int(value)
    except ValueError:
        return as_float
    return as_int if as_int == as_float else as_float
def __format_event_start_date_and_time(t):
    """Format a datetime like 'Tue Jul 30 at 5:00 PM'.

    NOTE: '%-d' / '%-I' (no zero padding) are glibc strftime extensions and
    are not supported by Windows strftime.
    """
    return t.strftime("%a %b %-d at %-I:%M %p")
from typing import List
from typing import MutableMapping
def parse_template_mapping(
    template_mapping: List[str]
) -> MutableMapping[str, str]:
    """Parse '<key>=<value>' strings into a dict.

    Only the first '=' splits; later '=' characters stay in the value.
    Entries without '=' raise ValueError (unpacking fails).
    """
    return {
        key: value
        for key, value in (entry.split("=", 1) for entry in template_mapping)
    }
import os
def isGZ(fn):
    """
    Tests whether a file is gz-compressed.
    :param fn: a filename
    :type fn: str
    :returns: True if fn starts with the gzip magic bytes 1f 8b,
        otherwise False
    """
    assert os.path.exists(fn)
    with open(fn, 'rb') as handle:
        magic = handle.read(2)
    return magic == b'\x1f\x8b'
import os
def _file_extension(filename):
    """Return the file extension without the dot.

    openbabel expects the extension dot-free, while os.path.splitext
    returns it with the leading dot.
    """
    root_and_ext = os.path.splitext(filename)
    return root_and_ext[1][1:]
import numpy
def integrate_sed(wavelength, flambda, wlmin=None, wlmax=None):
    """
    Calculate the flux in an SED by direct trapezoidal integration.

    Parameters
    ----------
    wavelength: A numpy float array of wavelength values, normally in
                microns
    flambda:    A numpy float array of flux density values, normally
                F_lambda in W/m^2/micron
    wlmin:      An optional float value for the minimum wavelength of
                the calculation, or None to have no lower limit aside
                from the data range
    wlmax:      An optional float value for the maximum wavelength of
                the calculation, or None to have no upper limit aside
                from the data range

    Returns
    -------
    flux1:      The float value, the estimated total flux, nominally in
                W/m^2 if the input units are microns and W/m^2/micron; if
                the wavelength range is bad or the two arrays do not match
                in length a value of zero is returned
    """
    if len(wavelength) != len(flambda):
        return 0.
    if wlmin is None:
        xmin = 0.9 * numpy.min(wavelength)
    else:
        xmin = wlmin
    if wlmax is None:
        xmax = 1.1 * numpy.max(wavelength)
    else:
        xmax = wlmax
    if (xmin >= xmax) or (len(wavelength) < 2):
        return 0.
    inds = numpy.argsort(wavelength)
    newwavelength = numpy.copy(wavelength[inds])
    newflambda = numpy.copy(flambda[inds])
    if (xmin > numpy.min(wavelength)) or (xmax < numpy.max(wavelength)):
        # Interpolate on the SORTED arrays (numpy.interp requires
        # monotonically increasing x; the original passed unsorted input).
        fl1 = numpy.interp(xmin, newwavelength, newflambda)
        fl2 = numpy.interp(xmax, newwavelength, newflambda)
        # Record the out-of-range points BEFORE clipping the wavelengths.
        # The original clipped first, so these masks were always empty and
        # the interpolated boundary fluxes were never applied.
        below = newwavelength < xmin
        above = newwavelength > xmax
        newwavelength[below] = xmin
        newwavelength[above] = xmax
        newflambda[below] = fl1
        newflambda[above] = fl2
    flux = numpy.trapz(newflambda, newwavelength)
    return flux
def test_cache_memoize_ttl(cache, timer):
    """Test that cache.memoize() can set a TTL.

    Uses the `cache` and `timer` fixtures; `timer.time` is assumed to be
    the clock the cache consults, so assigning it simulates the passage of
    time (TODO confirm fixture semantics).
    """
    ttl1 = 5
    ttl2 = ttl1 + 1

    @cache.memoize(ttl=ttl1)
    def func1(a):
        return a

    @cache.memoize(ttl=ttl2)
    def func2(a):
        return a

    # One cache entry per memoized function.
    func1(1)
    func2(1)
    assert len(cache) == 2
    # NOTE(review): assumes cache.keys() yields func1's key before func2's
    # (insertion order) — confirm against the cache implementation.
    key1, key2 = tuple(cache.keys())

    # Just before ttl1 expires, both entries are still alive.
    timer.time = ttl1 - 1
    assert cache.has(key1)
    assert cache.has(key2)

    # At exactly ttl1 the first entry expires; the second survives.
    timer.time = ttl1
    assert not cache.has(key1)
    assert cache.has(key2)

    # At ttl2 the second entry expires as well.
    timer.time = ttl2
    assert not cache.has(key2)
def shorten_str(string, length=30, end=10):
    """Shorten a string that exceeds `length` characters.

    Keeps the first (length - end) and the last `end` characters, joined
    by '...'; shorter strings pass through unchanged and None becomes "".
    """
    if string is None:
        return ""
    if len(string) <= length:
        return string
    return f"{string[:length - end]}...{string[-end:]}"
def Max(data):
    """Return the maximum value of a time series.

    Delegates to the object's own ``max`` method (e.g. numpy arrays or
    pandas Series).
    """
    return data.max()
def parse_healing_and_target(line):
    """Parse a combat-log healing line into [amount, target].

    The target is the run of words from position 3 up to the word 'for'
    (with any 'the ' removed); the amount is the integer right after 'for'.
    """
    words = line.split()
    for_position = words.index('for')
    target = ' '.join(words[3:for_position]).replace('the ', '')
    amount = int(words[for_position + 1])
    return [amount, target]
import torch
def as_mask(indexes, length):
    """
    Convert indexes into a binary mask.

    Parameters:
        indexes (LongTensor): positive indexes
        length (int): maximal possible value of indexes
    """
    mask = torch.zeros(length, dtype=torch.bool, device=indexes.device)
    mask[indexes] = True
    return mask
def check_pass(value):
    """Unconditionally passing check.

    Used for 'checking' things like the workshop address, for which no
    sensible validation is feasible; the value is ignored.
    """
    return True
def is_valid_charts_yaml(content):
    """
    Check if 'content' contains mandatory keys
    :param content: parsed YAML file as list of dictionary of key values
    :return: True if every chart dict has the mandatory keys with the
        expected types (str fields, bool private_image, list
        extra_executes), else False
    """
    required = ['chart_name', 'helm_repo_name', 'name_space', 'values_file',
                'private_image']
    string_fields = ['chart_name', 'helm_repo_name', 'name_space', 'values_file']
    for chart in content:
        # Mandatory keys must be present and not None.
        if any(key not in chart or chart.get(key) is None for key in required):
            return False
        # String-valued fields (exact str type, matching the original check).
        if any(type(chart.get(key)) is not str for key in string_fields):
            return False
        # private_image must be an actual bool.
        if type(chart.get('private_image')) is not bool:
            return False
        # extra_executes must be a list (None/missing fails too).
        if type(chart.get('extra_executes')) is not list:
            return False
    return True
def get_trader_fcas_availability_agc_status_condition(params) -> bool:
    """Get FCAS availability AGC status condition. AGC must be enabled for
    regulation FCAS; contingency FCAS offers pass unconditionally."""
    if params['trade_type'] not in ('L5RE', 'R5RE'):
        # Contingency FCAS offer: AGC does not need to be enabled.
        return True
    # Regulation FCAS offer: AGC active == '1', inactive == '0'.
    return params['agc_status'] == '1'
def normalize_field_names(fields):
    """
    Map field names to a normalized form to check for collisions like
    'coveredText' vs 'covered_text': underscores removed, lowercased.
    """
    return {name.replace('_', '').lower() for name in fields}
def rounder(money_dist: list, pot: int, to_coin: int = 2) -> list:
    """
    Rounds the money distribution while preserving total sum
    stolen from https://stackoverflow.com/a/44740221

    Largest-remainder rounding: every share is rounded to a multiple of
    `to_coin`, then the leftover (pot minus the rounded total) is handed
    to the shares that lost the most in rounding.
    """
    def custom_round(x):
        """ Rounds a number to be divisible by to_coin specified """
        return int(to_coin * round(x / to_coin))

    # Rounded shares, and how much of the pot is still unassigned.
    rs = [custom_round(x) for x in money_dist]
    k = pot - sum(rs)
    # NOTE(review): requires the leftover itself to be round-stable;
    # corrections below are +1 units, so confirm behavior for to_coin != 1.
    assert k == custom_round(k)
    # Per-share rounding error (positive = the share was rounded down).
    fs = [x - custom_round(x) for x in money_dist]
    # Indices of the k shares with the largest rounding error.
    indices = [
        i
        for order, (e, i) in enumerate(
            reversed(sorted((e, i) for i, e in enumerate(fs)))
        )
        if order < k
    ]
    return [r + 1 if i in indices else r for i, r in enumerate(rs)]
def remove_comments(s):
    """Drop lines whose first non-space character starts a '#' comment.

    Examples
    --------
    >>> code = '''
    ... # comment 1
    ... # comment 2
    ... echo foo
    ... '''
    >>> remove_comments(code)
    'echo foo'
    """
    kept_lines = [
        line for line in s.strip().split("\n")
        if not line.strip().startswith("#")
    ]
    return "\n".join(kept_lines)
import pathlib
def is_dicom(path: pathlib.Path) -> bool:
    """Check if the input is a DICOM file.

    Args:
        path (pathlib.Path): Path to the file (or directory) to check.

    Returns:
        bool: True if the path has a .dcm suffix, or is a directory
        containing at least one .dcm entry.
    """
    path = pathlib.Path(path)
    if path.suffix.lower() == ".dcm":
        return True
    return path.is_dir() and any(
        child.suffix.lower() == ".dcm" for child in path.iterdir()
    )
def axLabel(value, unit):
    """
    Return axis label for given strings.
    :param value: Value for axis label
    :type value: int
    :param unit: Unit for axis label
    :type unit: str
    :return: Axis label as \"<value> (<unit>)\"
    :rtype: str
    """
    return "%s (%s)" % (value, unit)
def extract_tag(inventory, url):
    """
    Extract documentation links from a sphinx inventory of a C++ project
    documented with Breathe.

    The inventory maps 'cpp:class' / 'cpp:function' / 'cpp:type' to
    {name: tuple} dictionaries, where tuple index 2 is the absolute URL of
    the corresponding documentation page.

    Parameters
    ----------
    inventory : dict
        sphinx inventory
    url : str
        base url of the documentation (stripped from each link)

    Returns
    -------
    dict with keys 'class', 'class_methods', 'func', 'type'; class methods
    are grouped under their owning class.
    """
    def relative_url(entry):
        return entry[2].replace(url, '')

    classes = {}
    class_methods = {}
    for class_name, entry in inventory.get('cpp:class', {}).items():
        classes[class_name] = relative_url(entry)
        class_methods[class_name] = {}

    functions = {}
    for symbol, entry in inventory.get('cpp:function', {}).items():
        # A function belongs to the first known class whose 'Cls::' prefix
        # appears in its name; otherwise it is a free function.
        owner = next((c for c in class_methods if c + '::' in symbol), None)
        if owner is None:
            functions[symbol] = relative_url(entry)
        else:
            method_name = symbol.replace(owner + '::', '')
            class_methods[owner][method_name] = relative_url(entry)

    types = {}
    for type_name, entry in inventory.get('cpp:type', {}).items():
        types[type_name] = relative_url(entry)

    return {'class': classes,
            'class_methods': class_methods,
            'func': functions,
            'type': types
            }
import requests
import json
def package_search(api_url, org_id=None, params=None, start_index=0, rows=100, logger=None, out=None):
    """
    package_search: run the package_search CKAN API query, filtering by org_id, iterating by 100, starting with 'start_index'
    perform package_search by owner_org:
    https://data.ioos.us/api/3/action/package_search?q=owner_org:

    Parameters:
        api_url: base CKAN API URL (e.g. https://data.ioos.us/api/3)
        org_id: optional owner_org id to filter on
        params: optional list of extra query terms
        start_index: offset of the first result row
        rows: page size
        logger: optional logger used to trace the request
        out: optional file-like sink (currently unused; see commented-out
            code at the bottom)

    Returns:
        dict: parsed JSON response from CKAN.
    """
    action = "package_search"
    # Build the Solr 'q' query string.
    # NOTE(review): org_id and params are joined with literal '+' inside a
    # single q value — confirm CKAN expects '+' rather than spaces here.
    if org_id is not None:
        if params is not None:
            payload = {'q': "owner_org:{id}+{params}".format(id=org_id, params="+".join(params)), 'start': start_index, 'rows': rows}
            print(payload)
        else:
            payload = {'q': "owner_org:{id}".format(id=org_id), 'start': start_index, 'rows': rows}
            print(payload)
    else:
        if params is not None:
            payload = {'q': "{params}".format(params=" ".join(params)), 'start': start_index, 'rows': rows}
            print(payload)
        else:
            payload = {'start': start_index, 'rows': rows}
            print(payload)
    url = ("/").join([api_url, "action", action])
    if logger:
        logger.info("Executing {action}. URL: {url}. Parameters {params}".format(action=action, url=url, params=payload))
    # Earlier request variants kept for reference:
    #r = requests.get(url=url, headers = {'content-type': 'application/json'}, params=payload)
    #r = requests.post(url=url, headers = {'content-type': 'application/json'}, data=json.dumps(payload))
    r = requests.post(url=url, headers = {'content-type': 'application/json'}, json=payload)
    print(json.dumps(payload))
    print(r.text)
    # either works:
    #result = json.loads(r.text)
    result = r.json()
    # this is the full package_search result:
    #if out:
    #    out.write(json.dumps(result, indent=4, sort_keys=True, ensure_ascii=False))
    return result
def get_atten(log, atten_obj):
    """Get attenuator current attenuation value.

    Args:
        log: log object (accepted for interface compatibility; unused here).
        atten_obj: attenuator object exposing get_atten().

    Returns:
        Current attenuation value.
    """
    return atten_obj.get_atten()
def is_prime(n):
    """Given an integer n, return True if n is prime and False if not.

    The original stub returned True unconditionally; this implements
    deterministic trial division, testing 2, 3 and then 6k±1 candidates
    up to sqrt(n).
    """
    if n < 2:
        return False
    if n < 4:
        # 2 and 3 are prime.
        return True
    if n % 2 == 0 or n % 3 == 0:
        return False
    candidate = 5
    while candidate * candidate <= n:
        if n % candidate == 0 or n % (candidate + 2) == 0:
            return False
        candidate += 6
    return True
import os
def path_to_newname(path, name_level=1):
    """
    Takes one path and returns a new name, combining the directory structure
    with the filename.

    Parameters
    ----------
    path : String
    name_level : Integer
        Form the name using items this far back in the path. E.g. if
        path = mydata/1234/3.txt and name_level == 2, then name = 1234_3

    Returns
    -------
    name : String
    """
    trailing_parts = path.split('/')[-name_level:]
    stem, _extension = os.path.splitext('_'.join(trailing_parts))
    return stem
from collections import Counter
from typing import Iterable
def sock_merchant(arr: Iterable[int]) -> int:
    """Count whole pairs per sock color.

    >>> sock_merchant([10, 20, 20, 10, 10, 30, 50, 10, 20])
    3
    >>> sock_merchant([6, 5, 2, 3, 5, 2, 2, 1, 1, 5, 1, 3, 3, 3, 5])
    6
    """
    pairs = 0
    for occurrences in Counter(arr).values():
        pairs += occurrences // 2
    return pairs
def int_from_bin_list(lst):
    """Convert a list of 0s and 1s into an integer.

    Args:
        lst (list or numpy.array): list of 0s and 1s (big-endian bit order)

    Returns:
        int: resulting integer

    Raises:
        ValueError: if the list is empty (int('' , 2) fails).
    """
    bits = map(str, lst)
    return int("".join(bits), 2)
import json
def read_config(path=None):
    """
    Function for reading in the config.json file.

    :param path: optional directory containing config.json, or a full path
        that already ends in config.json; defaults to ./config.json.
    :return: parsed configuration dict.
    :raises Exception: if the file is missing or is not valid JSON.
    """
    # Build the file path.
    if path:
        file_path = path if "config.json" in path else f"{path}/config.json"
    else:
        file_path = "config.json"
    # Load the config.
    try:
        with open(file_path, "r") as json_file:
            config = json.load(json_file)
    except Exception as exc:
        # Chain the original error so the root cause (missing file vs bad
        # JSON) stays visible in the traceback; the bare `raise Exception`
        # in the original discarded it.
        raise Exception(
            "Your config file is corrupt (wrong syntax, missing values, ...)"
        ) from exc
    return config
def get_number_of_tickets():
    """Prompt the user (stdin) until a nonzero integer ticket count is given.

    Returns:
        int: the number of tickets entered.

    NOTE(review): an entry of 0 silently re-prompts because 0 doubles as
    the loop sentinel — confirm whether 0 should be a legal answer.
    """
    num_tickets = 0
    while num_tickets == 0:
        try:
            num_tickets = int(input('How many tickets do you want to get?\n'))
        except ValueError:
            # The original bare `except` also swallowed KeyboardInterrupt
            # and EOFError, trapping the user in an infinite loop; only
            # unparseable numbers should be retried.
            print("Invalid entry for number of tickets.")
    return num_tickets
def sort_ipv4_addresses_with_mask(ip_address_iterable):
    """
    Sort IPv4 addresses in CIDR notation
    | :param iter ip_address_iterable: An iterable container of IPv4 CIDR
        notated addresses (every entry must contain a '/mask' suffix)
    | :return list : A sorted list of IPv4 CIDR notated addresses
    """
    def numeric_key(cidr):
        # Compare octet-by-octet numerically, then by prefix length.
        address, mask = cidr.split('/')
        first, second, third, fourth = (int(part) for part in address.split('.'))
        return (first, second, third, fourth, int(mask))

    return sorted(ip_address_iterable, key=numeric_key)
def script_with_queue_path(tmpdir):
    """
    Pytest fixture to return a path to a script with main() which takes
    a queue and procedure as arguments and adds procedure process ID to queue.

    NOTE(review): no @pytest.fixture decorator is visible in this chunk —
    presumably applied where the fixture is registered; confirm.
    """
    path = tmpdir.join("script_with_queue.py")
    # The generated script is deliberately minimal: its main() just reports
    # the procedure's pid through the queue so the test can observe it.
    path.write(
        """
def main(queue, procedure):
    queue.put(procedure.pid)
"""
    )
    return f"file://{str(path)}"
def secret_add(secret):
    """
    Return a closure that adds its argument to the number passed into
    secret_add.

    :param secret: secret number to add (integer)
    :return: callable taking a number and returning secret + number
    """
    def add_to_secret(addend):
        return secret + addend
    return add_to_secret
import re
def parseCsv(file_content):
    """
    parseCsv
    ========
    parser a string file from Shimadzu analysis, returning a
    dictonary with current, livetime and sample ID
    Parameters
    ----------
    file_content : str
        shimadzu output csv content
    Returns
    -------
    dic
        dic with irradiation parameters: 'sample' (str), 'current' (int)
        and 'livetime' (int)
    """
    irradiation_parameters = {}
    # Field 0 looks like '"Sample: <id>"' -> take the part after ':',
    # strip quotes and whitespace.
    irradiation_parameters['sample'] = file_content.split(',')[0].split(':')[1].replace("\"", "").strip()
    # Field 12 carries current and livetime at fixed word positions (3 and
    # 13) once repeated spaces are collapsed — brittle against any format
    # change; confirm positions against a sample Shimadzu file.
    irradiation_parameters['current'] = re.sub(' +',' ',file_content.split(',')[12]).split(' ')[3]
    # Keep only the leading digits of the current token (drops units).
    irradiation_parameters['current'] = int(re.findall('\d+', irradiation_parameters['current'])[0])
    irradiation_parameters['livetime'] = int(re.sub(' +',' ',file_content.split(',')[12]).split(' ')[13])
    return(irradiation_parameters)
import json
def write_to_disk(func):
    """
    Decorator that persists the wrapped call's first positional argument to
    checkpoint.json after each call, so an interrupted operation can resume.

    Args:
        func: function whose first positional argument is JSON-serializable.

    Returns:
        The wrapping function; it propagates func's return value (the
        original discarded it).
    """
    def wrapper(*args, **kwargs):
        result = func(*args, **kwargs)
        # The original opened the checkpoint in "r" mode and then called
        # f.write(), which always failed (and required the file to already
        # exist); "w" is the mode needed to write the checkpoint.
        with open("checkpoint.json", "w") as f:
            f.write(json.dumps(args[0]))
        return result
    return wrapper
def _get_ref_init_error(dpde, error, **kwargs):
    """
    Function that identifies where the continuous gyro begins, initiates and
    then carries the static errors during the continuous modes.

    Assumed shapes (from usage — confirm against the error-model API):
        dpde: 2-D numpy array whose column 2 holds per-station coefficients.
        error: object exposing error.survey.inc_rad (per-station inclinations).
        kwargs: must contain kwargs['header']['XY Static Gyro']['End Inc'].

    Returns dpde with column 2 rewritten in place: past the static-mode end
    inclination the last static coefficient is carried forward unchanged.
    """
    temp = [0.0]  # first station contributes no error
    for coeff, inc in zip(dpde[1:, 2], error.survey.inc_rad[1:]):
        if inc > kwargs['header']['XY Static Gyro']['End Inc']:
            # Continuous mode: hold the previously seen coefficient.
            temp.append(temp[-1])
        else:
            temp.append(coeff)
    dpde[:, 2] = temp
    return dpde
import re
def matchatleastone(text, regexes):
    """Return all substrings of `text` matching at least one of `regexes`.

    The patterns are joined with '|' into one alternation and scanned in a
    single findall pass.
    """
    combined_pattern = "|".join(regexes)
    return re.findall(combined_pattern, text)
def get_keys(mapping, *keys):
    """Return a lazy generator of mapping[k] for the given keys, in order.

    Note this is a generator expression, so missing-key errors surface on
    iteration, not at call time.
    """
    return (mapping[key] for key in keys)
def total_examples(X):
    """Counts the total number of examples of a sharded and sliced data
    object X (sum of len() over every slice of every shard)."""
    return sum(len(data_slice) for shard in X for data_slice in shard)
def create_preference_branch(this, args, callee):
    """Creates a preference branch, which can be used for testing composed
    preference names.

    Looks like a JS-engine call hook (this/args/callee convention): wraps
    the traverser result as an nsIPrefBranch and records the branch prefix
    in its 'preference_branch' hook — confirm against the traverser API.
    Implicitly returns None when args is empty or args[0] is not a literal.
    """
    if args:
        if args[0].is_literal:
            res = this.traverser.wrap().query_interface('nsIPrefBranch')
            res.hooks['preference_branch'] = args[0].as_str()
            return res
import traceback
def no_recurse(f):
    """Decorator that makes a function return True instead of recursing.

    The wrapper scans the current call stack for a frame whose function
    name matches f's; if found, the call is a recursive re-entry and True
    is returned without calling f again.
    """
    def func(*args, **kwargs):
        frames = traceback.extract_stack()
        if any(frame[2] == f.__name__ for frame in frames):
            return True
        return f(*args, **kwargs)
    return func
def make_linear_colorscale(colors):
    """
    Makes a list of colors into a colorscale-acceptable form
    For documentation regarding to the form of the output, see
    https://plot.ly/python/reference/#mesh3d-colorscale
    """
    step = 1.0 / (len(colors) - 1)
    scale = []
    for position, color in enumerate(colors):
        scale.append([position * step, color])
    return scale
def mapTypeCategoriesToSubnetName(nodetypecategory, acceptedtypecategory):
    """This function returns a name of the subnet that accepts nodetypecategory
    as child type and can be created in a container whose child type is
    acceptedtypecategory.
    Returns None if these two categories are the same (ie, no need for
    a subnet to accommodate nodetypecategory). Also returns None if
    the mapping has not been defined yet.

    NOTE(review): this stub currently returns '' for every input, not the
    None the docstring promises — confirm whether callers distinguish ''
    from None before implementing the real mapping.
    """
    return ''
def odd_occurrence_parity_set(arr):
    """Return the value that occurs an odd number of times in arr.

    A working set tracks parity: a value is added on first/odd sightings
    and removed on even ones, so after the pass the set holds exactly the
    one value with an odd count (assuming such a unique value exists).
    Set membership makes each step O(1), for O(n) time and O(n) space.

    Parameters
    ----------
    arr : list of integers

    Returns
    -------
    integer
    """
    parity = set()
    for value in arr:
        if value in parity:
            parity.discard(value)
        else:
            parity.add(value)
    return list(parity)[0]
import copy
def _merge_meta(base, child):
    """Merge the base and the child meta attributes.

    List entries, such as ``indexes``, are concatenated (base first).
    ``abstract`` is forced to False unless the child sets it explicitly.
    The base dict is deep-copied and never mutated; the child gains an
    ``abstract`` default via setdefault.

    Args:
        base (dict):
            ``meta`` attribute from the base class.
        child (dict):
            ``meta`` attribute from the child class.

    Returns:
        dict:
            Merged metadata.
    """
    merged = copy.deepcopy(base)
    child.setdefault('abstract', False)
    for key, value in child.items():
        if isinstance(value, list):
            merged.setdefault(key, []).extend(value)
        else:
            merged[key] = value
    return merged
import subprocess
def retrieve_email() -> str:
    """
    Uses the Git command to retrieve the current configured user email address.
    :return: The global configured user email (empty string if unset).
    """
    completed = subprocess.run(
        ["git", "config", "--get", "user.email"],
        capture_output=True,
        text=True,
    )
    return completed.stdout.strip("\n")
def trunc(x, y, w, h):
    """Clamp a point into the box (0, 0) .. (w-1, h-1).

    Args:
        x: the x-coordinate of a point
        y: the y-coordinate of a point
        w: the width of the truncation box
        h: the height of the truncation box.

    Returns:
        The clamped (x, y) tuple.
    """
    clamped_x = min(max(x, 0), w - 1)
    clamped_y = min(max(y, 0), h - 1)
    return clamped_x, clamped_y
import os
import sys
def testInputLog(log_file):
""" Test the user input for issues in the DNS query logs """
# if the path is a file
if os.path.isfile(log_file):
pass
else:
print("WARNING: Bad Input - Use a DNS (text) log file which has one domain per row without any other data or punctuation.")
print("Exiting...")
sys.exit(0)
# Return NULL
return None | c50900dbef8d978e3f7b8349a7ae072c2bab3415 | 3,890 |
import typing
def check_datatype(many: bool):
    """Checks if data/filter to be inserted is a dictionary.

    Decorator factory that wraps DB-accessor methods and validates their
    `_filter`/`_data` arguments before delegating.

    Args:
        many: when True, single-argument calls must receive a sequence of
            documents (insert_many style) rather than a single dict.

    Raises:
        TypeError: when the arguments are not of the expected type.
    """
    def wrapper(func):
        def inner_wrapper(self, _filter={}, _data=None, **kwargs):
            # NOTE(review): the mutable default `_filter={}` is shared
            # across calls; it is only read here, but worth confirming.
            if _data is None:  # statements without two args - find, insert etc
                if many:  # statements that expect a list of dictionaries: insert_many
                    # NOTE(review): str also satisfies typing.Sequence —
                    # confirm whether strings should be rejected here.
                    if isinstance(_filter, typing.Sequence):
                        return func(self, _filter, **kwargs)
                    else:
                        raise TypeError("Unexpected Datatype.")
                if isinstance(_filter, dict):
                    return func(self, _filter, **kwargs)
                else:
                    raise TypeError("Unexpected Datatype.")
            else:  # update statements: need both a filter dict and a data dict
                if isinstance(_filter, dict) and isinstance(_data, dict):
                    return func(self, _filter, _data, **kwargs)
                else:
                    raise TypeError("Unexpected Datatype.")
        return inner_wrapper
    return wrapper
import subprocess
def berks(berks_bin, path, action='update'):
    """
    Execute various berks commands
    :rtype : tuple
    :param berks_bin: path to berks bin
    :param path: path to change directory to before running berks commands (berks is a dir context aware tool)
    :param action: berks action to run, e.g. berks install
    :return: tpl. output, errors, returncode

    NOTE(review): the command is interpolated into a shell string
    (shell=True), so path/action must come from trusted input.
    """
    command = 'cd {0} && {1} {2}'.format(path, berks_bin, action)
    process = subprocess.Popen(
        command,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        universal_newlines=True,
        shell=True,
    )
    output, errors = process.communicate()
    return output, errors, process.returncode
def param_to_secopt(param):
    """Convert a parameter name to INI section and option.

    Split on the first dot. If no dot exists, return the name as the
    option with None for the section.
    """
    section, dot, option = param.partition('.')
    if not dot:
        # No dot anywhere in the name.
        return (None, param)
    return (section, option)
def rotate_char(c, n):
    """Rotate a single character n places in the alphabet.

    n is an integer (may be negative or larger than 26; the shift wraps
    modulo 26). Non-letter characters pass through unchanged. Case is
    preserved by rotating relative to 'a' or 'A' respectively.
    """
    if c.islower():
        alphabet_start = ord('a')
    elif c.isupper():
        alphabet_start = ord('A')
    else:
        # Not a letter: nothing to rotate.
        return c
    # Position within the alphabet after shifting, wrapped with modulo 26
    # so shifts past 'z'/'a' come around the other side.
    shifted_position = (ord(c) - alphabet_start + n) % 26
    return chr(alphabet_start + shifted_position)
def split_parentheses(info):
    """Group the items between "(" and ")" markers into nested lists.

    Walks *info* once: items between an opening "(" and the next ")" are
    collected into a sub-list that replaces the whole parenthesised run;
    every other item is copied through unchanged.  Nesting is not
    supported -- parentheses are assumed to come in flat pairs.

    :param info: a list of strings possibly containing "(" / ")" markers
    :return: new list with parentheses removed and runs grouped
    """
    result = []
    buffer = []
    inside = False
    for item in info:
        if item == "(":
            inside = True
        elif item == ")":
            inside = False
            result.append(buffer)
            buffer = []
        elif inside:
            buffer.append(item)
        else:
            result.append(item)
    return result
import re
def add_target_to_anchors(string_to_fix, target="_blank"):
    """Ensure every <a> tag in *string_to_fix* carries a target attribute.

    Anchors that already declare a target have it overwritten with
    *target*; anchors without one get ``target='<target>'`` appended.
    """
    anchor_re = re.compile("<a(?P<attributes>.*?)>")
    target_re = re.compile("target=['\"].+?['\"]")

    def _rewrite(match):
        attrs = match.group("attributes")
        if target_re.search(attrs):
            # Replace whatever target value was already present.
            return "<a%s>" % target_re.sub("target='%s'" % target, attrs)
        # No target attribute yet: append one.
        return "<a%s target='%s'>" % (attrs, target)

    return anchor_re.sub(_rewrite, string_to_fix)
import itertools
def _get_indices(A):
"""Gets the index for each element in the array."""
dim_ranges = [range(size) for size in A.shape]
if len(dim_ranges) == 1:
return dim_ranges[0]
return itertools.product(*dim_ranges) | dc2e77c010a6cfd7dbc7b7169f4bd0d8da62b891 | 3,899 |
def update_visit_counter(visit_counter_matrix, observation, action):
    """Increment the visit count for one state-action pair.

    Counts how many times each state-action pair has been visited; the
    information can be used later during the learning update.

    @param visit_counter_matrix a 4-D count matrix initialised with zeros
    @param observation the state observed, indexable as (x, y, z)
    @param action the action taken
    @return the updated matrix (also mutated in place)
    """
    x, y, z = observation[0], observation[1], observation[2]
    visit_counter_matrix[x, y, z, action] += 1.0
    return visit_counter_matrix
def slices(series, length):
    """Return every contiguous substring of *series* of size *length*.

    Substrings are produced in order of appearance.

    :param series: string of digits
    :param length: int - the window size to extract
    :return: list of substrings of the requested length
    :raises ValueError: if length exceeds the series length or is below 1
    """
    if len(series) < length:
        raise ValueError("Length requested is shorter than series.")
    if length < 1:
        raise ValueError("Length requested is less than 1.")
    # Every valid window start position yields exactly one substring.
    return [series[start:start + length]
            for start in range(len(series) - length + 1)]
def textToTuple(text, defaultTuple):
    """Convert the text representation of a tuple into a real tuple.

    The text is wrapped in parentheses if needed and evaluated.  No
    checking for type or number of elements is done.  See
    textToTypeTuple for that.  On any parse failure (including empty
    input) *defaultTuple* is returned instead.

    :param text: e.g. "1, 2" or "(1, 2)"
    :param defaultTuple: fallback returned when parsing fails
    """
    text = text.strip()
    if not text:
        # Empty input used to raise IndexError on text[0]; fall back instead.
        return defaultTuple
    # First make sure the text starts and ends with brackets.
    if text[0] != '(':
        text = '(%s' % (text,)
    if text[-1] != ')':
        text = '%s)' % (text,)
    try:
        # NOTE(review): eval of arbitrary text is only safe for trusted
        # input; consider ast.literal_eval if callers permit it.
        returnTuple = eval('tuple(%s)' % (text,))
    except Exception:
        returnTuple = defaultTuple
    return returnTuple
import time
def time_func(func):
    """Return a wrapper that reports how long *func* takes to run.

    The wrapped callable discards func's own return value and instead
    returns the elapsed wall-clock time in seconds.  Nothing clever is
    done to avoid the usual pitfalls of timing a function's runtime.
    (Interestingly, the timeit module doesn't supply a straightforward
    interface to run a particular function.)
    """
    def timed(*args, **kwargs):
        started = time.time()
        func(*args, **kwargs)
        return time.time() - started
    return timed
from typing import Tuple
def get_subpixel_indices(col_num: int) -> Tuple[int, int, int]:
    """Return a 3-tuple of 1-indexed column indices representing the
    subpixels of a single pixel.

    Pixel *col_num* (1-based) owns three consecutive subpixel columns.
    NOTE(review): the tuple is returned in (red, blue, green) order, not
    (red, green, blue) -- confirm with callers whether that is intended.
    """
    base = 3 * (col_num - 1) + 1  # first subpixel column of this pixel
    red_index = base
    green_index = base + 1
    blue_index = base + 2
    return red_index, blue_index, green_index
def _get_field_names(field: str, aliases: dict):
"""
Override this method to customize how
:param field:
:param aliases:
:return:
"""
trimmed = field.lstrip("-")
alias = aliases.get(trimmed, trimmed)
return alias.split(",") | cb732c07018c33a546bf42ab1bf3516d2bd6c824 | 3,906 |
import numpy
def undiskify(z):
    """Maps SL(2)/U(1) poincare disk coord to Lie algebra generator-factor."""
    # Conventions match (2.13) in https://arxiv.org/abs/1909.10969:
    # radial part 2*artanh(|z|), complex phase of z preserved.
    magnitude = 2 * numpy.arctanh(abs(z))
    return magnitude * numpy.exp(1j * numpy.angle(z))
def to_null(string):
    """Template filter: render None as the literal string 'null'.

    Usage::

        {{ string|to_null}}
    """
    if string is None:
        return 'null'
    return string
def get_table_8():
    """Table 8: correction factors for dimming control of the lighting
    equipment installed in each lighting zone of the main habitable room.

    Each row is a pair of factors; presumably the columns correspond to
    with/without dimming control -- TODO confirm against the source table.

    Returns:
        list: rows of Table 8 as (float, float) tuples
    """
    return [
        (0.9, 1.0),
        (0.9, 1.0),
        (1.0, 1.0),
    ]
import requests
import warnings
def query_epmc(query):
    """Run a search against the Europe PMC REST API.

    Parameters
    ----------
    query :
        search term(s) appended to the EPMC search endpoint

    Returns
    -------
    the requests response object on HTTP 200; otherwise a warning is
    emitted and None is returned
    """
    url = "https://www.ebi.ac.uk/europepmc/webservices/rest/search?query="
    page_term = "&pageSize=999"  ## Usual limit is 25
    response = requests.get(url + query + page_term)
    if response.status_code == 200:
        return response
    warnings.warn("request to " + str(query) + " has failed to return 200, and has returned " + str(response.status_code))
import pickle
def read_doc_labels(input_dir):
    """Load pickled document labels from *input_dir*.

    NOTE(review): the filename is appended by plain string concatenation,
    so callers are expected to pass a path ending in a separator.

    :param input_dir: directory path (with trailing separator)
    :return: the unpickled doc labels object
    """
    with open(input_dir + "doc_labels.pkl", 'rb') as fin:
        return pickle.load(fin)
def compute_t(i, automata_list, target_events):
    """
    Compute alphabet needed for processing L{automata_list}[i-1] in the
    sequential abstraction procedure.

    The result is the intersection of everything already processed (the
    alphabets of the first *i* automata) with everything still
    unprocessed (the target events plus the alphabets of the remaining
    automata).

    @param i: Number of the automaton in the L{automata_list}
    @type  i: C{int} in range(1, len(automata_list)+1)

    @param automata_list: List of automata
    @type  automata_list: C{list} of L{Automaton}

    @param target_events: List of events to preserve after abstraction
    @type  target_events: C{set} of L{Event}

    @return: New alphabet for the next step in sequential abstraction
    @rtype: C{set} of L{Event}
    """
    # set.union(*iterables) folds all alphabets in one call; neither the
    # inputs nor target_events are mutated.
    processed = set().union(*(aut.alphabet for aut in automata_list[:i]))
    unprocessed = target_events.union(
        *(aut.alphabet for aut in automata_list[i:]))
    return processed & unprocessed
def strip_new_line(str_json):
    """Remove every newline character from *str_json*.

    Kills the line breaks caused by triple-quoted raw strings.

    :param str_json: string
    :return: string without '\\n' characters
    """
    return str_json.replace('\n', '')
import torch
def cosine_beta_schedule(timesteps, s = 0.008, thres = 0.999):
    """
    Cosine variance (beta) schedule for diffusion models,
    as proposed in https://openreview.net/forum?id=-NEXDKk8gZ
    """
    # timesteps+1 grid points so that betas has exactly `timesteps` entries.
    grid = torch.linspace(0, timesteps, timesteps + 1, dtype = torch.float64)
    # Squared-cosine cumulative-alpha curve, normalised to start at 1.
    alpha_bar = torch.cos(((grid / timesteps) + s) / (1 + s) * torch.pi * 0.5) ** 2
    alpha_bar = alpha_bar / alpha_bar[0]
    # beta_t = 1 - alpha_bar_t / alpha_bar_{t-1}, clipped for numerical stability.
    betas = 1 - (alpha_bar[1:] / alpha_bar[:-1])
    return torch.clip(betas, 0, thres)
import os
def check_directories(directories):
    """Checks if all given directories are really directories and on the same
    device.

    Parameters:
        directories (list of strings) - The directories to check.

    Returns:
        The tuple (ok, ok_dirs) where ok is a boolean and ok_dirs a list of
        directories (as strings). If the given directories contained no
        existing directories or it contained at least two directories that
        are not on the same device, then ok is False and ok_dirs is empty.
        Otherwise ok is True and ok_dirs contains all directories in the
        given directories that really exist.
    """
    existing = []
    for candidate in directories:
        if not os.path.exists(candidate):
            print("'%s' does not exist. Ignoring." % candidate)
        elif not os.path.isdir(candidate):
            print("'%s' is no directory. Ignoring." % candidate)
        else:
            existing.append(candidate)
    if not existing:
        print("No existing directory given. Exiting.")
        return False, []
    # All surviving directories must live on the same device; compare each
    # one against its predecessor, as the original did.
    prev = None
    prev_device = None
    for candidate in existing:
        device = os.stat(candidate).st_dev
        if prev_device is not None and device != prev_device:
            print("'%s' and '%s' are not on the same device. Exiting." % \
                (candidate, prev))
            return False, []
        prev = candidate
        prev_device = device
    return True, existing
from datetime import datetime
def parseYear(year, patterns):
    """"This function returns a string representing a year based on the input
    and a list of possible patterns.

    Each pattern is tried with datetime.strptime; the first that parses
    wins.  If no pattern matches, the input is returned unchanged.

    >>> parseYear('2021', ['%Y'])
    '2021'
    >>> parseYear('2021', ['(%Y)', '%Y'])
    '2021'
    >>> parseYear('(2021)', ['%Y', '(%Y)'])
    '2021'
    """
    for pattern in patterns:
        try:
            return str(datetime.strptime(year, pattern).date().year)
        except ValueError:
            # Pattern did not match this input; try the next one.
            continue
    # No pattern matched: hand back the original string.
    return year
def multiply(t1, t2):
    """
    Multiplies (expands) two binary expressions t1 and t2 based on the
    distributive rule.

    Each expression is a '+'-separated sum of terms; every term of t1 is
    concatenated with every term of t2.  Identical terms are kept once,
    since x*x = x in boolean algebra.

    This rewrite fixes the original implementation's reliance on
    list.index(), which misdetected the "last term" whenever an
    expression contained duplicate terms and left a trailing '+'.

    Args:
        t1 (str): first binary expression
        t2 (str): second binary expression

    Returns:
        A string representing the expansion of the boolean algebraic
        expressions
    """
    terms_a = t1.split('+')
    terms_b = t2.split('+')
    products = []
    for m in terms_a:
        for n in terms_b:
            # x*x = x; otherwise concatenate the term labels.
            products.append(m if m == n else m + n)
    return '+'.join(products)
import re
def sanitize_value(val):
    """Strip whitespace crap from *val* and convert it into a float.

    Handles non-breaking spaces, decimal commas, a '-' sign anywhere in
    the string, a lone '-' (treated as zero) and trailing '%' signs.
    """
    cleaned = re.sub(u"(\xa0|\s)", '', val).replace(',', '.')
    # Positive or negative multiplier; a bare '-' means "no value".
    sign = 1
    if '-' in cleaned:
        if len(cleaned) > 1:
            sign = -1
            cleaned = cleaned.replace('-', '')
        else:
            cleaned = '0'
    if '%' in cleaned:
        cleaned = cleaned.replace('%', '')
    return float(cleaned) * sign
def getObjectInfo(fluiddb, about):
    """
    Gets object info for an object with the given about tag.
    """
    about_resource = fluiddb.about[about]
    return about_resource.get()
import argparse
def cli_to_args():
    """
    Builds the command line interface and parses it into an args namespace.
    """
    parser = argparse.ArgumentParser(description="")
    parser.add_argument(
        '-input_dir', type=str, required=True,
        help='The input directory that contains pngs and svgs of cowboys with Unicode names')
    parser.add_argument(
        '-output_dir', type=str, required=True,
        help='The output diectory where we will put pngs and svgs of cowboys with plain english names. Yee haw.')
    return parser.parse_args()
import os
def get_walkthrought_dir(dm_path):
    """Walk *dm_path* recursively and index every file found.

    Each entry of the returned list has 3 components:
        [0]: total path information (full path to the file)
        [1]: file path directory
        [2]: file name
    """
    index = []
    for dir_path, _dir_names, file_names in os.walk(dm_path):
        for name in file_names:
            index.append([os.path.join(dir_path, name), dir_path, name])
    return index
from typing import Dict
def flatten_dict(d: Dict):
    """Recursively flatten *d* into a '|'-separated string, keys in
    ascending order.

    None values are skipped; nested dicts are flattened in place.
    """
    parts = []
    for key in sorted(d.keys()):
        value = d[key]
        if value is None:
            continue
        if isinstance(value, dict):
            parts.append(f"{key}|{flatten_dict(value)}|")
        else:
            parts.append(f"{key}|{value}|")
    return "".join(parts)
def is_iterable(obj):
    """
    Return true if object has iterator but is not a string

    :param object obj: Any object
    :return: True if object is iterable but not a string.
    :rtype: bool
    """
    if isinstance(obj, str):
        return False
    return hasattr(obj, '__iter__')
def get_operator_module(operator_string):
    """
    Get module name: everything before the final dot of *operator_string*
    (the module, for when the operator is not a local operator).
    """
    module_path = operator_string.rpartition(".")[0]
    assert len(module_path) != 0, (
        "Please specify a format like 'package.operator' to specify your operator. You passed in '%s'"
        % operator_string
    )
    return module_path
import os
import re
def in_incident_root(current_dir_path):
    """
    Helper function to determine if a sub directory is a child of an
    incident directory (named like "1234_Name").  This is useful for
    setting default params in tools that take an incident directory as
    an input.

    :param current_dir_path: String of the path being evaluated
    :return: tuple of (lowercased parent directory path, boolean
             indicating if the parent matches the incident dir pattern)
    """
    parent_dir_path, current_dir_name = os.path.split(current_dir_path)
    # Only a directory literally named "tools" whose parent looks like
    # "NNNN_Word" counts as living inside an incident directory.
    is_root_dir = bool(
        current_dir_name == 'tools'
        and re.match(r'\d{4}_[a-zA-Z]*', os.path.basename(parent_dir_path))
    )
    return parent_dir_path.lower(), is_root_dir
def f_all(predicate, iterable):
    """Return whether predicate(i) is True for all i in iterable

    >>> is_odd = lambda num: (num % 2 == 1)
    >>> f_all(is_odd, [])
    True
    >>> f_all(is_odd, [1, 3, 5, 7, 9])
    True
    >>> f_all(is_odd, [2, 1, 3, 5, 7, 9])
    False
    """
    return all(map(predicate, iterable))
from typing import List
from typing import Tuple
from typing import Set
from typing import Dict
def _recursive_replace(data):
"""Searches data structure and replaces 'nan' and 'inf' with respective float values"""
if isinstance(data, str):
if data == "nan":
return float("nan")
if data == "inf":
return float("inf")
if isinstance(data, List):
return [_recursive_replace(v) for v in data]
if isinstance(data, Tuple):
return tuple([_recursive_replace(v) for v in data])
if isinstance(data, Set):
return set([_recursive_replace(v) for v in data])
if isinstance(data, Dict):
return {k: _recursive_replace(v) for k, v in data.items()}
return data | b5c21d806b462070b2d1eec7d91a5dc700f6b0ed | 3,939 |
def audio_sort_key(ex):
    """Sort using duration time of the sound spectrogram."""
    # ex.src is presumably a tensor whose dim 1 is time -- TODO confirm.
    return ex.src.size()[1]
from pathlib import Path
def _filename_to_title(filename, split_char="_"):
"""Convert a file path into a more readable title."""
filename = Path(filename).with_suffix("").name
filename_parts = filename.split(split_char)
try:
# If first part of the filename is a number for ordering, remove it
int(filename_parts[0])
if len(filename_parts) > 1:
filename_parts = filename_parts[1:]
except Exception:
pass
title = " ".join(ii.capitalize() for ii in filename_parts)
return title | f62ae56901f0a58e53e84e63423bcb9f2ccf4c5a | 3,943 |
def is_versioned(obj):
    """
    Check if a given object is versioned by inspecting some of its
    attributes.

    Preference order:
      1. newer versions of RGW expose an explicit ``versioned`` flag --
         trust it when present;
      2. a ``VersionedEpoch`` attribute implies the object is versioned
         (an unversioned object should *never* carry it);
      3. otherwise fall back to whether ``version_id`` holds a truthy
         value.
    """
    if hasattr(obj, 'versioned'):
        return obj.versioned
    if hasattr(obj, 'VersionedEpoch'):
        return True
    # Removed an unreachable `version_id is None` branch from the
    # original: the truthiness check already guarantees a non-None value.
    return bool(getattr(obj, 'version_id', None))
def merge_on_empty_fields(base, tomerge):
    """Utility to quickly fill empty or falsy fields of $base with fields
    of $tomerge.

    :return: True when at least one field of *base* was (re)assigned
    """
    merged_any = False
    for key, value in tomerge.items():
        if not base.get(key):
            base[key] = value
            merged_any = True
    return merged_any
Subsets and Splits
No saved queries yet
Save your SQL queries to embed, download, and access them later. Queries will appear here once saved.