import os
def disk_partitions(disk_ntuple, all=False):
    """Return all mounted partitions as a named tuple.
    If all == False return physical partitions only.
    """
    phydevs = []
    if os.path.exists('/proc/filesystems'):
        with open('/proc/filesystems', 'r') as my_file:
            for line in my_file:
                if not line.startswith('nodev'):
                    phydevs.append(line.strip())
    else:
        print('path does not exist: /proc/filesystems')
    retlist = []
    if os.path.exists('/etc/mtab'):
        with open('/etc/mtab', 'r') as my_file:
            for line in my_file:
                if not all and line.startswith('none'):
                    continue
                fields = line.split()
                device = fields[0]
                mountpoint = fields[1]
                fstype = fields[2]
                if not all and fstype not in phydevs:
                    continue
                if device == 'none':
                    device = ''
                ntuple = disk_ntuple(device, mountpoint, fstype)
                retlist.append(ntuple)
    else:
        print('path does not exist: /etc/mtab')
    return retlist | 1c46fe7efab860c4fe9f3be7745d3ef2c24eafa1 | 2,154 |
def get_page_title(title: str):
""" Возвращает заголовок, отображаемый на вкладках """
return f'{title} | NeuraHS' | 3df2de16325cf0c4c849e7d09111ea87e36c309a | 2,155 |
import time
import struct
def __timestamp():
"""Generate timestamp data for pyc header."""
today = time.time()
ret = struct.pack(b'=L', int(today))
return ret | 477c8473026c706785b4091bbbf647b86eaa560f | 2,157 |
def reverse_index(alist, value):
    """Find the index of the last occurrence of an element."""
    return len(alist) - alist[-1::-1].index(value) - 1 | 21fc4e17a91000085123ea4be42c72cb27a3482c | 2,158 |
def segments_decode(aseg):
    """
    Decode segments.
    Parameters
    ----------
    aseg : numpy.ndarray of uint32
    Returns
    -------
    segments : list of list of int
    """
    sentinel = 2 ** 32 - 1
    segments = []
    seg = []
    for x in list(aseg):
        if x == sentinel:
            segments.append(seg)
            seg = []
        else:
            seg.append(x)
    return segments | d5edf85ae489b62c8820c3616a75a9ca305f06ec | 2,159 |
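A minimal usage sketch for the decoder above (the numpy import and the sample values are illustrative); each segment is closed by the 2**32 - 1 sentinel:
import numpy as np

sentinel = 2 ** 32 - 1
aseg = np.array([3, 5, sentinel, 7, sentinel], dtype=np.uint32)
print(segments_decode(aseg))  # two segments: [3, 5] and [7]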
import sys
import traceback
def return_stack():
    """
    Describe the exception currently being handled.
    :return: string with the first two lines of the formatted stacktrace.
    """
    exc_type, exc_value, exc_traceback = sys.exc_info()
    lines = traceback.format_exception(exc_type, exc_value, exc_traceback)
    return lines[0] + lines[1] | 8dd58917ffe378cf88c38429d63ed8d553df4ffb | 2,160 |
import json
def serialize(results):
"""Serialize a ``QueryDict`` into json."""
serialized = {}
for result in results:
serialized.update(result.to_dict())
return json.dumps(serialized, indent=4) | 1ce996e1172344ba72ccbb9487b51b0efc30fa5c | 2,161 |
def shingles(tokens, n):
"""
Return n-sized shingles from a list of tokens.
>>> assert list(shingles([1, 2, 3, 4], 2)) == [(1, 2), (2, 3), (3, 4)]
"""
return zip(*[tokens[i:-n + i + 1 or None] for i in range(n)]) | 93e8f3828bf4b49397e09cb46565199dcd7a68be | 2,162 |
import argparse
def _parse_args():
"""parse arguments"""
parser = argparse.ArgumentParser(description='train and export wdsr on modelarts')
# train output path
parser.add_argument('--train_url', type=str, default='', help='where training log and ckpts saved')
# dataset dir
parser.add_argument('--data_url', type=str, default='', help='where datasets located')
# train config
parser.add_argument('--data_train', type=str, default='DIV2K', help='train dataset name')
parser.add_argument('--device_target', type=str, default='Ascend', help='target device to run')
parser.add_argument('--epochs', type=int, default=1, help='number of epochs to train')
parser.add_argument('--batch_size', type=int, default=16, help='input batch size for training')
parser.add_argument('--lr', type=float, default=1e-4, help='learning rate')
parser.add_argument('--init_loss_scale', type=float, default=65536., help='scaling factor')
parser.add_argument('--loss_scale', type=float, default=1024.0, help='loss_scale')
parser.add_argument('--scale', type=str, default='2+3+4', help='super resolution scale')
parser.add_argument('--ckpt_save_path', type=str, default='ckpt', help='path to save ckpt')
parser.add_argument('--ckpt_save_interval', type=int, default=10, help='save ckpt frequency, unit is epoch')
parser.add_argument('--ckpt_save_max', type=int, default=5, help='max number of saved ckpt')
parser.add_argument('--task_id', type=int, default=0)
# export config
parser.add_argument("--export_batch_size", type=int, default=1, help="batch size")
parser.add_argument("--export_file_name", type=str, default="wdsr", help="output file name.")
parser.add_argument("--export_file_format", type=str, default="AIR",
choices=['MINDIR', 'AIR', 'ONNX'], help="file format")
args, _ = parser.parse_known_args()
return args | 8147e3d2c7bc60cb2ed379308118bcf7ef8157b6 | 2,164 |
def prefix_attrs(source, keys, prefix):
"""Rename some of the keys of a dictionary by adding a prefix.
Parameters
----------
source : dict
Source dictionary, for example data attributes.
keys : sequence
Names of keys to prefix.
prefix : str
Prefix to prepend to keys.
Returns
-------
dict
Dictionary of attributes with some keys prefixed.
"""
out = {}
for key, val in source.items():
if key in keys:
out[f"{prefix}{key}"] = val
else:
out[key] = val
return out | e1c8102fddf51cd7af620f9158419bff4b3f0c57 | 2,165 |
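A quick usage sketch for prefix_attrs (the attribute names are illustrative):
attrs = {"units": "K", "long_name": "temperature", "source": "model"}
print(prefix_attrs(attrs, ["units", "long_name"], "original_"))
# {'original_units': 'K', 'original_long_name': 'temperature', 'source': 'model'}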
def _grep_first_pair_of_parentheses(s):
"""
Return the first matching pair of parentheses in a code string.
INPUT:
A string
OUTPUT:
A substring of the input, namely the part between the first
    (outermost) matching pair of parentheses (including the
parentheses).
Parentheses between single or double quotation marks do not
count. If no matching pair of parentheses can be found, a
``SyntaxError`` is raised.
EXAMPLES::
sage: from sage.misc.sageinspect import _grep_first_pair_of_parentheses
sage: code = 'def foo(a="\'):", b=4):\n return'
sage: _grep_first_pair_of_parentheses(code)
'(a="\'):", b=4)'
sage: code = 'def foo(a="%s):", \'b=4):\n return'%("'")
sage: _grep_first_pair_of_parentheses(code)
Traceback (most recent call last):
...
SyntaxError: The given string does not contain balanced parentheses
"""
out = []
single_quote = False
double_quote = False
escaped = False
level = 0
    for c in s:
        if level > 0:
            out.append(c)
        if c == '(' and not single_quote and not double_quote and not escaped:
            level += 1
        elif c == '"' and not single_quote and not escaped:
            double_quote = not double_quote
        elif c == "'" and not double_quote and not escaped:
            single_quote = not single_quote
        elif c == ')' and not single_quote and not double_quote and not escaped:
            if level == 1:
                return '(' + ''.join(out)
            level -= 1
        elif c == "\\" and (single_quote or double_quote):
            escaped = not escaped
        else:
            escaped = False
raise SyntaxError("The given string does not contain balanced parentheses") | 7441c1b8734c211b9b320e195155719452cf7407 | 2,166 |
import requests
def login():
"""
"""
url = "http://127.0.0.1:5001/rest/login"
data = {"username": "kivanc", "password": "1234"}
r = requests.post(url, json=data)
output = r.json()
return output["access_token"] | a2b4bd68110fd053c48988f7cc490c88f148bc1f | 2,167 |
def get_package_object():
"""Gets a sample package for the submission in Dev Center."""
package = {
# The file name is relative to the root of the uploaded ZIP file.
"fileName" : "bin/super_dev_ctr_api_sim.appxupload",
# If you haven't begun to upload the file yet, set this value to "PendingUpload".
"fileStatus" : "PendingUpload"
}
return package | d65329372f356325c08ecb814f48ad856b9509bc | 2,168 |
def find_start_end(grid):
    """
    Finds the source and destination block indexes from the list.
    Args
        grid: <list> the world grid blocks represented as a list of blocks (see Tutorial.pdf)
    Returns
        eb_index: <int> source (emerald block) index in the list
        rb_index: <int> destination (redstone block) index in the list
        air_block: <list> indexes of air blocks
        diamond_block: <list> indexes of diamond blocks
    """
    counter = 0
    eb_index = None
    rb_index = None
    air_block = []
    diamond_block = []
    for i in grid:
        if i == 'diamond_block':
            diamond_block.append(counter)
        if i == 'air':
            air_block.append(counter)
        if i == 'emerald_block':
            eb_index = counter
        if i == 'redstone_block':
            rb_index = counter
        counter += 1
    return (eb_index, rb_index, air_block, diamond_block) | d617af3d6ebf9a2c9f42250214e3fe52d2017170 | 2,169 |
from typing import Optional
import inspect
def find_method_signature(klass, method: str) -> Optional[inspect.Signature]:
"""Look through a class' ancestors and fill out the methods signature.
    A class method has a signature, but it might not always be complete. When a parameter is not
annotated, we might want to look through the ancestors and determine the annotation. This is
very useful when you have a base class that has annotations, and child classes that are not.
Examples
--------
>>> class Parent:
...
... def foo(self, x: int) -> int:
... ...
>>> find_method_signature(Parent, 'foo')
<Signature (self, x: int) -> int>
>>> class Child(Parent):
...
... def foo(self, x, y: float) -> str:
... ...
>>> find_method_signature(Child, 'foo')
<Signature (self, x: int, y: float) -> str>
"""
m = getattr(klass, method)
sig = inspect.signature(m)
params = []
for param in sig.parameters.values():
if param.name == "self" or param.annotation is not param.empty:
params.append(param)
continue
for ancestor in inspect.getmro(klass):
try:
ancestor_meth = inspect.signature(getattr(ancestor, m.__name__))
except AttributeError:
break
try:
ancestor_param = ancestor_meth.parameters[param.name]
except KeyError:
break
if ancestor_param.annotation is not param.empty:
param = param.replace(annotation=ancestor_param.annotation)
break
params.append(param)
return_annotation = sig.return_annotation
if return_annotation is inspect._empty:
for ancestor in inspect.getmro(klass):
try:
ancestor_meth = inspect.signature(getattr(ancestor, m.__name__))
except AttributeError:
break
if ancestor_meth.return_annotation is not inspect._empty:
return_annotation = ancestor_meth.return_annotation
break
return sig.replace(parameters=params, return_annotation=return_annotation) | 17d3e7d554720766ca62cb4ad7a66c42f947fc1c | 2,170 |
def get_ca_pos_from_atoms(df, atoms):
"""Look up alpha carbon positions of provided atoms."""
ca = df[df['atom_name'] == 'CA'].reset_index()
nb = ca.reindex(atoms)
nb = nb.reset_index().set_index('index')
return nb | c069db751d94f6626be5d56e7b286ef3c873c04e | 2,171 |
def LoadComponent(self,filename): # real signature unknown; restored from __doc__
"""
LoadComponent(self: object,filename: str) -> object
LoadComponent(self: object,stream: Stream) -> object
LoadComponent(self: object,xmlReader: XmlReader) -> object
LoadComponent(self: object,filename: TextReader) -> object
LoadComponent(self: object,reader: XamlXmlReader) -> object
"""
return object() | 17b893a6e91f4ef62b8ba18646d9dc2005c52ccd | 2,172 |
def split_bits(word : int, amounts : list):
"""
takes in a word and a list of bit amounts and returns
the bits in the word split up. See the doctests for concrete examples
>>> [bin(x) for x in split_bits(0b1001111010000001, [16])]
['0b1001111010000001']
>>> [bin(x) for x in split_bits(0b1001111010000001, [8,8])]
['0b10011110', '0b10000001']
not the whole 16 bits!
>>> [bin(x) for x in split_bits(0b1001111010000001, [8])]
Traceback (most recent call last):
AssertionError: expected to split exactly one word
This is a test splitting MOVE.B (A1),D4
>>> [bin(x) for x in split_bits(0b0001001010000100, [2,2,3,3,3,3])]
['0b0', '0b1', '0b1', '0b10', '0b0', '0b100']
"""
nums = []
pos = 0
for amount in amounts:
# get a group of "amount" 1's
mask = 2**amount - 1
# shift mask to the left so it aligns where the last
# iteration ended off
shift = 16 - amount - pos
mask = mask << shift
# update location in the word
pos += amount
        # extract the relevant bits
bits = word & mask
# shift back and insert the list to be returned
nums.append(bits >> shift)
assert pos == 16, 'expected to split exactly one word'
return nums | 556a389bb673af12a8b11d8381914bf56f7e0599 | 2,173 |
def produce_phase(pipeline_run):
"""Produce result with Produce phase data."""
scores = pipeline_run['run']['results']['scores']
if len(scores) > 1:
raise ValueError('This run has more than one score!')
scores = scores[0]
return {
'metric': scores['metric']['metric'],
'context': pipeline_run['context'],
'normalized_score': scores['normalized']
} | 7ed003281eac240a407dac1d03a5e3f5a6e5b2cd | 2,174 |
import csv
def get_genes(path):
"""Returns a list of genes from a DE results table"""
with open(path) as gene_list:
gene_list = csv.reader(gene_list)
gene_list = [row[0] for row in gene_list if row[0].startswith('P')]
return gene_list | 9deed781edc0514348b7f6c2f6ac2d302f30295d | 2,175 |
import pathlib
def _top_level_package_filenames(tarball_paths):
"""Transform the iterable of npm tarball paths to the top-level files contained within the package."""
paths = []
for path in tarball_paths:
parts = pathlib.PurePath(path).parts
if parts[0] == "package" and len(parts) == 2:
paths.append(parts[1])
return frozenset(paths) | 6b9b825eff14fe2e40f33c2caac104cf9869b277 | 2,176 |
def save_network_to_path(interactions, path):
"""Save dataframe to a tab-separated file at path."""
return interactions.to_csv(path, sep='\t', index=False, na_rep=str(None)) | f189c6e8f7791f1f97c32847f03e0cc2e167ae90 | 2,177 |
def or_func(a, b):
    """Returns the logical OR of the two supplied values: ``a`` if it is
    truthy, otherwise ``b``."""
    return a or b | 0f90173e05910ebc7e81079d99bfdbbb1c0ee66b | 2,178 |
import re
def is_branch_or_version(string):
    """Tries to figure out if passed argument is branch or version.
    Returns 'branch', 'version', or None if deduction failed.
    Branch is either 'master' or something like 3.12.x;
    version is something like 3.12.5,
    optionally followed by letter (3.12.5b) for alpha/beta/gamma...zeta,
    optionally followed by release (3.12.5-2).
    """
    if string == "master" or re.match(r"3\.\d+\.x$", string):
        return "branch"
    if re.match(r"3\.\d+\.\d+[a-z]?(-\d+)?$", string):
        return "version"
    return None | 6a5ad7cb7af29b6ce0e39ff86171f0f230929fb3 | 2,180 |
def heur_best_from_now(state):
"""
    This heuristic computes the cost assuming all the remaining weight is put on the launch with the lowest variable cost.
@param state: state to compute the cost.
@return: cost
"""
try:
return min([launch.compute_variable_cost(state.left_weight()) for launch in state.launches[state.launch_nr:]])
except ValueError:
return 0 | 050c7c718ad849e8e7fc6892de7097c3bd0f83dd | 2,182 |
def get_ngram(text, ns=[1]):
    """
    Extract n-gram features from the text.
    :param text: str
    :return: list
    """
    if not isinstance(ns, list):
        raise RuntimeError("ns of function get_ngram() must be a list!")
    for n in ns:
        if n < 1:
            raise RuntimeError("every element of ns must be >= 1!")
    len_text = len(text)
    ngrams = []
    for n in ns:
        ngram_n = []
        for i in range(len_text):
            if i + n <= len_text:
                ngram_n.append(text[i:i + n])
            else:
                break
        if not ngram_n:
            ngram_n.append(text)
        ngrams += ngram_n
    return ngrams | 3826fcdce46b455762417528ac9f31a0552b5a04 | 2,183 |
def splitter(h):
    """ Splits dictionary numbers by the decimal point."""
    if isinstance(h, dict):
        for k, i in h.items():
            h[k] = str(i).split('.')
    if isinstance(h, list):
        for n in range(0, len(h)):
            h[n] = splitter(h[n])
    return h | 1eb5e38a02ce310a068d8c1c9df2790658722662 | 2,184 |
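A usage sketch for splitter; note that it mutates its argument in place and recurses into nested lists:
data = [{"a": 1.5, "b": 2}, {"c": 3.25}]
print(splitter(data))  # [{'a': ['1', '5'], 'b': ['2']}, {'c': ['3', '25']}]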
def CONTAINS_INTS_FILTER(arg_value):
"""Only keeps int sequences or int tensors."""
return arg_value.elem_type is int or arg_value.has_int_dtypes() | c4452c5e6bbd9ead32359d8638a6bf1e49b600ba | 2,185 |
import os
def token_bytes(nbytes=None):
    """Return a random byte string containing *nbytes* bytes.
    If *nbytes* is ``None`` or not supplied, a reasonable
    default is used.
    >>> token_bytes(16) #doctest:+SKIP
    b'\\xebr\\x17D*t\\xae\\xd4\\xe3S\\xb6\\xe2\\xebP1\\x8b'
    """
    if nbytes is None:
        # Fall back to the 32-byte default entropy used by the stdlib secrets module.
        nbytes = 32
    return os.urandom(nbytes) | 3750fb9ae0be2bc3f9b52a9c3caa3dc67a3a91d0 | 2,187 |
def sort_2metals(metals):
"""
Handles iterable or string of 2 metals and returns them
in alphabetical order
Args:
metals (str || iterable): two metal element names
Returns:
(tuple): element names in alphabetical order
"""
# return None's if metals is None
if metals is None:
return None, None
if isinstance(metals, str):
if len(metals) != 4:
raise ValueError('str can only have two elements.')
metal1, metal2 = sorted([metals[:2], metals[2:]])
else:
metal1, metal2 = sorted(metals)
return metal1.title(), metal2.title() | dab922797a6c7b94d6489d8fc4d9c1d99f3ee35c | 2,188 |
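A usage sketch for sort_2metals with illustrative element symbols:
print(sort_2metals("cuag"))        # ('Ag', 'Cu')
print(sort_2metals(("Pt", "Au")))  # ('Au', 'Pt')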
from datetime import datetime
def datetime_without_seconds(date: datetime) -> datetime:
"""
Returns given datetime with seconds and microseconds set to 0
"""
return date.replace(second=0, microsecond=0) | de30c7770d84751b555c78e045f37783030d8970 | 2,189 |
import six
def format_ratio(in_str, separator='/'):
""" Convert a string representing a rational value to a decimal value.
Args:
in_str (str): Input string.
separator (str): Separator character used to extract numerator and
denominator, if not found in ``in_str`` whitespace is used.
Returns:
An integer or float value with 2 digits precision or ``in_str`` if
        formatting has failed.
>>> format_ratio('48000/1')
48000
>>> format_ratio('24000 1000')
24
>>> format_ratio('24000 1001')
23.98
>>> format_ratio('1,77')
'1,77'
>>> format_ratio(1.77)
1.77
"""
if not isinstance(in_str, six.string_types):
return in_str
try:
sep = separator if separator in in_str else ' '
ratio = in_str.split(sep)
if len(ratio) == 2:
ratio = round(float(ratio[0]) / float(ratio[1]), 2)
else:
ratio = float(ratio[0])
if ratio.is_integer():
ratio = int(ratio)
return ratio
except ValueError:
return in_str | 308ec972df6e57e87e24c26e769311d652118aee | 2,190 |
def retrieve_features(dataframe):
"""
Retrieves features (X) from dataframe
:param dataframe:
:return:
"""
return list(dataframe["tweet"]) | 69118d6d0b9503500f6fa4b24fb844af4ff25644 | 2,191 |
from typing import Any
def get_artist_names(res: dict[str, Any]) -> str:
"""
Retrieves all artist names for a given input to the "album" key of a response.
"""
artists = []
for artist in res["artists"]:
artists.append(artist["name"])
artists_str = ", ".join(artists)
return artists_str | 2913c813e7e6097cb2cb3d3dfb84f831bbc0a6e7 | 2,195 |
from typing import Iterator
from typing import Tuple
from typing import Any
import itertools
def _nonnull_powerset(iterable) -> Iterator[Tuple[Any]]:
"""Returns powerset of iterable, minus the empty set."""
s = list(iterable)
return itertools.chain.from_iterable(
itertools.combinations(s, r) for r in range(1, len(s) + 1)) | ad02ab8ac02004adb54310bc639c6e2d84f19b02 | 2,196 |
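A usage sketch: every non-empty subset of a three-element list, smallest first:
print(list(_nonnull_powerset([1, 2, 3])))
# [(1,), (2,), (3,), (1, 2), (1, 3), (2, 3), (1, 2, 3)]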
import yaml
def _parse_obs_status_file(filename):
"""
Parse a yaml file and return a dictionary.
The dictionary will be of the form: {'obs': [], 'bad': [], 'mags: []}
:param filename:
:return:
"""
with open(filename) as fh:
status = yaml.load(fh, Loader=yaml.SafeLoader)
if 'obs' not in status:
status['obs'] = []
if 'bad' not in status:
status['bad'] = []
if 'mags' not in status:
status['mags'] = []
if hasattr(status['bad'], 'items'):
status['bad'] = list(status['bad'].items())
return status | 389fc921867367964001e5fc2f56a7fa7defd7c8 | 2,197 |
def printable_cmd(c):
"""Converts a `list` of `str`s representing a shell command to a printable
`str`."""
return " ".join(map(lambda e: '"' + str(e) + '"', c)) | b5e8a68fc535c186fdbadc8a669ed3dec0da3aee | 2,198 |
import itertools
def _enumerate_trees_w_leaves(n_leaves):
"""Construct all rooted trees with n leaves."""
def enumtree(*args):
n_args = len(args)
# trivial cases:
if n_args == 0:
return []
if n_args == 1:
return args
# general case of 2 or more args:
# build index array
idxs = range(0, n_args)
trees = []
# we consider all possible subsets of size n_set to gather
for n_set in range(2, n_args+1):
idxsets = list(itertools.combinations(idxs, n_set))
for idxset in idxsets:
# recurse by joining all subtrees with
# n_set leaves and (n_args - n_set) leaves
arg_set = tuple(args[i] for i in idxs if i in idxset)
arg_coset = tuple(args[i] for i in idxs if i not in idxset)
if arg_coset:
trees.extend(tuple(itertools.product(enumtree(*arg_set),
enumtree(*arg_coset))))
else:
# trivial case where arg_set is entire set
trees.append(arg_set)
return trees
# return enumerated trees with integers as leaves
return enumtree(*range(n_leaves)) | 574a2d3ec63d3aeeb06292ec361b83aebba0ff84 | 2,199 |
def part1(data):
    """Solve part 1"""
    countIncreased = 0
    prevItem = None
    for row in data:
        if prevItem is None:
            prevItem = row
            continue
        if prevItem < row:
            countIncreased += 1
        prevItem = row
    return countIncreased | e01b5edc9d9ac63a31189160d09b5e6e0f11e522 | 2,201 |
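A usage sketch with the example depth readings from Advent of Code 2021 day 1, where seven readings are larger than the previous one:
print(part1([199, 200, 208, 210, 200, 207, 240, 269, 260, 263]))  # 7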
def is_compiled_release(data):
"""
Returns whether the data is a compiled release (embedded or linked).
"""
return 'tag' in data and isinstance(data['tag'], list) and 'compiled' in data['tag'] | ea8c8ae4f1ccdedbcc145bd57bde3b6040e5cab5 | 2,202 |
def APIRevision():
"""Gets the current API revision to use.
Returns:
str, The revision to use.
"""
return 'v1beta3' | c748e1917befe76da449e1f435540e10ee433444 | 2,203 |
def divisor(baudrate):
"""Calculate the divisor for generating a given baudrate"""
CLOCK_HZ = 50e6
return round(CLOCK_HZ / baudrate) | a09eee716889ee6950f8c5bba0f31cdd2b311ada | 2,204 |
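A usage sketch: the divisor for a common UART baud rate, given the fixed 50 MHz clock assumed above:
print(divisor(115200))  # round(50e6 / 115200) == 434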
import pandas
def compute_balances(flows):
"""
Balances by currency.
:param flows:
:return:
"""
flows = flows.set_index('date')
flows_by_asset = flows.pivot(columns='asset', values='amount').apply(pandas.to_numeric)
balances = flows_by_asset.fillna(0).cumsum()
return balances | 98728c2c687df60194eb11b479c08fc90502807a | 2,205 |
def send(socket, obj, flags=0, protocol=-1):
    """stringify an object, and then send it"""
    s = str(obj)
    return socket.send_string(s, flags) | a89165565837ad4a984905d5b5fdd73e398b35fd | 2,206 |
def dist2_test(v1, v2, idx1, idx2, len2):
"""Square of distance equal"""
return (v1-v2).mag2() == len2 | 3a268a3ba704a91f83345766245a952fe5d943dd | 2,207 |
import functools
def cached(func):
"""Decorator cached makes the function to cache its result and return it in duplicate calls."""
prop_name = '__cached_' + func.__name__
@functools.wraps(func)
def _cached_func(self):
try:
return getattr(self, prop_name)
except AttributeError:
val = func(self)
setattr(self, prop_name, val)
return val
return _cached_func | 5b23c251c03160ba2c4e87848201be46ba2f34fb | 2,208 |
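A usage sketch for the cached decorator: the wrapped method body runs only once per instance.
class Dataset:
    @cached
    def stats(self):
        print("computing...")
        return {"mean": 0.5}

d = Dataset()
d.stats()  # prints "computing..." and returns the dict
d.stats()  # returns the stored value without recomputing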
def strip_extension(name: str) -> str:
"""
Remove a single extension from a file name, if present.
"""
last_dot = name.rfind(".")
if last_dot > -1:
return name[:last_dot]
else:
return name | 9dc1e3a3c9ad3251aba8a1b61f73de9f79f9a8be | 2,209 |
import os
def next_joystick_device():
"""Finds the next available js device name."""
for i in range(100):
dev = "/dev/input/js{0}".format(i)
if not os.path.exists(dev):
return dev | 56e1b859fd26e546e7a63cbf2764b78c2cd41990 | 2,210 |
import platform
def is_windows():
""" détermine si le système actuel est windows """
return platform.system().lower() == "windows" | fc9e2ca948f7cc5dc6b6cc9afb52ba701222bb7a | 2,211 |
import re
def grapheme_to_phoneme(text, g2p, lexicon=None):
"""Converts grapheme to phoneme"""
phones = []
words = filter(None, re.split(r"(['(),:;.\-\?\!\s+])", text))
for w in words:
if lexicon is not None and w.lower() in lexicon:
phones += lexicon[w.lower()]
else:
phones += list(filter(lambda p: p != " ", g2p(w)))
return phones | 2bb5195a323aa712b2725851fdde64b8e38856f0 | 2,212 |
import pytz
from datetime import datetime
async def get_time():
"""获取服务器时间
"""
tz = pytz.timezone('Asia/Shanghai')
return {
'nowtime': datetime.now(),
'utctime': datetime.utcnow(),
'localtime': datetime.now(tz)
} | 282eb1136713df8045c6ad5f659042484fe4ec8b | 2,213 |
import re
def is_untweeable(html):
"""
    I'm not sure at the moment what constitutes untweeable HTML, but if we don't find the divs
    that TiddlyWiki uses for its store area, that is a blocker.
"""
# the same regex used in tiddlywiki
divs_re = re.compile(
r'<div id="storeArea"(.*)</html>',
re.DOTALL
)
return bool(divs_re.search(html)) | face6c6d30b6e26ffa3344ed8e42ed7d44cf2ea5 | 2,214 |
def remove_arm(frame):
"""
Removes the human arm portion from the image.
"""
##print("Removing arm...")
# Cropping 15 pixels from the bottom.
height, width = frame.shape[:2]
frame = frame[:height - 15, :]
##print("Done!")
return frame | 99b998da87f1aa2eca0a02b67fc5adc411603ee4 | 2,216 |
import torch
def load_model(file_path, *, epoch, model, likelihood, mll, optimizer, loss):
    """Load a saved model checkpoint.
    Parameters
    ----------
    file_path : str
        Path and file name of the saved model.
    epoch : int
        Current epoch number.
    model : :obj:`gpytorch.models`
        Trained model object.
    likelihood : :obj:`gpytorch.likelihoods`
        Trained likelihood object.
    mll : :obj:`gpytorch.mlls`
        Trained marginal log likelihood object.
    optimizer : :obj:`torch.optim`
        Trained optimizer object.
    loss : list
        Loss history up to the current epoch.
    Returns
    -------
    epoch : int
        Epoch number restored from the checkpoint.
    model : :obj:`gpytorch.models`
        Model object with restored state.
    likelihood : :obj:`gpytorch.likelihoods`
        Likelihood object with restored state.
    mll : :obj:`gpytorch.mlls`
        Marginal log likelihood object with restored state.
    optimizer : :obj:`torch.optim`
        Optimizer object with restored state.
    loss : list
        Loss history restored from the checkpoint.
    """
    temp = torch.load(file_path)
    epoch = temp['epoch']
    model.load_state_dict(temp['model'])
    likelihood.load_state_dict(temp['likelihood'])
    mll.load_state_dict(temp['mll'])
    optimizer.load_state_dict(temp['optimizer'])
    loss = temp['loss']
    return epoch, model, likelihood, mll, optimizer, loss | ccc7f221164d89ed29326f720becd29e3442c52b | 2,217 |
import re
def valid_account_id(log, account_id):
"""Validate account Id is a 12 digit string"""
if not isinstance(account_id, str):
log.error("supplied account id {} is not a string".format(account_id))
return False
id_re = re.compile(r'^\d{12}$')
if not id_re.match(account_id):
log.error("supplied account id '{}' must be a 12 digit number".format(account_id))
return False
return True | 30f3aa9547f83c4bea53041a4c79ba1242ae4754 | 2,218 |
def _GetRemoteFileID(local_file_path):
"""Returns the checked-in hash which identifies the name of file in GCS."""
hash_path = local_file_path + '.sha1'
with open(hash_path, 'rb') as f:
return f.read(1024).rstrip() | 4a06dcdd30e379891fe3f9a5b3ecc2c4fd1a98ed | 2,219 |
def is_color_rgb(color):
"""Is a color in a valid RGB format.
Parameters
----------
color : obj
The color object.
Returns
-------
bool
True, if the color object is in RGB format.
False, otherwise.
Examples
--------
>>> color = (255, 0, 0)
>>> is_color_rgb(color)
True
>>> color = (1.0, 0.0, 0.0)
>>> is_color_rgb(color)
True
>>> color = (1.0, 0, 0)
>>> is_color_rgb(color)
False
>>> color = (255, 0.0, 0.0)
>>> is_color_rgb(color)
False
>>> color = (256, 0, 0)
>>> is_color_rgb(color)
False
"""
if isinstance(color, (tuple, list)):
if len(color) == 3:
if all(isinstance(c, float) for c in color):
if all(c >= 0.0 and c <= 1.0 for c in color):
return True
elif all(isinstance(c, int) for c in color):
if all(c >= 0 and c <= 255 for c in color):
return True
return False | 46b8241d26fa19e4372587ffebda3690972c3395 | 2,220 |
import re
def to_latin(name):
"""Convert all symbols to latin"""
symbols = (u"іїєабвгдеёжзийклмнопрстуфхцчшщъыьэюяІЇЄАБВГДЕЁЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЫЬЭЮЯ",
u"iieabvgdeejzijklmnoprstufhzcss_y_euaIIEABVGDEEJZIJKLMNOPRSTUFHZCSS_Y_EUA")
tr = {ord(a): ord(b) for a, b in zip(*symbols)}
translated_name = name.translate(tr)
translated_name = re.sub("[^A-Za-z0-9]", "_", translated_name)
return translated_name | 06a0d535fa7a74feea33e58815da2792a6026def | 2,221 |
import os
def get_file_names(directory, prefix='', suffix='', nesting=True):
"""
Returns list of all files in directory
Args:
directory (str): the directory of interest
prefix (str): if provided, files returned must start with this
suffix (str): if provided, files returned must end with this
nesting (bool): if True, looks in all subdirectories of dir. If false, only looks at top-level.
"""
l = []
for path, subdirs, files in os.walk(directory):
for name in files:
if name.startswith(prefix) and name.endswith(suffix) and (nesting or (path == directory)):
l.append(os.path.join(path, name))
return l | b35d6a17ac93674a073076c36c0b84ba1361210b | 2,223 |
import random
def reservoir_sampling(items, k):
    """
    Reservoir sampling algorithm for a large sample space or a list of unknown length
    See <http://en.wikipedia.org/wiki/Reservoir_sampling> for detail
    Type: ([a] * Int) -> [a]
    Precondition: k is positive and items has at least k items
    Postcondition: the length of the returned array is k
    """
    sample = items[0:k]
    for i in range(k, len(items)):
        # Item i replaces a random reservoir slot with probability k / (i + 1).
        j = random.randrange(0, i + 1)
        if j < k:
            sample[j] = items[i]
    return sample | ab2d0dc2bb3cb399ae7e6889f028503d165fbbe4 | 2,224 |
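A usage sketch (relies on the random import above; the seed is illustrative, for reproducibility only):
random.seed(0)
print(reservoir_sampling(list(range(10)), 3))  # a uniform 3-item sample of the population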
def fprime_to_jsonable(obj):
"""
Takes an F prime object and converts it to a jsonable type.
:param obj: object to convert
:return: object in jsonable format (can call json.dump(obj))
"""
# Otherwise try and scrape all "get_" getters in a smart way
anonymous = {}
getters = [attr for attr in dir(obj) if attr.startswith("get_")]
for getter in getters:
# Call the get_ functions, and call all non-static methods
try:
func = getattr(obj, getter)
item = func()
# If there is a property named "args" it needs to be handled specifically unless an incoming command
            if (
                getter == "get_args"
                and "fprime_gds.common.data_types.cmd_data.CmdData"
                not in str(type(obj))
            ):
args = []
for arg_spec in item:
arg_dict = {
"name": arg_spec[0],
"description": arg_spec[1],
"value": arg_spec[2].val,
"type": str(arg_spec[2]),
}
if arg_dict["type"] == "Enum":
arg_dict["possible"] = arg_spec[2].keys()
args.append(arg_dict)
# Fill in our special handling
item = args
anonymous[getter.replace("get_", "")] = item
except TypeError:
continue
return anonymous | 899674167b51cd752c7a8aaa9979856218759022 | 2,225 |
def update_cache(cache_data, new_data, key):
"""
Add newly collected data to the pre-existing cache data
Args:
cache_data (dict): Pre-existing chip data
new_data (dict): Newly acquired chip data
key (str): The chip UL coordinates
    Returns:
        cache_data (dict): The updated chip data cache
    """
if key in cache_data.keys():
cache_data[key].update(new_data[key])
else:
cache_data[key] = new_data[key]
return cache_data | f439f34d1e95ccd69dc10d5f8c06ca20fc869b1e | 2,226 |
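A usage sketch with illustrative chip keys and band data:
cache = {"0,0": {"red": [1, 2]}}
new = {"0,0": {"green": [3, 4]}}
print(update_cache(cache, new, "0,0"))
# {'0,0': {'red': [1, 2], 'green': [3, 4]}}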
from typing import Dict
def chain_head(head: int, child: int, heads: Dict[int, int]):
"""
>>> chain_head(0, 2, {1: 2, 2: 3, 3: 0})
True
>>> chain_head(2, 0, {1: 2, 2: 3, 3: 0})
False
"""
curr_child = child
while curr_child != -1:
if curr_child == head:
return True
curr_child = heads.get(curr_child, -1)
return False | d786d3dbbdc496a1a7515d9df04fa2a09968b87d | 2,228 |
import ipaddress
import logging
def _get_ip_block(ip_block_str):
""" Convert string into ipaddress.ip_network. Support both IPv4 or IPv6
addresses.
Args:
ip_block_str(string): network address, e.g. "192.168.0.0/24".
Returns:
ip_block(ipaddress.ip_network)
"""
try:
ip_block = ipaddress.ip_network(ip_block_str)
except ValueError:
logging.error("Invalid IP block format: %s", ip_block_str)
return None
return ip_block | b887c615091926ed7ebbbef8870e247348e2aa27 | 2,229 |
def mul_ntt(f_ntt, g_ntt, q):
"""Multiplication of two polynomials (coefficient representation)."""
assert len(f_ntt) == len(g_ntt)
deg = len(f_ntt)
return [(f_ntt[i] * g_ntt[i]) % q for i in range(deg)] | 504838bb812792b6bb83b1d485e4fb3221dec36e | 2,230 |
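A usage sketch: a pointwise product of two length-4 NTT vectors modulo a small illustrative prime:
q = 12289
print(mul_ntt([1, 2, 3, 4], [5, 6, 7, 8], q))  # [5, 12, 21, 32]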
def _expected_datatypes(product_type):
"""
Aux function. Contains the most current lists of keys we expect there to be in the different forms of metadata.
"""
if product_type == "SLC":
# Only the datetimes need to be parsed.
expected_dtypes = {
"acquisition_start_utc": "parse_datetime_single",
"acquisition_end_utc": "parse_datetime_single",
"dc_estimate_time_utc": "parse_datetime_single",
"first_pixel_time_utc": "parse_datetime_single",
"state_vector_time_utc": "parse_datetime_vect",
"zerodoppler_start_utc": "parse_datetime_single",
"zerodoppler_end_utc": "parse_datetime_single",
}
elif product_type == "GRD":
# All the fields need to be parsed, so all the datatypes are input.
expected_dtypes = {
"acquisition_end_utc": "parse_datetime_single", # single datetime
"acquisition_mode": str,
"acquisition_prf": float,
"acquisition_start_utc": str,
"ant_elev_corr_flag": bool,
"area_or_point": str,
"avg_scene_height": float,
"azimuth_spacing": float,
"azimuth_look_bandwidth": float,
"azimuth_look_overlap": float,
"azimuth_looks": int,
"azimuth_time_interval": float,
"calibration_factor": float,
"carrier_frequency": float,
"chirp_bandwidth": float,
"chirp_duration": float,
"coord_center": "parse_float_vect", # 1d vect of floats, needs to be parsed
"coord_first_far": "parse_float_vect",
"coord_first_near": "parse_float_vect",
"coord_last_far": "parse_float_vect",
"coord_last_near": "parse_float_vect",
"dc_estimate_coeffs": "parse_float_vect",
"dc_estimate_poly_order": int,
"dc_estimate_time_utc": "parse_datetime_vect", # datetime vector
"dc_reference_pixel_time": float,
"doppler_rate_coeffs": "parse_float_vect",
"doppler_rate_poly_order": int,
"doppler_rate_reference_pixel_time": float,
"gcp_terrain_model": str,
"geo_ref_system": str,
"grsr_coefficients": "parse_float_vect",
"grsr_ground_range_origin": float,
"grsr_poly_order": int,
"grsr_zero_doppler_time": "parse_datetime_single", # single datetime
"heading": float,
"incidence_angle_coefficients": "parse_float_vect",
"incidence_angle_ground_range_origin": float,
"incidence_angle_poly_order": int,
"incidence_angle_zero_doppler_time": "parse_datetime_single", # single datetime
"incidence_center": float,
"incidence_far": float,
"incidence_near": float,
"look_side": str,
"mean_earth_radius": float,
"mean_orbit_altitude": float,
"number_of_azimuth_samples": int,
"number_of_dc_estimations": int,
"number_of_range_samples": int,
"number_of_state_vectors": int,
"orbit_absolute_number": int,
"orbit_direction": str,
"orbit_processing_level": str,
"orbit_relative_number": int,
"orbit_repeat_cycle": int,
"polarization": str,
"posX": "parse_float_vect",
"posY": "parse_float_vect",
"posZ": "parse_float_vect",
"processing_prf": float,
"processing_time": "parse_datetime_single", # single datetime
"processor_version": str,
"product_file": str,
"product_level": str,
"product_name": str,
"product_type": str,
"range_looks": int,
"range_sampling_rate": float,
"range_spacing": float,
"range_spread_comp_flag": bool,
"sample_precision": str,
"satellite_look_angle": str,
"satellite_name": str,
"slant_range_to_first_pixel": float,
"state_vector_time_utc": "parse_datetime_vect", # 1d vect of datetimes, need to be parsed.
"total_processed_bandwidth_azimuth": float,
"velX": "parse_float_vect",
"velY": "parse_float_vect",
"velZ": "parse_float_vect",
"window_function_azimuth": str,
"window_function_range": str,
"zerodoppler_end_utc": "parse_datetime_single", # single datetime
"zerodoppler_start_utc": "parse_datetime_single", # single datetime
}
elif product_type == "xml":
raise NotImplementedError
elif not isinstance(product_type, str):
raise TypeError(
'Did not understand input "product_type", a str was expected but a %s datatype variable was input.'
% type(product_type)
)
else:
raise ValueError(
'Did not understand input "product_type", either "SLC", "GRD" or "xml" was expected but %s was input.'
% product_type
)
return expected_dtypes | ea5a2d78bc5693259955e60847de7a663dcdbf2c | 2,231 |
def integrate(que):
"""
    check whether a block spans a neighbouring cell and merge the cells into one block
@param que: init blocks
@type que: deque
@return: integrated block
@rtype: list
"""
blocks = []
t1, y, x = que.popleft()
blocks.append([y, x])
if t1 == 2:
blocks.append([y, x + 1])
elif t1 == 3:
blocks.append([y + 1, x])
return blocks | a91235f34e1151b6dd9c6c266658cca86b375278 | 2,232 |
def func_split_item(k):
""" Computes the expected value and variance of the splitting item random variable S.
Computes the expression (26b) and (26c) in Theorem 8. Remember that r.v. S is the value of index s
such that $\sum_{i=1}^{s-1} w(i) \leq k$ and $\sum_{i=1}^s w(i) > k$.
Args:
k: Int. The capacity of the Knapsack Problem instance.
Returns:
s: float. The expected value of the splitting item random variable.
var_split: float. The variance of the splitting item random variable.
"""
b = 1 + 1 / k # Defining a cumbersome base
    s = (1 + 1 / k) ** k  # Computing the expected value of the split item
var_split = (3 + 1 / k) * b ** (k - 1) - b ** (2 * k) # Computing the variance of the split item
return s, var_split | 84ec7f4d76ced51ebdbd28efdc252b5ff3809e79 | 2,233 |
def eq(equation: str) -> int:
"""Evaluate the equation."""
code = compile(equation, "<string>", "eval")
return eval(code) | 5e88cad8009dc3dcaf36b216fa217fbadfaa50b3 | 2,234 |
from datetime import datetime
def parse_date(datestr):
""" Given a date in xport format, return Python date. """
return datetime.strptime(datestr, "%d%b%y:%H:%M:%S") | b802a528418a24300aeba3e33e9df8a268f0a27b | 2,235 |
def get_num_forces(cgmodel):
"""
Given a CGModel() class object, this function determines how many forces we are including when evaluating the energy.
:param cgmodel: CGModel() class object
:type cgmodel: class
:returns:
- total_forces (int) - Number of forces in the coarse grained model
:Example:
>>> from foldamers.cg_model.cgmodel import CGModel
>>> cgmodel = CGModel()
>>> total_number_forces = get_num_forces(cgmodel)
"""
total_forces = 0
if cgmodel.include_bond_forces:
total_forces = total_forces + 1
if cgmodel.include_nonbonded_forces:
total_forces = total_forces + 1
if cgmodel.include_bond_angle_forces:
total_forces = total_forces + 1
if cgmodel.include_torsion_forces:
total_forces = total_forces + 1
return total_forces | 5f5b897f1b0def0b858ca82319f9eebfcf75454a | 2,236 |
def initialize_stat_dict():
"""Initializes a dictionary which will hold statistics about compositions.
Returns:
A dictionary containing the appropriate fields initialized to 0 or an
empty list.
"""
stat_dict = dict()
for lag in [1, 2, 3]:
stat_dict['autocorrelation' + str(lag)] = []
stat_dict['notes_not_in_key'] = 0
stat_dict['notes_in_motif'] = 0
stat_dict['notes_in_repeated_motif'] = 0
stat_dict['num_starting_tonic'] = 0
stat_dict['num_repeated_notes'] = 0
stat_dict['num_octave_jumps'] = 0
stat_dict['num_fifths'] = 0
stat_dict['num_thirds'] = 0
stat_dict['num_sixths'] = 0
stat_dict['num_seconds'] = 0
stat_dict['num_fourths'] = 0
stat_dict['num_sevenths'] = 0
stat_dict['num_rest_intervals'] = 0
stat_dict['num_special_rest_intervals'] = 0
stat_dict['num_in_key_preferred_intervals'] = 0
stat_dict['num_resolved_leaps'] = 0
stat_dict['num_leap_twice'] = 0
stat_dict['num_high_unique'] = 0
stat_dict['num_low_unique'] = 0
return stat_dict | 42a10b93a960663a42260e1a77d0e8f5a4ff693a | 2,237 |
import os
import sys
def ircelsos_data_dir():
"""Get the data directory
Adapted from jupyter_core
"""
home = os.path.expanduser('~')
if sys.platform == 'darwin':
return os.path.join(home, 'Library', 'ircelsos')
elif os.name == 'nt':
appdata = os.environ.get('APPDATA', os.path.join(home, '.local', 'share'))
return os.path.join(appdata, 'ircelsos')
else:
# Linux, non-OS X Unix, AIX, etc.
xdg = os.environ.get("XDG_DATA_HOME", os.path.join(home, '.local', 'share'))
return os.path.join(xdg, 'ircelsos') | a8c79f3dde6d87aec8c79bd7d35f6fbab19fccd8 | 2,238 |
def get_shodan_dicts():
"""Build Shodan dictionaries that hold definitions and naming conventions."""
risky_ports = [
"ftp",
"telnet",
"http",
"smtp",
"pop3",
"imap",
"netbios",
"snmp",
"ldap",
"smb",
"sip",
"rdp",
"vnc",
"kerberos",
]
name_dict = {
"ftp": "File Transfer Protocol",
"telnet": "Telnet",
"http": "Hypertext Transfer Protocol",
"smtp": "Simple Mail Transfer Protocol",
"pop3": "Post Office Protocol 3",
"imap": "Internet Message Access Protocol",
"netbios": "Network Basic Input/Output System",
"snmp": "Simple Network Management Protocol",
"ldap": "Lightweight Directory Access Protocol",
"smb": "Server Message Block",
"sip": "Session Initiation Protocol",
"rdp": "Remote Desktop Protocol",
"kerberos": "Kerberos",
}
risk_dict = {
"ftp": "FTP",
"telnet": "Telnet",
"http": "HTTP",
"smtp": "SMTP",
"pop3": "POP3",
"imap": "IMAP",
"netbios": "NetBIOS",
"snmp": "SNMP",
"ldap": "LDAP",
"smb": "SMB",
"sip": "SIP",
"rdp": "RDP",
"vnc": "VNC",
"kerberos": "Kerberos",
}
# Create dictionaries for CVSSv2 vector definitions using https://nvd.nist.gov/vuln-metrics/cvss/v3-calculator
av_dict = {
"NETWORK": "A vulnerability exploitable with network access means the vulnerable software is bound to the network stack and the attacker does not require local network access or local access. Such a vulnerability is often termed “remotely exploitable”. An example of a network attack is an RPC buffer overflow.",
"ADJACENT_NETWORK": "A vulnerability exploitable with adjacent network access requires the attacker to have access to either the broadcast or collision domain of the vulnerable software. Examples of local networks include local IP subnet, Bluetooth, IEEE 802.11, and local Ethernet segment.",
"LOCAL": "A vulnerability exploitable with only local access requires the attacker to have either physical access to the vulnerable system or a local (shell) account. Examples of locally exploitable vulnerabilities are peripheral attacks such as Firewire/USB DMA attacks, and local privilege escalations (e.g., sudo).",
}
ac_dict = {
"LOW": "Specialized access conditions or extenuating circumstances do not exist. The following are examples: The affected product typically requires access to a wide range of systems and users, possibly anonymous and untrusted (e.g., Internet-facing web or mail server). The affected configuration is default or ubiquitous. The attack can be performed manually and requires little skill or additional information gathering. The 'race condition' is a lazy one (i.e., it is technically a race but easily winnable).",
"MEDIUM": "The access conditions are somewhat specialized; the following are examples: The attacking party is limited to a group of systems or users at some level of authorization, possibly untrusted. Some information must be gathered before a successful attack can be launched. The affected configuration is non-default, and is not commonly configured (e.g., a vulnerability present when a server performs user account authentication via a specific scheme, but not present for another authentication scheme). The attack requires a small amount of social engineering that might occasionally fool cautious users (e.g., phishing attacks that modify a web browser’s status bar to show a false link, having to be on someone’s “buddy” list before sending an IM exploit).",
"HIGH": "Specialized access conditions exist. For example, in most configurations, the attacking party must already have elevated privileges or spoof additional systems in addition to the attacking system (e.g., DNS hijacking). The attack depends on social engineering methods that would be easily detected by knowledgeable people. For example, the victim must perform several suspicious or atypical actions. The vulnerable configuration is seen very rarely in practice. If a race condition exists, the window is very narrow.",
}
ci_dict = {
"NONE": "There is no impact to the confidentiality of the system",
"PARTIAL": "There is considerable informational disclosure. Access to some system files is possible, but the attacker does not have control over what is obtained, or the scope of the loss is constrained. An example is a vulnerability that divulges only certain tables in a database.",
"COMPLETE": "There is total information disclosure, resulting in all system files being revealed. The attacker is able to read all of the system's data (memory, files, etc.).",
}
return risky_ports, name_dict, risk_dict, av_dict, ac_dict, ci_dict | 2aace61b8339db848e95758fcb9f30856915d6fc | 2,239 |
def _passthrough_zotero_data(zotero_data):
"""
Address known issues with Zotero metadata.
Assumes zotero data should contain a single bibliographic record.
"""
if not isinstance(zotero_data, list):
raise ValueError('_passthrough_zotero_data: zotero_data should be a list')
if len(zotero_data) > 1:
# Sometimes translation-server creates multiple data items for a single record.
# If so, keep only the parent item, and remove child items (such as notes).
# https://github.com/zotero/translation-server/issues/67
zotero_data = zotero_data[:1]
return zotero_data | cec2271a7a966b77e2d380686ecccc0307f78116 | 2,240 |
def root(tmpdir):
"""Return a pytest temporary directory"""
return tmpdir | 9fa01d67461f8ce1e3d3ad900cf8a893c5a075aa | 2,241 |
def ignore_ip_addresses_rule_generator(ignore_ip_addresses):
"""
generate tshark rule to ignore ip addresses
Args:
ignore_ip_addresses: list of ip addresses
Returns:
rule string
"""
rules = []
for ip_address in ignore_ip_addresses:
rules.append("-Y ip.dst != {0}".format(ip_address))
return rules | 3ac43f28a4c8610d4350d0698d93675572d6ba44 | 2,242 |
def remove_fallen(lst):
"""removes fallen orcs from a list"""
return [x for x in lst if x.standing] | 9e621321909dc7aa13da3d2a7902bb4604ae62f6 | 2,243 |
def get_avg_no_of_feat_values(contents):
"""
Helper to calculate numbers of different values
of categorical features, averaged for all features
"""
total = 0
for i in range(0, len(contents[0])):
total += len(set([x[i] for x in contents]))
return float(total) / float(len(contents[0])) | 4e913298d7f133eb08afe23e4999f5b20f455dc1 | 2,244 |
def return_true():
"""Return True
Simple function used to check liveness of workers.
"""
return True | 3c4b469ce28aef47723a911071f01bea9eb4cf27 | 2,245 |
import os
def create_list(input_list):
"""Construct the list of items to turn into a table. File and string inputs supported"""
if os.path.isfile(input_list):
with open(input_list, 'r', encoding='UTF-8') as ifile:
return [line.rstrip() for line in ifile]
return input_list.split(',') | 7158fa8241ae6328f931f4e7a9dfe08f3d12c6a2 | 2,246 |
def filter_input(self, forced=False, context=None):
"""
Passes each hunk (file or code) to the 'input' methods
of the compressor filters.
"""
content = []
for hunk in self.hunks(forced, context=context):
content.append(hunk)
return content | 1ea0ac16cf1e20732ad8c37b6126c80fe94d2ee5 | 2,247 |
def mat_stretch(mat, target):
"""
Changes times of `mat` in-place so that it has the same average BPM and
initial time as target.
Returns `mat` changed in-place.
"""
in_times = mat[:, 1:3]
out_times = target[:, 1:3]
# normalize in [0, 1]
in_times -= in_times.min()
in_times /= in_times.max()
# restretch
new_start = out_times.min()
in_times *= (out_times.max() - new_start)
in_times += new_start
return mat | 204efb1d8a19c7efe0efb5710add62436a4b5cee | 2,248 |
def parse_range(cpu_range):
"""Create cpu range object"""
if '-' in cpu_range:
[x, y] = cpu_range.split('-') # pylint: disable=invalid-name
cpus = range(int(x), int(y)+1)
if int(x) >= int(y):
raise ValueError("incorrect cpu range: " + cpu_range)
else:
cpus = [int(cpu_range)]
return cpus | 51079648ffddbcba6a9699db2fc4c04c7c3e3202 | 2,249 |
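A usage sketch for parse_range:
print(parse_range("2-5"))  # range(2, 6)
print(parse_range("7"))    # [7]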
from typing import Optional
from typing import Tuple
import crypt
def get_password_hash(password: str, salt: Optional[str] = None) -> Tuple[str, str]:
"""Get user password hash."""
salt = salt or crypt.mksalt(crypt.METHOD_SHA256)
return salt, crypt.crypt(password, salt) | ea3d7e0d8c65e23e40660b8921aa872dc9e2f53c | 2,251 |
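A usage sketch (assumes a Unix platform where the crypt module is available): a password is verified by re-hashing it with the stored salt.
salt, hashed = get_password_hash("s3cret")
_, rehashed = get_password_hash("s3cret", salt)
assert rehashed == hashed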
def trim_datasets_using_par(data, par_indexes):
"""
Removes all the data points needing more fitting parameters than available.
"""
parameters_to_fit = set(par_indexes.keys())
trimmed_data = list()
for data_point in data:
if data_point.get_fitting_parameter_names() <= parameters_to_fit:
trimmed_data.append(data_point)
return trimmed_data | 5a06f7f5662fb9d7b5190e0e75ba41c858a85d0b | 2,252 |
import os
def is_valid_file(parser, filename):
"""Check if file exists, and return the filename"""
if not os.path.exists(filename):
parser.error("The file %s does not exist!" % filename)
else:
return filename | 4e9e2a49749c65fd5457578fd162baf350b94fe3 | 2,254 |
def rewrite_elife_funding_awards(json_content, doi):
""" rewrite elife funding awards """
# remove a funding award
if doi == "10.7554/eLife.00801":
for i, award in enumerate(json_content):
if "id" in award and award["id"] == "par-2":
del json_content[i]
# add funding award recipient
if doi == "10.7554/eLife.04250":
recipients_for_04250 = [
{
"type": "person",
"name": {"preferred": "Eric Jonas", "index": "Jonas, Eric"},
}
]
for i, award in enumerate(json_content):
if "id" in award and award["id"] in ["par-2", "par-3", "par-4"]:
if "recipients" not in award:
json_content[i]["recipients"] = recipients_for_04250
# add funding award recipient
if doi == "10.7554/eLife.06412":
recipients_for_06412 = [
{
"type": "person",
"name": {"preferred": "Adam J Granger", "index": "Granger, Adam J"},
}
]
for i, award in enumerate(json_content):
if "id" in award and award["id"] == "par-1":
if "recipients" not in award:
json_content[i]["recipients"] = recipients_for_06412
return json_content | aba819589e50bc847d56a0f5a122b2474425d39c | 2,256 |
def dimensionState(moons,dimension):
"""returns the state for the given dimension"""
result = list()
for moon in moons:
result.append((moon.position[dimension],moon.velocity[dimension]))
return result | e67a37e4a1556d637be74992fc3801ee56f0e6f9 | 2,260 |
def _summary(function):
"""
Derive summary information from a function's docstring or name. The summary is the first
    sentence of the docstring, ending in a period, or if no docstring is present, the
function's name capitalized.
"""
if not function.__doc__:
return f"{function.__name__.capitalize()}."
result = []
for word in function.__doc__.split():
result.append(word)
if word.endswith("."):
break
return " ".join(result) | a3e3e45c3004e135c2810a5ec009aa78ef7e7a04 | 2,261 |
def sort_nesting(list1, list2):
"""Takes a list of start points and end points and sorts the second list according to nesting"""
temp_list = []
while list2 != temp_list:
temp_list = list2[:] # Make a copy of list2 instead of reference
for i in range(1, len(list1)):
if list2[i] > list2[i-1] and list1[i] < list2[i-1]:
list2[i-1], list2[i] = list2[i], list2[i-1]
return list2 | 11693e54eeba2016d21c0c23450008e823bdf1c1 | 2,262 |
def statementTVM(pReact):
"""Use this funciton to produce the TVM statemet"""
T,V,mass = pReact.T,pReact.volume,pReact.mass
statement="\n{}: T: {:0.2f} K, V: {:0.2f} m^3, mass: {:0.2f} kg".format(pReact.name,T,V,mass)
return statement | cda356678d914f90d14905bdcadf2079c9ebfbea | 2,263 |
def remove_namespace(tag, ns):
"""Remove namespace from xml tag."""
for n in ns.values():
tag = tag.replace('{' + n + '}', '')
return tag | d4837a3d906baf8e439806ccfea76284e8fd9b87 | 2,266 |
def compareTo(s1, s2):
"""Compares two strings to check if they are the same length and whether one is longer
than the other"""
move_slice1 = 0
move_slice2 = 1
if s1[move_slice1:move_slice2] == '' and s2[move_slice1:move_slice2] == '':
return 0 # return 0 if same length
elif s1[move_slice1:move_slice2] == '' and s2[move_slice1:move_slice2] != '':
return len(s2) * -1 # return negative number if s2 > s1
elif s1[move_slice1:move_slice2] != '' and s2[move_slice1:move_slice2] == '':
return len(s1) # return positive number if s1 > s2
else:
move_slice1 += 1 # with each new call, the next object in the string is checked if empty or not
move_slice2 += 1
return compareTo(s1[1:], s2[1:]) | 4700360d10561227a6d4995c66953993dce1cea3 | 2,267 |
def is_unique(x):
    """Check that the given list x has no duplicate
    Args:
        x (list): elements to be compared
    Returns:
        boolean: tells if there are only unique values or not
    """
    # A set cannot contain any duplicate, so we just check that the length of
    # the list is the same as the length of the corresponding set.
    return len(x) == len(set(x)) | 12b4513a71fc1b423366de3f48dd9e21db79e73a | 2,268 |
def str2format(fmt, ignore_types=None):
"""Convert a string to a list of formats."""
ignore_types = ignore_types if ignore_types else ()
token_to_format = {
"s": "",
"S": "",
"d": "g",
"f": "f",
"e": "e",
}
base_fmt = "{{:{}}}"
out = []
for i, token in enumerate(fmt.split(",")):
n = token[:-1]
if i in ignore_types:
out.append(base_fmt.format(n.split(".")[0]))
elif token[-1].lower() == "s":
out.append(base_fmt.format("{}.{}".format(n, n)))
else:
out.append(base_fmt.format(">{}{}".format(n, token_to_format[token[-1]])))
return out | 9cbe719abe6b37a0adcd52af250dfe768f850ffa | 2,269 |
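A usage sketch: format specs for a 10-character string column and a right-aligned 12-character float column:
specs = str2format("10s,12f")
print([spec.format(val) for spec, val in zip(specs, ["hydrogen", 1.008])])
# ['hydrogen  ', '    1.008000']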
def auth_token_required(func):
"""Your auth here"""
return func | e65b94d40c914c57ff8d894409b664cf97aa790d | 2,273 |
import subprocess
def silent_popen(args, **kwargs):
"""Wrapper for subprocess.Popen with suppressed output.
    STDERR is redirected to STDOUT which is piped back to the
calling process and returned as the result.
"""
return subprocess.Popen(args,
stderr=subprocess.STDOUT,
stdout=subprocess.PIPE, **kwargs).communicate()[0] | 5075a3ea1891ad3c237d5b05b474f563005ff48f | 2,275 |
def number_field_choices(field):
"""
Given a field, returns the number of choices.
"""
try:
return len(field.get_flat_choices())
except AttributeError:
return 0 | b8776e813e9eb7471a480df9d6e49bfeb48a0eb6 | 2,276 |
def resize_image(image, size):
"""
Resize the image to fit in the specified size.
:param image: Original image.
:param size: Tuple of (width, height).
:return: Resized image.
:rtype: :py:class: `~PIL.Image.Image`
"""
image.thumbnail(size)
return image | 67db04eac8a92d27ebd3ec46c4946b7662f9c03f | 2,277 |