content (string, 39 to 14.9k chars) | sha1 (string, 40 chars) | id (int64, 0 to 710k)
---|---|---|
import jinja2
def render_template(authors, configuration):
"""
Renders a template in `adoc`, `html`, `md`,
`rst`, or `txt` format.
Parameters
----------
authors : list
The authors to include in the rendered
template.
configuration : dict
Configuration settings relevant to the
rendered template (`heading`, `opening`,
and `closing`).
Returns
-------
str
The rendered template.
"""
loader = jinja2.PackageLoader("authors", "templates")
environment = jinja2.Environment(
loader=loader,
lstrip_blocks=True,
trim_blocks=True
)
source_file = "template.{}".format(configuration["kind"])
template = environment.get_template(source_file)
return template.render(authors=authors, **configuration) | 879693831529fb1786a04df3df1173601adebd63 | 10,975 |
def get_body_part_colour(shot):
"""
Decide the colour of a plot element based on the shooter's body part.
"""
body_part = shot['shot']['body_part']['name']
if body_part == 'Right Foot':
return 'orange'
if body_part == 'Left Foot':
return 'red'
if body_part == 'Head':
return 'magenta'
# Else, body part == "Other"
return 'cyan' | 89a6187840ee2f830f2c07579a555fc1944ea086 | 10,980 |
def _HasOption(_, option):
"""Validate the option exists in the config file.
Args:
option: string, the config option to check.
Returns:
bool, True if test is not in the option name.
"""
return 'test' not in option | 5c8304b8e4abe91ec8e3e55f14201a1a3d7f5c57 | 10,988 |
def get_parameters_nodes(input_nodes):
"""Find operations containing the parameters of the model.
Args:
input_nodes (:obj:`list` of :obj:`Node`): the input operations of the
model.
Returns:
parameters (:obj:`list` of :obj:`Node`): the operations containing
the parameters of the model.
"""
parameters = list()
for node in input_nodes:
if node.is_trainable:
parameters.append(node)
return parameters | a4185512a577521f0ed3d7cc9b098800fda58974 | 10,990 |
def get_method(interface, method):
"""
Get a specific method
Parameters:
----------
interface : interface of CST Studio
method : string
Specific method that you want
Returns:
----------
instance : function
Instance of the method
"""
return getattr(interface, method) | bd0a8322e2a47f8c0b760894617c9ed2429fcb02 | 10,992 |
def read_file_header( filename ):
"""
A header is:
1. Any number of blank lines before the header
2. Any number of KEY=value pairs (anything else is ignored)
3. One or more blank lines stops the header
Returns a tuple (format type, version integer, header dict, hdr lines),
where the format type and version integer may be None if the header key
"FILE_VERSION" was not found. The header lines is the number of lines
of header data in the file.
"""
fp = open( filename, 'r' )
cnt = 0
hdr = {}
line = fp.readline()
while line:
line = line.strip()
if line[:5] == 'TEST:':
break
elif line:
cnt += 1
L = line.split('=',1)
if len(L) == 2 and L[0].strip():
if L[1].strip() == 'None':
hdr[ L[0].strip() ] = None
else:
hdr[ L[0].strip() ] = L[1].strip()
elif cnt > 0:
break
line = fp.readline()
fp.close()
vers = hdr.get( 'FILE_VERSION', None )
if vers:
i = len(vers) - 1
while i >= 0 and vers[i] in '0123456789':
i -= 1
t = vers[:i+1]
n = 0
sn = vers[i+1:]
if sn:
n = int(sn)
return t,n,hdr,cnt
return None,None,hdr,cnt | 54bec5d22db1d04ba081786b287abe678c71e487 | 10,993 |
def get_pip_command_packages(command):
"""Return packages included in a pip command."""
return command.split()[2:] | 63757643862b590f27eed927b2037ad45eaf4792 | 10,994 |
def gini(k_proportions):
"""
Gini impurity function. This is used to determine the impurity of a given
set of data, given the proportions of the classes in the dataset.
This is equivalent to:
H = ∑ pk(1-pk) for all k classes.
k_proportions, in this case, is an array of pk's
:param k_proportions: array containing proportions of different classes. Proportions sum to 1.
:return: the impurity of the dataset.
"""
return (k_proportions*(1-k_proportions)).sum() | db0e74b29166603ed2bda3aa5fa9614ba9206b67 | 10,996 |
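A minimal sanity check of the impurity formula above, assuming `k_proportions` is a NumPy array (the `.sum()` call implies an array-like input): a 50/50 split gives the two-class maximum of 0.5 and a pure node gives 0.
import numpy as np
print(gini(np.array([0.5, 0.5])))  # 0.5, maximum impurity for two classes
print(gini(np.array([1.0])))       # 0.0, a pure node has no impurity
print(gini(np.array([0.9, 0.1])))  # approximately 0.18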
from pathlib import Path
def fixture_project_dir() -> Path:
"""Return path to the Python project directory.
:return: a path
"""
return Path(__file__).parent.parent | 08ef4059aa5fa4b125928e9843b83f2befff8576 | 10,997 |
def return_element_from_list(i, l):
"""
Returns an element from the list
@param: i is an integer corresponding to the index of the element in the list
@param: l is a list of elements
return:
element of the list if 0 <= i <= len(l) - 1
None otherwise
"""
if(i < 0 or i >= len(l)):
return None
else:
return l[i] | 7d57263d67fe85c13f34428c23cdaf9ae7671855 | 10,998 |
import re
def is_indvar(expr):
"""
An individual variable must be a single lowercase character other than 'e', 't', 'n', 's',
followed by zero or more digits.
@param expr: C{str}
@return: C{boolean} True if expr is of the correct form
"""
assert isinstance(expr, str), "%s is not a string" % expr
return re.match(r'^[a-df-mo-ru-z]\d*$', expr) | c00e62199263214596a0b9519868ffdeb86e9580 | 11,004 |
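A few illustrative inputs against the pattern above; note that `re.match` returns a match object (truthy) or `None` rather than a strict boolean, so `bool()` is used here to show the outcome.
print(bool(is_indvar('x')))    # True
print(bool(is_indvar('z12')))  # True
print(bool(is_indvar('e2')))   # False, 'e' is one of the excluded letters
print(bool(is_indvar('xy')))   # False, only digits may follow the letter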
def uniq_count(data):
"""
Count number of unique elements in the data.
Args:
data (list): values.
Returns the number of unique elements in the data.
"""
uniq_atom_list = list(set(data))
return len(uniq_atom_list) | d61eff27aed7d788fa6cc80eb25661f0ebae7dfd | 11,006 |
import pickle
def load_dt(filename):
"""
Load a previously saved decision tree
:param filename: file name
:return: python dict
"""
# 'b' means binary mode
fr = open(filename, 'rb')
return pickle.load(fr) | c61772d6c8606e45ef323bd8dd30cb0c9e6ebf35 | 11,008 |
from typing import MutableSequence
from typing import Any
def swap(seq: MutableSequence[Any], frst_idx: int, snd_idx: int) -> MutableSequence[Any]:
"""Swaps two elements in the `seq`."""
if seq[frst_idx] == seq[snd_idx]:
return seq
seq[frst_idx], seq[snd_idx] = seq[snd_idx], seq[frst_idx]
return seq | 1d4c3260e57f827293d849b490bd35c2ae9f9698 | 11,012 |
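A short usage sketch; the swap happens in place, so the returned sequence is the same object that was passed in.
items = [3, 1, 2]
assert swap(items, 0, 2) is items
print(items)  # [2, 1, 3]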
def parse_problems(lines):
""" Given a list of lines, parses them and returns a list of problems. """
problems = []
i = 0
while i < len(lines):
h, w = map(int, lines[i].split(" "))
problems.append((w, h, lines[i + 1:i + h + 1]))
i += h + 1
return problems | 883a6a7cfaa8104c171a6e166fcbb8f4403f4c01 | 11,013 |
def read_input(fpath):
"""
Read a specified file, and return a list of the file's contents
separated by new lines.
Args:
fpath (str): Path of the file to read.
Returns:
list
"""
with open(fpath, 'r') as f:
return f.read().splitlines() | ac10f24d7b5769ee85b3f13f3fde278d56ef1741 | 11,015 |
def safe_get(dictionary, key, default_value, can_return_none=True):
"""
Safely perform a dictionary get,
returning the default value if the key is not found.
:param dict dictionary: the dictionary
:param string key: the key
:param variant default_value: the default value to be returned
:param bool can_return_none: if ``True``, the function can return ``None``;
otherwise, return ``default_value`` even if the
dictionary lookup succeeded
:rtype: variant
"""
return_value = default_value
try:
return_value = dictionary[key]
if (return_value is None) and (not can_return_none):
return_value = default_value
except (KeyError, TypeError):
# KeyError if key is not present in dictionary
# TypeError if dictionary is None
pass
return return_value | eb53ad7a17db3f2c66c8b16b20ae2aac9b1e34e3 | 11,016 |
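A few illustrative calls showing the `can_return_none` switch and the `None`-dictionary case that the `TypeError` branch guards against:
print(safe_get({"a": None}, "a", "fallback", can_return_none=False))  # returns 'fallback'
print(safe_get(None, "a", "fallback"))                                # returns 'fallback', dictionary is None
print(safe_get({"a": 1}, "a", "fallback"))                            # returns 1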
def parse_grid(grid):
"""
converts a grid like
K F A
L G B
M H C
N I D
O J E
to
ABCDEFGHIJKLMNO
"""
rows = [row.strip().split(" ") for row in grid]
return "".join(rows[row][col] for col in range(2, -1, -1) for row in range(0, 5)) | f1cb825e3d20edd2db92fee4104204e9bcb1f54a | 11,018 |
def predicate_contains_hello(x):
"""Predicate True when 'hello' is in value."""
return 'hello' in x | 884ef0a9a925865d5bd9093d52f1c248e498aa80 | 11,021 |
def get_tile_url(xtile, ytile, zoom):
"""
Return a URL for a tile given some OSM tile co-ordinates
"""
return "http://tile.openstreetmap.org/%d/%d/%d.png" % (zoom, xtile, ytile) | 20af9f5b4065d96c285e6c4e5c64123f0343b659 | 11,022 |
import json
def json_to_dict(json_file_path):
"""
Convert a .json file to a Python dictionary.
Parameters
----------
json_file_path: str
Path of the JSON file
Returns
-------
dictionary: dict
The original JSON file as a Python dictionary
"""
with open(json_file_path, "r", encoding="utf8") as json_data:
dictionary = json.load(json_data)  # json.load() no longer accepts an `encoding` argument (removed in Python 3.9)
return dictionary | d8ef47ab43c3477212795e795690bad4081e0321 | 11,025 |
def format_bases(bases):
"""
Generate HTML that colours the bases in a string.
Args:
bases: A string containing a genetic sequence.
Returns:
An HTML string.
"""
formatted = ''
for b in bases:
formatted += '<span class="base-{}">{}</span>'.format(b,b)
return formatted | a94f34a53178ceb5dd1640eaa8897a128c7c4d67 | 11,026 |
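For example, a two-base sequence produces one span per base, with the base letter repeated in both the CSS class and the text:
print(format_bases("AC"))
# <span class="base-A">A</span><span class="base-C">C</span>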
def get_services(services):
"""
Get all services from the response and make the comma-separated string.
:param services: List of services.
:return: comma-separated list.
"""
return ', '.join(services) | 6c965dff4c85f772b41d10e12170547f156ee86e | 11,028 |
def check_for_default_value_for_missing_params(missing_params, method_params):
"""
:param missing_params: Params missing from Rule
:param method_params: Params defined on method, which could have default value for missing param
[{
'label': 'action_label',
'name': 'action_parameter',
'fieldType': 'numeric',
'defaultValue': 123
},
...
]
:return Params that are missing from rule but have default params: {'action_parameter'}
"""
missing_params_with_default_value = set()
if method_params:
for param in method_params:
if param['name'] in missing_params and param.get('defaultValue', None) is not None:
missing_params_with_default_value.add(param['name'])
return missing_params_with_default_value | bad583976d4c88af93540c9d64f7b0711fd24e12 | 11,029 |
import math
def ph(concentration):
"""Returns the pH from the hydronium ion concentration."""
return -math.log10(concentration)  # pH is the negative base-10 logarithm of the concentration
def hash_file(upload_context):
"""
Function run by HashFileCommand to calculate a file hash.
:param upload_context: PathData: contains path to a local file to hash
:return HashData: result of hash (alg + value)
"""
path_data = upload_context.params
hash_data = path_data.get_hash()
return hash_data | 3819e9617e5726cf4178a8382eb22b0ec8bd7da0 | 11,034 |
def process_lower(cont):
""" Make the value in lowercase """
return cont.lower() | f863852b0aff952bce080e20360d6fcc571acc21 | 11,046 |
def get_word_counter(word_vocab):
"""
Convert a list of tuple of words and their frequencies in word vocabulary to dictionary.
Key is word and value is its frequency.
Args:
word_vocab: A list of tuple of words and their frequencies.
Returns: A dictionary with word as the key and its frequency in word vocabulary as the value.
"""
return {word: count for word, count in word_vocab} | 8e76652c721d9ca175f79d9bb0acfccfb90da647 | 11,051 |
import torch
def format_metric(val):
"""Format a tensor/number as a float with 4 digits"""
if isinstance(val, torch.Tensor):
val = val.detach().data
return str('{:.4f}'.format(val)) | 337e266bca4ff0433e2c69864b1a493976d12c44 | 11,052 |
import re
def get_csrf_token(res):
"""Extract the CSRF token from a response."""
for header in res.headers:
m = re.search('token=(.+?);', header[1])
if m:
return m.group(1)
raise RuntimeError('Could not find CSRF token in response headers: ' + str(res.headers)) | 2c2670a6909ed87d60b44a5cf1cbaeffdc4fc653 | 11,055 |
def create_to_idx_dict(source):
""" Creates a dictionary of item-specific indices form a tuple of lists. """
idx_dict = dict()
for i in source:
if i not in idx_dict:
idx_dict[i] = len(idx_dict)
return idx_dict | 0c8de2455b4fa78b4c29b75879c87f1ee2a2de40 | 11,059 |
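Duplicates keep their first-seen index, so the mapping reflects the order in which distinct items appear:
print(create_to_idx_dict(("b", "a", "b", "c")))  # {'b': 0, 'a': 1, 'c': 2}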
def chomp_empty(seq):
"""Return slice of sequence seq without trailing empty tuples."""
n = len(seq)
while (n > 0) and seq[n - 1] == ():
n -= 1
return seq[:n] | 1c6f5f58bb2e73d44b2638d796b1b0a38bba414c | 11,061 |
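Only trailing empty tuples are dropped; interior empties are preserved:
print(chomp_empty(((1,), (), (2,), (), ())))  # ((1,), (), (2,))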
def _delta(x, y):
"""Computes |y|/|x|."""
return max(float(len(y))/float(len(x)), 1.0) | 22a55950406daeb3e7653d9a0a232d52a7bd76e4 | 11,064 |
def forward_method_kwargs(**kwargs) -> dict:
"""Return all the keyword-arguments of a method, excluding the 'self' argument"""
retval = {}
for key, value in kwargs.items():
if key == 'self' or key.startswith('_'):
continue
elif key == 'kwargs':
retval.update(value)
else:
retval[key] = value
return retval | 571ad0c61f33e608ce253c16f452e488257cbf31 | 11,067 |
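In practice this would typically be called as `forward_method_kwargs(**locals())` from inside a method; a stand-alone illustration of the filtering and merging behaviour:
print(forward_method_kwargs(self=object(), x=1, _private=2, kwargs={"y": 3}))
# {'x': 1, 'y': 3}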
import torch
def pdist(X, Y):
""" Computes all the pairwise distances
Parameters
----------
X : torch.tensor
shape [n, d]
Y : torch.tensor
shape [m, d]
Returns
-------
torch.tensor
shape [n, m] of all pairwise distances
"""
n, m = X.shape[0], Y.shape[0]
X_norm2 = (X ** 2).sum(1)
Y_norm2 = (Y ** 2).sum(1)
X_dot_Y = X @ Y.T
return (
X_norm2.unsqueeze(1) @ torch.ones((1, m), device=X.device)
- 2 * X_dot_Y
+ torch.ones((n, 1), device=Y.device) @ Y_norm2.unsqueeze(0)
) | 10fce38b390211999344bd1b0daf0a4484aee2a8 | 11,073 |
def detab(self, elem="", lab="", v1="", v2="", v3="", v4="", v5="", v6="",
**kwargs):
"""Modifies element table results in the database.
APDL Command: DETAB
Parameters
----------
elem
Element for which results are to be modified. If ALL, modify all
selected elements [ESEL] results. If ELEM = P, graphical picking
is enabled and all remaining command fields are ignored (valid only
in the GUI). A component name may also be substituted for ELEM.
lab
Label identifying results. Valid labels are as defined with the
ETABLE command. Issue ETABLE,STAT to display labels and values.
v1
Value assigned to this element table result in the database. If
zero, a zero value will be assigned. If blank, value remains
unchanged.
v2, v3, v4, . . . , v6
Additional values (if any) assigned to consecutive element table
columns.
Notes
-----
Modifies element table [ETABLE] results in the database. For example,
DETAB,35,ABC,1000,2000,1000 assigns 1000, 2000, and 1000 to the first
three table columns starting with label ABC for element 35. Use the
PRETAB command to list the current results. After deleting a column of
data using ETABLE,Lab,ERASE, the remaining columns of data are not
shifted to compress the empty slot. Therefore, the user must allocate
null (blank) values for V1, V2...V6 for any ETABLE entries which have
been deleted by issuing ETABLE,Lab,ERASE. All data are stored in the
solution coordinate system but will be displayed in the results
coordinate system [RSYS].
"""
command = f"DETAB,{elem},{lab},{v1},{v2},{v3},{v4},{v5},{v6}"
return self.run(command, **kwargs) | 01f13f7e971c2c24e291712a7e634838e2e2ac5a | 11,074 |
def convert(s):
"""
Convert a probability string to a float number.
:param s: probability string.
:return: a float probability.
"""
try:
return float(s)
except ValueError:
num, denom = s.split('/')
return float(num) / float(denom) | 84333f21edfdcb2f3c917f23c23b9d399c5f6e56 | 11,075 |
def get_dwtype_floor_area(dwtype_floorarea_by, dwtype_floorarea_ey, sim_param):
"""Calculates the floor area per dwelling type for every year
Parameters
----------
dwtype_floorarea_by : dict
Floor area per dwelling type in the base year
dwtype_floorarea_ey : dict
Floor area per dwelling type in the end year
sim_param : list
Simulation parameters
Returns
-------
dwtype_floor_area : dict
Contains the floor area change per dwelling type
Note
-----
- A linear change over time is assumed
Example
-------
out = {year: {'dwtype': 0.3}}
"""
dwtype_floor_area = {}
for curr_yr in sim_param['sim_period']:
nr_sim_yrs = curr_yr - sim_param['base_yr']
if curr_yr == sim_param['base_yr']:
y_distr = dwtype_floorarea_by
else:
y_distr = {}
for dwtype in dwtype_floorarea_by:
val_by = dwtype_floorarea_by[dwtype]
val_ey = dwtype_floorarea_ey[dwtype]
diff_val = val_ey - val_by
# Calculate linear difference up to sim_yr
diff_y = diff_val / sim_param['sim_period_yrs']
y_distr[dwtype] = val_by + (diff_y * nr_sim_yrs)
dwtype_floor_area[curr_yr] = y_distr
return dwtype_floor_area | 3b87386bbf5bf051aec5d43ebd47f3359885c8b4 | 11,079 |
def look_and_say(n):
"""Returns the nth term of the "look and say" sequence, which is defined as follows: beginning
with the term 1, each subsequent term visually describes the digits appearing in the previous term.
The first few terms are as follows:
1
11
21
1211
111221
Ex: the fourth term is 1211, since the third term consists of one 2 and one 1.
"""
assert n, "There is no zeroth term."
if n == 1:
return 1
else:
s = str(look_and_say(n-1))
res = ''
i = 0
while i in range(len(s)):
count = 1
num = s[i]
while i in range(len(s)-1) and s[i] == s[i+1]:
count += 1
i += 1
res += str(count) + num
i += 1
return int(res) | 5cb341d0bce8bc363b2b734e9da66a07b7da2434 | 11,081 |
def get_text_ls(filename):
"""Returns text of file as a list of strings"""
with open(filename, 'r') as f_in:
return f_in.readlines() | 5b788f38e683c82648224b7cc1b5875a0f602dce | 11,083 |
def get_title(soup):
"""Given a soup, pick out a title"""
if soup.title:
return soup.title.string
if soup.h1:
return soup.h1.string
return '' | bcec80e0c6e5163ed90a4975b81a5c5a3d418132 | 11,086 |
import glob
def getAllOfAFile(file_dir, ext):
"""
Returns a list of all the files in the directory with the specified file extension
:param file_dir: Directory to search
:param ext: The file extension (IE: ".py")
"""
return glob.glob(file_dir+"/*"+ext) | 876a4f4b30653bd08454db9ee425d56fe408623d | 11,093 |
def OffsetPosition(in_ra,in_dec,delta_ra,delta_dec):
"""
Offset a position given in decimal degrees.
Parameters
----------
in_ra: float
Initial RA (decimal degrees).
in_dec: float
Initial DEC (demical degrees).
delta_ra: float
Offset in RA (decimal degrees).
delta_dec: float
Offset in DEC (decimal degrees).
Returns
-------
ra: float
Offset RA.
dec: float
Offset DEC.
"""
ra = in_ra
dec = in_dec + delta_dec
if dec > 90.:
dec = 180 - dec
ra = 180 + ra
if dec < -90.:
dec = -180 - dec
ra = 180 + ra
ra = ra + delta_ra
if ra > 360.:
ra = ra - 360.
if ra < 0.:
ra = ra + 360.
return ra,dec | 7b027a2e0bf87dba9d1136e68af258b21223cedb | 11,096 |
def get_database_user(config, credentials):
"""
Returns the database user from the credentials.
"""
return credentials.get('user', 'unknown') | 90e0a25a888e2de73ea7688a4da2e5de2ae02fbb | 11,099 |
def dms2deg(valin):
"""
Converts DMS input to decimal degrees.
Input can be either a string delimeted by : or spaces, or a list of [D,M,S] numbers.
Parameters
----------
valin: float
Input value in DMS. Can be either: \n
- a string delimeted by : or spaces \n
- a list of [D,M,S] numbers (floats or ints) \n
Returns
-------
valout : float
Degrees corresponding to the DMS value
Examples
--------
# e.g., '-78:12:34.56' corresponds to -77.7904 deg \n
obs.dms2deg('-78:12:34.56') #--> -77.79039999999999 \n
obs.dms2deg('-78 12 34.56') #--> -77.79039999999999 \n
obs.dms2deg([-78,12,34.56]) #--> -77.79039999999999
"""
if type(valin)==str:
if ':' in valin: ra=[float(val) for val in valin.split(':')]
else: ra=[float(val) for val in valin.split(' ')]
else: ra=valin
valout=ra[0]+ra[1]/60.+ra[2]/3600.
return valout | 3efac9d11c8a7b5933766a0610f49a884e20925b | 11,101 |
def featureScale(x, xRef=None):
"""Helper function to perform feature scaling.
INPUTS:
x: pandas DataFrame or Series.
xRef: reference pandas DataFrame.
If only x is provided, x will be normalized against itself.
If xRef is additionally supplied, x will be normalized against xRef
OUTPUTS:
xPrime: pandas DataFrame (or Series, depending on type of x). Each column
is scaled to that all values fall in the range [0, 1]
"""
if xRef is None:
xRef = x
xPrime = (x - xRef.min()) / (xRef.max() - xRef.min())
# If an entire column is NaN, zero it out.
if len(xPrime.shape) > 1:
# Pandas DataFrame (multi-dimensional)
NaNSeries = xPrime.isnull().all()
elif len(xPrime.shape) == 1:
# Pandas Series (1-dimensional)
NaNSeries = xPrime.isnull()
else:
raise UserWarning('Something went wrong in featureScale...')
# Loop and zero out.
for index in NaNSeries.index[NaNSeries]:
xPrime[index] = 0
return xPrime | bffa3403e8083efaa8deb24e03b48d0210d39652 | 11,103 |
import getpass
def get_user(prompt=None):
"""
Prompts the user for his login name, defaulting to the USER environment
variable. Returns a string containing the username.
May throw an exception if EOF is given by the user.
:type prompt: str|None
:param prompt: The user prompt or the default one if None.
:rtype: string
:return: A username.
"""
# Read username and password.
try:
env_user = getpass.getuser()
except KeyError:
env_user = ''
if prompt is None:
prompt = "Please enter your user name"
if env_user is None or env_user == '':
user = input('%s: ' % prompt)
else:
user = input('%s [%s]: ' % (prompt, env_user))
if user == '':
user = env_user
return user | fc392cfacc931ee915bb218a80e5db46245f2a1f | 11,108 |
def menu_item_flag(context, flag_type='', flag_iso='', flag_style='', flag_classes='', **kwargs):
"""
Templatetag menu_item_flag
:param context: Getting context
:param flag_type: Default empty, It accepts the string 'square'
:param flag_iso: Default empty, ISO language country code
:param flag_style: Pass inline styles to the img tag
:param flag_classes: Pass classes to use on the img tag
:param kwargs: Classes to HTML tags
:return: A dict with classes
"""
icon_full_path = f'icons/{flag_type}/{flag_iso}.svg'
default = dict(li_class='', a_class='')
classes = dict(default, **kwargs)
return {
'icon_class': flag_type,
'icon_path': icon_full_path,
'icon_iso': flag_iso,
'icon_style': flag_style,
'icon_classes': flag_classes,
'classes': classes,
'redirect_to': context.request.get_full_path
} | 2520a67ea2436743a1b5dec5a7d0321c68f31221 | 11,111 |
def _full_link(provider, word):
"""Return a website link for dictionary provider and word."""
return 'http://' + provider + word | 56681e50523910a0519e29f7446355a20d932284 | 11,113 |
def element_text(member_elt, elt_name):
"""Extract all `para` text from (`elt_name` in) `member_elt`."""
text = []
if elt_name:
elt = member_elt.find(elt_name)
else:
elt = member_elt
if elt:
paras = elt.findAll('para')
for p in paras:
text.append(p.getText(separator=u' ').strip())
return '\n\n'.join(text) | 13ff356e1a584bcaa9c905c93dcafaa787ca936f | 11,114 |
import torch
def to_device(data, device):
"""Move data to device
Arguments:
data {TupleTree, tensor} -- Tensors that should be moved to device.
device {str, torch.device} -- Device data is moved to.
Returns:
TupleTree, tensor -- Data moved to device
"""
if type(data) is not torch.Tensor:
raise RuntimeError(f"Need 'data' to be tensors, not {type(data)}.")
return data.to(device) | 9e0661951e7793a92d7f1953bfb481ccf4ec4ca9 | 11,117 |
def word_list_to_string(word_list, delimeter=" "):
"""Creates a single string from a list of strings
This function can be used to combine words in a list into one long sentence
string.
Args:
word_list (list/tuple): A list (or other container) of strings.
delimeter (str, Optional): A string to delimit the strings in the list
when combining the strings.
Returns:
A string.
"""
string = ""
for word in word_list:
string+=word+delimeter
# strip the trailing delimiter, which may be longer than one character
return string[:len(string) - len(delimeter)]
def is_s3(url: str) -> bool:
"""Predicate to determine if a url is an S3 endpoint."""
return url is not None and url.lower().startswith('s3') | f1e36654ae86057fb4ae73a90648095119f1b5af | 11,123 |
def dict2tsv(condDict):
"""Convert a dict into TSV format."""
string = str()
for i in condDict:
string += i + "\t" + "{%f, %f}" % condDict[i] + "\n"
return string | c73f8e3158ade699cc4589d541f05397f559d190 | 11,129 |
import re
def deduce_look_back(in_features, target_features):
"""From the feature names, determine how large of a look back is used.
Args:
in_features (list of str): Names of input features
target_features (list of str): Names of target features.
Returns:
int: Number of look back features.
int: Look back value.
"""
def is_shared(target_feature):
for in_feature in in_features:
if re.match(re.escape(target_feature) + r'\d+$', in_feature):
return True
return False
shared_features = list(filter(is_shared, target_features))
if len(shared_features) == 0:
return 0, None
look_backs = []
for shared_feature in shared_features:
look_backs.append(0)
for in_feature in in_features:
if re.match(re.escape(shared_feature) + r'\d+$', in_feature):
look_backs[-1] += 1
if look_backs.count(look_backs[0]) != len(look_backs):
raise ValueError('Inconsistent look back.')
return len(look_backs), look_backs[0] | bae20baec986c888acfff159b491635e2e75a455 | 11,142 |
def attachment(url: str, filename="") -> dict:
"""
Returns a dictionary using the expected dictionary format for attachments.
When creating an attachment, ``url`` is required, and ``filename`` is optional.
Airtable will download the file at the given url and keep its own copy of it.
All other attachment object properties will be generated server-side soon afterward.
Note:
Attachment field values must be **an array of objects**.
Usage:
>>> table = Table(...)
>>> profile_url = "https://myprofile.com/id/profile.jpg
>>> rec = table.create({"Profile Photo": [attachment(profile_url)]})
{
'id': 'recZXOZ5gT9vVGHfL',
'fields': {
'attachment': [
{
'id': 'attu6kbaST3wUuNTA',
'url': 'https://aws1.discourse-cdn.com/airtable/original/2X/4/411e4fac00df06a5e316a0585a831549e11d0705.png',
'filename': '411e4fac00df06a5e316a0585a831549e11d0705.png'
}
]
},
'createdTime': '2021-08-21T22:28:36.000Z'
}
"""
return {"url": url} if not filename else {"url": url, "filename": filename} | 24564ca3e7dfb8cc35242b1d16fb7351fc9576ce | 11,148 |
def NIST_SU(results):
"""Number of segmentation errors (missed segments and
false alarm segments)
over number of reference segments.
"""
assert len(results) == 3
TPs = results[0]
FPs = results[1]
FNs = results[2]
if (FNs + FPs) == 0:
return 0.0
return ((FNs + FPs)/(TPs + FNs)) * 100 | 3c60a612223dc247109d24be45b32739af8587ef | 11,150 |
def parse_default_kv(default, default_dict):
"""parse a string in form key1=value1;key2=value2,... as used for some template fields
Args:
default: str, in form 'photo=foto;video=vidéo'
default_dict: dict, in form {"photo": "fotos", "video": "vidéos"} with default values
Returns:
dict in form {"photo": "fotos", "video": "vidéos"}
"""
default_dict_ = default_dict.copy()
if default:
defaults = default[0].split(";")
for kv in defaults:
try:
k, v = kv.split("=")
k = k.strip()
v = v.strip()
default_dict_[k] = v
except ValueError:
pass
return default_dict_ | 4d461589118915cde5461b6b8ea7cd5e5e4d5165 | 11,157 |
def is_on(S, j):
"""
Returns 1 if and only if the `j`-th item of the set `S` is on.
Examples
========
Check if the 3-th and then 2-nd item of the set is on:
>>> S = 0b101010
>>> is_on(S, 3), is_on(S, 2)
(1, 0)
"""
return (S & (1 << j)) >> j | 76034f083372ee2cbe0c711e3a09daf26634550a | 11,165 |
def _get_pil_image_dimensions(pil_image):
"""Gets the dimensions of the Pillow Image.
Args:
pil_image: Image. A file in the Pillow Image format.
Returns:
tuple(int, int). Returns height and width of the image.
"""
width, height = pil_image.size
return height, width | d191ae705df97b1729be2dd03e9a5ff4ddcb4518 | 11,167 |
def wrap_list(item):
"""
Returns an object as a list.
If the object is a list, it is returned directly. If it is a tuple or set, it
is returned as a list. If it is another object, it is wrapped in a list and
returned.
"""
if item is None:
return []
elif isinstance(item, list):
return item
elif isinstance(item, (tuple, set)):
return list(item)
else:
return [item] | 6b2af543af39058f7df28e7d89dbb9231cf2b247 | 11,169 |
from typing import Union
import zlib
def zlib_compress(data: Union[bytes, str]) -> bytes:
"""
Compress things in a py2/3 safe fashion
>>> json_str = '{"test": 1}'
>>> blob = zlib_compress(json_str)
"""
if isinstance(data, str):
return zlib.compress(bytes(data, "utf-8"))
return zlib.compress(data) | 0e7eaf018873ce335b06c4ca4857f9bf8b58864b | 11,170 |
def get_manual_iface(manual_iface, node_class):
""" Returns standardized interface dict based on manual_iface """
iface_dict = {
"node_id": "manual",
"node_uri": None,
"node_name": None,
"node_addr": None,
"node_fqdn": None,
"node_class": node_class,
"iface_id": "manual",
"iface_uri": None,
"iface_name": None,
"iface_addr": None,
"iface_speed": None,
}
iface_dict.update(manual_iface)
return iface_dict | 070207a1ba399b660147f0d3cb95419347e0344e | 11,175 |
import string
import re
def tokenize_count(s: str) -> int:
"""
Tokenizes the given strings to count the number of words.
:param s:
:return: number of words
"""
s = s.translate(str.maketrans('', '', string.punctuation + "„“–"))
return len(re.split(r'\W+', s)) | c68822f313a2ffcab11edf0c0ce146d758cb8e3f | 11,177 |
def set_idle_override(isUserActive: bool, isScreenUnlocked: bool) -> dict:
"""Overrides the Idle state.
Parameters
----------
isUserActive: bool
Mock isUserActive
isScreenUnlocked: bool
Mock isScreenUnlocked
**Experimental**
"""
return {
"method": "Emulation.setIdleOverride",
"params": {"isUserActive": isUserActive, "isScreenUnlocked": isScreenUnlocked},
} | 21b51d27edef13f66818d8d72583745e6c3449e9 | 11,178 |
def is_explicitly_rooted(path):
"""Return whether a relative path is explicitly rooted relative to the
cwd, rather than starting off immediately with a file or folder name.
It's nice to have paths start with "./" (or "../", "../../", etc.) so, if a
user is that explicit, we still find the path in the suffix tree.
"""
return path.startswith(('../', './')) or path in ('..', '.') | bde26849889ac5c951160e441cdd0c3c60871ab1 | 11,181 |
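A few representative inputs:
print(is_explicitly_rooted("./src/main.py"))  # True
print(is_explicitly_rooted("../lib"))         # True
print(is_explicitly_rooted("."))              # True
print(is_explicitly_rooted("src/main.py"))    # False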
from typing import List
def snip_out(file_str:str, start_key:str)->str:
"""From an anvil.yaml file, snips out only the string you want: the database description."""
good_string:List[str]=[]
save_this_one = False
for line in file_str.split('\n'):
if line.startswith(start_key):
good_string.append(line)
save_this_one=True
elif save_this_one is False:
continue
elif line[:1] == ' ' or line[:1] == '\t':  # line[:1] avoids an IndexError on blank lines
good_string.append(line)
else:
save_this_one=False
return '\n'.join(good_string) | 8e9ebde180fb5ff6faefcbd92629c75f260ce518 | 11,182 |
def next_nuc(seq, pos, n):
""" Returns the nucleotide that is n places from pos in seq. Skips gap symbols.
"""
i = pos + 1
while i < len(seq):
if seq[i] != '-':
n -= 1
if n == 0: break
i += 1
if i < len(seq) :
return seq[i]
else :
return 'N' | dbe3d204d3399167630cf83c74b0f1742d1c8367 | 11,186 |
def load_h5(h5f):
"""
Load fiberbundles configurations from a hdf5 class
Parameters
----------
h5f : hdf5 class
h5-file or group object
Returns
-------
res : list(list(fiber)), fibers are (n,4)-arrays with (x,y,z,radii) for each fiber point
"""
fiber_bundles = []
fb_list = list(map(int, list(h5f.keys())))
fb_list.sort()
for fb in fb_list:
fiber_bundles.append([])
f_list = list(map(int, list(h5f[str(fb)].keys())))
f_list.sort()
for f in f_list:
fiber_bundles[-1].append(h5f[str(fb)][str(f)][:].astype(float))
return fiber_bundles | 47487b43ae375c5ade27c82ec083570ee9655e27 | 11,195 |
from typing import List
def load_file(file_path: str) -> List[str]:
"""
Just loader for file with lines
:param file_path: path to file
:return: list of lines of your data
"""
data: List[str] = list()
with open(file_path) as file_object:
for line in file_object:
data.append(line.strip())
return data | 9aadf5c0a90f5c65868862e4366276c448077944 | 11,198 |
def pcmd(progressive, lr, fb, vv, va):
"""
Makes the drone move (translate/rotate).
Parameters:
progressive -- True: enable progressive commands, False: disable (i.e.
enable hovering mode)
lr -- left-right tilt: float [-1..1] negative: left, positive: right
fb -- front-back tilt: float [-1..1] negative: forwards, positive:
backwards
vv -- vertical speed: float [-1..1] negative: go down, positive: rise
va -- angular speed: float [-1..1] negative: spin left, positive: spin
right
The above float values are a percentage of the maximum speed.
"""
assert type(progressive) == bool
assert all(map(lambda x: abs(x) <= 1, (lr, fb, vv, va)))
return progressive, float(lr), float(fb), float(vv), float(va) | 4534ac48f00a39c944b1be01ea0818235aea2559 | 11,204 |
def b2h(n):
"""Convert bytes int into human friendly string format.
>>> b2h(10000)
'9.8 KB'
>>> b2h(100001221)
'95.4 MB'
"""
t = "{0:.2f} {1}".format
symbols = ("KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB")
prefix = {s: 1 << (i + 1) * 10 for i, s in enumerate(symbols)}
for s in reversed(symbols):
if n >= prefix[s]:
value = float(n) / prefix[s]
return t(value, s)
return t(n, "B") | 6337fa1d1c7a2e324bcbe99eac28225551f84ef5 | 11,205 |
from typing import List
from typing import Tuple
def plotlines(
optical_start: List[float], ratio_wanted: List[float]
) -> Tuple[List[float], List[float], List[float]]:
"""Draws the 'Distance Lines' for the main plot.
Takes inputs of desired optical and radio limits of where the distance
lines should be drawn on the plot.
Args:
optical_start: The optical flux values where the lines should start
from.
ratio_wanted: The desired radio-to-optical flux ratios that the lines should follow.
Return:
The optical points to plot along with the radio range to plot.
Also returns the radio wanted (I can't remember why!).
"""
radio_range = []
radio_range.reverse()
optical_points = []
OFLUX = optical_start
optical_points.append(OFLUX)
this_radio = optical_points[0] * ratio_wanted
radio_range.append(this_radio)
while this_radio < 99999.:
this_radio *= 10.
NEWOFLUX = this_radio / ratio_wanted
optical_points.append(NEWOFLUX)
radio_range.append(this_radio)
return optical_points, radio_range, ratio_wanted | 5c2b83b881c8b8101a5177709096c79dfce8c16c | 11,214 |
def get_unique_fields(fld_lists):
"""Get unique namedtuple fields, despite potential duplicates in lists of fields."""
flds = []
fld_set = set([f for flst in fld_lists for f in flst])
fld_seen = set()
# Add unique fields to list of fields in order that they appear
for fld_list in fld_lists:
for fld in fld_list:
# Add fields if the field has not yet been seen
if fld not in fld_seen:
flds.append(fld)
fld_seen.add(fld)
assert len(flds) == len(fld_set)
return flds | 0e131c5b3fe695670fafb51810c674e859c29b63 | 11,217 |
def cmp_func_different_hash(request):
"""Return a comparison function that checks whether two hashes are different."""
return request.param | 5e917de2db60c03d17fc5536e5af48e0328423fc | 11,218 |
import torch
def picp(target, predictions:list, total = True):
"""
Calculate PICP (prediction interval coverage probability) or simply the % of true
values in the predicted intervals
Parameters
----------
target : torch.Tensor
true values of the target variable
predictions : list
- predictions[0] = y_pred_upper, predicted upper limit of the target variable (torch.Tensor)
- predictions[1] = y_pred_lower, predicted lower limit of the target variable (torch.Tensor)
total : bool, default = True
- When total is set to True, return overall PICP
- When total is set to False, return PICP along the horizon
Returns
-------
torch.Tensor
The PICP, which depending on the value of 'total' is either a scalar (PICP in %, for
significance level alpha = 0.05, PICP should >= 95%)
or 1d-array over the horizon, in which case it is expected to decrease as we move
along the horizon. Generally, higher is better.
"""
# coverage_horizon = torch.zeros(targets.shape[1], device= targets.device,requires_grad=True)
# for i in range(targets.shape[1]):
# # for each step in forecast horizon, calcualte the % of true values in the predicted interval
# coverage_horizon[i] = (torch.sum((targets[:, i] > y_pred_lower[:, i]) &
# (targets[:, i] <= y_pred_upper[:, i])) / targets.shape[0]) * 100
assert len(predictions) == 2
#torch.set_printoptions(precision=5)
y_pred_upper = predictions[0]
y_pred_lower = predictions[1]
coverage_horizon = 100. * (torch.sum((target > y_pred_lower) &
(target <= y_pred_upper), dim=0)) / target.shape[0]
coverage_total = torch.sum(coverage_horizon) / target.shape[1]
if total:
return coverage_total
else:
return coverage_horizon | ef075b4cf3904a5f854ab4db3ecfbc6ba66ad674 | 11,220 |
import re
def time_to_frame(line: str, fps=24000 / 1001) -> int:
"""
Converts a timestamp in the format <hours>:<minutes>:<seconds>.<milliseconds> into the corresponding frame number.
<hours> and <milliseconds> are optional,
and milliseconds can have arbitrary precision (which means they are no longer milliseconds :thonking: ).
A parameter for the framerate can be passed if required.
Valid example inputs: '1:23.456', '01:10:00', '0:00:00.000', '24:30.2'
"""
timestamp = re.match(r'(\d{1,2}:)?\d{1,2}:\d{1,2}(\.\d+)?', line)  # fractional seconds are optional, as the docstring states
if not timestamp:
return -1
times = timestamp.group(0).split(':')
if '.' in times[-1]: # milliseconds are specified
times[-1], ms = times[-1].split('.')
frame = fps * (int(ms) / 10**len(ms))
else:
frame = 0
for t in reversed(times):
frame += (fps * int(t))
fps *= 60
return round(frame) | 0942ce5526592eeb8c6a858631382530ad2dccbd | 11,224 |
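With the default rate of 24000/1001 (~23.976 fps), one second lands on frame 24, and an unparseable string falls through to -1:
print(time_to_frame('0:01.000'))    # 24
print(time_to_frame('not a time'))  # -1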
def file_order(entry):
"""
For a PlaylistEntry, return its original order in the Playlist.
"""
return entry['lineno'] | 8bbd6546e120cec018c0f7628fd1473ae5926dad | 11,229 |
import json
def create_diff_json(image_diff, file_output_name):
"""diff image file and save as file
args:
image_diff (object)
file_output_name (str)
returns:
saved_file (str)
"""
diff_content = {}
for attr in image_diff.attributes:
diff_content[attr] = {}
diff_content[attr]["before"] = getattr(image_diff, attr)[0]
diff_content[attr]["after"] = getattr(image_diff, attr)[1]
if diff_content[attr]["before"] != diff_content[attr]["after"]:
diff_content[attr]["diff"] = True
diff_content["pixel"] = image_diff.pixel_diff
with open(file_output_name + ".diff.json", "w") as diff_file:
json.dump(diff_content, diff_file, indent=4)
return file_output_name + ".diff.json" | 0e5b506b7acbc15ca26f21640ee2748598858008 | 11,230 |
def format_class_name(spider_name):
"""Format the spider name to A class name."""
return spider_name.capitalize() + 'Spider' | a477c02873347e8df975dade64b8c316ab8dfe67 | 11,231 |
import torch
def complex_abs(tensor):
"""Compute absolute value of complex image tensor
Parameters
----------
tensor : torch.Tensor
Tensor of shape (batch, 2, height, width)
Returns
-------
Tensor with magnitude image of shape (batch, 1, height, width)
"""
tensor = (tensor[:, 0] ** 2 + tensor[:, 1] ** 2) ** 0.5
return torch.unsqueeze(tensor, dim=1) | dd206d9fb58b819d5ed1c58af8e9fc559430ac3a | 11,234 |
def test_pointer_indexing(pointer_value, type_p):
"""
>>> a = np.array([1, 2, 3, 4], dtype=np.float32)
>>> test_pointer_indexing(a.ctypes.data, float32.pointer())
(1.0, 2.0, 3.0, 4.0)
>>> a = np.array([1, 2, 3, 4], dtype=np.int64)
>>> test_pointer_indexing(a.ctypes.data, int64.pointer())
(1L, 2L, 3L, 4L)
"""
p = type_p(pointer_value)
return p[0], p[1], p[2], p[3] | 21f87a5ec840e3fd789c9e1ca9382d96f451e1e5 | 11,239 |
def find_by_id(object_id, items):
""" Find an object given its ID from a list of items """
for item in items:
if object_id == item["id"]:
return item
raise Exception(f"Item with {object_id} not found") | 822bf82ea68bd94d0bb1ed2dd5db754aee9b0cba | 11,240 |
import requests
import json
def create_snippet(data, baseurl, timeout, raw):
"""
Creates snippet with the given data on the haste server specified by the
baseurl and returns URL of the created snippet.
"""
try:
url = baseurl + "/documents"
response = requests.post(url, data.encode('utf-8'), timeout=float(timeout))
except requests.exceptions.Timeout:
exit("Error: connection timed out")
dockey = json.loads(response.text)['key']
return baseurl + ("/raw/" if raw else "/") + dockey | eb34861909b61749ef2f29da16450a82dcc8d83e | 11,241 |
def wh_to_kwh(wh):
"""
Convert watt hours to kilowatt hours and round to two decimal places
:param wh: integer or decimal value
:return: two decimal spot kilowatt hours
"""
kw = float("{0:.2f}".format(wh / 1000.00))
return kw | 19488960a9c7a4d2fc748f4d897d082cfdaee2b8 | 11,244 |
def widthHeightDividedBy(image, value):
"""Divides width and height of an image by a given value."""
h, w = image.shape[:2]  # image arrays are indexed (height, width, ...)
return int(w/value), int(h/value) | 78bbe60c43a1bbf362c98125bfddc080cf568861 | 11,249 |
def analytical_pulse_energy(q, ekev):
"""
Estimate of analytical_pulse_energy from electron bunch charge and radiation energy
:param q: electron bunch charge [nC]
:param ekev: radiation energy [keV]
:return P: pulse energy [J]
"""
P = 19*q/ekev
return P/1e3 | d81ddafcc41e0e8619922dce0583bf19579112bc | 11,250 |
def get_file_date(tree):
"""
Get publication date from dta file xml tree.
:param tree: the xml tree
:return: str, the publication date
"""
date = tree.find("{http://www.dspin.de/data/metadata}MetaData/{http://www.dspin.de/data/metadata}source/{http://www.clarin.eu/cmd/}CMD/{http://www.clarin.eu/cmd/}Components/{http://www.clarin.eu/cmd/}teiHeader/{http://www.clarin.eu/cmd/}fileDesc/{http://www.clarin.eu/cmd/}sourceDesc/{http://www.clarin.eu/cmd/}biblFull/{http://www.clarin.eu/cmd/}publicationStmt/{http://www.clarin.eu/cmd/}date").text
return date | 64b0e43b9926d94f1cec066af5bf58ecc10d5044 | 11,251 |
def even_or_odd(n):
"""Return a string odd or even for odd or even values of n."""
if n % 2 == 0:
return 'Even'
else:
return 'Odd' | 956e071eeb4be5b9ec2851fc1b566ff1e3e2ef98 | 11,256 |
def hex_to_int(input: str) -> int:
"""Given a hex string representing bytes, returns an int."""
return int(input, 16) | 844714eaec1a2b2804cb4725e458980555516ce2 | 11,262 |
import string
def filter_filename(filename):
"""Utility to filter a string into a valid filename"""
valid_chars = "-_.() %s%s" % (string.ascii_letters, string.digits)
filename = ''.join(c for c in filename if c in valid_chars)
return filename | 8ab97e9a3d9b806090a4b55f6a47d2f011f99ede | 11,266 |
def dot_product(A, B):
"""
Computes the dot product of vectors A and B.
@type A: vector
@type B: vector
@rtype: number
@return: dot product of A and B
"""
if len(A) != len(B):
raise ValueError("Length of operands do not match")
result = 0.0
for i, v in enumerate(A):
result += A[i] * B[i]
return result | f63528bda5d3890137a35de5fd639086262b5c93 | 11,268 |
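A quick check against a hand-computed value (1*4 + 2*5 + 3*6 = 32):
print(dot_product([1, 2, 3], [4, 5, 6]))  # 32.0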
def has_sources(target, extension=None):
"""Returns True if the target has sources.
If an extension is supplied the target is further checked for at least 1 source with the given
extension.
"""
return (target.has_label('sources')
and (not extension
or (hasattr(target, 'sources')
and any(source.endswith(extension) for source in target.sources)))) | 71853d034f6fe8283f2178daf3648ac75457ec2e | 11,269 |
def fibo(n):
"""Returns nth fibonacci number."""
a, b = 0, 1
for i in range(1, n):
a, b = b, a+b
return b | b9a8d3960cc01f1745151eda250fea97c0e62b14 | 11,270 |
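With this indexing, fibo(1) and fibo(2) both return 1 and the sequence continues 2, 3, 5, 8, 13, ...:
print([fibo(n) for n in range(1, 8)])  # [1, 1, 2, 3, 5, 8, 13]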
def or_(*args):
"""Compute the logic OR between expressions.
"""
return any(args)  # any() takes a single iterable of expressions
def decimalHour(time_string):
""" Converts time from the 24hrs hh:mm:ss format to HH.hhh format. """
hh, mm, ss = time_string.split(':')
hh, mm, ss = map(float, (hh, mm, ss))
result = ((ss/60) + mm)/60 + hh
return result | b931ce0bd1c57c0d2b47a5d51331685e6c478b61 | 11,276 |
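For example, with values whose minutes and seconds divide evenly:
print(decimalHour('12:30:00'))  # 12.5
print(decimalHour('06:45:00'))  # 6.75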
def getFileName(fileRequest):
"""returns the file name from a file Request"""
nameByteLength = int(((fileRequest[3] << 8 ) | (fileRequest[4])))
return fileRequest[5:nameByteLength * 2] | d6f500b4101677ab2655650f33f0b3f206c55e9c | 11,277 |
def calc_air_density(temperature, pressure, elevation_ref=None, elevation_site=None, lapse_rate=-0.113,
specific_gas_constant=286.9):
"""
Calculates air density for a given temperature and pressure and extrapolates that to the site if both reference
and site elevations are given.
:param temperature: Temperature values in degree Celsius
:type temperature: float or pandas.Series or pandas.DataFrame
:param pressure: Pressure values in hectopascal, hPa, (1,013.25 hPa = 101,325 Pa = 101.325 kPa =
1 atm = 1013.25 mbar)
:type pressure: float or pandas.Series or pandas.DataFrame
:param elevation_ref: Elevation, in meters, of the reference temperature and pressure location.
:type elevation_ref: Floating point value (decimal number)
:param elevation_site: Elevation, in meters, of the site location to calculate air density for.
:type elevation_site: Floating point values (decimal number)
:param lapse_rate: Air density lapse rate kg/m^3/km, default is -0.113
:type lapse_rate: Floating point value (decimal number)
:param specific_gas_constant: Specific gas constant, R, for humid air J/(kg.K), default is 286.9
:type specific_gas_constant: Floating point value (decimal number)
:return: Air density in kg/m^3
:rtype: float or pandas.Series depending on the input
**Example usage**
::
import brightwind as bw
#For a series of air densities
data = bw.load_campbell_scientific(bw.demo_datasets.demo_campbell_scientific_site_data)
air_density = bw.calc_air_density(data.T2m, data.P2m)
#For a single value
bw.calc_air_density(15, 1013)
#For a single value with ref and site elevation
bw.calc_air_density(15, 1013, elevation_ref=0, elevation_site=200)
"""
temp = temperature
temp_kelvin = temp + 273.15 # to convert deg C to Kelvin.
pressure = pressure * 100 # to convert hPa to Pa
ref_air_density = pressure / (specific_gas_constant * temp_kelvin)
if elevation_ref is not None and elevation_site is not None:
site_air_density = round(ref_air_density + (((elevation_site - elevation_ref) / 1000) * lapse_rate), 3)
return site_air_density
elif elevation_site is None and elevation_ref is not None:
raise TypeError('elevation_site should be a number')
elif elevation_site is not None and elevation_ref is None:
raise TypeError('elevation_ref should be a number')
else:
return ref_air_density | 964bff72d67354abeff9a355788d3624d7ec230c | 11,282 |
import pathlib
def module_to_path(module: str, suffix=".py") -> pathlib.Path:
"""convert module a.b.c to path(a/b/c)"""
return pathlib.Path(*module.split(".")).with_suffix(suffix) | 682fd05379e81a5d8d24f5d0f4ab8134cbbce0e7 | 11,288 |
def byte(value):
"""Converts a char or int to its byte representation."""
if isinstance(value, str) and len(value) == 1:
return ord(value)
elif isinstance(value, int):
if value > 127:
return byte(value - 256)
if value < -128:
return byte(256 + value)
return value | 6ef59f49a5d0d49ee7222387a8615567ff9d6267 | 11,289 |
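The recursion folds out-of-range integers back into the signed 8-bit range, and a single character is converted via its code point:
print(byte('A'))   # 65
print(byte(200))   # -56
print(byte(-130))  # 126
print(byte(-5))    # -5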