content (string, 39–14.9k chars) | sha1 (string, 40 chars) | id (int64, 0–710k) |
---|---|---|
def load_coco_name(path):
"""Load labels from coco.name
"""
coco = {}
with open(path, 'rt') as file:
for index, label in enumerate(file):
coco[index] = label.strip()
return coco | 2da456b7c2879ec5725172280dacbcaaacd86bfc | 704,690 |
import base64
import gzip
import json
def decompress_metadata_string_to_dict(input_string): # pylint: disable=invalid-name
"""
Convert compact string format (dumped, gzipped, base64 encoded) from
IonQ API metadata back into a dict relevant to building the results object
on a returned job.
Parameters:
input_string (str): compressed string format of metadata dict
Returns:
dict: decompressed metadata dict
"""
if input_string is None:
return None
encoded = input_string.encode()
decoded = base64.b64decode(encoded)
decompressed = gzip.decompress(decoded)
return json.loads(decompressed) | c521da786d2a9f617c560916cc5f058b20cb3e21 | 704,691 |
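A quick round-trip check; the compress step below is an illustrative sketch, not part of the IonQ API:

# Encode a dict the same way the metadata is presumably packed, then decode it back:
packed = base64.b64encode(gzip.compress(json.dumps({"shots": 100}).encode())).decode()
assert decompress_metadata_string_to_dict(packed) == {"shots": 100}
assert decompress_metadata_string_to_dict(None) is None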
import struct
import socket
def inet_atoni(ip):
"""Like inet_aton() but returns an integer."""
return struct.unpack('>I', socket.inet_aton(ip))[0] | 3bd18b7aecf9a5a45033c7873163ee1387cb8a13 | 704,692 |
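Example usage; the values follow directly from network byte order:

assert inet_atoni('127.0.0.1') == 0x7F000001  # 2130706433
assert inet_atoni('0.0.0.0') == 0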
import re
def rep_unicode_in_code(code):
""" Replace unicode to str in the code
like '\u003D' to '='
:param code: type str
:return: type str
"""
pattern = re.compile('(\\\\u[0-9a-zA-Z]{4})')
m = pattern.findall(code)
for item in set(m):
        code = code.replace(item, chr(int(item[2:], 16)))  # item[2:] drops the leading '\u'
return code | 70e28ea741f0347190628876b59e27a56a5c0ccf | 704,693 |
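Example usage; note the input string contains a literal backslash-u escape:

assert rep_unicode_in_code('a \\u003D b') == 'a = b'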
import ast
def is_py3(file_path):
"""Check if code is Python3 compatible."""
# https://stackoverflow.com/a/40886697
    with open(file_path, "rb") as f:
        code_data = f.read()
try:
ast.parse(code_data)
except SyntaxError:
return False
return True | 78a48bdcc682108ce4fbe6fffe4a235898beec1c | 704,699 |
def part_1(input_data: list[int]) -> int:
"""Count the number of times a depth measurement increases from the previous measurement.
Args:
        input_data (list[int]): depths
Returns:
int: number of depth increases
"""
inc_count = 0
for i, depth in enumerate(input_data):
if i != 0 and input_data[i] > input_data[i - 1]:
inc_count += 1
return inc_count | 3ee506aca019f9393c93ced75e430d53b31a9fc2 | 704,702 |
def single_varint(data, index=0):
"""
The single_varint function processes a Varint and returns the
length of that Varint.
:param data: The data containing the Varint (maximum of 9
bytes in length as that is the maximum size of a Varint).
:param index: The current index within the data.
:return: varint, the processed varint value,
and index which is used to identify how long the Varint was.
"""
    # If the decimal value is >= 128 -- then the first bit is set and we
    # need to process the next byte.
if ord(data[index:index+1]) >= 128:
# Check if there is a three or more byte varint
if ord(data[index + 1: index + 2]) >= 128:
raise ValueError
varint = (ord(data[index:index+1]) - 128) * 128 + ord(
data[index + 1: index + 2])
index += 2
return varint, index
# If the decimal value is < 128 -- then first bit is not set
# and is the only byte of the Varint.
else:
varint = ord(data[index:index+1])
index += 1
return varint, index | 55b052300cc0cf5ac2fd8f7451ac121b408c1313 | 704,703 |
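Example usage on raw bytes; the values follow from the two-byte decoding rule above:

# 0x81 has its high bit set, so the next byte is consumed as well:
assert single_varint(b'\x81\x00') == (128, 2)
# 0x05 fits in a single byte:
assert single_varint(b'\x05') == (5, 1)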
from typing import Tuple
def _color_int_to_rgb(integer: int) -> Tuple[int, int, int]:
"""Convert an 24 bit integer into a RGB color tuple with the value range (0-255).
Parameters
----------
integer : int
The value that should be converted
Returns
-------
Tuple[int, int, int]:
The resulting RGB tuple.
"""
return ((integer >> 16) & 255, (integer >> 8) & 255, integer & 255) | df3eb5ad92d9383b0e6fe5c1603e0caec0df5c45 | 704,705 |
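Example usage:

assert _color_int_to_rgb(0xFF8000) == (255, 128, 0)
assert _color_int_to_rgb(0x000000) == (0, 0, 0)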
def label2binary(y, label):
"""
    Map the given label to +1 and all other labels to -1.
    Parameters:
    ----------
    y : `numpy.ndarray`
        (nData,) The labels of two classes.
    label : `int`
        The label to map to +1.
    Returns:
    --------
    y : `numpy.ndarray`
        (nData,) +1 where y equals label, -1 elsewhere.
"""
return (2*(y == label).astype(int))-1 | 5bce8491e9eef3a8c36b784ee0e252c641b24fdf | 704,706 |
def extract_power(eeg, D=3, dt=0.2, start=0):
""" extract power vaules for image
Parameters
----------
seizure : EEG | dict
eeg data
D : int, optional
epoch duration, by default 3
dt : float, optional
time step (seconds), by default 0.2
start : int, optional
time to start, by default 0
Returns
-------
baseline_ex_power : ndarray
baseline power
seizure_ex_power : ndarray
seizure power
"""
assert int(D/dt)*dt == D
num_steps = int(D/dt)
seiz = eeg['seizure']['eeg']
sfreq = seiz.info['sfreq']
onset = seiz.annotations.onset[0] - (seiz.first_samp/sfreq) + start
first = int(onset/dt)
baseline_ex_power = eeg['baseline']['ave_power'][:, :num_steps]
seizure_ex_power = eeg['seizure']['ave_power'][:, first:first+num_steps]
return baseline_ex_power, seizure_ex_power | 04c3fed38fa2a2d46ba7edee4bb3f04011d9d2a7 | 704,708 |
def calc_fm_perp_for_fm_loc(k_loc_i, fm_loc):
"""Calculate perpendicular component of fm to scattering vector."""
k_1, k_2, k_3 = k_loc_i[0], k_loc_i[1], k_loc_i[2]
mag_1, mag_2, mag_3 = fm_loc[0], fm_loc[1], fm_loc[2]
mag_p_1 = (k_3*mag_1 - k_1*mag_3)*k_3 - (k_1*mag_2 - k_2*mag_1)*k_2
mag_p_2 = (k_1*mag_2 - k_2*mag_1)*k_1 - (k_2*mag_3 - k_3*mag_2)*k_3
mag_p_3 = (k_2*mag_3 - k_3*mag_2)*k_2 - (k_3*mag_1 - k_1*mag_3)*k_1
return mag_p_1, mag_p_2, mag_p_3 | 00ba68c74d781748f39d2a577f227316dc523f0f | 704,710 |
from typing import Optional
def injection_file_name(
science_case: str, num_injs_per_redshift_bin: int, task_id: Optional[int] = None
) -> str:
"""Returns the file name for the raw injection data without path.
Args:
science_case: Science case.
num_injs_per_redshift_bin: Number of injections per redshift major bin.
task_id: Task ID.
"""
file_name = f"injections_SCI-CASE_{science_case}_INJS-PER-ZBIN_{num_injs_per_redshift_bin}.npy"
if task_id is not None:
file_name = file_name.replace(".npy", f"_TASK_{task_id}.npy")
return file_name | 57b034b6a60c317f0c071c1313d0d99f2802db30 | 704,717 |
def sd_title(bs4_object, target=None):
"""
:param bs4_object: An object of class BeautifulSoup
    :param target: Target HTML attributes as a dict. Defaults to {"class": "title-text"}.
:return: Returns paper title from Science Direct
"""
if target is None:
target = {"class": "title-text"}
return bs4_object.find_all("span", target)[0].text | 8429fe680fafb86c773a0cd2b3280e893b95fc9a | 704,720 |
def split_formula(formula, net_names_list):
"""
Splits the formula into two parts - the structured and unstructured part.
Parameters
----------
formula : string
The formula to be split, e.g. '~ 1 + bs(x1, df=9) + dm1(x2, df=9)'.
net_names_list : list of strings
A list of all network names defined by the user.
Returns
-------
    structured_term : string
        A string holding only the structured part of the original formula.
unstructured_terms: list of strings
A list holding all the unstructured parts of the original formula.
"""
structured_terms = []
unstructured_terms = []
    # remove spaces and the tilde, then split into formula terms
formula = formula.replace(' ','')
formula = formula.replace('~','')
formula_parts = formula.split('+')
# for each formula term
for part in formula_parts:
term = part.split('(')[0]
        # if it is an unstructured part
if term in net_names_list:
# append it to a list
unstructured_terms.append(part)
else:
structured_terms.append(part)
# join the structured terms together again
structured_term = '+'.join(structured_terms)
return structured_term, unstructured_terms | 1fce8617cbdaf767c1aebb6d0d685ca63975c820 | 704,721 |
import requests
def analyze_comments_page(username, repo, per_page, page, print_comments, print_stage_results):
"""
Analyzes one page of GitHub comments. Helping function.
Parameters
----------
username : str
The GitHub alias of the repository owner
repo : str
The GitHub repository name
per_page : int
The number of comments on the page (from 0 to 100)
page : int
The page number of the results to fetch
print_comments : bool
If True, each fetched comment and its analysis will be printed
print_stage_results : bool
        If True, final statistics of the analyzed comments will be printed at the end
Returns
-------
total : int
        The number of comments fetched (if the page holds fewer comments than per_page, all available comments are processed and their number is returned; otherwise equal to per_page)
pos : int
The number of positive comments fetched
neg : int
The number of negative comments fetched
neut : int
The number of neutral comments fetched
"""
total = 0
pos = 0
neg = 0
neut = 0
print("Processing page #"+str(page)+"...\n")
query = {'per_page': per_page, 'page': page}
resp = requests.get("https://api.github.com/repos/" +
username+"/"+repo+"/issues/comments", params=query)
comments = resp.json()
for comment in comments:
total = total+1
if print_comments:
print(str(total) + '. ' + comment.get("body"))
query = {'text': comment.get("body")}
response = requests.post(
"http://text-processing.com/api/sentiment/", data=query)
if print_comments:
print(response.json())
print('\n')
sentiment = response.json().get("label")
if sentiment == 'pos':
pos = pos+1
elif sentiment == 'neg':
neg = neg+1
else:
neut = neut+1
if print_stage_results:
print('Processed: '+str(total))
print('Positive comments: '+str(pos))
print('Negative comments: '+str(neg))
print('Neutral comments: '+str(neut))
return total, pos, neg, neut | e3d153a0319db0bc723df65cb8a92533f9b37b82 | 704,725 |
def get_remotes(y, x):
"""
For a given pair of ``y`` (tech) and ``x`` (location), return
``(y_remote, x_remote)``, a tuple giving the corresponding indices
of the remote location a transmission technology is connected to.
Example: for ``(y, x) = ('hvdc:region_2', 'region_1')``,
returns ``('hvdc:region_1', 'region_2')``
"""
y_remote = y.split(':')[0] + ':' + x
x_remote = y.split(':')[1]
return (y_remote, x_remote) | 3c479d818947362349982c77a9bbd87a97a3d4d5 | 704,726 |
from typing import List
def ingrid(x: float, y: float, subgrid: List[int]) -> bool:
"""Check if position (x, y) is in a subgrid"""
i0, i1, j0, j1 = subgrid
return (i0 <= x) & (x <= i1 - 1) & (j0 <= y) & (y <= j1 - 1) | d296d8a7abe5eeb3da8d57691755a2bd19dd15b6 | 704,727 |
import json
from pathlib import Path
from typing import Any, Union
def load_jsonl(path: Union[Path, str]) -> list[dict[str, Any]]:
""" Load from jsonl.
Args:
path: path to the jsonl file
"""
path = Path(path)
return [json.loads(line) for line in path.read_text().splitlines()] | a59d2920bfa491b1d4daa693b5e2e1b4846d6fc6 | 704,728 |
def row2string(row, sep=', '):
"""Converts a one-dimensional numpy.ndarray, list or tuple to string
Args:
row: one-dimensional list, tuple, numpy.ndarray or similar
sep: string separator between elements
Returns:
string representation of a row
"""
return sep.join("{0}".format(item) for item in row) | f81a2ec54b8c37285715cadca4458918962440b9 | 704,734 |
def build_aggregation(facet_name, facet_options, min_doc_count=0):
"""Specify an elasticsearch aggregation from schema facet configuration.
"""
exclude = []
if facet_name == 'type':
field = 'embedded.@type'
exclude = ['Item']
elif facet_name.startswith('audit'):
field = facet_name
else:
field = 'embedded.' + facet_name
agg_name = facet_name.replace('.', '-')
facet_type = facet_options.get('type', 'terms')
facet_length = 200
if facet_options.get('length') == 'long':
facet_length = 3000
if facet_type in ['terms', 'typeahead']:
agg = {
'terms': {
'field': field,
'min_doc_count': min_doc_count,
'size': facet_length,
},
}
if exclude:
agg['terms']['exclude'] = exclude
elif facet_type == 'exists':
agg = {
'filters': {
'filters': {
'yes': {
'bool': {
'must': {
'exists': {'field': field}
}
}
},
'no': {
'bool': {
'must_not': {
'exists': {'field': field}
}
}
},
},
},
}
else:
raise ValueError('Unrecognized facet type {} for {} facet'.format(
facet_type, field))
return agg_name, agg | b8c3f337143a229401b9a41a8fde8903027cf67e | 704,735 |
def inline(text):
"""
Convert all newline characters to HTML entities:
This can be used to prevent Hypertag from indenting lines of `text` when rendering parent nodes,
and to safely insert `text` inside <pre>, <textarea>, or similar elements.
"""
    return text.replace('\n', '&#10;')
def get_bq_col_type(col_type):
"""
Return correct SQL column type representation.
:param col_type: The type of column as defined in json schema files.
:return: A SQL column type compatible with BigQuery
"""
lower_col_type = col_type.lower()
if lower_col_type == 'integer':
return 'INT64'
if lower_col_type == 'string':
return 'STRING'
if lower_col_type == 'float':
return 'FLOAT64'
if lower_col_type == 'numeric':
return 'DECIMAL'
if lower_col_type == 'time':
return 'TIME'
if lower_col_type == 'timestamp':
return 'TIMESTAMP'
if lower_col_type == 'date':
return 'DATE'
if lower_col_type == 'datetime':
return 'DATETIME'
if lower_col_type == 'bool':
return 'BOOL'
return 'UNSET' | 86cac08a04d804cc6addbeee86014f1aa6d35735 | 704,738 |
import unicodedata
def remove_accents(string):
"""
Removes unicode accents from a string, downgrading to the base character
"""
nfkd = unicodedata.normalize('NFKD', string)
return u"".join([c for c in nfkd if not unicodedata.combining(c)]) | 41c8e05aa8982c85cf5cf2135276cdb5e26fefec | 704,740 |
def parse_range(rng, dictvars={}):
"""Parse a string with an integer range and return a list of numbers, replacing special variables in dictvars."""
parts = rng.split('-')
if len(parts) not in [1, 2]:
raise ValueError("Bad range: '%s'" % (rng,))
parts = [int(i) if i not in dictvars else dictvars[i] for i in parts]
start = parts[0]
end = start if len(parts) == 1 else parts[1]
if start > end:
end, start = start, end
return range(start, end + 1) | 214109a71c84d06241e29cacaa052d9ce00302c5 | 704,741 |
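Example usage; the 'N' key is an illustrative entry in dictvars:

assert list(parse_range('3-6')) == [3, 4, 5, 6]
assert list(parse_range('4')) == [4]
assert list(parse_range('1-N', {'N': 3})) == [1, 2, 3]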
def is_odd(num: int) -> bool:
"""Is num odd?
:param num: number to check.
:type num: int
:returns: True if num is odd.
:rtype: bool
:raises: ``TypeError`` if num is not an int.
"""
if not isinstance(num, int):
raise TypeError("{} is not an int".format(num))
return num % 2 == 1 | 0e5781596a99909e58583859948332c3afb06fb0 | 704,742 |
def interpolation(x0: float, y0: float, x1: float, y1: float, x: float) -> float:
"""
Performs interpolation.
Parameters
----------
x0 : float.
The coordinate of the first point on the x axis.
y0 : float.
The coordinate of the first point on the y axis.
x1 : float.
The coordinate of the second point on the x axis.
y1 : float.
The coordinate of the second point on the y axis.
x : float.
A value in the interval (x0, x1).
Returns
-------
float.
Is the interpolated or extrapolated value.
Example
-------
>>> from pymove.utils.math import interpolation
>>> x0, y0, x1, y1, x = 2, 4, 3, 6, 3.5
>>> print(interpolation(x0,y0,x1,y1,x), type(interpolation(x0,y0,x1,y1,x)))
7.0 <class 'float'>
"""
return y0 + (y1 - y0) * ((x - x0) / (x1 - x0)) | f8fc96c6dc6c2eeeeceb22f92b32023f3873fe3e | 704,744 |
import collections
def product_counter_v3(products):
"""Get count of products in descending order."""
return collections.Counter(products) | 22c57d50dc36d3235e6b8b642a4add95c9266687 | 704,745 |
def rossler(x, y, z, a, b, c):
""" Rössler System of Ordinary Differential Equations """
dx = - y - z
dy = x + a*y
dz = b + z*(x - c)
return dx, dy, dz | bcf27c7ff8223681d6dc7d0c49497e975b826d80 | 704,747 |
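A minimal forward-Euler integration sketch; the step size and the classic parameter choice (a=0.2, b=0.2, c=5.7) are illustrative assumptions:

x, y, z = 1.0, 1.0, 1.0
h = 0.01  # assumed step size
for _ in range(1000):
    dx, dy, dz = rossler(x, y, z, a=0.2, b=0.2, c=5.7)
    x, y, z = x + h * dx, y + h * dy, z + h * dz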
import re
def get_extension(filename):
"""
Extract file extension from filename using regex.
Args:
filename (str): name of file
Returns:
str: the file extension
"""
match = re.search(r"\.(?P<ext>[^.]+)$", filename)
if match:
return match.group("ext")
raise ValueError(f"No extension could be extracted from '{filename}'") | 8f5195b339a153d5fa144182505dba986992d4df | 704,748 |
def scale_val(val, factor, direction):
"""Scale val by factor either 'up' or 'down'."""
if direction == 'up':
return val+(val*factor)
if direction == 'down':
return val-(val*factor)
raise ValueError('direction must be "up" or "down"') | 16c2efe16fc787fe4461fb0ae640e2cf22d556e0 | 704,749 |
def adjust_update_rules_for_fixed_nodes(predecessor_node_lists, truth_tables, fixed_nodes):
"""
Adjust "update rules" matrix and its free element vector so that the fixed nodes will end up in their fixed
states on each time step automatically, with no manual interventions required.
:param predecessor_node_lists: list of predecessor node lists
:param truth_tables: list of dicts (key: tuple of predecessor node states, value: resulting node state)
:param fixed_nodes: dict with fixed nodes (key: node, value: node state)
:return: (predecessor node lists and truth tables, adjusted with respect to fixed nodes)
"""
adjusted_predecessor_node_lists = \
[predecessor_nodes.copy() for predecessor_nodes in predecessor_node_lists]
adjusted_truth_tables = [truth_table.copy() for truth_table in truth_tables]
for node, node_state in fixed_nodes.items():
adjusted_predecessor_node_lists[node] = []
adjusted_truth_tables[node] = {(): node_state}
return adjusted_predecessor_node_lists, adjusted_truth_tables | f41609ae25c3622100674372de5a364b095650f8 | 704,751 |
def parse_list_from_string(value):
"""
Handle array fields by converting them to a list.
Example:
1,2,3 -> ['1','2','3']
"""
return [x.strip() for x in value.split(",")] | 51e9c654b9d18b8be61c37aab5f5029dfdea2213 | 704,753 |
import itertools
def merge(d1, d2):
"""Merge to dicts into one.
Args:
d1 (dict): dataset 1
d2 (dict): dataset 2
Returns:
dict: merged dict
"""
return dict(itertools.chain(list(d1.items()), list(d2.items()))) | bb1d38f3cb45de6e98855fb04ae1d3d7e73e4a40 | 704,755 |
import re
def is_valid(number):
"""
Check if number is roman
:param number: string to check
:type number: str
:return: True or False
:rtype: bool
"""
    return re.match(
        r"^(M{0,3})(D?C{0,3}|C[DM])(L?X{0,3}|X[LC])(V?I{0,3}|I[VX])$", number
    ) is not None | 52e1937418d28701ee3d30da139f16ae64cfe480 | 704,756 |
def row_contains_data(fieldnames, row):
"""Returns True if the value of atleast on of the fields is truthy"""
for field in fieldnames:
if row.get(field):
return True
return False | 7575d1280186c582a652ab37deb4a93e667b51b2 | 704,761 |
from typing import Callable
from typing import Iterable
from typing import List
def lmap(f: Callable, x: Iterable) -> List:
"""list(map(f, x))"""
return list(map(f, x)) | 51b09a3491769aafba653d4198fde94ee733d68f | 704,769 |
def estimate_infectious_rate_constant_vec(event_times,
follower,
t_start,
t_end,
kernel_integral,
count_events=None):
"""
Returns estimation of infectious rate for given event time and followers on defined interval.
Optimized using numpy.
:param event_times: nd-array of event times
:param follower: nd-array of follower counts
:param t_start: time interval start
:param t_end: time interval end
:param kernel_integral: integral function of kernel function
:param count_events: count of observed events in interval (used for time window approach)
:return: estimated values for infectious rate
"""
kernel_int = follower * kernel_integral(t_start - event_times,
t_end - event_times)
if count_events is not None:
return count_events / kernel_int.sum()
else:
return event_times.size / kernel_int.sum() | 207833e1b32885fe39a209bfef227665c8c59ad1 | 704,772 |
def find(word, letter):
    """
    Find letter in word; return the index of its first occurrence, or -1 if absent.
    """
    index = 0
    while index < len(word):
        if word[index] == letter:
            return index
        index = index + 1
    return -1 | bdeb0f0993fb4f7904b4e9f5244ea9d7817fa15f | 704,773 |
def has_file_ext(view, ext):
"""Returns ``True`` if view has file extension ``ext``.
``ext`` may be specified with or without leading ``.``.
"""
if not view.file_name() or not ext.strip().replace('.', ''):
return False
if not ext.startswith('.'):
ext = '.' + ext
return view.file_name().endswith(ext) | 043edf03874d1ec20e08fcb5795fd205206f7194 | 704,775 |
def get_genes(exp_file, samples, threshold, max_only):
"""
Reads in and parses the .bed expression file.
    File format expected to be tab separated columns with header line:
CHR START STOP GENE <sample 1> <sample 2> ... <sample n>
Args:
exp_file (str): Name of expression file.
samples (list): Names of the samples in the vcf file.
threshold (float): Expression threshold to filter lowly/unexpressed genes.
        max_only (bool): if true, gene_dict value is a single value (max expression);
            if false, gene_dict value is a list of expression values.
            WARNING: for the list to be meaningful, the values need to be
            tied to the header sample names.
Returns:
gene_dict (dict): {gene_name: [expression_vals]}.
Only include values for samples in the vcf.
"""
data_cols = []
gene_dict = {}
print('start read exp_file:' + format(exp_file))
if max_only:
# read and only return max exp value in gene_dict
with open(exp_file) as f:
header = f.readline().strip().split('\t')
for samp in header[4:]:
if samp in samples:
data_idx = header.index(samp)
data_cols.append(data_idx)
# Read in expression levels for each gene.
for line in f:
line = line.strip().split('\t')
gene_name = line[3].upper()
                exp_val = float("-inf")
for idx in data_cols:
if float(line[idx]) > exp_val:
exp_val = float(line[idx])
gene_dict[gene_name] = exp_val
else:
# read and return exp value list in gene_dict
with open(exp_file) as f:
header = f.readline().strip().split('\t')
for samp in header[4:]:
if samp in samples:
data_idx = header.index(samp)
data_cols.append(data_idx)
# Read in expression levels for each gene.
for line in f:
line = line.strip().split('\t')
gene_name = line[3].upper()
exp_vals = []
for idx in data_cols:
exp_vals.append(line[idx])
gene_dict[gene_name] = exp_vals
return gene_dict | 62b27eef9c863078c98dee0d09bada5e058909e2 | 704,776 |
def conv_name_to_c(name):
"""Convert a device-tree name to a C identifier
This uses multiple replace() calls instead of re.sub() since it is faster
(400ms for 1m calls versus 1000ms for the 're' version).
Args:
name: Name to convert
Return:
String containing the C version of this name
"""
new = name.replace('@', '_at_')
new = new.replace('-', '_')
new = new.replace(',', '_')
new = new.replace('.', '_')
return new | 150af670d8befea7374bbb5b13da9d6e0734863e | 704,777 |
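Example usage on typical device-tree node names:

assert conv_name_to_c('serial@10,1') == 'serial_at_10_1'
assert conv_name_to_c('clock-controller.0') == 'clock_controller_0'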
def get_account_id(role_arn):
"""
Returns the account ID for a given role ARN.
"""
# The format of an IAM role ARN is
#
# arn:partition:service:region:account:resource
#
# Where:
#
# - 'arn' is a literal string
# - 'service' is always 'iam' for IAM resources
# - 'region' is always blank for IAM resources
# - 'account' is the AWS account ID with no hyphens
#
# See https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html#identifiers-arns
try:
arn, _, service, region, account, _ = role_arn.split(":")
except ValueError:
raise ValueError(f"Is this a valid AWS ARN? {role_arn}")
if arn != "arn":
raise ValueError(f"Is this a valid AWS ARN? {role_arn}")
if service != "iam" or region != "" or not account.isnumeric():
raise ValueError(f"Is this an IAM role ARN? {role_arn}")
return account | 623eb66eefd59b9416deb478c527062ae4454df7 | 704,778 |
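Example usage; the account number below is illustrative, not a real account:

assert get_account_id("arn:aws:iam::123456789012:role/my-role") == "123456789012"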
from typing import Any
def list_to_dict(data: list, value: Any = {}) -> dict:
"""Convert list to a dictionary.
Parameters
----------
data: list
Data type to convert
value: typing.Any
Default value for the dict keys
Returns
-------
dictionary : dict
Dictionary of the input data
"""
return {item: value for item in data} | 1e73bb6ca98b5e2d9b1e0f8d4cb19fc044a9ce63 | 704,780 |
def get_tag_name(tag):
"""
Extract the name portion of a tag URI.
Parameters
----------
tag : str
Returns
-------
str
"""
return tag[tag.rfind("/") + 1:tag.rfind("-")] | e24f0ae84ed096ec71f860291d1e476c75bf8370 | 704,781 |
def imap_any(conditions):
"""
Generate an IMAP query expression that will match any of the expressions in
`conditions`.
In IMAP, both operands used by the OR operator appear after the OR, and
chaining ORs can create very verbose, hard to parse queries e.g. "OR OR OR
X-GM-THRID 111 X-GM-THRID 222 OR X-GM-THRID 333 X-GM-THRID 444 X-GM-THRID
555". Using logical equivalence, a functionally identical query can be
built with "AND" and "NOT"; (a || b || c...) == !(!a && !b && !c...).
Arguments:
conditions: List of IMAP expressions.
Returns:
An IMAP expression that evaluates to "true" if any of the conditions are
true.
"""
if not conditions:
return ""
negations = [("NOT %s" % condition) for condition in conditions]
return "(NOT (%s))" % " ".join(negations) | de4ef1680cd2c8370d82640ff95186ed3ea81202 | 704,783 |
def format_sources(sources):
"""
Make a comma separated string of news source labels.
"""
formatted_sources = ""
for source in sources:
formatted_sources += source["value"] + ','
return formatted_sources | f9f86f11e4dfe9ecd3fbbd5e14d3ca750a4e1a5a | 704,784 |
def launch_coef_scores(args):
"""
Wrapper to compute the standardized scores of the regression coefficients, used when computing the number of
features in the reduced parameter set.
@param args: Tuple containing the instance of SupervisedPCABase, feature matrix and response array.
@return: The standardzed scores of the coefficients.
"""
spca, X, y = args
scoefs = spca._compute_stnd_coefs(X, y)
return scoefs | 02423ef564b55dfcc37bddadcc813edffba05795 | 704,786 |
def create_link(url):
"""Create an html link for the given url"""
    return f'<a href="{url}" target="_blank">{url}</a>' | 77a5375369be2be140a69a4521c50a92cee2d5ed | 704,787 |
def cummean(x):
"""Return a same-length array, containing the cumulative mean."""
return x.expanding().mean() | b5a35c56cb78e0588dd5be64a75384c4cd81ccb5 | 704,788 |
def get_syntax_errors(graph):
"""List the syntax errors encountered during compilation of a BEL script.
Uses SyntaxError as a stand-in for :exc:`pybel.parser.exc.BelSyntaxError`
:param pybel.BELGraph graph: A BEL graph
:return: A list of 4-tuples of line number, line text, exception, and annotations present in the parser
:rtype: list[tuple]
"""
return [
(number, line, exc, an)
for number, line, exc, an in graph.warnings
if isinstance(exc, SyntaxError)
] | a0f3493b88b081de3613397c997d71dabdae78be | 704,789 |
def is_valid_combination( row ):
"""
Should return True if combination is valid and False otherwise.
Test row that is passed here can be incomplete.
To prevent search for unnecessary items filtering function
is executed with found subset of data to validate it.
"""
n = len(row)
if n>1:
# Brand Y does not support Windows 98
if "98" == row[1] and "Brand Y" == row[0]:
return False
# Brand X does not work with XP
if "XP" == row[1] and "Brand X" == row[0]:
return False
if n > 4:
# Contractors are billed in 30 min increments
if "Contr." == row[3] and row[4] < 30:
return False
return True | c0758c3d30debbd3fc3d5f07d6728c23bfb71145 | 704,790 |
def get_callback(request, spider):
"""Get request.callback of a scrapy.Request, as a callable."""
if request.callback is None:
return getattr(spider, 'parse')
return request.callback | a1f62822d812bebdeabafa14edda4462949657d8 | 704,793 |
def merge_values(list1, list2):
"""Merge two selection value lists and dedup.
All selection values should be simple value types.
"""
tmp = list1[:]
if not tmp:
return list2
else:
tmp.extend(list2)
return list(set(tmp)) | 9412dd28c6110bc6df70ac7d563cb19d1211beb8 | 704,798 |
def format_heading(level, text):
"""Create a heading of <level> [1, 2 or 3 supported]."""
underlining = ['=', '-', '~', ][level-1] * len(text)
return '%s\n%s\n\n' % (text, underlining) | 6b8caaa134ddc32666a4d7ce62a775d6ffda7425 | 704,800 |
def get_new_size_zoom(current_size, target_size):
"""
Returns size (width, height) to scale image so
smallest dimension fits target size.
"""
scale_w = target_size[0] / current_size[0]
scale_h = target_size[1] / current_size[1]
scale_by = max(scale_w, scale_h)
return (int(current_size[0] * scale_by), int(current_size[1] * scale_by)) | e0b42eab3d35ba5c662282cab1ffa798327ad92a | 704,801 |
def get_name_component(x509_name, component):
"""Gets single name component from X509 name."""
value = ""
for c in x509_name.get_components():
if c[0] == component:
value = c[1]
return value | 6a473a96b99daa6f69fd6aac45f2594af933d4bd | 704,802 |
def song_line(line):
"""Parse one line
Parameters
----------
line: str
One line in the musixmatch dataset
Returns
-------
dict
track_id: Million song dataset track id, track_id_musixmatch:
Musixmatch track id and bag_of_words: Bag of words dict in
{word: count} format
Notes
-----
Musixmatch starts words at index 1, we are shifting it so it starts at 0
"""
elements = line.split(',')
track_id = elements[0]
track_id_musixmatch = elements[1]
bag_of_words = [s.split(':') for s in elements[2:]]
# shift index so it starts at zero
bag_of_words_dict = {int(idx) - 1: int(count) for idx, count
in bag_of_words}
return dict(track_id=track_id, track_id_musixmatch=track_id_musixmatch,
bag_of_words=bag_of_words_dict) | 2108dfa037aa6293a0b3111a97c354e62c0dd2a5 | 704,803 |
def remove_st_less_than(dataframe, column='ST', less_than=0.001):
"""
Remove any entry with an ST less than specified
Args:
dataframe (pandas.Dataframe): dataframe containing sensitivity analysis output
column (str): Column name, default is 'ST'
less_than (float): Remove anything less than this
Returns:
New dataframe.
"""
new_df = dataframe[dataframe[column] > less_than]
return new_df | 2ba052004c436f8d527ab9b5bc3e76c90aa5dce9 | 704,804 |
def fact(n):
"""Return the factorial of the given number."""
r = 1
while n > 0:
r = r * n
n = n - 1
return r | 7bdcdc759b49a9cd72f7bf3f12a18fc03ce50668 | 704,806 |
def apply_functions(lst, functions):
"""
:param lst: list of values
:param functions: list of functions to apply to each value.
Each function has 2 inputs: index of value and value
:return: [func(x) for x in lst], i.e apply the respective function to each
of the values
"""
assert len(lst) == len(functions)
for i, item in enumerate(lst):
func = functions[i] # get function
lst[i] = func(i, item) # apply function
return lst | 679a2219008e438249e1227d5aab6529019c497c | 704,811 |
def dict_is_test(data):
"""helper function to check whether passed argument is a proper :class:`dict` object describing a test.
:param dict data: value to check
:rtype: bool
"""
return (
isinstance(data, dict)
and "type" in data
and data["type"] == "test"
and "id" in data
and "attributes" in data
and isinstance(data["attributes"], dict)
)
# optionally, it can have "links" dict | 320b47f8f41f42f6a6554741c9b2de38b370605a | 704,813 |
from typing import Set
def GetZonesInRegion(region: str) -> Set[str]:
"""Returns a set of zones in the region."""
# As of 2021 all Azure AZs are numbered 1-3 for eligible regions.
return set([f'{region}-{i}' for i in range(1, 4)]) | e539662604eb5da2583630844dd54d11d266c827 | 704,814 |
def compare_3PC_keys(key1, key2) -> int:
"""
Return >0 if key2 is greater than key1, <0 if lesser, 0 otherwise
"""
if key1[0] == key2[0]:
return key2[1] - key1[1]
else:
return key2[0] - key1[0] | d134eaaa4ef8f218164be4e5bc6fced01c3de7eb | 704,815 |
def _GetMSBuildToolSettings(msbuild_settings, tool):
"""Returns an MSBuild tool dictionary. Creates it if needed."""
return msbuild_settings.setdefault(tool.msbuild_name, {}) | 3a2cb3e9c8910a6901be0937be18aac00d532e2b | 704,816 |
def convert_to_date(col):
"""Convert datetime to date."""
return col.date() | c6ac8febf4751e8f2c2c27fc740de286f2870cbe | 704,820 |
from typing import Any
def isstring(var:Any, raise_error:bool=False) -> bool:
"""Check if var is a string
Args:
        var (Any): variable to check
raise_error (bool, optional): TypeError raised if set to `True`. Defaults to `False`.
Raises:
TypeError: raised if var is not string
Returns:
bool: `True` if var is a string
"""
    is_ = isinstance(var, str)
if not is_ and raise_error:
raise TypeError(f'String expected: {var=} is not a str')
return is_ | 897c43539099c3d0b9b38abccce88869a90b9d9e | 704,826 |
def get_index_from_filename(
file_name: str
) -> str:
"""
Returns the index of chart from a reproducible JSON filename.
:param file_name: `str`
The name of the file without parent path.
:returns: `str`
The index of the chart (e.g., 1) or an empty string.
"""
assembled_index = ""
for name_char in file_name.replace(".json", "")[::-1]:
if name_char.isnumeric():
assembled_index += name_char
else:
break
return assembled_index[::-1] | 2cddcbcd9bf5079d58c75f19b5d2bf5b44ded173 | 704,829 |
def sexastr2deci(sexa_str):
"""Converts as sexagesimal string to decimal
Converts a given sexagesimal string to its decimal value
Args:
A string encoding of a sexagesimal value, with the various
components separated by colons
Returns:
A decimal value corresponding to the sexagesimal string
Examples:
>>> sexastr2deci('15:30:00')
15.5
>>> sexastr2deci('-15:30:45')
-15.5125
"""
if sexa_str[0] == '-':
sgn = -1.0
dms = sexa_str[1:].split(':') # dms = degree minute second
else:
sgn = 1.0
dms = sexa_str.split(':')
decival = 0
for i in range(0, len(dms)):
decival = decival + float(dms[i]) / (60.0 ** i)
return decival * sgn | 46a9d8752b05b1579ecc2b85d94c28613a08ab3c | 704,832 |
def reverb2mix_transcript_parse(path):
"""
Parse the file format of the MLF files that
contains the transcripts in the REVERB challenge
dataset
"""
utterances = {}
with open(path, "r") as f:
everything = f.read()
all_utt = everything.split("\n.\n")
for i, utt in enumerate(all_utt):
if i == 0:
assert utt[:7] == "#!MLF!#"
utt = utt[7:]
words = utt.split("\n")
label = words[0][4:-6]
sentence = " ".join(words[1:])
speaker = label[:-5]
utterance = label[-5:]
utterances[label] = {
"utterance_id": utterance,
"speaker_id": speaker,
"transcript": sentence,
}
return utterances | c8a1aa0c8a4d0dec6626cf8e9d2491336ee42d5a | 704,833 |
def architecture_is_32bit(arch):
"""
Check if the architecture specified in *arch* is 32-bit.
:param str arch: The value to check.
:rtype: bool
"""
return bool(arch.lower() in ('i386', 'i686', 'x86')) | a0cfaef4b03bc8cf335f0d19a3e46457db7574a9 | 704,838 |
def mpls_label_group_id(sub_type, label):
"""
MPLS Label Group Id
sub_type:
- 1: L2 VPN Label
- 2: L3 VPN Label
- 3: Tunnel Label 1
- 4: Tunnel Label 2
- 5: Swap Label
"""
return 0x90000000 + ((sub_type << 24) & 0x0f000000) + (label & 0x00ffffff) | f0235d1cd8baaf601baf0db43b81417d3d5823ac | 704,845 |
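Example usage; the label value is illustrative:

# Swap Label (sub_type 5) for MPLS label 16:
assert mpls_label_group_id(5, 16) == 0x95000010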
import torch
def get_spin_interp(zeta: torch.Tensor) -> torch.Tensor:
"""Compute spin interpolation function from fractional polarization `zeta`."""
exponent = 4.0 / 3
scale = 1.0 / (2.0 ** exponent - 2.0)
return ((1.0 + zeta) ** exponent + (1.0 - zeta) ** exponent - 2.0) * scale | b1abced09aead7394be773d93d59a621cda98d14 | 704,847 |
def string2token(t,nl,nt):
"""
This function takes a string and returns a token. A token is a tuple
where the first element specifies the type of the data stored in the
second element.
In this case the data types are limited to numbers, either integer, real
    or complex, and strings. The types are denoted as follows:
i - integer
f - float/real
c - complex
s - string
For navigational purposes two more elements added to identify the line
number (nl) the token was on, and the token number (nt) within the line.
"""
try:
i_a = int(t)
#
# Toldiff should recognise that -0 and 0 are the same, however, in
# a text based comparison that is not automatic so we have to force this.
#
if i_a == 0:
i_a = 0
token = ("i",i_a,nl,nt)
except ValueError:
#
# In Fortran double precision constants are often printed with a
# "D" for the exponent rather than an "E", i.e. 1.0E+01 might be
# printed as 1.0D+01 in Fortran. Python is not aware of this convention
# so we need to replace any potential "D"-s to obtain valid floating
# values.
#
z = t.replace("d","e")
z = z.replace("D","e")
try:
i_f = float(z)
#
# Toldiff should recognise that -0.0 and 0.0 are the same, however,
# in a text based comparison that is not automatic so we have to
# force this.
#
if i_f == 0.0:
i_f = 0.0
token = ("f",i_f,nl,nt)
except ValueError:
#
# The handling of complex numbers is unlikely to work in practice
# as in most cases complex numbers are printed as (1.0,2.0)
# rather than 1.0+2.0j. Therefore it is impossible to reliably
# distinguish between a complex number and a list of 2 real numbers.
#
try:
i_c = complex(z)
#
# Toldiff should recognise that x-0.0j and x+0.0j and that
# -0.0+y*j and 0.0+y*j are the same, however, in a text based
# comparison that is not automatic so we have to force this.
#
if i_c.real == 0.0:
i_c = complex(0.0,i_c.imag)
if i_c.imag == 0.0:
i_c = complex(i_c.real,0.0)
token = ("c",i_c,nl,nt)
except ValueError:
token = ("s",t,nl,nt)
return token | 23fd5da01a49076b1fcf474fbe1047329ad7471a | 704,852 |
def get_div(integer):
"""
Return list of divisors of integer.
:param integer: int
:return: list
"""
divisors = [num for num in range(2, int(integer**0.5)+1) if integer % num == 0]
rem_divisors = [int(integer/num) for num in divisors]
divisors += rem_divisors
divisors.append(integer)
res = list(set(divisors)) # remove duplicates
res.sort()
return res | 4c40a2b2da1d9681c1d7ca69a53975dd27c7bdb8 | 704,853 |
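Example usage; note that 1 is never included in the result:

assert get_div(12) == [2, 3, 4, 6, 12]
assert get_div(7) == [7]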
from typing import get_origin
def is_dict_type(tp):
"""Return True if tp is a Dict"""
return (
get_origin(tp) is dict
and getattr(tp, '_name', None) == 'Dict'
) | 3b9992b7b131e936472d4d0e2994ac476f0d0f76 | 704,855 |
def sum_up_validation_dataset(dataset, batch_size, repeat=True,
number_of_repetitions=0):
"""Define how the validation dataset is suppose to behave during training.
This function is applied to the validation dataset just before the actual
training process. The characteristics defined here address how images are
picked and how large the batch size is.
Args:
dataset (tensorflow dataset): The dataset to which the functions are
applied.
batch_size (int): Defines the number of images per validation step.
repeat (boolean): If set to false the validation data is only considered
once. If set to true, the dataset is either
considered endlessly or number_of_repetitions times.
number_of_repetitions (int): Defines how often the validation data is
considered.
Returns:
The tensorflow dataset which the applied changes described above.
"""
if (repeat):
if (number_of_repetitions > 0):
dataset = dataset.batch(batch_size).repeat(number_of_repetitions)
else:
dataset = dataset.batch(batch_size).repeat()
else:
dataset = dataset.batch(batch_size)
return dataset | 9bab85eba802d5198bfd39bc42bd2fae5209d356 | 704,856 |
def cleanup_code(content: str):
"""Automatically removes code blocks from the code."""
# remove ```py\n```
if content.startswith('```') and content.endswith('```'):
return '\n'.join(content.split('\n')[1:-1])
# remove `foo`
return content.strip('` \n') | a026668f01e1641618c5b25b06396516410dbe1e | 704,859 |
import operator
def lcs(l1, l2, eq=operator.eq):
"""Finds the longest common subsequence of l1 and l2.
Returns a list of common parts and a list of differences.
>>> lcs([1, 2, 3], [2])
([2], [1, 3])
>>> lcs([1, 2, 3, 3, 4], [2, 3, 4, 5])
([2, 3, 4], [1, 3, 5])
>>> lcs('banana', 'baraban')
(['b', 'a', 'a', 'n'], ['a', 'r', 'b', 'n', 'a'])
>>> lcs('abraban', 'banana')
(['b', 'a', 'a', 'n'], ['a', 'r', 'n', 'b', 'a'])
>>> lcs([1, 2, 3], [4, 5])
([], [4, 5, 1, 2, 3])
>>> lcs([4, 5], [1, 2, 3])
([], [1, 2, 3, 4, 5])
"""
prefs_len = [
[0] * (len(l2) + 1)
for _ in range(len(l1) + 1)
]
for i in range(1, len(l1) + 1):
for j in range(1, len(l2) + 1):
if eq(l1[i - 1], l2[j - 1]):
prefs_len[i][j] = prefs_len[i - 1][j - 1] + 1
else:
prefs_len[i][j] = max(prefs_len[i - 1][j], prefs_len[i][j - 1])
common = []
diff = []
i, j = len(l1), len(l2)
while i and j:
assert i >= 0
assert j >= 0
if eq(l1[i - 1], l2[j - 1]):
common.append(l1[i - 1])
i -= 1
j -= 1
elif prefs_len[i - 1][j] >= prefs_len[i][j - 1]:
i -= 1
diff.append(l1[i])
else:
j -= 1
diff.append(l2[j])
diff.extend(reversed(l1[:i]))
diff.extend(reversed(l2[:j]))
return common[::-1], diff[::-1] | 4b5d3cb9911a6834c006e78f7b40061695c464e2 | 704,863 |
def calc_sparsity(optimizer, total_params, total_quant_params):
"""
Returns the sparsity of the overall network and the sparsity of quantized layers only.
Parameters:
-----------
optimizer:
An optimizer containing quantized model layers in param_groups[1]['params'] and non-quantized layers,
such as BatchNorm, Bias, etc., in param_groups[1]['params'].
total_params:
Total number of parameters.
total_quant_params:
Number of quantized parameters.
Returns:
--------
sparsity_total:
Sparsity of the overall network.
sparsity_quant:
Sparsity of quantized layers of the network.
"""
nonzero_elements_quant = 0
for layer in optimizer.param_groups[1]['params']:
nonzero_elements_quant += layer[layer != 0].numel()
nonzero_elements_total = 0
for layer in optimizer.param_groups[0]['params']:
nonzero_elements_total += layer[layer != 0].numel()
nonzero_elements_total += nonzero_elements_quant
sparsity_total = (total_params - nonzero_elements_total) / total_params
sparsity_quant = (total_quant_params - nonzero_elements_quant) / total_quant_params
return sparsity_total, sparsity_quant | 92ee924239ee8d7ac97aebba2958671043aa2d89 | 704,864 |
def dict_get(d, key, default=None):
""":yaql:get
Returns value of a dictionary by given key or default if there is
no such key.
:signature: dict.get(key, default => null)
:receiverArg dict: input dictionary
:argType dict: dictionary
:arg key: key
:argType key: keyword
:arg default: default value to be returned if key is missing in dictionary.
null by default
:argType default: any
:returnType: any (appropriate value type)
.. code::
yaql> {"a" => 1, "b" => 2}.get("c")
null
yaql> {"a" => 1, "b" => 2}.get("c", 3)
3
"""
return d.get(key, default) | 5fb6a71e507f62eb530215385c97c56a75765df7 | 704,872 |
import pytz
def as_utc(time):
"""Convert a time to a UTC time."""
return time.astimezone(pytz.utc) | 716858e88daa43b61f5cedae72e74dafcf67d423 | 704,878 |
def insertion_sort(L):
"""Implementation of insertion sort."""
n = len(L)
if n < 2:
return L
for i in range(1, n):
tmp = L[i]
j = i
while j > 0 and tmp < L[j - 1]:
L[j] = L[j - 1]
j -= 1
        L[j] = tmp
    return L | ca7cbb5c676173ad10ce98d8b9e579a65afad0fb | 704,882 |
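Example usage; the sort happens in place:

data = [5, 2, 4, 1, 3]
insertion_sort(data)
assert data == [1, 2, 3, 4, 5]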
def get_img_space(wsp, img):
"""
Find out what image space an image is in
Note that this only compares the voxel->world transformation matrix to the
reference image for each space. It is quite possible for two images to be in
the same space but not be registered to one another. In this case,
the returned space may not be accurate when determining whether a registration
is required.
:param wsp: Workspace object
:param img: Image
:return: Name of image space for ``img``, e.g. ``native``, ``struc``
"""
img_space = None
for space in ('native', 'calib', 'struc', 'std', 'custom'):
ref = getattr(wsp.reg, "%sref" % space)
if ref is not None and img.sameSpace(ref):
img_space = space
break
if img_space is None:
raise RuntimeError("Could not determine space for image: %s" % str(img))
return img_space | b455dd6300cf13cbba5d8e2d44685e06d8fb4cad | 704,887 |
def _not_exhausted(last_fetched):
"""Check if the last fetched tasks were the last available."""
return len(last_fetched) == 100 | 570cf94ba9c723cced8ec3a746f2ce070d780fd5 | 704,891 |
def has_oxidation_states(comp):
"""Check if a composition object has oxidation states for each element
Args:
comp (Composition): Composition to check
Returns:
(boolean) Whether this composition object contains oxidation states
"""
for el in comp.elements:
if not hasattr(el, "oxi_state") or el.oxi_state is None:
return False
return True | 702595070b588761142055bc1532ce26acd287fb | 704,895 |
from typing import Optional
def parse_opt_int(s: Optional[str]) -> Optional[int]:
"""
parse_opt_int(s: Optional[str]) -> Optional[int]
If s is a string, parse it for an integer value (raising a ValueError if
it cannot be parsed correctly.)
If s is None, return None.
Otherwise, raise a TypeError.
"""
if s is None:
return None
if isinstance(s, str):
return int(s)
raise TypeError(f"value must be a string or None: {type(s).__name__}") | 91a102c8c8e6a6ee109e9c88c56d9a6959f1f838 | 704,897 |
def setup(hass, config):
"""Mock a successful setup."""
return True | fd2977534aa8a165b49c4fbddc513c8f77b0588d | 704,898 |
def get_attachment_file_upload_to(instance, filename):
""" Returns a valid upload path for the file of an attachment. """
return instance.get_file_upload_to(filename) | e38c51a2ca947bebe1ed274c4265081c6b9e7c41 | 704,902 |
def read_to_ulens_in_intvls(read, intvls):
"""Extract units within `intvls` from `read.units`."""
return [unit.length for unit in read.units
if unit.length in intvls] | 11159bea8bbf0cb68f0e9a7355c82e93b430065d | 704,909 |
def find_merge_commit_in_prs(needle, prs):
"""Find the merge commit `needle` in the list of `prs`
If found, returns the pr the merge commit comes from. If not found, return
None
"""
for pr in prs[::-1]:
if pr['merge_commit'] is not None:
if pr['merge_commit']['hash'] == needle[1][:12]:
return pr
return None | 42320473aff84985e35cdf9024a64a18fe6f14f1 | 704,913 |
def update_board(position, board, player):
"""
Update the board with the user input position if position not taken
returns board, True=position taken or False=position not taken and board updated
args: position (int 1-9, user input)
board (np.array 2d)
player ("X" or "O")
"""
#make position 1-9 compatible with an 3x3 2d array indexed 0-8
position = position - 1
    # find position in the 3x3 array: obtain row/col index
    row = int(position / 3)
    col = position % 3  # equivalent for all positions 0-8
#If position not taken, update board
if board[row][col] == '-':
board[row][col] = player
return board, False
#else position is taken, do not update board
else:
return board, True | eb53d24c4976499e6611c97757d0c33b4cb3254f | 704,918 |
def marks(category, mark=None, category_marks=None, public=False):
"""Assign marks to a test or suite of tests, grouped by a category."""
def decorator(test_item):
if mark is None and category_marks is None:
raise ValueError("One of mark or category_marks must be defined")
test_item.__marks_category__ = category
test_item.__marks_mark__ = mark
test_item.__marks_category_marks__ = category_marks
test_item.__marks_public__ = public
return test_item
return decorator | 2d47a8df4f610dbc081dd57fce169e2f89b88ca4 | 704,922 |
def underscore_to_camelcase(value):
"""
Converts underscore notation (something_named_this) to camelcase notation (somethingNamedThis)
>>> underscore_to_camelcase('country_code')
'countryCode'
>>> underscore_to_camelcase('country')
'country'
>>> underscore_to_camelcase('price_GBP')
'priceGBP'
>>> underscore_to_camelcase('recommended_horizontal_resolution')
'recommendedHorizontalResolution'
>>> underscore_to_camelcase('postal_or_zip_code')
'postalOrZipCode'
>>> underscore_to_camelcase('test_ABC_test')
'testABCTest'
"""
words = value.split('_')
return '%s%s' % (words[0], ''.join(x if x.isupper() else x.capitalize() for x in words[1:])) | 94bb5c007d3b50112c62ca9b3e97c5bf4f155fff | 704,925 |
def findCenter(S):
"""Find the approximate center atom of a structure.
The center of the structure is the atom closest to (0.5, 0.5, 0.5)
Returns the index of the atom.
"""
best = -1
bestd = len(S)
    center = [0.5, 0.5, 0.5]  # the canonical center
for i in range(len(S)):
d = S.lattice.dist(S[i].xyz, center)
if d < bestd:
bestd = d
best = i
return best | 634945a5560b3791f3835f3da090decd1b06b933 | 704,926 |
from datetime import datetime
def _make_todays_date() -> str:
""" build today's date as a standard format """
return datetime.now().strftime("%a %d-%b") | fdb9bc420689081586ac19fe91a17ea871576d59 | 704,930 |
def get_keywords(string):
"""Get keywords for a given string.
Args:
string (str): A string to get keywords for.
Returns:
(list): A list of keywords.
"""
keywords = string.lower().split(' ')
keywords = [x.strip() for x in keywords if x]
keywords = list(set(keywords))
return keywords | 8d4e0781701dc3574583baf417c573967638e86f | 704,934 |
def distancia(ponto1, ponto2):
"""
    Calculate the distance between two points
"""
xdif = ponto2.getx() - ponto1.getx()
ydif = ponto2.gety() - ponto1.gety()
dif = (xdif**2 + ydif**2)**0.5
return dif | 36a980a1081133fb6496585c25cca5782ceef06d | 704,935 |
def prep_tweet_body(tweet_obj, args, processed_text):
""" Format the incoming tweet
Args:
tweet_obj (dict): Tweet to preprocess.
args (list): Various datafields to append to the object.
0: subj_sent_check (bool): Check for subjectivity and sentiment.
1: subjectivity (num): Subjectivity result.
2: sentiment (dict): Sentiment result.
processed_text (list): List of tokens and ngrams etc.
Returns:
dict: Tweet with formatted fields
"""
subj_sent_check = args[0]
result = tweet_obj
if subj_sent_check:
subjectivity = args[1]
sentiment = args[2]
result["subjectivity"] = subjectivity
result["compound_score"] = sentiment["compound"]
result["neg_score"] = sentiment["neg"]
result["neu_score"] = sentiment["neu"]
result["pos_score"] = sentiment["pos"]
result["hs_keyword_count"] = len(processed_text[4])
result["hs_keyword_matches"] = processed_text[4]
result["tokens"] = processed_text[0]
result["stopwords"] = processed_text[1]
result["hashtags"] = processed_text[2]
result["user_mentions"] = processed_text[3]
result["unigrams"] = processed_text[5][0]
result["bigrams"] = processed_text[5][1]
result["trigrams"] = processed_text[5][2]
result["quadgrams"] = processed_text[5][3]
result["pentagrams"] = processed_text[5][4]
result["stopword_ngrams"] = processed_text[6]
result["ordered_tokens"] = processed_text[7]
return result | 9163d7bb10e3bb31849090d8ebfe4d00c19db2df | 704,939 |
import time
def timedcall(fn, *args):
"""
Run a function and measure execution time.
Arguments:
fn : function to be executed
args : arguments to function fn
Return:
dt : execution time
result : result of function
Usage example:
You want to time the function call "C = foo(A,B)".
--> "T, C = timedcall(foo, A, B)"
"""
t0 = time.time()
result = fn(*args)
t1 = time.time()
dt = t1 - t0
return dt, result | 60779c4f4b63796995d722133c304edf519ecd8f | 704,940 |
def tokuda_gap(i):
"""Returns the i^th Tokuda gap for Shellsort (starting with i=0).
The first 20 terms of the sequence are:
[1, 4, 9, 20, 46, 103, 233, 525, 1182, 2660, 5985, 13467, 30301, 68178, 153401, 345152, 776591, 1747331, 3931496, 8845866, ...]
h_i = ceil( (9*(9/4)**i-4)/5 ) for i>=0.
If 9*(9/4)**i-4)/5 is not an integer, I believe this is the same as
h_i = ((9**(i+1)>>(i<<1))-4)//5 + 1,
and I believe the above should be non-integer valued for all i>0.
(We have to explicitly return 1 when i=0, as the above formula would return 2.)
"""
return 1 if i==0 else ((9**(i+1)>>(i<<1))-4)//5 + 1 | 710633e924cb6e31a866683b91da6489c781ba4a | 704,941 |
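A minimal Shellsort sketch built on these gaps; the gap-selection loop is an assumed usage pattern, not part of the original snippet:

def shellsort(a):
    """Sort list a in place using Tokuda's gap sequence."""
    gaps = []
    i = 0
    while tokuda_gap(i) < len(a):
        gaps.append(tokuda_gap(i))
        i += 1
    for gap in reversed(gaps):  # largest gap first
        for j in range(gap, len(a)):
            tmp = a[j]
            k = j
            while k >= gap and a[k - gap] > tmp:
                a[k] = a[k - gap]
                k -= gap
            a[k] = tmp
    return a

assert shellsort([5, 3, 8, 1, 9, 2]) == [1, 2, 3, 5, 8, 9]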