content
stringlengths 35
416k
| sha1
stringlengths 40
40
| id
int64 0
710k
|
---|---|---|
def is_stateful(change, stateful_resources):
    """Return True when the change record targets a stateful resource type."""
    resource_type = change['ResourceType']
    return resource_type in stateful_resources
import difflib
def _get_diff_text(old, new):
"""
Returns the diff of two text blobs.
"""
diff = difflib.unified_diff(old.splitlines(1), new.splitlines(1))
return "".join([x.replace("\r", "") for x in diff]) | bd8a3d49ccf7b6c18e6cd617e6ad2ad8324de1cc | 5,185 |
import argparse
from datetime import datetime
def get_args(args):
    """Get the script arguments."""
    parser = argparse.ArgumentParser(
        description="tvtid - Feteches the tv schedule from client.dk"
    )
    parser.add_argument(
        "-d",
        "--date",
        metavar="datetime",
        type=lambda value: datetime.strptime(value, "%Y-%m-%d"),
        help="The date of which you want to get the schedule",
    )
    parser.add_argument("channel", nargs="+", type=str, help="The channel")
    return parser.parse_args(args)
def ft2m(ft):
    """
    Converts feet to meters.

    Args:
        ft: length in feet, or None.

    Returns:
        Length in meters (ft * 0.3048), or None when *ft* is None.
    """
    # Identity comparison, not equality, for the None sentinel (PEP 8).
    if ft is None:
        return None
    return ft * 0.3048
from typing import Dict
def _get_setup_keywords(pkg_data: dict, keywords: dict) -> Dict:
"""Gather all setuptools.setup() keyword args."""
options_keywords = dict(
packages=list(pkg_data),
package_data={pkg: list(files)
for pkg, files in pkg_data.items()},
)
keywords['options'].update(options_keywords)
return keywords | 34f2d52c484fc4e49ccaca574639929756cfa4dc | 5,188 |
def release_date(json):
    """
    Return the 'updated' date stored in the given json content.
    """
    updated = json['updated']
    return updated
import argparse
def parse_arguments():
    """Argument parser for extract_branch_length"""
    description = (
        "extract_branch_length.py: extract the branch length of the "
        " common ancestor of a set of species"
    )
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument(
        "-t",
        "--tree",
        required=True,
        help="Input species tree (Newick; not transcript tree)",
    )
    parser.add_argument(
        "-s",
        "--species",
        required=True,
        help="Target species, separated by commas. If some are missing, won't be taken into account",
    )
    # Return the parsed namespace as a plain dict.
    return vars(parser.parse_args())
def part_1_solution_2(lines):
    """Counts the number of times a depth measurement increases.

    Compares each consecutive pair of measurements; still O(n), but
    readable — replacing the index-based one-liner the original
    docstring itself called "unbearably ugly".
    """
    return sum(1 for prev, cur in zip(lines, lines[1:]) if cur > prev)
import six
def bool_from_string(subject, strict=False, default=False):
    """
    Convert a string to a bool.

    :param subject: the value to convert
    :type subject: str
    :param strict: when True, only values in the accepted lists convert;
        anything else raises ValueError instead of returning *default*
    :type strict: bool
    :param default: value returned when conversion fails and strict is False
    :type default: bool
    :returns: the converted value
    :rtype: bool
    """
    TRUE_STRINGS = ('1', 't', 'true', 'on', 'y', 'yes')
    FALSE_STRINGS = ('0', 'f', 'false', 'off', 'n', 'no')
    # Booleans pass through unchanged.
    if isinstance(subject, bool):
        return subject
    # six removed: this codebase is Python 3 (f-strings are used elsewhere),
    # so plain str() replaces six.text_type / six.string_types.
    if not isinstance(subject, str):
        subject = str(subject)
    lowered = subject.strip().lower()
    if lowered in TRUE_STRINGS:
        return True
    elif lowered in FALSE_STRINGS:
        return False
    elif strict:
        acceptable = ', '.join(
            "'%s'" % s for s in sorted(TRUE_STRINGS + FALSE_STRINGS))
        msg = "Unrecognized value '%(val)s', acceptable values are: %(acceptable)s" % {'val': subject,
                                                                                      'acceptable': acceptable}
        raise ValueError(msg)
    else:
        return default
import argparse
def arguments_parser() -> argparse.Namespace:
    """Parse command-line options for the linter driver.

    :returns: namespace with ``username`` (str) and ``repositories``
        (list of str) attributes, both with hard-coded defaults.
    """
    parser = argparse.ArgumentParser(description="Input File containing list of repositories url's")
    # NOTE: the backslash continuations embed the continuation-line
    # indentation inside the help strings; argparse's default formatter
    # re-wraps whitespace, so the rendered help is unaffected.
    parser.add_argument("-u", "--username", default='Luzkan',
                        help="GitHub Username. \
                             (default: %(default)s)")
    parser.add_argument("-r", "--repositories", default=['DeveloperEnvironment', 'PythonCourse'], nargs='*',
                        help="List of repository names that should be linted. \
                             (default: %(default)s)")
    # Parses sys.argv[1:].
    return parser.parse_args()
def find_used_modules(modules, text):
    """
    Given a list of modules, return the set of all those imported in text
    (by substring match on lines containing the word 'import').
    """
    import_lines = [line for line in text.splitlines() if 'import' in line]
    return {mod for mod in modules for line in import_lines if mod in line}
def string2mol2(filename, string):
    """
    Writes molecule to filename.mol2 file, input is a string of Mol2 blocks.

    Appends the '.mol2' extension when missing. (The original test
    compared a 4-character slice against the 5-character '.mol2', which
    was always unequal, so the extension was appended even when already
    present, e.g. 'x.mol2' -> 'x.mol2.mol2'.)
    """
    if not filename.endswith('.mol2'):
        filename += '.mol2'
    with open(filename, 'w') as out_file:
        out_file.write(string)
    return None
from datetime import datetime
def tradedate_2_dtime(td):
    """ convert trade date as formatted by yfinance (YYYYMMDD) to a datetime object """
    digits = str(int(td))
    year = int(digits[:4])
    month = int(digits[4:6])
    day = int(digits[6:])
    return datetime(year, month, day)
from typing import Callable
from typing import Optional
def _get_utf16_setting() -> Callable[[Optional[bool]], bool]:
"""Closure for holding utf16 decoding setting."""
_utf16 = False
def _utf16_enabled(utf16: Optional[bool] = None) -> bool:
nonlocal _utf16
if utf16 is not None:
_utf16 = utf16
return _utf16
return _utf16_enabled | 1f0caeab03047cc847d34266c1ed53eabdf01a10 | 5,201 |
import requests
def get(target: str) -> tuple:
    """Fetches a document via HTTP/HTTPS and returns a tuple containing a boolean indicating the result of the request,
    the URL we attempted to contact and the request HTML content in bytes and text format, if successful.
    Otherwise, returns a tuple containing a boolean indicating the result of the request, the URL we attempted to
    contact and the HTTP status code or exception error output from the request.
    :param target: host name or URL; 'http://' is prepended when no scheme is given
    :return: tuple
    """
    # Default to plain HTTP when the caller supplied a bare host.
    if target.startswith('http://') is False and target.startswith('https://') is False:
        target = 'http://{0}'.format(target)
    try:
        # NOTE(review): verify=False disables TLS certificate validation —
        # presumably deliberate for a scanner; confirm before reuse.
        request = requests.get(url=target, timeout=3, verify=False)
    except Exception as e:
        # Connection-level failure (DNS, refused, timeout, ...) -> 3-tuple.
        return False, target, e.__str__()
    try:
        request.raise_for_status()
    except requests.exceptions.HTTPError as e:
        # HTTP 4xx/5xx -> 3-tuple with the error text.
        return False, target, e.__str__()
    if request.ok:
        # NOTE: success is a 4-tuple while failures are 3-tuples.
        return True, request.url, request.content, request.text
    return False, request.url, request.status_code
import torch
def f_score(pr, gt, beta=1, eps=1e-7, threshold=.5):
    """dice score(also referred to as F1-score)"""
    # Binarize predictions unless thresholding is explicitly disabled.
    if threshold is not None:
        pr = (pr > threshold).float()
    true_pos = torch.sum(gt * pr)
    false_pos = torch.sum(pr) - true_pos
    false_neg = torch.sum(gt) - true_pos
    beta_sq = beta ** 2
    numerator = (1 + beta_sq) * true_pos + eps
    denominator = (1 + beta_sq) * true_pos + beta_sq * false_neg + false_pos + eps
    return numerator / denominator
def makeSatelliteDir(metainfo):
    """
    Make the directory name for the 'satellite' level
    from the second character of the satellite id.
    """
    return "Sentinel-" + metainfo.satId[1]
import os
import re
def parse_requirements(file_name):
    """Taken from http://cburgmer.posterous.com/pip-requirementstxt-and-setuppy

    Reads config/<file_name> next to this module and returns the list of
    requirement lines, skipping blank lines and '#' comments.
    """
    requirements = []
    config_path = os.path.join(os.path.dirname(__file__), "config", file_name)
    # `with` closes the file deterministically (the original leaked the
    # open handle until garbage collection).
    with open(config_path, "r") as req_file:
        for raw_line in req_file:
            line = raw_line.strip()
            # comments and blank lines
            if re.match(r"(^#)|(^$)", line):
                continue
            requirements.append(line)
    return requirements
def grid_id_from_string(grid_id_str):
    """Convert a comma-separated grid ID string to a tuple of ints.
    Parameters
    ----------
    grid_id_str : str
        The string grid ID representation
    Returns
    -------
    ret : tuple of ints
        A 4-length tuple representation of the dihedral id
    """
    parts = grid_id_str.split(',')
    return tuple(map(int, parts))
import re
def safe_filename(name: str, file_ending: str = ".json") -> str:
    """Return a safe version of name + file_type."""
    # Whitespace runs become underscores, remaining non-word runs dashes.
    underscored = re.sub(r"\s+", "_", name)
    cleaned = re.sub(r"\W+", "-", underscored)
    return cleaned.lower().strip() + file_ending
def text_editor():
    """Solution to exercise R-2.3.

    Describe a component from a text-editor GUI and the methods that it
    encapsulates.

    Solution: the spellchecker is a common text-editor component. Its
    methods might include:
        1. run_spellcheck()
        2. parse_text()
        3. lookup_word()
        4. underline_misspelled_word()
    """
    return True
import os
def env_path_contains(path_to_look_for, env_path=None):
    """Check if the specified path is listed in OS environment path.
    :param path_to_look_for: The path the search for.
    :param env_path: The environment path str (defaults to os.environ['PATH']).
    :return: True if the find_path exists in the env_path.
    :rtype: bool
    """
    if not path_to_look_for:
        return False
    if env_path is None or not env_path:
        env_path = os.environ['PATH']
    # Strip any embedded separators from the needle before comparing.
    target = path_to_look_for.replace(os.pathsep, '')
    entries = env_path.split(os.pathsep)
    return any(entry == target for entry in entries)
def _epsilon(e_nr, atomic_number_z):
"""For lindhard factor"""
return 11.5 * e_nr * (atomic_number_z ** (-7 / 3)) | 8c6115b77ce3fb4956e5596c400c347e68382502 | 5,216 |
import six
def find_only(element, tag):
    """Return the only subelement with tag(s).

    :param element: an ElementTree-style element supporting .findall()
    :param tag: a single tag name, or a list of tag names to search
    :raises AssertionError: when the matches do not number exactly one

    six removed: on Python 3 `six.string_types` is simply `str`.
    """
    if isinstance(tag, str):
        tag = [tag]
    found = []
    for t in tag:
        found.extend(element.findall(t))
    assert len(found) == 1, 'expected one <%s>, got %d' % (tag, len(found))
    return found[0]
import os
def GetFilesSplitByOwners(files):
    """Returns a map of files split by OWNERS file.

    Args:
        files: iterable of (action, path) pairs.

    Returns:
      A map where keys are paths to directories containing an OWNERS file and
      values are lists of files sharing an OWNERS file.
    """
    files_split_by_owners = {}
    for action, path in files:
        dir_with_owners = os.path.dirname(path)
        # Find the closest parent directory with an OWNERS file.
        # NOTE(review): if no ancestor directory has an OWNERS file,
        # os.path.dirname eventually yields '' repeatedly and this loop
        # would not terminate — presumably the repository root always
        # contains OWNERS; confirm.
        while (dir_with_owners not in files_split_by_owners
               and not os.path.isfile(os.path.join(dir_with_owners, 'OWNERS'))):
            dir_with_owners = os.path.dirname(dir_with_owners)
        files_split_by_owners.setdefault(dir_with_owners, []).append((action, path))
    return files_split_by_owners
def get_url(city):
    """
    Gets the full url of the place you want to its weather
    You need to obtain your api key from open weather, then give my_api_key the value of your key below
    """
    my_api_key = 'fda7542e1133fa0b1b312db624464cf5'
    unit = 'metric'  # To get temperature in Celsius
    base = 'http://api.openweathermap.org/data/2.5/weather?q='
    # e.g. http://api.openweathermap.org/data/2.5/weather?q=Nairobi&units=metric&APPID=...
    return base + city + '&units=' + unit + '&APPID=' + my_api_key
def verse(day):
    """Produce the verse for the given day of 'The Twelve Days of Christmas'.

    Args:
        day: 1-based day number, 1..12.

    Returns:
        The complete verse as a newline-joined string.

    (The ~40 lines of commented-out first-attempt code that followed the
    original implementation have been removed; dead code belongs in
    version control history, not the source.)
    """
    ordinal = [
        'first',
        'second',
        'third',
        'fourth',
        'fifth',
        'sixth',
        'seventh',
        'eighth',
        'ninth',
        'tenth',
        'eleventh',
        'twelfth',
    ]
    gifts = [
        'A partridge in a pear tree.',
        'Two turtle doves,',
        'Three French hens,',
        'Four calling birds,',
        'Five gold rings,',
        'Six geese a laying,',
        'Seven swans a swimming,',
        'Eight maids a milking,',
        'Nine ladies dancing,',
        'Ten lords a leaping,',
        'Eleven pipers piping,',
        'Twelve drummers drumming,',
    ]
    # First two lines are fixed apart from the ordinal word.
    day_verse = [
        f'On the {ordinal[day - 1]} day of Christmas,',
        f'My true love gave to me,'
    ]
    # Gifts accumulate and are sung in reverse order (day .. 1).
    day_verse.extend(reversed(gifts[:day]))
    # From day two onward the final line reads 'And a partridge...'.
    if day > 1:
        day_verse[-1] = 'And ' + day_verse[-1].lower()
    return '\n'.join(day_verse)
def mode(lyst):
    """Returns the mode of a list of numbers.

    Returns 0 for an empty list. Ties are resolved in favor of the value
    first encountered, matching the original dictionary-scan behavior
    (dict insertion order).
    """
    if not lyst:
        return 0
    # Tally frequencies; dict.get with a default replaces the original
    # `freq == None` sentinel dance.
    counts = {}
    for number in lyst:
        counts[number] = counts.get(number, 0) + 1
    # max() returns the first key (insertion order) with the highest count.
    return max(counts, key=counts.get)
def visit_bottomup(f, d):
    """Visits and rewrites a nested-dict ``d`` from the bottom to the top,
    using the ``f`` predicate."""
    # Leaves are rewritten directly; dict nodes are rebuilt from their
    # already-rewritten children, then passed to f themselves.
    if not isinstance(d, dict):
        return f(d)
    rebuilt = {key: visit_bottomup(f, value) for key, value in d.items()}
    return f(rebuilt)
def valid_flavor_list():
    """
    this includes at least 'BIAS', 'LIGHT' based on forDK.tar.gz samples
    capitalization inconsistent in forDK.tar.gz samples
    need to keep an eye out for additional valid flavors to add
    """
    # not sure how to deal with reduced image flavors that I've invented:
    # REDUCED, INVVAR, BITMASK
    return ['BIAS', 'LIGHT', 'FLAT', 'MASK']
def pairs_to_annotations(annotation_pairs):
    """
    Convert an array of annotations pairs to annotation array.
    :param annotation_pairs: list(AnnotationPair) - annotations
    :return: list(Annotation)
    """
    annotations = []
    for pair in annotation_pairs:
        # Keep ann1 before ann2, skipping missing halves.
        annotations.extend(
            ann for ann in (pair.ann1, pair.ann2) if ann is not None
        )
    return annotations
def default_meta(inherit=True):
    """Initialize default meta for particular plugin.
    Default Meta is inherited by all children comparing to Meta which is unique
    per plugin.
    :param inherit: Whatever to copy parents default meta
    """
    def wrapper(plugin):
        # Trigger the plugin's default-meta initialization, then return
        # the class unchanged so the decorator is transparent.
        plugin._default_meta_init(inherit)
        return plugin
    return wrapper
def get_infection_probas_mean_field(probas, transmissions):
    """
    - probas[i,s] = P_s^i(t)
    - transmissions = csr sparse matrix of i, j, lambda_ij(t)
    - infection_probas[i] = sum_j lambda_ij P_I^j(t)
    """
    # Column 1 holds the per-individual infected probability P_I.
    prob_infected = probas[:, 1]
    return transmissions.dot(prob_infected)
def binom(n, k):
    """Binomial coefficients for :math:`n choose k`
    :param n,k: non-negative integers
    :complexity: O(k)
    """
    # Multiplicative formula; integer division is exact at every step
    # because the running product is always a binomial coefficient.
    result = 1
    for step in range(1, k + 1):
        result = (result * (n - step + 1)) // step
    return result
def find_project(testrun_url):
    """
    Find a project name from this Polarion testrun URL.
    :param testrun_url: Polarion test run URL
    :returns: project name eg "CEPH" or "ContainerNativeStorage"
    """
    # The project name sits immediately after the fixed 59-char prefix,
    # terminated by the next '/'.
    suffix = testrun_url[59:]
    return suffix[:suffix.index('/')]
def c2ip(c2, uname):
    """ return complete ip address for c2 with substituted username """
    template = c2['ip_address']
    return template.replace('USER', uname)
def t_returns(inv, pfl, prices, date):
    """ Computes the total return of a portfolio.
    Parameters:
    - `inv` : :class:`list` investment session `db` row
    - `pfl` : :class:`string` name of the portfolio
    - `prices` : :class:`dict` latest investment's ticker prices
    - `date` : :class:`string` date of the purchase
    Sums share values at purchase time and now; absolute change and
    returns use the same formulas as :py:func:`check.returns`.
    Returns a :class:`dict` containing the total initial price, the new
    price, the absolute change, the returns and the date of the purchase.
    """
    holdings = inv[pfl]
    old_total = round(
        sum(qty * inv['prc'][tkr] for tkr, qty in holdings.items()), 1)
    new_total = round(
        sum(qty * prices[tkr] for tkr, qty in holdings.items()), 1)
    abs_change = round(new_total - old_total, 1)
    rel_change = round(((new_total - old_total) / old_total) * 100, 2)
    return {'abs': abs_change, 'rel': rel_change, 'old': old_total,
            'new': new_total, 'qty': 'NA', 'date': date}
def read_words(file="words.txt"):
    """
    Reads a list of words from a file.
    There needs to be one word per line, for this to work properly.
    Args:
        file: the file to read from
    Returns:
        An array of all the words in the file
    """
    with open(file, "r") as handle:
        contents = handle.read()
    return contents.lower().splitlines()
def get_intersphinx_label(is_map, cur_project_dir):
    """
    Best-effort guess at which intersphinx entry refers to the current project.

    The top-level keys of the intersphinx map are shortname labels; each
    entry's second element lists candidate objects.inv locations. Every
    non-HTTP (filepath) location is checked for the current project's
    directory name; the first matching shortname is returned, else None.
    """
    for shortname, invdata in is_map.items():
        for invpath in invdata[1]:
            is_local_path = invpath and not invpath.startswith("http")
            if is_local_path and cur_project_dir in invpath:
                return shortname
    return None
import asyncio
async def run_command(*args, **kwargs):
    """Shortcut for asyncronous running of a command"""
    use_shell = kwargs.pop("shell", False)
    check = kwargs.pop("check", False)
    if use_shell:
        spawn = asyncio.subprocess.create_subprocess_shell
    else:
        spawn = asyncio.subprocess.create_subprocess_exec
    process = await spawn(*args, **kwargs)
    stdout, stderr = await process.communicate()
    # When check is requested, a non-zero exit status is an error.
    if check and process.returncode != 0:
        raise Exception("Command failed: %s" % args)
    return process.returncode, stdout, stderr
def convert_2d_list_to_string(data):
    """Utility function.

    Renders each row as '{e1,e2,...},' on its own line; the trailing
    ',\\n' of the final row is stripped.
    """
    pieces = []
    for row in data:
        # Build '{e1,e2,' then drop the trailing character (',' for a
        # populated row, '{' for an empty one — matching the original).
        body = '{' + ''.join(str(cell) + ',' for cell in row)
        pieces.append(body[:-1] + '},\n')
    return ''.join(pieces)[:-2]
def PyMapping_Keys(space, w_obj):
    """On success, return a list of the keys in object o. On failure, return NULL.
    This is equivalent to the Python expression o.keys()."""
    w_keys = space.call_method(w_obj, "keys")
    return space.call_function(space.w_list, w_keys)
import re
def is_arabicrange(text):
    """ Checks for an Arabic Unicode block characters
    @param text: input text
    @type text: unicode
    @return: True if all charaters are in Arabic block
    @rtype: Boolean
    """
    # Any character outside the Arabic blocks makes the check fail.
    outside = re.search(
        u"([^\u0600-\u06ff\ufb50-\ufdff\ufe70-\ufeff\u0750-\u077f])", text)
    return outside is None
def str_repeat(space, s, repeat):
    """Repeat a string."""
    repeated = s * repeat
    return space.newstr(repeated)
import json
def file_to_dict(file: str):
    """Dump json file to dictionary"""
    try:
        with open(file) as handle:
            return json.load(handle)
    except json.decoder.JSONDecodeError:
        print(f'File {file} is not a valid json file. Returning empty dict')
        return {}
    except FileNotFoundError:
        print(f'File {file} does not exist. Returning empty dict')
        return {}
from typing import Dict
def basic_extractor(
    data: Dict,
) -> list:
    """
    Returns list of the total_recieved token, the total sent token and the number of transactions the wallet participated in.
    """
    wanted_keys = ("total_received", "total_sent", "n_tx")
    return [data[key] for key in wanted_keys]
def replace_if_present_else_append(
        objlist,
        obj,
        cmp=lambda a, b: a == b,
        rename=None):
    """
    Add an object to a list of objects, if that obj does not already
    exist. If it does exist (`cmp(A, B) == True`), replace the matching
    entry; the displaced entry is optionally renamed and re-inserted.

    Input
    =====
    :objlist, list: list of objects.
    :obj, object: object to add.

    Options
    =======
    :cmp, (bool) cmp (A, B): compares A to B. If True, then the
        objects are the same and B should replace A. If False,
        then B should be appended to `objlist`.
    :param rename: Should A be renamed instead of overwritten? If not
        None, `rename` is a unary function producing the new name for A,
        which is then re-inserted into the list.
    :type rename: None or unary function

    Output
    ======
    List is modified in place. A reference to the list is returned.

    Raises
    ======
    ValueError: if `rename` does not make the displaced entry distinct
        from `obj` (which would otherwise recurse forever).

    (A leftover debug `print(type(objlist))` and a stray trailing comment
    were removed from the original.)
    """
    for i in range(len(objlist)):
        # Skip entries that don't match `obj`.
        if not cmp(objlist[i], obj):
            continue
        if rename is not None:
            renamed = rename(objlist[i])
            # The renamed entry must be distinct from `obj`, otherwise
            # re-inserting it below would loop indefinitely.
            if cmp(renamed, obj):
                msg = '`rename` does not make {} unique.'.format(
                    str(objlist[i])[:32])
                raise ValueError(msg)
            # Replace the matched entry with `obj`...
            objlist[i] = obj
            # ...and re-insert the displaced, renamed entry.
            replace_if_present_else_append(
                objlist, renamed, cmp=cmp, rename=rename)
        else:
            objlist[i] = obj
        # Replacement handled; short-circuit out.
        return objlist
    # No match found: append.
    objlist.append(obj)
    return objlist
def rating_value(value):
    """Check that given value is integer and between 1 and 5."""
    rating = int(value)
    if 1 <= rating <= 5:
        return rating
    raise ValueError("Expected rating between 1 and 5, but got %s" % value)
def find_largest_digit_helper(n, max_n=0):
    """
    :param n: int, the number being examined
    :param max_n: int, the largest digit found so far
    :return: int, the largest single digit appearing in n
    """
    # Base cases: digits exhausted, or 9 (the maximum digit) already seen.
    if n == 0 or max_n == 9:
        return max_n
    # Work with the magnitude for negative inputs.
    if n < 0:
        n = -n
    # Peel off the last digit and track the running maximum.
    last_digit = n % 10
    if last_digit > max_n:
        max_n = last_digit
    # Recurse on the remaining digits, carrying the maximum along.
    return find_largest_digit_helper(n // 10, max_n)
def sous_tableaux(arr: list, n: int) -> list:
    """
    Description:
        Split a list into consecutive sub-lists.
    Parameters:
        arr: {list} -- list to split
        n: {int} -- number of elements per sub-list
    Returns:
        {list} -- list of sub-lists (empty when n <= 0 or arr is shorter than n)
    Example:
        >>> sous_tableaux([0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1], 3)
        [[0, 0, 0], [1, 0, 0], [1, 1, 0], [1, 1, 1], [0, 1, 1], [0, 0, 1]]
    """
    if n <= 0 or len(arr) < n:
        return []
    return [arr[start:start + n] for start in range(0, len(arr), n)]
import json
def normalize_cell_value(value):
    """Process value for writing into a cell.
    Args:
        value: any type of variable
    Returns:
        json serialized value if value is list or dict, else value
    """
    # A single isinstance call with a tuple covers both container types.
    if isinstance(value, (dict, list)):
        return json.dumps(value)
    return value
def mongodb():
    """
    Simple form to get and set a note in MongoDB
    """
    # Placeholder: no implementation yet.
    return None
import os
def list_subdir(path):
    """ list all subdirectories given a directory (a pathlib.Path)"""
    entries = os.listdir(path)
    return [entry for entry in entries if os.path.isdir(path / entry)]
def genBinaryFileRDD(sc, path, numPartitions=None):
    """
    Read files from a directory to a RDD.
    :param sc: SparkContext.
    :param path: str, path to files.
    :param numPartitions: int, number or partitions to use for reading files.
    :return: RDD with a pair of key and value: (filePath: str, fileData: BinaryType)
    """
    # Fall back to the context's default parallelism when unspecified.
    numPartitions = numPartitions or sc.defaultParallelism
    # repartition() evens out the initial file-based partitioning.
    rdd = sc.binaryFiles(
        path, minPartitions=numPartitions).repartition(numPartitions)
    #rdd = rdd.map(lambda x: (x[0], bytearray(x[1])))
    return rdd
def extract_year_month_from_key(key):
"""
Given an AWS S3 `key` (str) for a file,
extract and return the year (int) and
month (int) specified in the key after
'ano=' and 'mes='.
"""
a_pos = key.find('ano=')
year = int(key[a_pos + 4:a_pos + 8])
m_pos = key.find('mes=')
month = int(key[m_pos + 4:m_pos + 5])
return year, month | b52dc08d393900b54fca3a4939d351d5afe0ef3c | 5,260 |
def parentheses_cleanup(xml):
    """Clean up where parentheses exist between paragraph an emphasis tags.

    For each <E> element that is the first child of a <P>, moves a
    leading '(' and/or trailing ')' out of the emphasis so the
    parentheses live in the surrounding paragraph text.
    """
    # We want to treat None's as blank strings
    def _str(x):
        return x or ""
    # Only <E> elements in first-child position of a <P>.
    for em in xml.xpath("//P/*[position()=1 and name()='E']"):
        par = em.getparent()
        # left: paragraph text before <E>; middle: text inside <E>;
        # right: tail text after <E>.
        left, middle, right = _str(par.text), _str(em.text), _str(em.tail)
        # Opening paren at the boundary into the emphasis?
        has_open = '(' in left[-1:] + middle[:1]
        # Closing paren at the boundary out of the emphasis?
        has_close = ')' in middle[-1:] + right[:1]
        if not left.endswith('(') and middle.startswith('(') and has_close:
            # Move '(' out
            par.text = _str(par.text) + "("
            em.text = em.text[1:]
        if middle.endswith(')') and not right.startswith(')') and has_open:
            # Move ')' out
            em.text = em.text[:-1]
            em.tail = ")" + _str(em.tail)
import numpy
def filter_atoms(coordinates, num_atoms=None, morphology="sphere"):
    """
    Filter the atoms so that the crystal has a specific morphology with a given number of atoms
    Params:
        coordinates (array): The atom coordinates (structured array with x/y/z fields)
        num_atoms (int): The number of atoms
        morphology (str): The morphology of the crystal
    Returns:
        array: The filtered coordinates
    """
    def _sphere_selection(coords, count):
        # Stack the coordinate fields into an (N, 3) array.
        positions = numpy.array([coords["x"], coords["y"], coords["z"]]).T
        centre = numpy.mean(positions, axis=0)
        # Squared distance from the centre of mass for each atom.
        sq_dist = numpy.sum((positions - centre) ** 2, axis=1)
        # Keep the `count` closest atoms.
        closest = numpy.argsort(sq_dist)
        return coords[closest[0:count]]

    # No filtering requested: return the input unchanged.
    if num_atoms is None or morphology is None:
        return coordinates
    assert len(coordinates) >= num_atoms
    # Dispatch on morphology name.
    return {"sphere": _sphere_selection}[morphology](coordinates, num_atoms)
def create_list(value, sublist_nb, sublist_size):
    """
    Build a list of sublist_nb sublists, each of length sublist_size,
    with every element set to value.
    """
    # Each iteration creates a fresh sublist (no shared references).
    return [[value] * sublist_size for _ in range(sublist_nb)]
def gen_r_cr():
    """
    Generate the R-Cr table (256 entries, int-truncated 1.40199 * (i - 128)).
    """
    return [int(1.40199 * (level - 128)) for level in range(256)]
def getGpsTime(dt):
    """getGpsTime returns gps time (seconds since midnight Sat/Sun) for a datetime
    """
    # Shift Python's weekday() (Mon=0) so that Sunday = 0, Monday = 1, ...
    day_index = (dt.weekday() + 1) % 7
    seconds = day_index * 24 * 3600
    seconds += dt.hour * 3600
    seconds += dt.minute * 60
    seconds += dt.second
    return seconds
def _round_to_4(v):
"""Rounds up for aligning to the 4-byte word boundary."""
return (v + 3) & ~3 | c79736b4fe9e6e447b59d9ab033181317e0b80de | 5,267 |
def lower_threshold_projection(projection, thresh=1e3):
    """
    An ugly but effective work around to get a higher-resolution curvature
    of the great-circle paths. This is useful when plotting the great-circle
    paths in a relatively small region.
    Parameters
    ----------
    projection : class
        Should be one of the cartopy projection classes, e.g.,
        cartopy.crs.Mercator
    thresh : float
        Smaller values achieve higher resolutions. Default is 1e3
    Returns
    -------
    Instance of the input (`projection`) class
    Example
    -------
    proj = lower_threshold_projection(cartopy.crs.Mercator, thresh=1e3)
    Note that the cartopy.crs.Mercator was not initialized (i.e., there are no
    brackets after the word `Mercator`)
    """
    # Subclass on the fly, overriding only the `threshold` property.
    class _PatchedProjection(projection):
        @property
        def threshold(self):
            return thresh
    return _PatchedProjection()
def has_param(param):
    """
    Generate function, which will check `param` is in html element.
    This function can be used as parameter for .find() method in HTMLElement.
    """
    def predicate(element):
        """
        Look for `param` in `element`, requiring a non-blank value.
        """
        value = element.params.get(param, "")
        return bool(value.strip())
    return predicate
def get_all_keys(data):
    """Get all keys from json data file.

    Args:
        data: list of dict rows.

    Returns:
        list of every distinct key appearing in any row; an empty list
        for empty input (the original crashed on `data[0]` there).
    """
    all_keys = set()
    for row in data:
        all_keys |= set(row.keys())
    return list(all_keys)
def _ngl_write_atom(
num,
species,
x,
y,
z,
group=None,
num2=None,
occupancy=1.0,
temperature_factor=0.0,
):
"""
Writes a PDB-formatted line to represent an atom.
Args:
num (int): Atomic index.
species (str): Elemental species.
x, y, z (float): Cartesian coordinates of the atom.
group (str): A...group name? (Default is None, repeat elemental species.)
num2 (int): An "alternate" index. (Don't ask me...) (Default is None, repeat first number.)
occupancy (float): PDB occupancy parameter. (Default is 1.)
temperature_factor (float): PDB temperature factor parameter. (Default is 0.
Returns:
(str): The line defining an atom in PDB format
Warnings:
* The [PDB docs](https://www.cgl.ucsf.edu/chimera/docs/UsersGuide/tutorials/pdbintro.html) indicate that
the xyz coordinates might need to be in some sort of orthogonal basis. If you have weird behaviour,
this might be a good place to investigate.
"""
if group is None:
group = species
if num2 is None:
num2 = num
return "ATOM {:>6} {:>4} {:>4} {:>5} {:10.3f} {:7.3f} {:7.3f} {:5.2f} {:5.2f} {:>11} \n".format(
num, species, group, num2, x, y, z, occupancy, temperature_factor, species
) | 92a5d62f3c4f6d927aa5a6010b217344d0d241d3 | 5,275 |
def get_insert_components(options):
    """ Takes a list of 2-tuple in the form (option, value) and returns a
    triplet (colnames, placeholders, values) that permits making a database
    query as follows: c.execute('INSERT INTO Table ({colnames}) VALUES
    {placeholders}', values). """
    col_names = ','.join(name for name, _ in options)
    placeholders = ','.join('?' * len(options))
    # Leading commas let the caller concatenate onto existing columns.
    if col_names:
        col_names = ',' + col_names
    if placeholders:
        placeholders = ',' + placeholders
    values = tuple(value for _, value in options)
    return col_names, placeholders, values
def enumerate_trials(perievents):
    """
    adds an index to perievents_2D that counts the number of trials per session and event
    starting with 1, removes FrameCounter index
    :param perievents: perievents df, non-column based format
    :return: perievents df with additional index Trial
    """
    # drop the FrameCounter level so trials are counted per remaining index
    df = perievents.reset_index('FrameCounter', drop=True)
    group_levels = list(df.index.names)
    # cumcount is 0-based, so shift by one to start trial numbering at 1
    df['Trial'] = df.groupby(group_levels).cumcount() + 1
    return df.set_index('Trial', append=True)
import torch
def add_dims_right(tensor, ndims, right_indent=0):
    """ Add empty dimensions to the right of tensor shape
    """
    assert right_indent >= 0
    # insert each new singleton axis at the same (negative) position,
    # leaving `right_indent` existing trailing axes to its right
    target_dim = -1 - right_indent
    for _ in range(ndims):
        tensor = tensor.unsqueeze(target_dim)
    return tensor
def get_submodel_name(history=60, lag=365, num_neighbors=20, margin_in_days=None, metric="cos"):
    """Returns submodel name for a given setting of model parameters
    """
    return f"{metric}-autoknn-hist{history}-nbrs{num_neighbors}-margin{margin_in_days}-lag{lag}"
def compress_public_key(public_key):
    """Compresses a given uncompressed public key.

    :param public_key: the key to compress, as bytes: a 0x04 prefix followed
        by 32 bytes of x and 32 bytes of y
    :return: the compressed key (parity prefix plus the x coordinate), as bytes
    :raises ValueError: if the input is not a 65-byte uncompressed key
    """
    if public_key[0] != 0x04 or len(public_key) != 65:
        raise ValueError('invalid uncompressed public key')
    # The y coordinate occupies the last 32 bytes of the key.
    # (Previously the parity was taken over the whole key; the result is the
    # same because parity depends only on the least-significant byte, but this
    # now matches the stated intent.)
    y = int.from_bytes(public_key[33:], 'big')
    # The prefix encodes the parity of y: 0x02 for even, 0x03 for odd.
    prefix = b'\x03' if y % 2 else b'\x02'
    return prefix + public_key[1:33]
def forward(layers, x):
    """
    function for performing forward propagation in all the layers

    Parameters:
        layers : list (conv, pool, dense layers, in that order)
        x : numpy array

    Returns:
        list : (contains output of all layers in the form of numpy arrays)
    """
    conv, pool, dense = layers[0], layers[1], layers[2]
    # normalize pixel values to roughly [-0.5, 0.5] before the conv layer
    outputs = [conv.forward((x / 255) - 0.5)]
    outputs.append(pool.forward(outputs[-1]))
    outputs.append(dense.forward(outputs[-1]))
    return outputs
def __same_axes(x_axis, y_axis, xlim, ylim):
    """Check if two axes are the same, used to determine squared plots"""
    # the plot is "squared" only when both axes name the same (non-None)
    # quantity and share identical limits
    return x_axis is not None and x_axis == y_axis and xlim == ylim
import csv
def create_column_dicts(path_to_input):
    """Creates dictionaries: {column_index, column_name} and
    {column_name, column_index}"""
    with open(path_to_input, newline="") as csvfile:
        # only the header row is needed
        header = next(csv.reader(csvfile, delimiter=","))
    cols = dict(enumerate(header))
    # reverse dictionary: {column_name, column_index}
    cols_inds = {name: index for index, name in cols.items()}
    return cols, cols_inds
import string
def get_sentiment(text, word_map):
    """
    Identifies the overall sentiment of the text by taking the average
    of each word.

    Note: Words not found in the word_map dict are given zero value.
    Empty or punctuation-only input yields 0 instead of raising
    ZeroDivisionError.

    :param text: the text to score
    :param word_map: mapping of lower-case word -> numeric sentiment score
    :return: the mean score over all words in the text
    """
    # remove all punctuation
    text = text.translate(str.maketrans("", "", string.punctuation))
    # split into tokens
    tokens = text.split()
    if not tokens:
        # guard: averaging over zero words previously raised ZeroDivisionError
        return 0
    # score each word, defaulting to zero when absent from the map
    total_score = sum(word_map.get(token.lower(), 0) for token in tokens)
    return total_score / len(tokens)
def handle_all_serials(oid, *args):
    """Return dict of oid to serialno from store() and tpc_vote().

    Raises an exception if one of the calls raised an exception.

    The storage interface got complicated when ZEO was introduced.
    Any individual store() call can return None or a sequence of
    2-tuples where the 2-tuple is either oid, serialno or an
    exception to be raised by the client.

    The original interface just returned the serialno for the
    object.

    The updated multi-commit API returns nothing from store(), and
    returns a sequence of resolved oids from tpc_vote.

    NOTE: This function is removed entirely in ZODB 5.
    """
    d = {}
    for arg in args:
        if isinstance(arg, bytes):
            # Original single-serial protocol: arg is the serialno for `oid`.
            d[oid] = arg
        elif arg:
            # arg is a sequence of either resolved oids (bytes) or
            # (oid, serialno) 2-tuples; None/empty args are skipped.
            for t in arg:
                if isinstance(t, bytes):
                    # New protocol. The caller will use the tid
                    # returned from tpc_finish if we return a dict
                    # missing the oid.
                    pass
                else:
                    # NOTE(review): this rebinds the `oid` parameter, so any
                    # later bytes-only arg would map to this oid instead of
                    # the original one — preserved legacy behavior.
                    oid, serial = t
                    if not isinstance(serial, bytes):
                        raise serial # error from ZEO server
                    d[oid] = serial
    return d
def remove_null_fields(data):
    """Recursively remove keys whose values are falsy (None, '', 0, [], {}).

    Nested dicts (and dicts inside lists) are cleaned first, so a key whose
    value becomes empty after cleaning is removed as well.

    :param data: dict to clean (modified in place)
    :return: the same dict, for convenience
    """
    # Iterate over a snapshot of the items: deleting from a dict while
    # iterating its live view raises RuntimeError in Python 3.
    for key, value in list(data.items()):
        if isinstance(value, dict):
            remove_null_fields(value)
        elif isinstance(value, list):
            for element in value:
                # Only dict elements can be cleaned recursively; previously a
                # non-dict list element caused an AttributeError on .items().
                if isinstance(element, dict):
                    remove_null_fields(element)
        if not data[key]:
            del data[key]
    return data
def find_recipes(rounds):
    """
    Calculate the last ten recipes in the sequence.

    :param rounds: the number of rounds
    :return: a list of the last 10 recipes in the sequence

    >>> find_recipes(5)
    [0, 1, 2, 4, 5, 1, 5, 8, 9, 1]
    >>> find_recipes(18)
    [9, 2, 5, 1, 0, 7, 1, 0, 8, 5]
    >>> find_recipes(2018)
    [5, 9, 4, 1, 4, 2, 9, 8, 8, 2]
    """
    scoreboard = [3, 7]
    first, second = 0, 1
    while len(scoreboard) < rounds + 10:
        # the digits of the combined score become new recipes
        combined = scoreboard[first] + scoreboard[second]
        scoreboard.extend(int(digit) for digit in str(combined))
        # each elf steps forward 1 + its current recipe score, wrapping around
        first = (first + scoreboard[first] + 1) % len(scoreboard)
        second = (second + scoreboard[second] + 1) % len(scoreboard)
    return scoreboard[rounds:rounds + 10]
def query_abs_over_wiki(abstract):
    """
    query from es with the document is wiki_entity
    """
    highlight = {
        "pre_tags": ["@@"],
        "post_tags": ["@@"],
        "fields": {"*": {}},
        # effectively "no fragmenting": max 32-bit signed int
        "fragment_size": 2147483647,
    }
    return {
        "track_total_hits": "true",
        "version": "true",
        "size": 1000,
        "sort": [{"_score": {"order": "desc"}}],
        "_source": {"includes": ["entity", "stem", "description"]},
        "stored_fields": ["*"],
        "script_fields": {},
        "docvalue_fields": [],
        "query": {"match": {"entity": abstract}},
        "highlight": highlight,
    }
import urllib.error
import urllib.request
def has_internet():
    """
    Test if Internet is available.

    Failure of connecting to the site "http://www.sagemath.org" within a second
    is regarded as internet being not available.

    EXAMPLES::

        sage: from sage.doctest.external import has_internet
        sage: has_internet() # random
        True
    """
    # The six.moves shim is unnecessary on Python 3; use urllib directly.
    try:
        urllib.request.urlopen("http://www.sagemath.org", timeout=1)
        return True
    except urllib.error.URLError:
        return False
def read_warfle_text(path: str) -> str:
    """Returns text from *.warfle files

    :param path: path of the file to read
    :return: the file's full contents
    :raises OSError: if the file cannot be opened or read
    """
    # Let the original exception propagate: re-raising as a bare Exception(e)
    # destroyed the real type and traceback while adding no information.
    with open(path, "r") as text:
        return text.read()
import os
def avi_common_argument_spec():
    """
    Returns common arguments for all Avi modules
    :return: dict
    """
    env = os.environ.get
    return {
        'controller': {'default': env('AVI_CONTROLLER', '')},
        'username': {'default': env('AVI_USERNAME', '')},
        # no_log keeps the password out of Ansible logs
        'password': {'default': env('AVI_PASSWORD', ''), 'no_log': True},
        'tenant': {'default': 'admin'},
        'tenant_uuid': {'default': ''},
    }
import os
def get_largest_files_new(directory: str, num: int) -> list:
    """
    Return a sorted list containing up to num of the largest files from the directory.

    Preconditions:
        - num > 0
    """
    # ACCUMULATOR: (size, path) pairs collected so far
    sized_files = []
    for dirpath, _, filenames in os.walk(directory):
        for filename in filenames:
            full_path = os.path.join(dirpath, filename)
            try:
                sized_files.append((os.path.getsize(full_path), full_path))
            except FileNotFoundError:
                print("Couldn't find file at ", full_path)
    # ascending by size; slice keeps the largest `num` entries
    return sorted(sized_files)[-num:]
def _full_url(url):
    """
    Assemble the full url
    for a url.
    """
    url = url.strip()
    # already carries a scheme -> leave untouched
    if url.startswith(('http://', 'https://')):
        return url
    return 'http://%s' % url
def encodeUcs2(text):
    """ UCS2 text encoding algorithm

    Encodes the specified text string into UCS2-encoded bytes.

    @param text: the text string to encode

    @return: A bytearray containing the string encoded in UCS2 encoding
    @rtype: bytearray
    """
    encoded = bytearray()
    for code_point in (ord(char) for char in text):
        # big-endian: high byte first, then low byte
        encoded.extend((code_point >> 8, code_point & 0xFF))
    return encoded
import argparse
def get_parser():
    """ Builds the argument parser for the program. """
    parser = argparse.ArgumentParser()
    # (flags, keyword arguments) for every supported option
    options = [
        (('-c',), dict(type=str, dest='clf_key', default='dt', choices=['dt', 'xts', 'rf'], help='A classifier to use.')),
        (('-m',), dict(type=str, dest='mode', default='test', choices=['cv', 'test'], help='Mode to run the program in (cross-validation or test).')),
        (('-k',), dict(type=int, dest='cv', default=5, help='Number of folds in KFold cross-validation.')),
        (('-d', '--data'), dict(type=str, dest='data_name', default='econbiz', help='Name of the dataset to use (econbiz or pubmed).')),
        (('-f',), dict(type=float, dest='data_fraction', default=0.1, help='The fraction of the data to be used (0, 1>.')),
        (('-t',), dict(type=float, dest='test_size', default=0.1, help='Test size (0, 1>.')),
        (('--max_depth',), dict(type=int, dest='max_depth', default=None, help='The maximum depth of the tree.')),
        (('--min_ss',), dict(type=int, dest='min_ss', default=2, help='The minimum number of samples required to split an internal tree node.')),
        (('--max_features',), dict(type=str, dest='max_features', default=None, help='The number of features to consider when looking for the best split in the tree.')),
        (('-n',), dict(type=int, dest='n_estimators', default=10, help='The number of estimators in the ensemble.')),
        (('-j',), dict(type=int, dest='n_jobs', default=-1, help='The number of jobs to run in parallel.')),
        (('-v',), dict(type=int, dest='verbose', default=0, help='Verbosity of the program.')),
        (('-b', '--batch'), dict(dest='is_batch_mode', action='store_true', default=False, help='Whether the program runs in a batch mode (affects file locations).')),
    ]
    for flags, kwargs in options:
        parser.add_argument(*flags, **kwargs)
    return parser
def units_to_msec(units, resolution):
    """Convert BLE specific units to milliseconds."""
    # same evaluation order as before: multiply first, then scale down
    scaled = units * float(resolution)
    return scaled / 1000
def is_data(data):
    """ Check if a packet is a data packet. """
    # too short to carry the type/opcode bytes at offsets 25 and 26
    if len(data) <= 26:
        return False
    return ord(data[25]) == 0x08 and ord(data[26]) in (0x42, 0x62)
def get_tf_metric(text):
    """
    Computes the tf metric

    Params:
        text (tuple): tuple of words

    Returns:
        tf_text: format: ((word1, word2, ...), (tf1, tf2, ...))
    """
    if not text:
        # no words -> no frequencies (previously max() raised on empty input)
        return text, []
    # Count each distinct word once instead of calling text.count() per
    # position, which was quadratic in the number of words.
    occurrences = {}
    for word in text:
        occurrences[word] = occurrences.get(word, 0) + 1
    max_count = max(occurrences.values())
    tf = [occurrences[word] / max_count for word in text]
    return text, tf
def getKeyFromValue(dictionary, value):
    """
    Return the first key in `dictionary` mapped to `value`,
    or None when no key maps to it.
    """
    for key, mapped in dictionary.items():
        if mapped == value:
            return key
    return None
import struct
def incdata(data, s):
    """
    add 's' to each byte.

    This is useful for finding the correct shift from an incorrectly shifted chunk.
    """
    shifted = bytearray()
    for byte in data:
        # wrap around at 256 so the result stays a single byte
        shifted += struct.pack("<B", (byte + s) & 0xFF)
    return bytes(shifted)
import string
def strip_non_printable(value):
    """
    Removes any non-printable characters, appending an indicator to the
    string when binary characters are found.

    :param value: the value that you wish to strip
    """
    if value is None:
        return None
    # keep only printable characters
    printable_value = ''.join(c for c in value if c in string.printable)
    if printable_value == value:
        return printable_value
    # something was stripped: flag that the input contained binary data
    marker = '(contains binary)'
    return printable_value + ' ' + marker if printable_value else marker
import math
def tangent_circle(dist, radius):
    """
    return tangent angle to a circle placed at (dist, 0.0) with radius=radius

    For non-existing tangent use 100 degrees.
    """
    if dist < radius:
        # observer lies inside the circle: no tangent exists
        return math.radians(100)
    return math.asin(radius / float(dist))
def state_transitions():
    """Simplified state transition dictionary"""
    # linear disease progression: Exposed -> A -> Infected -> Hospital -> Recovered
    progression = [("E", "A"), ("A", "I"), ("I", "H"), ("H", "R")]
    return {src: {dst: {"(0, 9)": 1}} for src, dst in progression}
from datetime import datetime
def TimeSec():
    """[Takes current time in and convert into seconds.]

    Returns:
        [float]: [Time in seconds]
    """
    current = datetime.now()
    # seconds elapsed since local midnight
    return current.second + 60 * (current.minute + 60 * current.hour)
import re
def server_version(headers):
    """Extract the firmware version from HTTP headers."""
    server_header = headers.get("Server")
    if not server_header:
        return None
    match = re.match(r"ServerTech-AWS/v(?P<version>\d+\.\d+\w+)", server_header)
    # returns None when the header does not advertise a ServerTech firmware
    return match.group("version") if match else None
def es_subcadena(adn1, adn2):
    """
    (str, str) -> bool

    Check whether ``adn2`` occurs as a substring of ``adn1``.

    >>> es_subcadena('gatc', 'tta')
    False
    >>> es_subcadena('gtattt', 'atcgta')
    False

    :param adn1: str: string to search in
    :param adn2: str: candidate substring
    :return: bool: True when adn2 is contained in adn1
    """
    # `in` already yields a bool; the if/else ladder was redundant.
    return adn2 in adn1
def get_labels_and_features(nested_embeddings):
    """ returns labels and embeddings

    Splits the matrix column-wise: every column except the last is a
    feature, the last column holds the label.
    """
    features = nested_embeddings[:, :-1]
    labels = nested_embeddings[:, -1]
    return features, labels
def parse_locator(src):
    """ (src:str) -> [pathfile:str, label:either(str, None)]
    """
    parts = src.split('#')
    if len(parts) == 1:
        # no fragment: label defaults to None
        return [parts[0], None]
    if len(parts) == 2:
        return parts
    raise ValueError('Malformed src: %s' % (src))
def detect_seperator(path, encoding):
    """
    Detect the column separator used in the first line of a delimited file.

    :param path: pathlib.Path object
    :param encoding: file encoding.
    :return: 1 character.
    :raises ValueError: if no separator can be detected.
    """
    # After reviewing the logic in the CSV sniffer, I concluded that all it
    # really does is to look for a non-text character. As the separator is
    # determined by the first line, which almost always is a line of headers,
    # the text characters will be utf-8,16 or ascii letters plus white space.
    # This leaves the characters ,;:| and \t as potential separators, with one
    # exception: files that use whitespace as separator. My logic is therefore
    # to (1) find the set of characters that intersect with ',;:|\t' which in
    # practice is a single character, unless (2) it is empty whereby it must
    # be whitespace.
    # Use a context manager so the handle is closed; the previous version
    # left the file open after reading the first line.
    with path.open('r', encoding=encoding) as fh:
        text = fh.readline()  # first line only (empty string for empty files)
    seps = {',', '\t', ';', ':', '|'}.intersection(text)
    if not seps:
        if " " in text:
            return " "
        raise ValueError("separator not detected")
    if len(seps) == 1:
        return seps.pop()
    # Multiple candidates: pick the most frequent one.
    frq = [(text.count(i), i) for i in seps]
    frq.sort(reverse=True)  # most frequent first.
    return frq[0][-1]
from typing import List
from typing import Optional
def check(s: str) -> None:
    """
    Checks if the given input string of brackets is balanced, printing
    "Balanced." or "Not balanced." accordingly.

    Args:
        s (str): The input string
    """
    # map each closer to the opener it must match
    pairs = {")": "(", "]": "[", "}": "{"}
    stack: List[str] = []
    for char in s:
        if char in ("(", "{", "["):
            stack.append(char)
        elif not stack or stack[-1] != pairs.get(char):
            # Either a closer (or stray character) with no opener, or a
            # mismatched pair: the string cannot be balanced.
            print("Not balanced.")
            return
        else:
            stack.pop()
    # leftover openers also mean the string is unbalanced
    print("Balanced." if not stack else "Not balanced.")
Subsets and Splits
No saved queries yet
Save your SQL queries to embed, download, and access them later. Queries will appear here once saved.