content (string, 35 to 416k chars) | sha1 (string, 40 chars) | id (int64, 0 to 710k) |
---|---|---|
def parse_anchor_body(anchor_body):
"""
Given the body of an anchor, parse it to determine what topic ID it's
anchored to and what text the anchor uses in the source help file.
This always returns a 2-tuple, though based on the anchor body in the file
it may end up thinking that the topic ID and the text are identical.
"""
c_pos = anchor_body.find(':')
if c_pos >= 0:
id_val = anchor_body[:c_pos]
anchor_body = anchor_body[c_pos+1:]
id_val = id_val or anchor_body
else:
id_val = anchor_body
return (id_val.casefold().rstrip(), anchor_body.strip()) | 5e86ac489727ec4da69f7ca14152cb79da541f3a | 2,023 |
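A quick sketch of the two anchor-body shapes parse_anchor_body handles (the sample inputs are illustrative, not from the source):

print(parse_anchor_body("Topic_ID:Anchor Text"))  # ('topic_id', 'Anchor Text')
print(parse_anchor_body("Anchor Text"))           # no colon: ('anchor text', 'Anchor Text')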
def range_overlap(range1, range2):
"""
Determine whether range1 is within range2 (or is exactly the same)
:param range range1: a range
:param range range2: another range
:rtype: bool
:return: True if range1 is a subset of range2, False otherwise
"""
result = all([
range1.start >= range2.start,
range1.stop <= range2.stop
])
return result | 3df4edf59ea473ad7b832256443a1e4e8c7e0ce9 | 2,024 |
def morris_traversal(root):
"""
Morris (in-order) traversal is a tree traversal algorithm that does not employ
the use of recursion or a stack. In this traversal, links are created as
successors and nodes are printed using these links.
Finally, the changes are reverted back to restore the original tree.
root = Node(4)
temp = root
temp.left = Node(2)
temp.right = Node(8)
temp = temp.left
temp.left = Node(1)
temp.right = Node(5)
"""
inorder_traversal = []
# set current to root of binary tree
current = root
while current is not None:
if current.left is None:
inorder_traversal.append(current.data)
current = current.right
else:
# find the in-order predecessor (prev) of current
previous = current.left
while previous.right is not None and previous.right != current:
previous = previous.right
# make curr as right child of its prev
if previous.right is None:
previous.right = current
current = current.left
# fix the right child of prev (revert the threading)
else:
previous.right = None
inorder_traversal.append(current.data)
current = current.right
return inorder_traversal | 1770e1df3811edb6bebb64729e2eddef34348dc4 | 2,025 |
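A minimal sketch of a Node class matching the attributes the traversal touches (the class itself is an assumption, not part of the source), plus the example tree from the docstring:

class Node:
    def __init__(self, data):
        self.data = data
        self.left = None
        self.right = None

root = Node(4)
root.left = Node(2)
root.right = Node(8)
root.left.left = Node(1)
root.left.right = Node(5)
print(morris_traversal(root))  # [1, 2, 5, 4, 8]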
def new_func(message):
"""
Defines a nested helper and calls it with the given message.
:param message: the message to print
:return: None (get_message only prints)
"""
def get_message(message):
"""
get message
:param message:
:return:
"""
print('Got a message:{}'.format(message))
return get_message(message) | c5f23b0cd3cebfdd2d36398a3ace18342d6de37c | 2,027 |
def set_namespace_root(namespace):
"""
Returns the GO ID(s) for the root of the selected namespace.
Parameters
----------
namespace : str
A string containing the desired namespace. E.g. biological_process, cellular_component
or molecular_function.
Returns
-------
list
The list of GO IDs of the root terms of the selected namespace.
"""
if namespace == 'biological_process':
namespace_list = ['GO:0008150']
elif namespace == 'cellular_component':
namespace_list = ['GO:0005575']
elif namespace == 'molecular_function':
namespace_list = ['GO:0003674']
else:
namespace_list = ['GO:0008150', 'GO:0005575', 'GO:0003674']
return namespace_list | 2719b2766912ad8caf3427513c7affa1cdb92eb3 | 2,028 |
def count_words(my_str):
"""
Count the number of words in a sentence using the string split function.
INPUT - This is testing program
OUTPUT - 4
"""
my_str_list = my_str.split(" ")
return len(my_str_list) | 731291937205fd0b9cb9153b4ee95d42416a5124 | 2,029 |
def ref_from_rfgc(sample):
"""
rename columns from RFGC catalog
"""
ref = dict(
ra = sample['RAJ2000'],
dec = sample['DEJ2000'],
a = sample['aO'],
b = sample['bO'],
PA = sample['PA']
)
return ref | f93f4dfefc107c082f5454a59fb7a145ab9e9e60 | 2,030 |
import os
import bz2
import lzma
import gzip
def file_open(filename, mode='r', encoding='utf8'):
"""Open file with implicit gzip/bz2 support
Uses text mode by default regardless of the compression.
In write mode, creates the output directory if it does not exist.
"""
dirname = os.path.dirname(filename)
if 'w' in mode and dirname and not os.path.isdir(dirname):
os.makedirs(dirname)
if filename.endswith('.bz2'):
if mode in {'r', 'w', 'x', 'a'}:
mode += 't'
return bz2.open(filename, mode=mode, encoding=encoding)
if filename.endswith('.xz'):
if mode in {'r', 'w', 'x', 'a'}:
mode += 't'
return lzma.open(filename, mode=mode, encoding=encoding)
if filename.endswith('.gz'):
if mode in {'r', 'w', 'x', 'a'}:
mode += 't'
return gzip.open(filename, mode=mode, encoding=encoding)
return open(filename, mode=mode, encoding=encoding) | ad4b3a02273e02339ef3a6d2c365fc3a087692ea | 2,032 |
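A usage sketch; the compression format is chosen purely from the file extension, and the paths here are hypothetical:

with file_open("out/data.txt.gz", mode="w") as f:  # creates out/ if missing
    f.write("hello\n")
with file_open("out/data.txt.gz") as f:
    print(f.read())  # "hello\n", transparently decompressed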
def as_dicts(results):
"""Convert execution results to a list of tuples of dicts for better comparison."""
return [result.to_dict(dict_class=dict) for result in results] | f7d3a77c0ef82439137c2ed6c706afc64d597256 | 2,035 |
def concat_allocator_cmd(allocator, cmd):
"""add env variable for different allocator modes."""
new_cmd = cmd
if allocator == "direct":
new_cmd = "DIRECT_BUFFER=1 " + cmd
elif allocator == "unified":
new_cmd = "UNIFIED_BUFFER=1 " + cmd
elif allocator == "je_direct":
new_cmd = "JEMALLOC=1 DIRECT_BUFFER=1 " + cmd
elif allocator == "je_cycle":
new_cmd = "JEMALLOC=1 " + cmd
elif allocator == "je_unified":
new_cmd = "JEMALLOC=1 UNIFIED_BUFFER=1 " + cmd
return new_cmd | b0275705d9a148c4b197e10847a0846e1e96d822 | 2,036 |
def seconds_to_hours(s):
"""Convert seconds to hours:
:param s: Number of seconds
:type s: Float
:return: Number of hours
:rtype: Float
"""
return float(s) / 3600 | 9bf9a7b408bf49714c4e873f59ec5433cc4f1ecf | 2,037 |
def contigs_n_bases(contigs):
"""Returns the sum of all n_bases of contigs."""
return sum(c.n_bases for c in contigs) | 57bbc1712739bf8501ad95a5aa72adece6803bc3 | 2,038 |
def compare_bib_dict(item1, item2):
""" compare bibtex item1 and item 2 in dictionary form """
# unique id check
col_list = ["doi", "pmid", "pmcid", "title", "local-url"]
for c in col_list:
if (item1.get(c, "1") != '') and (item1.get(c, "1") == item2.get(c, "2")):
return 1.0
score = 0.0
def _get_score(item1, item2, colname, s):
if item1.get(colname, "1") == '': return 0.0
if item1.get(colname, "2") == '': return 0.0
if item1.get(colname, "1") == item2.get(colname, "2"): return s
return 0.0
score = score + _get_score(item1, item2, "year", 0.2)
score = score + _get_score(item1, item2, "author", 0.2)
score = score + _get_score(item1, item2, "author1", 0.1)
score = score + _get_score(item1, item2, "journal", 0.2)
score = score + _get_score(item1, item2, "volume", 0.1)
return score | 87d974adec31c5c5fb130d0b5fd8a2b750f67eff | 2,040 |
def _convert_min_sec_to_sec(val):
"""
:param val: a string in the format 'XmYsZ' like '0m5s3', meaning 0 minutes, 5.3 seconds
:return:
>>> _convert_min_sec_to_sec('10m11s2')
611.2
"""
_min = val.split('m')[0]
_sec = val.split('m')[1].split('s')[0]
_dsec = val.split('s')[1]
if len(_dsec) == 1:
_dsec = _dsec + '0'
res = int(_min) * 60 + int(_sec) + float(_dsec)/100.
return res | f402e6221fa97ec5ccdb9b194478b916e85fdf85 | 2,042 |
from datetime import date, datetime
def datetime_to_fractional_year(input: datetime) -> float:
"""Converts a Python datetime object to a fractional year."""
start = date(input.year, 1, 1).toordinal()
year_length = date(input.year + 1, 1, 1).toordinal() - start
return input.year + (input.toordinal() - start) / year_length | 576361cad890f709d6d02c56f53c43529211fb2b | 2,043 |
def get_SHF_L_min_C():
""":return: 冷房負荷最小顕熱比率 (-)"""
return 0.4 | 274728ea22800ade77bfe4e41bc41a05b97ac483 | 2,044 |
from typing import Union
import pathlib
def get_path(obj: Union[str, pathlib.Path]) -> pathlib.Path:
"""Convert a str into a fully resolved & expanded Path object.
Args:
obj: obj to convert into expanded and resolved absolute Path obj
"""
return pathlib.Path(obj).expanduser().resolve() | 88641ea4a6ae54aea12b7d0c9afca8d6f475b8d0 | 2,045 |
import re
def verify_policy_type_id(policy_type_id):
"""
:type policy_type_id: str
:param policy_type_id: policy type id - e.g. storage-policy-00000001
:rtype: int
:return: Fixed policy type ID
:raises: ValueError: policy type id
"""
if not re.match("storage-policy-\d+", policy_type_id):
raise ValueError('{0} is not a valid policy type ID.'.format(policy_type_id))
return int(policy_type_id.split("-")[2]) | ff1bf183add0f2ce1dba78345a7b9fdbc2048e6c | 2,046 |
def absolute_error(observed, modeled):
"""Calculate the absolute error between two arrays.
:param observed: Array of observed data
:type observed: numpy.ndarray
:param modeled: Array of modeled data
:type modeled: numpy.ndarray
:rtype: numpy.ndarray
"""
error = observed - modeled
return error | ef5aa10fbe25689c1197c1ce7a54401be020de1e | 2,047 |
import json
import yaml
def load_dict(file_name):
"""
Reads JSON or YAML file into a dictionary
"""
if file_name.lower().endswith(".json"):
with open(file_name) as _f:
return json.load(_f)
with open(file_name) as _f:
return yaml.full_load(_f) | a098a8582e22fba2c9c2b72fbf3e3f769f740a98 | 2,049 |
import subprocess
def exec_command_stdout(*command_args, **kwargs):
"""
Capture and return the standard output of the command specified by the
passed positional arguments, optionally configured by the passed keyword
arguments.
Unlike the legacy `exec_command()` and `exec_command_all()` functions, this
modern function is explicitly designed for cross-platform portability. The
return value may be safely used for any purpose, including string
manipulation and parsing.
.. NOTE::
If this command's standard output contains _only_ pathnames, this
function does _not_ return the correct filesystem-encoded string expected
by PyInstaller. If this is the case, consider calling the
filesystem-specific `exec_command()` function instead.
Parameters
----------
command_args : list
Variadic list whose:
1. Mandatory first element is the absolute path, relative path,
or basename in the current `${PATH}` of the command to run.
2. Optional remaining elements are arguments to pass to this command.
encoding : str, optional
Optional name of the encoding with which to decode this command's
standard output (e.g., `utf8`), passed as a keyword argument. If
unpassed, this output will be decoded in a portable manner specific
to the current platform, shell environment, and system settings with
Python's built-in `universal_newlines` functionality.
All remaining keyword arguments are passed as is to the
`subprocess.check_output()` function.
Returns
----------
unicode or str
Unicode string of this command's standard output decoded according to
the "encoding" keyword argument. This string's type depends on the
current Python version as follows:
* Under Python 2.7, this is a decoded `unicode` string.
* Under Python 3.x, this is a decoded `str` string.
"""
# Value of the passed "encoding" parameter, defaulting to None.
encoding = kwargs.pop('encoding', None)
# If no encoding was specified, the current locale is defaulted to. Else, an
# encoding was specified. To ensure this encoding is respected, the
# "universal_newlines" option is disabled if also passed. Nice, eh?
kwargs['universal_newlines'] = encoding is None
# Standard output captured from this command as a decoded Unicode string if
# "universal_newlines" is enabled or an encoded byte array otherwise.
stdout = subprocess.check_output(command_args, **kwargs)
# Return a Unicode string, decoded from this encoded byte array if needed.
return stdout if encoding is None else stdout.decode(encoding) | 32552fed9fd250548c0826a8b2679fa46bd8bf14 | 2,051 |
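A usage sketch, assuming a `python` executable is on the PATH; keyword arguments other than `encoding` flow through to subprocess.check_output():

print(exec_command_stdout("python", "--version").strip())
print(exec_command_stdout("python", "-c", "print('hi')", encoding="utf8").strip())  # hi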
def units(legal_codes):
"""
Return sorted list of the unique units for the given
dictionaries representing legal_codes
"""
return sorted(set(lc["unit"] for lc in legal_codes)) | 85803ecb3d1f51c058c959b7e060c3cb5263f6a3 | 2,053 |
import re
def parse_discount(element):
"""Given an HTML element, parse and return the discount."""
try:
# Remove any non integer characters from the HTML element
discount = re.sub("\D", "", element)
except AttributeError:
discount = "0"
return discount | 658f8a6bef8ba4bf82646a10c495904c03a717c7 | 2,054 |
import re
def _cleanse_line(line, main_character):
"""
Cleanse the extracted lines to remove formatting.
"""
# Strip the line, just in case.
line = line.strip()
# Clean up formatting characters.
line = line.replace('\\' , '') # Remove escape characters.
line = line.replace('[mc]', main_character) # Standardize MC name.
line = re.sub(r'{/?i}' , '*', line) # Convert italics to Markdown.
line = re.sub(r'{cps=\d+}', '' , line) # Remove scroll speed formatting.
return line | 87177c557ab89b77c63cc1df10874e52606258a7 | 2,057 |
def optimal_path_fixture():
"""An optimal path, and associated distance, along the nodes of the pyramid"""
return [0, 1, 2, 3], 10 + 2 + 5 | 10c4e436907ecb99740a2514c927f05fd8488cf4 | 2,058 |
import os
def Environ(envstring):
"""Return the String associated with an operating system environment variable
envstring Optional. String expression containing the name of an environment variable.
number Optional. Numeric expression corresponding to the numeric order of the
environment string in the environment-string table. The number argument can be any
numeric expression, but is rounded to a whole number before it is evaluated.
Remarks
If envstring can't be found in the environment-string table, a zero-length string ("")
is returned. Otherwise, Environ returns the text assigned to the specified envstring;
that is, the text following the equal sign (=) in the environment-string table for that environment variable.
"""
try:
envint = int(envstring)
except ValueError:
return os.environ.get(envstring, "")
# Is an integer - need to get the envint'th value
try:
return "%s=%s" % (list(os.environ.keys())[envint], list(os.environ.values())[envint])
except IndexError:
return "" | 9972a427017dcae2917ea01d679d3fbc89ced0a7 | 2,059 |
import requests
def scrape_opening_hours():
""""scrape opening hours from https://www.designmuseumgent.be/bezoek"""
r = requests.get("https://www.designmuseumgent.be/bezoek")
data = r.text
return data | 297a35f3bc4e10d453da495e031fae5ce79ca643 | 2,060 |
import string
import random
def generate_random_string( length ):
"""Generate a random string of a given length containing uppercase and lowercase letters, digits and ASCII punctuation."""
source = string.ascii_lowercase + string.ascii_uppercase + string.digits + string.punctuation
return ''.join(random.choice(source) for _ in range(length)) | 9bb1ee7e21f27231e498f48bff505d963565f582 | 2,061 |
def find(x):
"""
Find the representative of a node
"""
if x.instance is None:
return x
else:
# collapse the path and return the root
x.instance = find(x.instance)
return x.instance | 5143e9d282fb1988d22273996dae36ed587bd9d2 | 2,062 |
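find() expects nodes carrying an `instance` attribute that points at another node (or None for a set representative); a minimal sketch of that structure, which is an assumption rather than part of the source:

class UnionNode:
    def __init__(self):
        self.instance = None  # None marks a set representative

a, b, c = UnionNode(), UnionNode(), UnionNode()
b.instance = a  # merge b into a's set
c.instance = b  # merge c into b's set
root = find(c)  # path compression: c now points directly at a
print(root is a, c.instance is a)  # True True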
import sys
import glob
import os
def get(dirPath):
"""指定したパスのファイル一覧を取得する"""
if sys.version_info.major != 3:
print("Error!!\nPython 3.x is required.")
exit()
if sys.version_info.minor >= 5:
# python 3.5以降
fileList = []
fileList = glob.glob(dirPath, recursive=True)
return fileList
else:
# python3.4以前
fileList = []
for root, dirs, files in os.walk(dirPath):
for filename in files:
fileList.append(os.path.join(root, filename)) # ファイルのみ再帰でいい場合はここまででOK
for dirname in dirs:
fileList.append(os.path.join(root, dirname)) # サブディレクトリまでリストに含めたい場合はこれも書く
print(fileList)
return fileList | a9b66504f1103f094930386a75afbcb8847dacbd | 2,063 |
def is_callable(x):
"""Tests if something is callable"""
return callable(x) | 72584deb62ac5e34e69325466236792c5299a51b | 2,064 |
def vocabulary(vec_docs):
""" vocabulary(vec_docs) -> tuple: (float avg_doc_len, corpus vocabulary dictionary {"word": num_docs_containing_this_term, ...})
vec_docs = dict mapping doc IDs to word lists {ID: ["word_i", "word_i+1", ...], ...}; it is updated in place so each value becomes a dict of word counts
"""
vocabulary = {}
count_vec = [] #used for aggregating doc lengths in a list to determine avg_doc_len
#Extract doc lengths, convert vec_docs values to c(w,d), create corpus vocabulary as c(d,w)
for key,value in vec_docs.items(): #recall: {key = "doc_ID": value = [list, of, words, in, each, document]}
doc_words = {}
count_vec.append(len(value))
for word in value:
#convert doc word list into dict storing c(w,d) ∈ D
if word in doc_words:
doc_words[word] = doc_words[word] + 1
else:
doc_words[word] = 1
#Next, create vocabulary c(d,w) ∈ Corpus
for word,count in doc_words.items():
if word in vocabulary:
vocabulary[word] = vocabulary[word] + 1
else:
vocabulary[word] = 1
#last convert {ID:[list,of,words]} -> {ID: {dict:1,of:1,word:1,counts:2} }
vec_docs[key] = doc_words
avg_dl = sum(count_vec) / len(count_vec)
return (avg_dl,vocabulary) | 4e6f4df1e36c2fdf3d7d1d20750d74f91a0214b6 | 2,065 |
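A sketch of the expected shapes (the sample documents are made up): vec_docs maps doc IDs to word lists and is rewritten in place to per-document word counts.

vec_docs = {"d1": ["apple", "banana", "apple"], "d2": ["banana", "cherry"]}
avg_dl, vocab = vocabulary(vec_docs)
print(avg_dl)    # 2.5
print(vocab)     # {'apple': 1, 'banana': 2, 'cherry': 1}
print(vec_docs)  # {'d1': {'apple': 2, 'banana': 1}, 'd2': {'banana': 1, 'cherry': 1}}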
import argparse
def args():
"""
--all (some subset that is useful for someone)
--packages (maybe positional?)
"""
parser = argparse.ArgumentParser("serviced-tests")
parser.add_argument("-v", "--verbose", action="store_true", help="verbose logging")
types = parser.add_argument_group("Test Type")
types.add_argument("--unit", action="store_true", help="pass the 'unit' build tag")
types.add_argument("--integration", action="store_true", help="pass the 'integration' build tag")
options = parser.add_argument_group("Test Options")
options.add_argument("--quick", action="store_true", help="don't run tests with the '!quick' build constraint")
options.add_argument("--root", action="store_true", help="run the tests as the root user")
options.add_argument("--race", action="store_true", help="run tests with race detection")
options.add_argument("--cover", action="store_true", help="run tests with coverage")
options.add_argument("--tag", action="append", help="optional extra build tag (may be specified multiple times)")
options.add_argument("--include_vendor", action="store_true", dest="include_vendor", help="run tests against the vendor directory")
coverage = parser.add_argument_group("Coverage Options")
coverage.add_argument("--cover-html", required=False, help="output file for HTML coverage report")
coverage.add_argument("--cover-xml", required=False, help="output file for Cobertura coverage report")
fixtures = parser.add_argument_group("Fixture Options")
fixtures.add_argument("--elastic", action="store_true", help="start an elastic server before the test run")
fixtures.add_argument("--elastic-port", type=int, help="elastic server port", default=9202)
parser.add_argument("--packages", nargs="*", help="serviced packages to test, relative to the serviced root (defaults to ./...)")
parser.add_argument("arguments", nargs=argparse.REMAINDER, help="optional arguments to be passed through to the test runner")
return parser.parse_args() | b2a6b83b1ee02fc5ae2ba3130757ca50d9d954fe | 2,066 |
import torch
def pad_col(input, val=0, where='end'):
"""Addes a column of `val` at the start of end of `input`."""
if len(input.shape) != 2:
raise ValueError(f"Only works for `phi` tensor that is 2-D.")
pad = torch.zeros_like(input[:, :1])
if val != 0:
pad = pad + val
if where == 'end':
return torch.cat([input, pad], dim=1)
elif where == 'start':
return torch.cat([pad, input], dim=1)
raise ValueError(f"Need `where` to be 'start' or 'end', got {where}") | 77caa028bb76da922ba12492f077811d2344c2a9 | 2,068 |
def _DX(X):
"""Computes the X finite derivarite along y and x.
Arguments
---------
X: (m, n, l) numpy array
The data to differentiate.
Returns
-------
tuple
Tuple of length 2 (Dy(X), Dx(X)).
Note
----
DX[0], the derivative along y, has shape (m-1, n, l).
DX[1], the derivative along x, has shape (m, n-1, l).
"""
return (X[1:, :, :] - X[:-1, :, :], # D along y
X[:, 1:, :] - X[:, 0:-1, :]) # D along x | 4aff05c2c25089c9f93b762a18dad42b0142db09 | 2,069 |
def get_conditions():
"""
List of conditions
"""
return [
'blinded',
'charmed',
'deafened',
'fatigued',
'frightened',
'grappled',
'incapacitated',
'invisible',
'paralyzed',
'petrified',
'poisoned',
'prone',
'restrained',
'stunned',
'unconscious',
'exhaustion'
] | 816ccb50581cafa20bdefed2a075a3370704cef4 | 2,070 |
def flat_list(*alist):
"""
Flatten a tuple, list, single value or list of lists into a flat list
e.g.
>>> flat_list(1,2,3)
[1, 2, 3]
>>> flat_list(1)
[1]
>>> flat_list([1,2,3])
[1, 2, 3]
>>> flat_list([None])
[]
"""
a = []
for x in alist:
if x is None:
continue
if isinstance(x, (tuple, list)):
a.extend([i for i in x if i is not None])
else:
a.append(x)
return a | 5a68495e507e9a08a9f6520b83a912cf579c6688 | 2,071 |
def _title_case(value):
"""
Return the string with only its first
letter uppercased; the rest is unchanged.
"""
return value[0].upper() + value[1:] | 037bce973580f69d87c2e3b4e016b626a2b76abb | 2,072 |
from typing import List
def is_permutation_matrix(matrix: List[List[bool]]) -> bool:
"""Returns whether the given boolean matrix is a permutation matrix."""
return (all(sum(v) == 1 for v in matrix) and
all(sum(col) == 1 for col in zip(*matrix))) | b53d6f4ba6e8e1ba445783350de831b614aa187e | 2,074 |
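Two quick checks: every row and every column must contain exactly one True.

print(is_permutation_matrix([[False, True], [True, False]]))  # True
print(is_permutation_matrix([[True, False], [True, False]]))  # False (column sums are 2 and 0)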
def show_counts(input_dict):
"""Format dictionary count information into a string
Args:
input_dict (dictionary): input keys and their counts
Return:
string: formatted output string
"""
out_s = ''
in_dict_sorted = {k: v for k, v in sorted(input_dict.items(), key=lambda item: item[1], reverse=True)}
for idx, (k, v) in enumerate(in_dict_sorted.items()):
out_s += '\t{}:\t{} ({})\n'.format(idx, k, v)
out_s += '\n'
return out_s | 078d1f7599b22741f474c0e6d1b02f44edfc1f9b | 2,075 |
def check_if_prime(number):
"""checks if number is prime
Args:
number (int):
Raises:
TypeError: if number of type float
Returns:
bool: True if the number is prime, else False
"""
if type(number) == float:
raise TypeError("TypeError: entered float type")
if number > 1 :
for i in range( 2, int(number / 2) + 1 ):
if number % i == 0:
return False
return True
else:
return False | 0a15a4f133b12898b32b1f52a317939cf5e30d34 | 2,076 |
import os
import json
def _load_flags():
"""Load flag definitions.
It will first attempt to load the file at TINYFLAGS environment variable.
If that does not exist, it will then load the default flags file bundled
with this library.
:returns list: Flag definitions to use.
"""
path = os.getenv('TINYFLAGS')
if path and os.path.exists(path) and not os.path.isdir(path):
try:
with open(path, 'r') as f:
return json.load(f)
except (OSError, ValueError):
pass
return []
# with open(resource_filename('tinyenv', 'config/flags.json'), 'r') as f:
# return json.load(f) | ebf3e78296c2fd8e4590f87f87bd27b9252539f8 | 2,077 |
from typing import List
def already_exists(statement: str, lines: List[str]) -> bool:
"""
Check if statement is in lines
"""
return any(statement in line.strip() for line in lines) | 194d8c6c48609f5a2accacdb2ed0857815d48d1d | 2,078 |
import random
def uniform(lower_list, upper_list, dimensions):
"""Fill array """
if hasattr(lower_list, '__iter__'):
return [random.uniform(lower, upper)
for lower, upper in zip(lower_list, upper_list)]
else:
return [random.uniform(lower_list, upper_list)
for _ in range(dimensions)] | 59bcb124f0d71fd6e5890cd1d6c200319ab5910e | 2,079 |
def reshape(box, new_size):
"""
box: (N, 4) in y1x1y2x2 format
new_size: (N, 2) stack of (h, w)
"""
box[:, :2] = new_size * box[:, :2]
box[:, 2:] = new_size * box[:, 2:]
return box | 56fbeac7c785bd81c7964d7585686e11864ff034 | 2,080 |
import calendar
def number_of_days(year: int, month: int) -> int:
"""
Gets the number of days in a given year and month
:param year: the year (non-negative)
:type year: int
:param month: the month, 1 through 12
:type month: int
:return: the number of days in that month
:rtype: int
"""
assert isinstance(year, int) and 0 <= year
assert isinstance(month, int) and 0 < month <= 12
c = calendar.Calendar()
days = c.itermonthdays(year, month)
days = set(days)
days.remove(0)
return len(days) | d585f037292eef36ecc753fbf702035577513a15 | 2,081 |
import six
import sys
def safe_decode(text, incoming=None, errors='strict'):
"""Decodes incoming str using `incoming` if they're not already unicode.
:param incoming: Text's current encoding
:param errors: Errors handling policy. See here for valid
values http://docs.python.org/2/library/codecs.html
:returns: text or a unicode `incoming` encoded
representation of it.
:raises TypeError: If text is not an instance of str
"""
if not isinstance(text, six.string_types):
raise TypeError("%s can't be decoded" % type(text))
if isinstance(text, six.text_type):
return text
if not incoming:
incoming = (sys.stdin.encoding or
sys.getdefaultencoding())
try:
return text.decode(incoming, errors)
except UnicodeDecodeError:
# Note(flaper87) If we get here, it means that
# sys.stdin.encoding / sys.getdefaultencoding
# didn't return a suitable encoding to decode
# text. This happens mostly when global LANG
# var is not set correctly and there's no
# default encoding. In this case, most likely
# python will use ASCII or ANSI encoders as
# default encodings but they won't be capable
# of decoding non-ASCII characters.
#
# Also, UTF-8 is being used since it's an ASCII
# extension.
return text.decode('utf-8', errors) | 8bd5a4ef516f925f7967ab50dffff0d7273f547c | 2,082 |
import re
def _egg_link_name(raw_name: str) -> str:
"""
Convert a Name metadata value to a .egg-link name, by applying
the same substitution as pkg_resources's safe_name function.
Note: we cannot use canonicalize_name because it has a different logic.
"""
return re.sub("[^A-Za-z0-9.]+", "-", raw_name) + ".egg-link" | 923ff815b600b95ccb5750a8c1772ee9156e53b2 | 2,083 |
import collections
def get_top_words(words):
"""
Get a list of the most frequently occurring words, with their counts
:param words: list of words to analyze
:return: [(word1, number of occurrences of word1), ..]
"""
return collections.Counter(words).most_common() | 632317f57e734a93b6f3f20dfef001028b40c6b3 | 2,085 |
def format_map(mapping, st):
"""
Format string st with given map.
"""
return st.format_map(mapping) | 462e0a744177d125db50739eac1f2e7a62128010 | 2,086 |
def is_right(side1, side2, side3):
"""
Takes three side lengths and returns true if triangle is right
:param side1: int or float
:param side2: int or float
:param side3: int or float
:return: bool
"""
# right triangle iff the Pythagorean identity holds (exact comparison; use math.isclose for floats with rounding error)
a, b, c = sorted([side1, side2, side3])
return a ** 2 + b ** 2 == c ** 2 | 2d22bbc7d0d363b360f578002a6380a4ae5f5b63 | 2,087 |
import re
def split_words_and_quoted_text(text):
"""Split string text by space unless it is
wrapped inside double quotes, returning a list
of the elements.
For example
if text =
'Should give "3 elements only"'
the resulting list would be:
['Should', 'give', '3 elements only']
"""
# using shlex
# return shlex.split(text)
# using re
result = list()
pattern = re.findall(r'\w+\s*|\".+?\"', text)
for char in pattern:
result.append(char.strip().replace('"', ''))
return result | befb31949d4c52fac96765fd78bc1b9d644282ba | 2,088 |
def scheduler(epoch):
"""Generating learning rate value for a given epoch.
inputs:
epoch = number of current epoch
outputs:
learning_rate = float learning rate value
"""
if epoch < 100:
return 1e-3
elif epoch < 125:
return 1e-4
else:
return 1e-5 | 916cbc12ff76b8d022a96c89083b8bd2a3078c69 | 2,089 |
import os
def has_supervisor() -> bool:
"""Return true if supervisor is available."""
return "SUPERVISOR" in os.environ | 5af98347acfdcc50c1b4ca80e01597c584e3a45a | 2,090 |
def joinpath(base, end):
"""Like Path.joinpath(), but ensures the result is inside `base`.
Should be used for user-supplied `end`.
"""
result = (base / end).resolve()
if base not in result.parents:
print(base, end, result)
raise ValueError(end)
return result | 1b4f5afcdca21ceb6e676385602dd07b252db3ad | 2,091 |
def posts_completed(scraped_posts, limit):
"""Returns true if the amount of posts scraped from
profile has reached its limit.
"""
return len(scraped_posts) == limit | ff72474349a32f326b63b95070927c4b379be800 | 2,092 |
def get_zero_columns(matrix):
""" Returns a list of the columns which are all 0 """
rows = matrix.shape[0]
columns = matrix.shape[1]
result = []
for j in range(columns):
is_zero_column = True
for i in range(rows):
is_zero_column = is_zero_column and matrix[i, j] == 0.0
result.append(is_zero_column)
return result | 35694592f4155f710e5ed3c2148a138591cd683f | 2,093 |
def traditional_constants_icr_equation_empty_fixed(fixed_params, X_col):
""" Traditional ICR equation with constants from ACE consensus """
a = 450
tdd = X_col[0]
return a / tdd | 2931e4b3592a94690d98b0cb4cb90f712ff4a449 | 2,094 |
def sort_completions_key(completion):
"""
sort completions according to their type
Args:
completion (jedi.api.classes.Completion): completion
Returns:
int: sorting order
"""
if completion.type == "function":
return 2
elif completion.type == "instance":
return 1
else:
return 3 | 7bf767d908c83c11dafa5e0fd694bbb31a98c404 | 2,095 |
def _is_git_url_mismatch(mismatch_item):
"""Returns whether the given mismatch item is for a GitHub URL."""
_, (required, _) = mismatch_item
return required.startswith('git') | b1c3cec3d8cf3c7d3ffa5c405522b1a08754223b | 2,096 |
import random
def create_solution_board(width=6, height=6):
"""Randomly generates a new board
with width by height size
"""
if type(width) != int or type(height) != int:
raise TypeError('Arguments must be int type')
boxes = width * height
if boxes % 2 != 0:
raise ValueError('Number of boxes is not multiple of two')
numbers = list(range(1, boxes // 2 + 1))
numbers = numbers + numbers
random.shuffle(numbers)
board = []
for index in range(height):
board.append([])
for _ in range(width):
random_number = numbers.pop()
board[index].append(random_number)
return board | 0b6e30d726cec61581d93c909761f80d739eb917 | 2,098 |
import math
def mylog10(x):
"""Return the base-10 logarithm of x."""
return math.log10(x) | d32113c16047175125e1b79c9ce0ea8822e4853c | 2,100 |
import json
def handler(event, context):
""" Lambda Handler.
Returns Hello World and the event and context objects
"""
print(event)
print(context)
return {
"body": json.dumps('Hello World!')
} | 561326fec784aa72a133b217f1e2cecaf12ec1ad | 2,104 |
def clf2D_slope_intercept(coef=None, intercept=None, clf=None):
"""
Gets the slope and intercept for the separating hyperplane of a linear
classifier fit on a two dimensional dataset.
Parameters
----------
coef:
The classification normal vector.
intercept:
The classifier intercept.
clf: subclass of sklearn.linear_model.base.LinearClassifierMixin
A sklearn classifier with attributes coef_ and intercept_
Output
------
slope, intercept
"""
if clf is not None:
coef = clf.coef_.reshape(-1)
intercept = float(clf.intercept_)
else:
assert coef is not None and intercept is not None
slope = - coef[0] / coef[1]
intercept = - intercept / coef[1]
return slope, intercept | 9376c34a3836ee028c4b0497e1088ddd50bb1fc6 | 2,107 |
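A sketch with scikit-learn (assumed available); any linear classifier exposing coef_ and intercept_ fits the clf branch:

import numpy as np
from sklearn.linear_model import LogisticRegression

X = np.array([[0., 0.], [0., 1.], [1., 0.], [1., 1.]])
y = np.array([0, 0, 0, 1])  # class 1 only when both features are on
clf = LogisticRegression().fit(X, y)
slope, intercept = clf2D_slope_intercept(clf=clf)
print(slope, intercept)  # the separating line is y = slope * x + intercept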
def texsafe(value):
""" Returns a string with LaTeX special characters stripped/escaped out """
special = [
[ "\\xc5", 'A'], #'\\AA'
[ "\\xf6", 'o'],
[ "&", 'and'], #'\\"{o}'
]
for char in ['\\', '^', '~', '%', "'", '"']: # these mess up things
value = value.replace(char, '')
for char in ['#','$','_', '{', '}', '<', '>']: # these can be escaped properly
value = value.replace(char, '\\' + char)
for char, new_char in special:
value = eval(repr(value).replace(char, new_char))
return value | b40b60a34629f75dfdac298bd2937af52ef797b1 | 2,110 |
def _logfile_readme() -> str:
"""Returns a string containing a 'how to read this logfile' message.
Returns
-------
str
Returns a formatted paragraph-long message with tips on reading log file output.
"""
line1 = "Messages are displayed below in the format"
line2 = " <DATE> <TIME> <LOGGER NAME> @ <FILE>:<LINE> - <LEVEL> - <FUNCTION>:<MESSAGE>"
line3 = "where <DATE> is the date in 'YYYY-MM-DD' format, <TIME> is the time in 'HH:MM:SS,milliseconds' format, <LOGGER NAME> is the name of the logger that generated the message (which should be the __name__ of the file where the logger was initialized), <FILE> and <LINE> is the file name and line number where the message was generated, <LEVEL> is the priority level that the message was generated at, <FUNCTION> is the name of the function that the message was generated inside, and <MESSAGE> is the actual message that was generated. "
message = f"{line1}\n\n{line2}\n\n{line3}\n\n"
return message | 5e418b20df1ebb486d0b1c3ecf38d6c72ae8a5a7 | 2,111 |
def filter_bam_file(bamfile, chromosome, outfile):
"""
filter_bam_file uses samtools to read a <bamfile> and read only
the reads that are mapped to <chromosome>.
It saves the filtered reads into <outfile>.
"""
inputs = [bamfile]
outputs = [outfile]
options = {
'cores': 1,
'memory': '4g',
'account': 'NChain',
'walltime': '01:00:00'
}
directory = "/".join(outfile.split("/")[:-1])
spec = '''
source /com/extra/samtools/1.6.0/load.sh
mkdir -p {dirc}
samtools view -b {infile} {chrom} > {out}
'''.format(infile=bamfile, chrom=chromosome, out=outfile, dirc=directory)
return inputs, outputs, options, spec | 317e1283d4722483e4bc98080ef99abd9876d045 | 2,112 |
def iou(a, b):
""" Calculates intersection over union (IOU) over two tuples """
(a_x1, a_y1), (a_x2, a_y2) = a
(b_x1, b_y1), (b_x2, b_y2) = b
a_area = (a_x2 - a_x1) * (a_y2 - a_y1)
b_area = (b_x2 - b_x1) * (b_y2 - b_y1)
dx = min(a_x2, b_x2) - max(a_x1, b_x1)
dy = min(a_y2, b_y2) - max(a_y1, b_y1)
if (dx>=0) and (dy>=0):
overlap = dx * dy
iou = overlap / (a_area + b_area - overlap)
return iou
return 0 | 0e72d00a672c430cce69246cb7d7889ae41ae216 | 2,113 |
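A worked example: two 2x2 boxes overlapping in a 1x1 region.

a = ((0, 0), (2, 2))  # area 4
b = ((1, 1), (3, 3))  # area 4, overlaps a in a 1x1 square
print(iou(a, b))      # 1 / (4 + 4 - 1) = 0.142857...
print(iou(((0, 0), (1, 1)), ((2, 2), (3, 3))))  # 0 (disjoint)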
import json
def get_dict(str_of_dict: str, order_key='', sort_dict=False) -> list:
"""Function returns the list of dicts:
:param str_of_dict: string got form DB
(e.g. {"genre_id": 10, "genre_name": "name1"}, {"genre_id": 11, "genre_name": "name12"},...),
:param order_key: the key by which dictionaries will be sorted (required if flag 'sort_dict=True'),
:param sort_dict: flag for sorting the dictionary (boolean).
:return: list of dicts (e.g. [{"genre_id": 10, "genre_name": "name1"}, {"genre_id": 11, "genre_name": "name12"},...])"""
result_dict = list()
if str_of_dict:
result_dict = json.loads('[' + str_of_dict + ']')
if sort_dict and order_key:
try:
result_dict = sorted(result_dict, key=lambda i: i[order_key])
return result_dict
except KeyError:
return result_dict
return result_dict
else:
return result_dict | 81d20db2dbe929693994b5b94aa971850ef9c838 | 2,114 |
import hashlib
import struct
def get_richpe_hash(pe):
"""Computes the RichPE hash given a file path or data.
If the RichPE hash is unable to be computed, returns None.
Otherwise, returns the computed RichPE hash.
Source : https://github.com/RichHeaderResearch/RichPE
"""
if pe.RICH_HEADER is None:
return None
# Get list of @Comp.IDs and counts from Rich header
# Elements in rich_fields at even indices are @Comp.IDs
# Elements in rich_fields at odd indices are counts
rich_fields = pe.RICH_HEADER.values
if len(rich_fields) % 2 != 0:
return None
# The RichPE hash of a file is computed by computing the md5 of specific
# metadata within the Rich header and the PE header
md5 = hashlib.md5()
# Update hash using @Comp.IDs and masked counts from Rich header
while len(rich_fields):
compid = rich_fields.pop(0)
count = rich_fields.pop(0)
mask = 2 ** (count.bit_length() // 2 + 1) - 1
count |= mask
md5.update(struct.pack("<L", compid))
md5.update(struct.pack("<L", count))
# Update hash using metadata from the PE header
md5.update(struct.pack("<L", pe.FILE_HEADER.Machine))
md5.update(struct.pack("<L", pe.FILE_HEADER.Characteristics))
md5.update(struct.pack("<L", pe.OPTIONAL_HEADER.Subsystem))
md5.update(struct.pack("<B", pe.OPTIONAL_HEADER.MajorLinkerVersion))
md5.update(struct.pack("<B", pe.OPTIONAL_HEADER.MinorLinkerVersion))
md5.update(struct.pack("<L", pe.OPTIONAL_HEADER.MajorOperatingSystemVersion))
md5.update(struct.pack("<L", pe.OPTIONAL_HEADER.MinorOperatingSystemVersion))
md5.update(struct.pack("<L", pe.OPTIONAL_HEADER.MajorImageVersion))
md5.update(struct.pack("<L", pe.OPTIONAL_HEADER.MinorImageVersion))
md5.update(struct.pack("<L", pe.OPTIONAL_HEADER.MajorSubsystemVersion))
md5.update(struct.pack("<L", pe.OPTIONAL_HEADER.MinorSubsystemVersion))
return md5.hexdigest() | 30e5437f36f76a6225eaba579d55218440ab46b9 | 2,115 |
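A usage sketch assuming the pefile package; the input path is hypothetical:

import pefile

pe = pefile.PE("sample.exe")
print(get_richpe_hash(pe))  # hex digest, or None if there is no Rich header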
def get_input(label, default=None):
"""Prompt the user for input.
:param label: The label of the prompt.
:param label: str
:param default: The default value.
:rtype: str | None
"""
if default:
_label = "%s [%s]: " % (label, default)
else:
_label = "%s: " % label
print("")
value = input(_label)
if not value:
return default
return value | 11de813f0fcfd16f1198299030656c07392f95c9 | 2,116 |
import os
import json
def getVariables():
"""
Retrieves the variables.json file.
"""
if os.path.exists('variables.json'):
with open('variables.json') as jsonFile:
variables = json.loads(jsonFile.read())
return variables
else:
variables = {}
variables['path'] = ''
return variables | ba0c37c14e92caa9bb83bb078d864541cbeec4ac | 2,117 |
def obj_assert_check(cls):
"""
The body of the assert check for an accessor
We allow all versions of add/delete/modify to use the same accessors
"""
if cls in ["of_flow_modify", "of_flow_modify_strict",
"of_flow_delete", "of_flow_delete_strict",
"of_flow_add"]:
return "IS_FLOW_MOD_SUBTYPE(obj->object_id)"
else:
return "obj->object_id == %s" % cls.upper() | 4ebddebdd87c0bdb28e7687ec2b0da623507f89e | 2,119 |
def GetInverseMatrix(matrix):
"""
:param matrix: the matrix which will get its inverse matrix
:return: the inverse matrix(two dimensions only)
"""
matrix[0, 0], matrix[1, 1] = -matrix[1, 1], -matrix[0, 0]
matrix = matrix / -(matrix[0, 0] * matrix[1, 1] - matrix[0, 1] * matrix[1, 0])
return matrix | c4fdba364cc6b73a3b72a40f980a0fa402a1968f | 2,120 |
def x_dot(y):
"""x_dot(y)
Describes the differential equation for position as given in CW 12.
"""
return y | 7fa01584b09c6e83e28ddf63b300323fdcb7fa0b | 2,122 |
def updateDF(df, fields, id_patient):
"""
fields is a dictionary of column names and values.
The function updates the row of id_patient with the values in fields.
"""
for key in fields:
df.loc[df["id_patient"] == id_patient, key] = fields[key][0]
return df | 5ced64eca8d8736836f82dacd1750cb8ac612989 | 2,124 |
def gcd(num1: int, num2: int) -> int:
"""Computes the greatest common divisor of integers a and b using
Euclid's Algorithm.
"""
while num2 != 0:
num1, num2 = num2, num1 % num2
return num1 | c53ff5be770570278f497d7ce2a2146a3ac3d9da | 2,125 |
import json
def examine(path):
""" Look for forbidden tasks in a job-output.json file path """
data = json.load(open(path))
to_fix = False
for playbook in data:
if playbook['trusted']:
continue
for play in playbook['plays']:
for task in play['tasks']:
for hostname, host in task['hosts'].items():
if hostname != 'localhost':
continue
if host['action'] in ['command', 'shell']:
print("Found disallowed task:")
print(" Playbook: %s" % playbook['playbook'])
print(" Role: %s" % task.get('role', {}).get('name'))
print(" Task: %s" % task.get('task', {}).get('name'))
to_fix = True
return to_fix | e441fc58bbfc4547bbdff451d6d06ba952e5a1ba | 2,127 |
import subprocess
import sys
def determine_disjunct_modules_alternative(src_rep):
"""
Determine imported modules of src_rep that are not already loaded in sys.modules.
Potentially get rid of determine_added_modules and get_modules_lst()
"""
findimports_output = subprocess.check_output(['findimports', src_rep])
findimports_output = findimports_output.decode('utf-8').splitlines()
custom_modules_lst = []
for i, elem in enumerate(findimports_output):
if ':' in elem:
continue
elem = elem.rstrip('\n').split('.',1)[0].strip()
#print(f" element : {elem}")
custom_modules_lst.append(elem)
custom_modules_lst = set(custom_modules_lst)
#beautify this
disjunct_modules = []
for i, elem in enumerate(custom_modules_lst):
if elem in sys.modules:
continue
else:
disjunct_modules.append(elem)
return disjunct_modules | be3e0f1e4edf84bdeb8ea5b2a0117d9853581884 | 2,128 |
def solid_polygon_info_(base_sides, printed=False):
"""Get information about a solid polygon from its side count."""
# Example: A rectangular solid (Each base has four sides) is made up of
# 12 edges, 8 vertices, 6 faces, and 12 triangles.
edges = base_sides * 3
vertices = base_sides * 2
faces = base_sides + 2
triangles = (base_sides - 2) * 2 + vertices
if printed:
print(f"Edges: {edges}\nVertices: {vertices}\nFaces: {faces}\nTriangles: {triangles}")
else:
return {"edges": edges,
"vertices": vertices,
"faces": faces,
"triangles": triangles} | a16bae9b82fd7a89332d5403359c2aa1eddf6cb4 | 2,129 |
import gettext
def _(txt):
""" Custom gettext translation function that uses the CurlyTx domain """
t = gettext.dgettext("CurlyTx", txt)
if t == txt:
#print "[CurlyTx] fallback to default translation for", txt
t = gettext.gettext(txt)
return t | 839c36184eabde641a40d7b7ad55d4695574dafb | 2,131 |
def additional_bases():
""""Manually added bases that cannot be retrieved from the REST API"""
return [
{
"facility_name": "Koltyr Northern Warpgate",
"facility_id": 400014,
"facility_type_id": 7,
"facility_type": "Warpgate"
},
{
"facility_name": "Koltyr Eastern Warpgate",
"facility_id": 400015,
"facility_type_id": 7,
"facility_type": "Warpgate"
},
{
"facility_name": "Koltyr Southern Warpgate",
"facility_id": 400016,
"facility_type_id": 7,
"facility_type": "Warpgate"
},
{
"facility_name": "Zorja",
"facility_id": 400017,
"facility_type_id": 2,
"facility_type": "Amp Station"
},
{
"facility_name": "Xander",
"facility_id": 400018,
"facility_type_id": 3,
"facility_type": "Bio Lab"
},
{
"facility_name": "Svarog",
"facility_id": 400019,
"facility_type_id": 4,
"facility_type": "Tech Plant"
},
{
"facility_name": "Koltyr Tech Plant Outpost",
"facility_id": 400020,
"facility_type_id": 5,
"facility_type": "Large Outpost"
},
{
"facility_name": "Koltyr Biolab Outpost",
"facility_id": 400021,
"facility_type_id": 5,
"facility_type": "Large Outpost"
},
{
"facility_name": "Koltyr Amp Station Outpost",
"facility_id": 400022,
"facility_type_id": 5,
"facility_type": "Large Outpost"
}
] | e2a5ad97ca1b424466f5ebe340466eaf9f627e7e | 2,132 |
def get_all_label_values(dataset_info):
"""Retrieves possible values for modeled labels from a `Seq2LabelDatasetInfo`.
Args:
dataset_info: a `Seq2LabelDatasetInfo` message.
Returns:
A dictionary mapping each label name to a tuple of its permissible values.
"""
return {
label_info.name: tuple(label_info.values)
for label_info in dataset_info.labels
} | 929db286b3f7ee8917618e9f46feabdff630d3b2 | 2,133 |
def LineColourArray():
"""Line colour options array"""
Colour = [
'Black',
'dimgrey',
'darkgrey',
'silver',
'lightgrey',
'maroon',
'darkred',
'firebrick',
'red',
'orangered',
'darkorange',
'orange',
'saddlebrown',
'darkgoldenrod',
'goldenrod',
'gold',
'darkolivegreen',
'olivedrab',
'olive',
'y',
'darkkhaki',
'khaki',
'darkgreen',
'Green',
'limegreen',
'lime',
'mediumspringgreen',
'palegreen',
'greenyellow',
'midnightblue',
'navy',
'darkblue',
'mediumblue',
'blue',
'slateblue',
'indigo',
'purple',
'darkmagenta',
'darkorchid',
'mediumorchid',
'orchid',
'plum',
'crimson',
'deeppink',
'magenta',
'hotpink',
'pink' ]
return Colour | 94f91d17c6e539983ab38ca7fdadd211e6268bfb | 2,136 |
def removePrefixes(word, prefixes):
"""
Attempts to remove the given prefixes from the given word.
Args:
word (string): Word to remove prefixes from.
prefixes (collections.Iterable or string): Prefixes to remove from given word.
Returns:
(string): Word with prefixes removed.
"""
if isinstance(prefixes, str):
return word.split(prefixes)[-1]
for prefix in prefixes:
word = word.split(prefix)[-1]
return word | 6932e5605b11eee004a350c7f9be831d8bb7ca9d | 2,137 |
def isSol(res):
"""
Check if the string is of the form a^i b^j c^k
"""
if not res or res[0] != 'a' or res[-1] != 'c':
return False
l = 0
r = len(res)-1
while res[l] == "a":
l+=1
while res[r] == "c":
r-=1
if r-l+1 <= 0:
return False
for x in res[l:r+1]:
if x != 'b':
return False
return True | 14030e52a588dc13029602e81a5f2068707bca17 | 2,138 |
def accuracy(output, target, top_k=(1,)):
"""Calculate classification accuracy between output and target.
:param output: output of classification network
:type output: pytorch tensor
:param target: ground truth from dataset
:type target: pytorch tensor
:param top_k: top k of metric, k is an integer
:type top_k: tuple of integers
:return: results of top k
:rtype: list
"""
max_k = max(top_k)
batch_size = target.size(0)
_, pred = output.topk(max_k, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in top_k:
correct_k = correct[:k].view(-1).float().sum(0)
res.append(correct_k.mul_(100.0 / batch_size))
return res | 68b7c48e5bd832a637e7a06353c48ffa09b449cd | 2,140 |
def sum_digits(number):
"""
Write a function named sum_digits which takes a number as input and
returns the sum of the absolute value of each of the number's decimal digits.
"""
return sum(int(n) for n in str(number) if n.isdigit()) | b6d8083a78d67a268316716174723f47d84b2287 | 2,141 |
from typing import Callable
from typing import Dict
from typing import Any
import functools
def glacier_wrap(
f: Callable[..., None],
enum_map: Dict[str, Dict[str, Any]],
) -> Callable[..., None]:
"""
Return the new function which is click-compatible
(has no enum signature arguments) from the arbitrary glacier compatible
function
"""
# Implement the argument conversion logic
@functools.wraps(f)
def wrapped(*args: Any, **kwargs: Any) -> None:
# convert args and kwargs
converted_kwargs = {}
for name, value in kwargs.items():
if name in enum_map:
converted_kwargs[name] = enum_map[name][value]
else:
converted_kwargs[name] = value
return f(*args, **converted_kwargs)
return wrapped | 01f3a90179bb0dba29ffb0b2fa9d91be15e0ee7e | 2,142 |
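A sketch of the conversion: enum_map maps an argument name to a {string: enum member} lookup, so string values arrive as enum members (Color and greet are made-up examples):

from enum import Enum

class Color(Enum):
    RED = 1
    BLUE = 2

def greet(name: str, color: Color) -> None:
    print(name, color)

wrapped = glacier_wrap(greet, {"color": {"red": Color.RED, "blue": Color.BLUE}})
wrapped(name="amy", color="red")  # prints: amy Color.RED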
import torch
def huber_loss(x, delta=1.):
""" Standard Huber loss of parameter delta
https://en.wikipedia.org/wiki/Huber_loss
returns 0.5 * x^2 if |x| <= delta
delta * (|x| - 0.5 * delta) otherwise
"""
if torch.abs(x) <= delta:
return 0.5 * (x ** 2)
else:
return delta * (torch.abs(x) - 0.5 * delta) | b3493eb9d4e38fa36f92db80dc52a47c32caf3c9 | 2,143 |
def expected_win(theirs, mine):
"""Compute the expected win rate of my strategy given theirs"""
assert abs(theirs.r + theirs.p + theirs.s - 1) < 0.001
assert abs(mine.r + mine.p + mine.s - 1) < 0.001
wins = theirs.r * mine.p + theirs.p * mine.s + theirs.s * mine.r
losses = theirs.r * mine.s + theirs.p * mine.r + theirs.s * mine.p
return wins - losses | 92de2010287e0c027cb18c3dd01d95353e4653c4 | 2,144 |
try:
import numpy
except ImportError:
numpy = None
def parse_megam_weights(s, features_count, explicit=True):
"""
Given the stdout output generated by ``megam`` when training a
model, return a ``numpy`` array containing the corresponding weight
vector. This function does not currently handle bias features.
"""
if numpy is None:
raise ValueError("This function requires that numpy be installed")
assert explicit, "non-explicit not supported yet"
lines = s.strip().split("\n")
weights = numpy.zeros(features_count, "d")
for line in lines:
if line.strip():
fid, weight = line.split()
weights[int(fid)] = float(weight)
return weights | db172935fe7af892b420d515391565ccc2b44c55 | 2,145 |
def create_template_error():
"""
Creates a template for generating errors
"""
return {'response': False} | f15c27cc980cf1bda6b82353d01bbe7871fdbff1 | 2,146 |
def load_input(fname):
"""Read in the data, return as a list."""
data = [""]
with open(fname, "r") as f:
for line in f.readlines():
if line.strip("\n"):
data[-1] += line.strip("\n") + " "
else:
data[-1] = data[-1].strip(" ")
data.append("")
data[-1] = data[-1].strip(" ")
return data | f83021dd416e3a959996a16bb8d0a0e7352a471f | 2,147 |
def SizeArray(input_matrix):
"""
Return the size of an array
"""
nrows=input_matrix.shape[0]
ncolumns=input_matrix.shape[1]
return nrows,ncolumns | 3ac45e126c1fea5a70d9d7b35e967896c5d3be0b | 2,148 |
import logging
def handle_exceptions(func):
"""Exception handler helper function."""
logging.basicConfig(level = logging.INFO)
def wrapper_func(*args, **kwargs):
try:
return func(*args, **kwargs)
except Exception as e:
logging.error(f'{func.__name__} raised an error: {e}')#, exc_info = True)
return None
return wrapper_func | 2d5c428e65cfb823d1afbf2d2c77f98b8722d685 | 2,149 |
import math
def keyPosition_to_keyIndex(key_position: int, key: int) -> int:
"""
Converts a key position to the index of the key the note belongs to
Parameters
----
key_position : int
-> the key position
key : int
-> the total number of keys; enter 4 for 4K
Returns
------
int
-> the key index, a number between 0 and key - 1
"""
return math.floor(key_position * key / 512) | e6edcc1711a283336da046e1f8f174cc7ff87760 | 2,150 |
def return_galo_tarsilo(message):
"""Middle function for returning "gaucho" vídeo.
Parameters
----------
message : telebot.types.Message
The message object.
Returns
-------
msg : str
URL of the "gaucho" video on YouTube.
"""
return 'https://www.youtube.com/watch?v=MVYEwZFixJ8' | 58307b763d139dc38220b9a93af15644ccd32959 | 2,151 |
def preimage_func(f, x):
"""Pre-image a function at a set of input points.
Parameters
----------
f : typing.Callable
The function we would like to pre-image. The output type must be hashable.
x : typing.Iterable
Input points we would like to evaluate `f`. `x` must be of a type acceptable by `f`.
Returns
-------
D : dict(object, list(object))
This dictionary maps the output of `f` to the list of `x` values that produce it.
"""
D = {}
for xx in x:
D.setdefault(f(xx), []).append(xx)
return D | 6ca0496aff52cff1ce07e327f845df4735e3266a | 2,152 |
from typing import Dict
def load_extract(context, extract: Dict) -> str:
"""
Upload extract to Google Cloud Storage.
Return GCS file path of uploaded file.
"""
return context.resources.data_lake.upload_df(
folder_name="nwea_map",
file_name=extract["filename"],
df=extract["value"]
) | c9d5fedf6f2adcb871abf4d9cead057b0627267a | 2,153 |