content | sha1 | id
---|---|---
def minimal_product_data(setup_data):
"""Valid product data (only required fields)"""
return {
'name': 'Bar',
'rating': .5,
'brand_id': 1,
'categories_ids': [1],
'items_in_stock': 111,
} | ecf027704ea8533d71468527335201a021d8ae4f | 5,036 |
def flatten_column_array(df, columns, separator="|"):
    """Flatten a column of lists of JSON objects into a separator-joined
    string of their "name" fields: List column -> String column"""
    df[columns] = (
        df[columns].applymap(lambda x: separator.join(
            [str(json_nested["name"]) for json_nested in x]))
    )
    return df | 770b519a5b086d872e4bd16bc92663f693453745 | 5,037 |
import json
def to_pretty_json(obj):
"""Encode to pretty-looking JSON string"""
return json.dumps(obj, sort_keys=False,
indent=4, separators=(',', ': ')) | b325c4e6e150e089da1d9027299831bd1576e57f | 5,039 |
def parse_access_token(request):
    """Get request object and parse access token"""
    try:
        auth_header = request.headers.get('Authorization')
        return auth_header.split(" ")[1]
    except (AttributeError, IndexError):
        # missing or malformed Authorization header
        return None | a51d51d83cba5fc8e8eb7b9a9147a0219e2bcb20 | 5,040 |
def text_filter(sentence: str) -> str:
    """
    Filter out everything that is not a Chinese character, a listed punctuation mark, or a digit.
    :param sentence: input text
    :return: filtered text
    """
    line = sentence.replace('\n', '。')
    # keep only Chinese characters, the listed punctuation marks, and digits
    linelist = [word for word in line if
                word >= u'\u4e00' and word <= u'\u9fa5' or word in [',', '。', '?', '!',
                                                                    ':'] or word.isdigit()]
    return ''.join(linelist) | 9c0949b2e9b374f1aa5392b5a4c215ebff21171b | 5,041 |
def append_id(endpoint, _id):
"""
append '_id' to endpoint if provided
"""
if _id is not None:
return '/'.join([endpoint.rstrip('/'), _id])
return endpoint | 60586a70bc8b9c9b10c1d54f6810c4528c5c0dec | 5,044 |
def get_host_finding_vulnerabilities_hr(vulnerabilities):
"""
Prepare human readable json for "risksense-get-host-finding-detail" command.
Including vulnerabilities details.
:param vulnerabilities: vulnerabilities details from response.
:return: list of dict
"""
vulnerabilities_list = [{
'Name': vulnerability.get('cve', ''),
'V2/Score': vulnerability.get('baseScore', ''),
'Threat Count': vulnerability.get('threatCount', ''),
'Attack Vector': vulnerability.get('attackVector', ''),
'Access Complexity': vulnerability.get('accessComplexity', ''),
'Authentication': vulnerability.get('authentication', '')
} for vulnerability in vulnerabilities]
# To present human readable horizontally
if len(vulnerabilities) == 1:
vulnerabilities_list.append({})
return vulnerabilities_list | 8f0689441f2fef41bbd5da91c802dfb8baa2b979 | 5,046 |
def summer_69(arr):
"""
Return the sum of the numbers in the array,
except ignore sections of numbers starting
with a 6 and extending to the next 9 (every 6 will be followed by at least one 9).
Return 0 for no numbers.
:param arr: list of integers
:return: int
"""
    get_result = 0
    add = True
    for num in arr:
        if add:
            if num != 6:
                get_result += num
            else:
                add = False
        elif num == 9:
            add = True
    return get_result | d155a739afe131025b654002bebb51b25325bd1e | 5,047 |
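A quick sanity check of the skip-from-6-to-9 behavior (illustrative usage, assuming summer_69 above is in scope):

assert summer_69([1, 3, 5]) == 9
assert summer_69([4, 5, 6, 7, 8, 9, 11]) == 20  # 6..9 section skipped; 9 itself not counted
assert summer_69([2, 1, 6, 9, 11]) == 14
assert summer_69([]) == 0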
def get_notebook_title(nb_json, default=None):
"""Determine a suitable title for the notebook.
This will return the text of the first header cell.
If that does not exist, it will return the default.
"""
cells = nb_json['cells']
for cell in cells:
if cell['cell_type'] == 'heading':
return cell['source']
return default | 4a20fe9890371ab107d0194e791c6faf9901d714 | 5,048 |
import os
import sys
def get_local_server_dir(subdir = None):
"""
Get the directory at the root of the venv.
:param subdir:
:return:
"""
figures_dir = os.path.abspath(os.path.join(sys.executable, '..', '..', '..'))
if subdir is not None:
figures_dir = os.path.join(figures_dir, subdir)
return figures_dir | 1e540157786cd0ea2ffa9bd1fabdb3d48bcb37f5 | 5,049 |
def is_valid_password_1(password):
"""
>>> is_valid_password_1("111111")
True
>>> is_valid_password_1("223450")
False
>>> is_valid_password_1("123789")
False
"""
has_double = any(password[c] == password[c+1] for c in range(len(password)-1))
is_ascending = all(password[c] <= password[c+1] for c in range(len(password)-1))
return has_double and is_ascending | 8544e15a7d50c025073a3ac51b9f5b8809341d2e | 5,050 |
def embed(tokenizer, text):
    """
    Embeds a text sequence using a BERT tokenizer
    :param tokenizer: BERT tokenizer used to split and encode the text
    :param text: text to be embedded
    :return: embedded sequence (text -> tokens -> ids)
    """
    return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(text)) | 453d411d9c460dfc28cb54c7a6a807290905bed3 | 5,051 |
import torch
def unzip(tensor_list):
    """unzip the tensor tuple list
    Args:
        tensor_list: contains tuples of segmented tensors
    """
    T, loss = zip(*tensor_list)
    T = torch.cat(T)
    mean_loss = torch.cat(loss).mean()
    return T, mean_loss | 5ed656aa8221c7bc5bd8a43b80fe0efd07d4df24 | 5,053 |
def is_collection(obj):
"""
Check if a object is iterable.
:return: Result of check.
:rtype: bool
"""
return hasattr(obj, '__iter__') and not isinstance(obj, str) | 70fa0262ea7bf91a202aade2a1151d467001071e | 5,054 |
import hashlib
def file_md5(fpath):
"""Return the MD5 digest for the given file"""
with open(fpath,'rb') as f:
m = hashlib.md5()
while True:
s = f.read(4096)
if not s:
break
m.update(s)
return m.hexdigest() | 40b355b9a628d286bf86b5199fd7e2a8bea354de | 5,055 |
import os
def strip_path(full_path):
"""Returns the filename part of full_path with any directory path removed.
:meta private:
"""
return os.path.basename(full_path) | 327736cb77d9aa409a5790efd51895318d970382 | 5,056 |
def formatUs(time):
"""Format human readable time (input in us)."""
if time < 1000:
return f"{time:.2f} us"
time = time / 1000
if time < 1000:
return f"{time:.2f} ms"
time = time / 1000
return f"{time:.2f} s" | 7546db60e3977e07dbbbad0a3ab767865840c2e3 | 5,058 |
def parse_network_info(net_bond, response_json):
"""
Build the network info
"""
out_dict = {}
ip_list = []
node_count = 0
# Build individual node information
for node_result in response_json['result']['nodes']:
for node in response_json['result']['nodes']:
if node['nodeID'] == node_result['nodeID']:
node_id = str(node_result['nodeID'])
n_id = "Node ID " + node_id
net_result = node['result']['network'][net_bond]
bond_addr = net_result['address']
bond_mask = net_result['netmask']
bond_gateway = net_result['gateway']
bond_mode = net_result['bond-mode']
bond_mtu = net_result['mtu']
bond_speed = net_result['linkSpeed']
name_servers = net_result['dns-nameservers']
search_domains = net_result['dns-search']
out_dict['------' + n_id + ' ------'] = \
'--------------------------'
out_dict[n_id + ' Bond name'] = net_bond
out_dict[n_id + ' Address'] = bond_addr
out_dict[n_id + ' Netmask'] = bond_mask
out_dict[n_id + ' Gateway'] = bond_gateway
out_dict[n_id + ' Bond mode'] = bond_mode
out_dict[n_id + ' MTU'] = bond_mtu
out_dict[n_id + ' Link speed'] = bond_speed
if net_bond == 'Bond1G':
out_dict[n_id + ' DNS servers'] = name_servers
out_dict[n_id + ' DNS search'] = search_domains
ip_list.append(bond_addr)
node_count = node_count + 1
if net_bond != 'Bond10G':
return out_dict, ip_list
else:
return out_dict | 2c83aa72d6ee0195a42339546d1fded84f85680f | 5,062 |
def non_numeric(string: str) -> str:
""" Removes all numbers from the string """
return ''.join(letter for letter in string if not letter.isdigit()) | fe16297c4cf1b144fb583986a5c01ea02920787e | 5,063 |
def shiftField(field, dz):
"""Shifts the z-coordinate of the field by dz"""
for f in field:
if f.ID == 'Polar Data':
f.set_RPhiZ(f.r, f.phi, f.z + dz)
elif f.ID == 'Cartesian Data':
f.set_XYZ(f.x, f.y, f.z + dz)
return field | c3c592356dc21688049a94291d075879a12012ee | 5,064 |
def get_clinical_cup():
"""
Returns tuple with clinical cup description
"""
return ("8", "2", "M", "01", 25) | fac133ea74fbe30b50e551fdd7cdce349cc02a3a | 5,065 |
def gen_order_history_sequence(uid, history_grouped, has_history_flag):
    """ Sequence built from the user's order history """
    # activity records covering 311 days
    sequence = ['0'] * 311
    if has_history_flag == 0:
        return sequence
    df = history_grouped[uid]
    for i in df['days_from_now']:
        sequence[i] = str(df[df['days_from_now'] == i].shape[0])
    return sequence | 9f9e93549ea4c35971f87957b74e44e258d79d49 | 5,066 |
def is_name_valid(name: str, rules: list) -> bool:
""" Determine whether a name corresponds to a named rule. """
for rule in rules:
if rule.name == name:
return True
return False | 41e9f88d86a078ca6386f1d0d6b7123233c819b9 | 5,068 |
def chars_count(word: str):
"""
:param word: string to count the occurrences of a character symbol for.
:return: a dictionary mapping each character found in word to the number of times it appears in it.
"""
res = dict()
for c in word:
res[c] = res.get(c, 0) + 1
return res | 30c27b23c04909a65264247d068e9e2c695c6ecc | 5,071 |
def t90_from_t68(t68):
"""
ITS-90 temperature from IPTS-68 temperature
This conversion should be applied to all in-situ
data collected between 1/1/1968 and 31/12/1989.
"""
return t68 / 1.00024 | a2d8c7ccc0797d47fa8f732bdb61c1ec1e15700e | 5,073 |
def _pad(
s: str,
bs: int,
) -> str:
"""Pads a string so its length is a multiple of a specified block size.
:param s: The string that is to be padded
:type s: str
:param bs: The block size
:type bs: int
:returns: The initial string, padded to have a length that is a multiple of the specified block size
:rtype: str
"""
number_of_bytes_to_pad = bs - len(s) % bs
ascii_string = chr(number_of_bytes_to_pad)
padding_str = number_of_bytes_to_pad * ascii_string
return s + padding_str | 1da441d51c57da688ebcf46b7a30feb36cd007fe | 5,074 |
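A short demonstration of the padding behavior (PKCS#7-style, but over str rather than bytes):

padded = _pad("hello", 8)
assert padded == "hello" + chr(3) * 3   # 3 characters of padding, each chr(3)
assert len(_pad("12345678", 8)) == 16   # a full extra block when already aligned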
import json
def get_json(headers) -> str:
"""Construct a str formatted like JSON"""
body: dict = {}
for key, value in headers.items():
body[key] = value
return json.dumps(body, indent=2) | 8471f044ae986acd2173d5e9be26c110ee1b1976 | 5,075 |
def Dir(obj):
"""As the standard dir, but also listup fields of COM object
Create COM object with [win32com.client.gencache.EnsureDispatch]
for early-binding to get what methods and params are available.
"""
keys = dir(obj)
try:
## if hasattr(obj, '_prop_map_get_'):
## keys += obj._prop_map_get_.keys()
if hasattr(obj, '_dispobj_'):
keys += dir(obj._dispobj_)
finally:
return keys | 8abc62fbe09e953fb171626a888838e21346ad9e | 5,076 |
def PH2_Calc(KH2, tH2, Kr, I, qH2):
"""
Calculate PH2.
:param KH2: hydrogen valve constant [kmol.s^(-1).atm^(-1)]
:type KH2 : float
:param tH2: hydrogen time constant [s]
:type tH2 : float
:param Kr: modeling constant [kmol.s^(-1).A^(-1)]
:type Kr : float
:param I: cell load current [A]
:type I : float
:param qH2: molar flow of hydrogen [kmol.s^(-1)]
:type qH2 : float
:return: PH2 [atm] as float
"""
try:
result = ((1 / KH2) / (1 + tH2)) * (qH2 - 2 * Kr * I)
return result
except (TypeError, ZeroDivisionError):
print(
"[Error] PH2 Calculation Failed (KH2:%s, tH2:%s, Kr:%s, I:%s, qH2:%s)" %
(str(KH2), str(tH2), str(Kr), str(I), str(qH2))) | fe69353bfdde4f301439b89f9946782457d07645 | 5,077 |
def getConcentricCell(cellNum, matNum, density, innerSurface, outerSurface, universe, comment):
"""Create a cell which has multiple components inside a cell."""
    uCard = ''
    if isinstance(universe, int):
        uCard = 'u=' + str(universe)
    if isinstance(innerSurface, list):
newInnerSurface = ''
i = 1
for surface in innerSurface:
if i % 5 == 0:
newInnerSurface += ' {}\n '.format(surface)
else:
newInnerSurface += ' {}'.format(surface)
i += 1
innerSurface = newInnerSurface
cellCard = "{} {} {} {} -{} {} imp:n=1 {}".format(cellNum, matNum, round(density, 5), innerSurface, outerSurface,
uCard, comment)
return cellCard | f0e8af3210774500eac0fde195896f3b85473e3f | 5,078 |
def board_str(board):
"""
String representation of the board. Unicode character for the piece,
1 for threat zone and 0 for empty zone.
"""
mat = ''
for row in board:
for squ in row:
if squ > 1:
mat += '%s ' % chr(squ)
else:
mat += '. '
mat += '\n'
return mat | 769d846c5b03c8b75145e3b81cab17ed7331fbbf | 5,080 |
def build_pubmed_url(pubmed_id) -> str:
"""
Generates a Pubmed URL from a Pubmed ID
:param pubmed_id: Pubmed ID to concatenate to Pubmed URL
:return: Pubmed URL
"""
return "https://pubmed.ncbi.nlm.nih.gov/" + str(pubmed_id) | 5794fbec75de0451547d6f0570bb89964026c394 | 5,081 |
def choose(n,r):
"""
number of combinations of n things taken r at a time (order unimportant)
"""
if (n < r):
return 0
if (n == r):
return 1
s = min(r, (n - r))
t = n
a = n-1
b = 2
while b <= s:
t = (t*a)//b
a -= 1
b += 1
return t | 5852054f1a6381278039b0ec2184d0887e2b1d2b | 5,083 |
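Example values, matching the usual binomial coefficients:

assert choose(5, 2) == 10
assert choose(10, 3) == 120
assert choose(4, 4) == 1
assert choose(3, 5) == 0  # cannot take more items than available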
import os
def validate_prmtop(prmtop, target_dir=None, override=False):
"""
Check that file exists and create a symlink if it doesn't have a
prmtop extension (often *.top is used but mdtraj cant't detect type
with ambiguous extensions).
Parameters
----------
prmtop : str
Path to supposed prmtop file
target_dir : str
Directory in which to create symlink if required. If None the symlink will
be created in the same directory.
override: bool
Override possible already existing file with .prmtop extension. Default is false
Returns
-------
Path
Location of verified prmtop (with potentially edited filename)
"""
    if not os.path.isfile(prmtop):
        raise IOError('prmtop file not found: {}'.format(prmtop))
    _, ext = os.path.splitext(prmtop)
    # os.path.splitext keeps the dot; the original 'is' compared identity, not equality
    if ext == '.prmtop':
        return prmtop
target_dir = target_dir or os.path.dirname(os.path.abspath(prmtop))
new_prmtop = os.path.join(target_dir, os.path.basename(prmtop) + '.prmtop')
if os.path.islink(new_prmtop) and override:
os.unlink(new_prmtop)
os.symlink(os.path.abspath(prmtop), os.path.abspath(new_prmtop))
return new_prmtop | 08763ed68a822069274177bc7ecb27521ec231b0 | 5,087 |
def sorted_chromosome(all_samples):
"""
sorted_chromosome(AllSamples) -> list
:return: list of chromosome found in all samples
"""
sorted_chromosome_list = sorted(all_samples.chr_list.keys())
print(sorted_chromosome_list)
return sorted_chromosome_list | c1e49ac974e16c7f9b69581442186c3efc23ef70 | 5,088 |
def adjust_bb_size(bounding_box, factor, resample=False):
"""Modifies the bounding box dimensions according to a given factor.
Args:
bounding_box (list or tuple): Coordinates of bounding box (x_min, x_max, y_min, y_max, z_min, z_max).
factor (list or tuple): Multiplicative factor for each dimension (list or tuple of length 3).
resample (bool): Boolean indicating if this resize is for resampling.
Returns:
list: New coordinates (x_min, x_max, y_min, y_max, z_min, z_max).
"""
coord = []
for i in range(len(bounding_box) // 2):
d_min, d_max = bounding_box[2 * i: (2 * i) + 2]
if resample:
d_min, d_max = d_min * factor[i], d_max * factor[i]
dim_len = d_max - d_min
else:
dim_len = (d_max - d_min) * factor[i]
# new min and max coordinates
min_coord = d_min - (dim_len - (d_max - d_min)) // 2
coord.append(int(round(max(min_coord, 0))))
coord.append(int(coord[-1] + dim_len))
return coord | 93a3c5947cb7c3335421084092dbae8840f8164b | 5,089 |
def unique_count_weight(feature):
"""Normalize count number of unique values relative to length of feature.
Args:
feature: feature/column of pandas dataset
Returns:
Normalized Number of unique values relative to length of feature.
"""
return len(feature.value_counts()) / len(feature) | 0345208cd7d9d4bc2303db377206e5362db6cdde | 5,090 |
import math
def isPrime(n):
"""
check if the input number n is a prime number or not
"""
if n <= 3:
return n > 1
if n % 6 != 1 and n % 6 != 5:
return False
sqrt = math.sqrt(n)
for i in range(5, int(sqrt)+1, 6):
if n % i == 0 or n % (i+2) == 0:
return False
return True | 91da5b13840181d039902e2db3efb8cc09609465 | 5,091 |
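A quick check against the primes below 20 (illustrative):

assert [n for n in range(2, 20) if isPrime(n)] == [2, 3, 5, 7, 11, 13, 17, 19]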
def chunk_sample_text(path: str) -> list:
    """Function to chunk down a given vrt file into pieces separated by <> </> boundaries.
    Assumes that there is one layer (no nested <> </> statements) of text elements to be separated."""
# list for data chunks
data = []
# index to refer to current chunk
i = 0
# index of seen xml elements
xml_seen = 0
with open(path, "r") as myfile:
# iterate .vrt
for line in myfile:
            # if line starts with "<" and xml_seen == 0 we have the first chunk
if line.startswith("<") and xml_seen == 0:
# we have now seen an xml element
xml_seen += 1
# add chunk to list-> chunk is list of three strings:
# chunk[0]: Opening "<>" statement
# chunk[1]: Text contained in chunk, every "\n" replaced with " "
# chunk[2]: Next "<>" statement
data.append(["", "", ""])
data[i][0] += line.replace("\n", " ")
elif line.startswith("<") and xml_seen > 0:
# we've seen another one
xml_seen += 1
# if we encounter a closing statement we end the current chunk
if line.startswith("</"):
data[i][2] = line.replace("\n", " ")
i += 1
data.append(["", "", ""])
# else we encountered another opening xml element and are in a nested environment
# we also start a new chunk but leave the closing statement of the previous one empty
else:
i += 1
data.append(["", "", ""])
data[i][0] = line.replace("\n", " ")
# if we are not on a line with an xml element we can just write the text to the
# text entry (idx 1) for the current chunk, "inter-chunk indexing" should be handled
# by the above case selection
else:
# append line to chunk[1], replacing "\n" with " "
data[i][1] += line.replace("\n", " ")
    # if we appended empty chunks we remove them here
    # (build a new list rather than calling remove() while iterating, which skips elements)
    data = [chunk for chunk in data if any(elem != "" for elem in chunk)]
    return data | 6e6c36db38383283bd6076f0b6b346dcfd608243 | 5,092 |
from datetime import datetime
def get_expiries(body):
"""
:type body: BeautifulSoup
"""
_ex = body.find_all('select', {'id': 'date', 'name': 'date'})
ex = []
for ch in _ex:
for _e in ch:
try:
ex.append(datetime.strptime(_e.text, '%d%b%Y').date())
except ValueError:
pass
except AttributeError:
pass
return ex | 09d7f067aa283ff930151b129378785dcbc17b09 | 5,094 |
def format_args(args):
"""Formats the command line arguments so that they can be logged.
Args:
The args returned from the `config` file.
Returns:
A formatted human readable string representation of the arguments.
"""
formatted_args = "Training Arguments: \n"
args = args.__dict__
for key in args.keys():
formatted_args += "\t > {} : {} \n".format(key, args[key])
return formatted_args | 22d4334daba7cdfd77329f5a6de93a2411f0594d | 5,095 |
def _execute_query(connection, query):
"""Executes the query and returns the result."""
with connection.cursor() as cursor:
cursor.execute(query)
return cursor.fetchall() | 9f71eb650d323f7a5ead3451810a7b9f9d77b4b0 | 5,096 |
def mediaValues(x):
    """
    return the mean of a list
    """
    return sum(x)/len(x) | ab4a436d3383e5df7d8d891c9661eabb0af81ef8 | 5,097 |
def vectorvalued(f):
""" Decorates a distribution function to disable automatic vectorization.
Parameters
----------
f: The function to decorate
Returns
-------
Decorated function
"""
f.already_vectorized = True
return f | cc498fe0731acdbde0c4d9b820a1accb5dc94fea | 5,098 |
import unicodedata
def remove_diacritics(input_str: str) -> str:
"""Remove diacritics and typographical ligatures from the string.
- All diacritics (i.e. accents) will be removed.
- Typographical ligatures (e.g. ffi) are broken into separated characters.
- True linguistic ligatures (e.g. œ) will remain.
- Non-latin scripts will remain.
Args:
input_str (str): The original string with diacritics and ligatures.
Returns:
str: The string without diacritics and typographical ligatures.
"""
nfkd_form = unicodedata.normalize('NFKD', input_str)
return u"".join([c for c in nfkd_form if not unicodedata.combining(c)]) | 23c3e9ce0029704f0012a825460f10f370e3c681 | 5,099 |
def extract_model_field_meta_data(form, attributes_to_extract):
""" Extract meta-data from the data model fields the form is handling. """
if not hasattr(form, 'base_fields'):
raise AttributeError('Form does not have base_fields. Is it a ModelForm?')
meta_data = dict()
for field_name, field_data in form.base_fields.items():
meta_data[field_name] = dict()
for attrib in attributes_to_extract:
meta_data[field_name][attrib] = getattr(field_data, attrib, '')
return meta_data | e41a63935379c5d3310646c79c25a43ad7f6d5fe | 5,100 |
def lambda_plus_mu_elimination(
offspring: list, population: list, lambda_: int):
""" Performs the (λ+μ)-elimination step of the evolutionary algorithm
Args:
offspring (list): List of the offspring
population (list): List of the individuals in a population
lambda_ (int): Number of top lambda_ candidates that will be retained
Returns:
        combined: Top lambda_ candidates that are retained
"""
# combine population and offspring
combined = population + offspring
# sort new population
combined = sorted(combined, key=lambda k: k.fitness, reverse=False)
# pick top lambda candidates
combined = combined[:lambda_]
return combined | d4f55fa621e3f33e2773da81a6cf0b2fc0439ba9 | 5,101 |
def findTargetNode(root, nodeName, l):
"""
    Recursive parsing of the BVH skeletal tree using depth-first
    search to locate the node that has the name of the targeted body part.
Args:
root (object): root node of the BVH skeletal tree
nodeName (string): name of the targeted body part
l (list): empty list
Returns:
list: list containing the node representing the targeted body part
"""
if root.name == nodeName:
l.append(root)
else:
for child in root.children:
findTargetNode(child, nodeName, l)
return l | 81d63c032260b496b29dd2890e32753554b93e1a | 5,102 |
def ddtodms(decLat: float, decLon: float):
""" Converts coord point from decimal degrees to Hddd.mm.ss.sss """
try:
lat = float(decLat)
lon = float(decLon)
except ValueError as e:
raise e
# Get declination
ns = "N" if lat >= 0 else "S"
ew = "E" if lon >= 0 else "W"
lat = abs(lat)
lon = abs(lon)
# Floor to get degrees
latD = int(lat)
lonD = int(lon)
# Get minutes
latM = 60*(lat - latD)
lonM = 60*(lon - lonD)
# Get seconds
latS = 60*(latM - int(latM))
lonS = 60*(lonM - int(lonM))
# Assemble output
latOut = f"{ns}{int(latD):03}.{int(latM):02}.{latS:06.3f}"
lonOut = f"{ew}{int(lonD):03}.{int(lonM):02}.{lonS:06.3f}"
return latOut, lonOut | e1d05d5edf274427b42cb88496fe41ddaf58f7fd | 5,103 |
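For example, New York City's coordinates (illustrative; output follows the Hddd.mm.ss.sss convention above):

lat, lon = ddtodms(40.7128, -74.0060)
print(lat, lon)  # N040.42.46.080 W074.00.21.600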
import csv
def read_keywords(fname):
"""Read id file"""
with open(fname, 'r') as f:
reader = csv.reader(f)
header = next(reader)
assert header == ['keyword']
return list(row[0] for row in reader) | 566c1924ae8d4ae7316a2c5e3947170fe23af45d | 5,105 |
def inscribe(mask):
"""Guess the largest axis-aligned rectangle inside mask.
Rectangle must exclude zero values. Assumes zeros are at the
edges, there are no holes, etc. Shrinks the rectangle's most
egregious edge at each iteration.
"""
h, w = mask.shape
i_0, i_1 = 0, h - 1
j_0, j_1 = 0, w - 1
def edge_costs(i_0, i_1, j_0, j_1):
a = mask[i_0, j_0:j_1 + 1].sum()
b = mask[i_1, j_0:j_1 + 1].sum()
c = mask[i_0:i_1 + 1, j_0].sum()
d = mask[i_0:i_1 + 1, j_1].sum()
return a,b,c,d
def area(i_0, i_1, j_0, j_1):
return (i_1 - i_0) * (j_1 - j_0)
coords = [i_0, i_1, j_0, j_1]
while area(*coords) > 0:
costs = edge_costs(*coords)
if sum(costs) == 0:
return coords
worst = costs.index(max(costs))
coords[worst] += 1 if worst in (0, 2) else -1
return | 06042faebedb82dc0044cf2108fae7a3570895e0 | 5,106 |
def vaf_above_or_equal(vaf):
    """
    Return a row filter that is True when the row's variant allele ratio is >= vaf.
    """
    return lambda columns, mapper: float(columns[mapper['Variant_allele_ratio']]) >= vaf | 4b2134d63193699f8ca490a8d7537ba8aaf4c8cf | 5,107 |
def signum(x):
"""
Return -1 if x < 0, 1 if 0 < x, or 0 if x == 0
"""
return (x > 0) - (x < 0) | 59568d4fbf1f5a226528b7f12f8c5011b641bc4e | 5,108 |
def is_closer_to_goal_than(a, b, team_index):
""" Returns true if a is closer than b to goal owned by the given team """
return (a.y < b.y, a.y > b.y)[team_index] | 016cb7f19b2d0046d4f349dbf52da93ca0e9a2cc | 5,110 |
def lower_text(text: str) -> str:
"""Transform all the text to lowercase.
Args:
text : Input text
Returns:
Output text
"""
return text.lower() | 2a657464a014703464ca47eeb77ed6a630535819 | 5,112 |
import base64
def base64url_decode(msg):
"""
Decode a base64 message based on JWT spec, Appendix B.
"Notes on implementing base64url encoding without padding"
"""
rem = len(msg) % 4
if rem:
msg += b'=' * (4 - rem)
return base64.urlsafe_b64decode(msg) | f0f46749ae21ed8166648c52c673eab25f837881 | 5,113 |
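Round-trip sketch (note the function expects bytes, as the b'=' padding implies):

import base64
encoded = base64.urlsafe_b64encode(b"hello world").rstrip(b"=")  # unpadded, per JWT
assert base64url_decode(encoded) == b"hello world"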
def reverse_complement_sequence(sequence, complementary_base_dict):
"""
Finds the reverse complement of a sequence.
Parameters
----------
sequence : str
The sequence to reverse complement.
complementary_base_dict: dict
A dict that maps bases (`str`) to their complementary bases
(`str`).
Returns
-------
str
The reverse complement of the input sequence.
"""
rev_comp_bases = [complementary_base_dict[b] for b in
sequence[::-1]]
return ''.join(rev_comp_bases) | 1130f5b321daf72cdd40704fc8671ba331376ded | 5,115 |
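Usage with a standard DNA complement table (an assumed mapping, not part of the entry):

complements = {'A': 'T', 'T': 'A', 'G': 'C', 'C': 'G'}
assert reverse_complement_sequence("ATGC", complements) == "GCAT"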
import math
def top2daz(north, east, up):
    """Compute azimuth, zenith and distance from a topocentric vector.
    Given a topocentric vector (aka north, east and up components), compute
    the azimuth, zenith angle and distance between the two points.
    Args:
        north (float): the north component (in meters)
        east (float) : the east component (in meters)
        up (float)   : the up component (in meters)
    Returns:
        tuple (floats): a tuple of three floats is returned, as:
                        [distance, azimuth, zenith], where distance is
                        in meters, and azimuth and zenith are in radians.
    """
    distance = math.sqrt(north*north + east*east + up*up)
    a = math.atan2(east, north) % (math.pi*2e0)  # normalized [0, 2pi]
    zenith = math.acos(up/distance)
    return distance, a, zenith | 67a127957b0dc131a6fe5505de05b89871542009 | 5,116 |
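A quick check with a horizontal north-east vector (azimuth pi/4, zenith pi/2):

import math
d, az, zen = top2daz(100.0, 100.0, 0.0)
assert abs(d - math.sqrt(20000.0)) < 1e-9
assert abs(az - math.pi / 4) < 1e-9
assert abs(zen - math.pi / 2) < 1e-9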
import torch
def get_ground_truth_vector(classes : torch.Tensor, n_domains : int,
n_classes : int) -> torch.Tensor:
"""
Get the ground truth vector for the phase where the feature extractor
tries that discriminator cannot distinguish the domain that the sample
comes from.
Args:
classes (torch.Tensor): Class labels.
n_domains (int): Number of domains.
n_classes (int): Number of classes.
Returns:
torch.Tensor: Tensor containing the ground truth for each sample.
"""
# Create the ground truth tensor
total_size = n_domains * n_classes
gt = torch.zeros(len(classes), total_size)
# Value to be placed in the corresponding categories and domains positions
# It is uniform so the discriminator cannot distinguish which domain the
# sample comes from
non_zero_value = 1 / n_domains
for row in range(len(classes)):
# The indices are the corresponding position for each class into each
# domain
non_zero_indices = [i+classes[row] for i in range(0, total_size, n_classes)]
gt[row, non_zero_indices] = non_zero_value
return gt | 429c0c69d85572073aab66372d651d4981324e2b | 5,117 |
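Illustrative call (requires PyTorch; the values follow from the uniform 1/n_domains fill):

import torch
classes = torch.tensor([0, 2])
gt = get_ground_truth_vector(classes, n_domains=2, n_classes=3)
# class 0 -> positions 0 and 3; class 2 -> positions 2 and 5; each gets 1/2:
# tensor([[0.5, 0.0, 0.0, 0.5, 0.0, 0.0],
#         [0.0, 0.0, 0.5, 0.0, 0.0, 0.5]])
print(gt)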
from numpy import array, log10
def get_midpoints(ar, mode='linear'):
"""
Returns the midpoints of an array; i.e. if you have the left edge of a set
of bins and want the middle, this will do that for you.
:param ar:
The array or list of length L to find the midpoints of
:param mode:
Whether to find the midpoint in logspace ('log') or linear
space ('linear')
:returns:
An array of the midpoints of length L - 1
"""
_valid_modes = ['linear', 'log']
if mode not in _valid_modes:
raise TypeError("Unrecognize midpoint method; must be one of {}}.".format(
_valid_modes))
if mode == 'linear':
lst = [ar[i] + (ar[i+1]-ar[i])/2 for i in range(len(ar))
if i != len(ar) - 1]
return array(lst)
elif mode == 'log':
lst = [10**(log10(ar[i]) + (log10(ar[i+1])-log10(ar[i]))/2)
for i in range(len(ar)) if i != len(ar) - 1]
return array(lst)
else:
raise TypeError("How did I get here? provided mode = {}".format(mode)) | 5bb8ccd674d6a1eb71c02ff8577149f495f3bed4 | 5,118 |
import sys
def lines_from_string(string, as_interned=False):
"""
Create a list of file lines from a given string.
Args:
string (str): File string
as_interned (bool): List of "interned" strings (default False)
Returns:
strings (list): File line list
"""
if as_interned:
return [sys.intern(line) for line in string.splitlines()]
return string.splitlines() | 32b3cc6d93a658f93e22450b9795bafad6b7c293 | 5,121 |
import re
def extract_words(path_to_file):
"""
Takes a path to a file and returns the non-stop
words, after properly removing nonalphanumeric chars
and normalizing for lower case
"""
words = re.findall('[a-z]{2,}', open(path_to_file).read().lower())
stopwords = set(open('../stop_words.txt').read().split(','))
return [w for w in words if w not in stopwords] | 4f5031693d10542c31a3055341d1794d0d7bf4f0 | 5,122 |
def get_activity_id(activity_name):
    """Get activity enum from its name."""
    activity_ids = {
        'STAND': 0,
        'SIT': 1,
        'WALK': 2,
        'RUN': 3,
        'WALK_UPSTAIRS': 4,
        'WALK_DOWNSTAIRS': 5,
        'LIE': 6,
        'BIKE': 7,
        'DRIVE': 8,
        'RIDE': 9,
    }
    # unknown activity names map to 10, matching the original fallback
    return activity_ids.get(activity_name, 10) | b5e18063b5448c3f57636daa732dfa2a4f07d801 | 5,124 |
import socket
def is_ipv4(v):
"""
Check value is valid IPv4 address
>>> is_ipv4("192.168.0.1")
True
>>> is_ipv4("192.168.0")
False
>>> is_ipv4("192.168.0.1.1")
False
>>> is_ipv4("192.168.1.256")
False
>>> is_ipv4("192.168.a.250")
False
>>> is_ipv4("11.24.0.09")
False
"""
X = v.split(".")
if len(X) != 4:
return False
try:
return len([x for x in X if 0 <= int(x) <= 255]) == 4 and bool(socket.inet_aton(v))
except Exception:
return False | e81fae435a8e59dbae9ab1805941ce3ba9909ba9 | 5,125 |
def permitted_info_attributes(info_layer_name, permissions):
"""Get permitted attributes for a feature info result layer.
:param str info_layer_name: Layer name from feature info result
:param obj permissions: OGC service permissions
"""
# get WMS layer name for info result layer
wms_layer_name = permissions.get('feature_info_aliases', {}) \
.get(info_layer_name, info_layer_name)
# return permitted attributes for layer
return permissions['layers'].get(wms_layer_name, {}) | 57fcb05e5cd1c7e223c163929e78ecdb00d1ad09 | 5,126 |
from typing import List
from typing import Tuple
import os
def update_SMILES_list(
ID_SMILES_tuples: List[Tuple[str, str]],
path: str,
) -> Tuple[List[str], List[str]]:
"""
This function takes a List of tuples containing IDs (str) and SMILES (str)
and the output path (str). It checks which of the corresponding structures
already have been depicted and returns two lists of IDs and SMILES of
undepicted structures.
This way, we don't have to start from scratch if the process was aborted.
"""
# Get list of IDs of already depicted structures
already_processed = [img_name.split("_")[0] for img_name in os.listdir(path)]
# Get list of SMILES of not-yet depicted structures
updated_ID_SMILES_tuples = [
(tup[0], tup[1]) for tup in ID_SMILES_tuples if tup[0] not in already_processed
]
if len(updated_ID_SMILES_tuples) != 0:
IDs, SMILES = zip(*updated_ID_SMILES_tuples)
else:
IDs = []
SMILES = []
return IDs, SMILES | 5517654608912f7ad1735dc5dd40159bdce065c1 | 5,129 |
def is_metageneration_specified(query_params):
"""Return True if if_metageneration_match is specified."""
if_metageneration_match = query_params.get("ifMetagenerationMatch") is not None
return if_metageneration_match | d37c513f3356aef104e7f6db909d5245e2664c90 | 5,130 |
import six
def qualified_name(obj):
"""Return the qualified name (e.g. package.module.Type) for the given object."""
try:
module = obj.__module__
qualname = obj.__qualname__ if six.PY3 else obj.__name__
except AttributeError:
type_ = type(obj)
module = type_.__module__
qualname = type_.__qualname__ if six.PY3 else type_.__name__
return qualname if module in ('typing', 'builtins') else '{}.{}'.format(module, qualname) | 02444c19d200650de8dc9e2214f7788fca0befd2 | 5,132 |
def ConvertToCamelCase(input_str):
    """Converts the input string from 'unix_hacker' style to 'CamelCase' style."""
    return ''.join(x[:1].upper() + x[1:] for x in input_str.split('_')) | 8070516c61768ea097eccc62633fc6dea2fa7096 | 5,133 |
def parse_git_version(git) :
"""Parses the version number for git.
Keyword arguments:
git - The result of querying the version from git.
"""
return git.split()[2] | f4af03f0fad333ab87962160ed0ebf5dcbeea22a | 5,136 |
def bag(n, c, w, v):
    """
    Test data:
        n = 6  number of items,
        c = 10 maximum weight the bag can hold,
        w = [2, 2, 3, 1, 5, 2] weight of each item,
        v = [2, 3, 1, 5, 4, 3] value of each item
    """
    # zero-fill the DP table to represent the initial state
    value = [[0 for j in range(c + 1)] for i in range(n + 1)]
    for i in range(1, n + 1):
        for j in range(1, c + 1):
            value[i][j] = value[i - 1][j]
            # if the bag can carry the current item, consider swapping it in
            if j >= w[i - 1]:
                value[i][j] = max(value[i-1][j], value[i - 1][j - w[i - 1]] + v[i - 1])
    for x in value:
        print(x)
    return value | 27dd9c5f9367afe865686c8f68853bc966bcdaa6 | 5,137 |
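Running the documented test data through the DP (the optimum keeps the items with values 5, 4, 3, 3):

n, c = 6, 10
w = [2, 2, 3, 1, 5, 2]
v = [2, 3, 1, 5, 4, 3]
table = bag(n, c, w, v)
assert table[n][c] == 15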
def get_output_shape(tensor_shape, channel_axis):
"""
Returns shape vector with the number of channels in the given channel_axis location and 1 at all other locations.
Args:
tensor_shape: A shape vector of a tensor.
channel_axis: Output channel index.
Returns: A shape vector of a tensor.
"""
    return [-1 if i == channel_axis else 1 for i in range(len(tensor_shape))] | 7c7058c2da9cb5a4cdb88377ece4c2509727894a | 5,138 |
def get_color_pattern(input_word: str, solution: str) -> str:
"""
Given an input word and a solution, generates the resulting
color pattern.
"""
color_pattern = [0 for _ in range(5)]
sub_solution = list(solution)
for index, letter in enumerate(list(input_word)):
if letter == solution[index]:
color_pattern[index] = 2
sub_solution[index] = "_"
for index, letter in enumerate(list(input_word)):
if letter in sub_solution and color_pattern[index] != 2:
color_pattern[index] = 1
sub_solution[sub_solution.index(letter)] = "_"
color_pattern = "".join([str(c) for c in color_pattern])
return color_pattern | a8746a5854067e27e0aefe451c7b950dd9848f50 | 5,140 |
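Wordle-style examples ("2" = right letter and spot, "1" = right letter wrong spot, "0" = absent):

assert get_color_pattern("crane", "crate") == "22202"
assert get_color_pattern("robot", "boost") == "02112"  # duplicate letters are consumed once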
import inspect
def getargspec(obj):
"""Get the names and default values of a function's arguments.
A tuple of four things is returned: (args, varargs, varkw, defaults).
'args' is a list of the argument names (it may contain nested lists).
'varargs' and 'varkw' are the names of the * and ** arguments or None.
'defaults' is an n-tuple of the default values of the last n arguments.
Modified version of inspect.getargspec from the Python Standard
Library."""
if inspect.isfunction(obj):
func_obj = obj
elif inspect.ismethod(obj):
func_obj = obj.__func__
else:
raise TypeError('arg is not a Python function')
args, varargs, varkw = inspect.getargs(func_obj.__code__)
return args, varargs, varkw, func_obj.__defaults__ | bcfe75de95ccf22bcefdba52c8556f0722dbbcb7 | 5,141 |
from pathlib import Path
def build_name(prefix: str, source_ind: int, name: Path) -> str:
"""Build a package name from the index and path."""
if name.name.casefold() == '__init__.py':
name = name.parent
name = name.with_suffix('')
dotted = str(name).replace('\\', '.').replace('/', '.')
return f'{prefix}_{source_ind:02x}.{dotted}' | d777b1b875ff6ab6f3228538a9e4c1cfa3c398a0 | 5,143 |
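Example (the index is rendered in two-digit hex, and __init__.py collapses to its package):

from pathlib import Path
assert build_name('pkg', 3, Path('foo/bar/__init__.py')) == 'pkg_03.foo.bar'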
import re
def _format_param_value(value_repr):
"""
Format a parameter value for displaying it as test output. The
values are string obtained via Python repr.
"""
regexs = ["^'(.+)'$",
"^u'(.+)'$",
"^<class '(.+)'>$"]
for regex in regexs:
m = re.match(regex, value_repr)
if m and m.group(1).strip():
return m.group(1)
return value_repr | 87d881a3159c18f56dd2bb1c5556f3e70d27c1bc | 5,144 |
from typing import Dict
from typing import Any
def get_stratum_alert_data(threshold: float, notification_channel: int) -> Dict[str, Any]:
""" Gets alert config for when stratum goes below given threshold
:param threshold: Below this value, grafana should send an alert
    :type threshold: float
:param notification_channel: Id of the notification channel the alert should be sent to
:type notification_channel: int
:return: Data of the alert
:rtype: Dict
"""
return {
"conditions": [
{
"evaluator": {
"params": [threshold],
"type": "lt"
},
"operator": {"type": "and"},
"query": {
"params": ["A", "5m", "now"]
},
"reducer": {
"params": [],
"type": "avg"
},
"type": "query"
}
],
"executionErrorState": "alerting",
"frequency": "60s",
"handler": 1,
"name": "Estimated hash rate on stratum alert",
"noDataState": "alerting",
"notifications": [{"id": notification_channel}]
} | bae4aafe18286eeb5b0f59ad87804a19936ec182 | 5,146 |
def ewma(current, previous, weight):
"""Exponentially weighted moving average: z = w*z + (1-w)*z_1"""
return weight * current + ((1.0 - weight) * previous) | 5a678b51618ebd445db0864d9fa72f63904e0e94 | 5,147 |
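Feeding a constant signal shows the geometric convergence (illustrative):

z = 0.0
for x in [1.0, 1.0, 1.0]:
    z = ewma(x, z, weight=0.5)
assert z == 0.875  # 0.5, then 0.75, then 0.875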
from bs4 import BeautifulSoup
def get_post_mapping(content):
"""This function extracts blog post title and url from response object
Args:
content (request.content): String content returned from requests.get
Returns:
list: a list of dictionaries with keys title and url
"""
post_detail_list = []
post_soup = BeautifulSoup(content,"lxml")
h3_content = post_soup.find_all("h3")
for h3 in h3_content:
post_detail_list.append(
{'title':h3.a.get_text(),'url':h3.a.attrs.get('href')}
)
return post_detail_list | 0b3b31e9d8c5cf0a3950dc33cb2b8958a12f47d4 | 5,150 |
import os
def get_output_foldername(start_year: int, end_year: int) -> str:
""" Create an output folder and return its name """
folder_name = "app/data/{}-{}".format(start_year, end_year)
if not os.path.exists(folder_name):
os.mkdir(folder_name)
return folder_name | af098e90b858ca35c834e04af2cc293167b722d5 | 5,151 |
def len_iter(iterator):
"""Count items in an iterator"""
    return sum(1 for _ in iterator) | 1d828b150945cc4016cdcb067f65753a70d16656 | 5,152 |
import pprint
def main(database):
"""
:type database: db.BlogDB
"""
sqls = []
sql_create_articles = 'CREATE TABLE {} '.format(database.table_name['articles']) + \
'(id INTEGER PRIMARY KEY AUTOINCREMENT, slug CHAR(100) NOT NULL UNIQUE, cat_id INT,' + \
'title NCHAR(100) NOT NULL, md_content TEXT NOT NULL, html_content TEXT NOT NULL, ' + \
'author NCHAR(30) NOT NULL, time TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP)'
sql_create_cat = 'CREATE TABLE {} '.format(database.table_name['category']) + \
'(id INTEGER PRIMARY KEY AUTOINCREMENT, slug CHAR(100) UNIQUE, name NCHAR(100) NOT NULL)'
sqls.append(sql_create_articles)
sqls.append(sql_create_cat)
print('INIT...')
pprint.pprint(sqls)
result = False
conn = database.connect()
for sql in sqls:
try:
conn.execute(sql)
result = True
except Exception as e:
print(e)
conn.rollback()
conn.commit()
conn.close()
return result | d0aa5ea2ad4d3eb7909785f427604e3e0bc7681a | 5,154 |
def read_datafiles(filepath):
    """
    Example function for reading in data from a file.
    This needs to be adjusted for the specific format that
    will work for the project.
    Parameters
    ----------
    filepath : str
        Path of the file to read.
    Returns
    -------
    str
        The raw contents of the file.
    """
    with open(filepath) as read_file:
        filecontents = read_file.read()
    return filecontents | 789feb11cfa62d2fc2f5ac244ae2be3f618aaf2f | 5,155 |
def getPutDeltas(delta, optType):
    """
    delta: array or list of deltas
    optType: array or list of optType "C", "P"
    :return: list of put-equivalent (OTM) deltas: 1 - delta for calls,
             abs(delta) for puts
    """
    # otm_x holds the put-equivalent deltas
    otm_x = []
    for i in range(len(delta)):
        if optType[i] == "C":
            otm_x.append(1-delta[i])
        else:
            otm_x.append(abs(delta[i]))
    return otm_x | a9950bd383f91c49e6c6ff745ab9b83a677ea9e8 | 5,157 |
def validate_file(file):
"""Validate that the file exists and is a proper puzzle file.
Preemptively perform all the checks that are done in the input loop of sudoku_solver.py.
:param file: name of file to validate
:return True if the file passes all checks, False if it fails
"""
try:
open_file = open(file)
file_contents = open_file.read()
puzzle_list = [char for char in file_contents if char.isdigit() or char == '.']
puzzle_string = ''.join(puzzle_list)
if len(puzzle_string) == 81:
clues = [char for char in puzzle_string if char != '.' and char != '0']
num_clues = len(clues)
if num_clues >= 17:
return True
else:
print('{} is an unsolvable puzzle. It has {} clues.\n'
'There are no valid sudoku puzzles with fewer than 17 clues.'.format(file, num_clues))
return False
else:
print('{} in incorrect format.\nSee README.md for accepted puzzle formats.'.format(file))
return False
except OSError:
print('File {} not found.'.format(file))
return False | 31b9a5fa7dc999d0336b69642c549517399686c1 | 5,158 |
def how_many(num):
"""Count the number of digits of `num`."""
if num < 10:
return 1
else:
return 1 + how_many(num // 10) | 6b6f8c2a95dac9f2a097300924e29bbca0bdd55c | 5,160 |
def line_length_check(line):
    """Return True if the line is longer than 79 characters"""
    return len(line) > 79 | 2cde1a2b8f20ebf57c6b54bf108e97715789a51d | 5,161 |
import hashlib
def check_contents(md5, filepath, ignore):
"""
md5 - the md5 sum calculated last time the data was validated as correct
filepath - the location/file where the new data is, this is to be validated
    ignore - a list of substrings; lines containing any of them (dates, timestamps, etc.) are excluded from the comparison
"""
# Open,close, read file and calculate MD5 on its contents
with open(filepath,"r",encoding='utf-8') as file_to_check:
# read contents of the file
data = ""
lines = file_to_check.readlines()
        for line in lines:
            flag = True
            for pattern in ignore:
                if pattern in line:
                    flag = False  # exclude this line (e.g. a date) that would break the md5 comparison
            if flag:
data = data + line + "\n"
#print(data)
# pipe contents of the file through
md5_returned = hashlib.md5(data.encode('utf-8')).hexdigest()
print("Checking Contents Via Hash:")
print("Original: " + md5)
print("Calculated: " + md5_returned)
if md5 == md5_returned:
return True #md5 verified
else:
return False | fb6ae4a3b6600f7df64cf2ccfe24d035c4a98042 | 5,163 |
import re
def dtype_ripper(the_dtype, min_years, max_years):
"""Extract the range of years from the dtype of a structured array.
Args:
the_dtype (list): A list of tuples with each tuple containing two
entries, a column heading string, and a string defining the
data type for that column. Formatted as a numpy dtype list.
min_years (list): The earliest years found in the imported data.
max_years (list): The latest years found in the imported data.
Returns:
Updated lists of minimum and maximum years with the minimum
and maximum found in data_array.
The minimum and maximum years, as integers, that were contained
in the column headings of the dtype definition.
"""
# Strip the dtype into its constituent lists: column names and data formats
colnames, dtypes = zip(*the_dtype)
# Preallocate a list for the years extracted from the column names
year_list = []
# Loop over the list of column names, identify entries that have
# the format specified by a regex, and add the matches to a list
for name in colnames:
year = re.search('^[1|2][0-9]{3}$', name)
if year:
year_list.append(year.group())
# Identify the minimum and maximum years from the list of the years
# in the column names and record them as integers instead of strings
min_yr = int(min(year_list))
max_yr = int(max(year_list))
# Circumvents the action of .append() modifying in place the lists
# passed to the function
new_min_years = min_years + [min_yr]
new_max_years = max_years + [max_yr]
return new_min_years, new_max_years | 1cbcc30cf7760d466187873aa5ea221691b2092d | 5,165 |
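A small sketch with a structured-array dtype containing two year columns:

the_dtype = [('region', 'U20'), ('2015', 'f8'), ('2020', 'f8')]
min_years, max_years = dtype_ripper(the_dtype, [], [])
assert (min_years, max_years) == ([2015], [2020])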
import os
import subprocess
import sys
import getpass
def ssh_cmd(ssh_cfg, command):
"""Returns ssh command."""
try:
binary = os.environ['SSH_BINARY']
except KeyError:
if os.name != 'nt':
binary = subprocess.check_output(
'which ssh', shell=True).decode(sys.stdout.encoding).strip()
else:
raise Exception('SSH binary not provided.')
cmd = [binary]
if ssh_cfg.get('port'):
cmd.extend(['-p', ssh_cfg['port']])
cmd.append('{}@{}'.format(getpass.getuser(), ssh_cfg['host']))
cmd.append(command)
return cmd | f7e110c76e26a462dd9929fc6fa4f2c025a2df44 | 5,166 |
import uuid
def db_entry_generate_id():
""" Generate a new uuid for a new entry """
return str(uuid.uuid4()).lower().replace('-','') | d5e90504a1927623b267082cd228981684c84e8d | 5,167 |
def genome_2_validator(genome_2):
    """
    Conducts various tests to ensure the stability of the Genome 2.0
    """
    standard_gene_length = 27
    def structure_test_gene_lengths():
        """
        Check length requirements for each gene
        """
        gene_anomalies = 0
        for key in genome_2:
            if len(key) != standard_gene_length:
                print("Warning! Key did not meet length requirement:", key)
                gene_anomalies += 1
        if gene_anomalies == 0:
            print("\nGene length verification...... PASSED!")
        else:
            print("\nGene length verification...... Failed! ", gene_anomalies, " anomalies detected")
        return gene_anomalies
    # run the structural test (the inner function was previously defined but never invoked)
    return structure_test_gene_lengths() | 7fe54b51673f3bc71cb8899f9a20b51d28d80957 | 5,168 |
def create_msg(q1,q2,q3):
""" Converts the given configuration into a string of bytes
understood by the robot arm.
Parameters:
q1: The joint angle for the first (waist) axis.
q2: The joint angle for the second (shoulder) axis.
q3: The joint angle for the third (wrist) axis.
Returns:
The string of bytes.
"""
return ('%d,%d,%d\n' % (q1,q2,q3)).encode() | 26f9954a55686c9bf8bd08cc7a9865f3e4e602e3 | 5,169 |
import argparse
def parse_args():
"""Parse command line arguments."""
parser = argparse.ArgumentParser()
parser.add_argument('--task', type=str, required=True)
parser.add_argument('--spacy_model', type=str, default='en_core_web_sm')
parser.add_argument('--omit_answers', action='store_true')
parser.add_argument('--include_stopwords', action='store_true')
parser.add_argument('--lowercase', action='store_true')
parser.add_argument('--k', type=int, default=5)
return parser.parse_args() | 905f9e46d17b45e28afeaf13769434ad75685582 | 5,171 |
def pnorm(x, p):
"""
Returns the L_p norm of vector 'x'.
:param x: The vector.
:param p: The order of the norm.
    :return: The L_p norm of the vector.
"""
result = 0
for index in x:
result += abs(index) ** p
result = result ** (1/p)
return result | 110fea5cbe552f022c163e9dcdeacddd920dbc65 | 5,172 |
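Spot checks against the familiar norms:

assert pnorm([3, 4], 2) == 5.0      # Euclidean (L2)
assert pnorm([1, -2, 3], 1) == 6.0  # Manhattan (L1)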
import json
def load_config(config_file):
    """
    Load a JSON configuration file.
    :param config_file: path to the config file
    :return: parsed configuration
    """
    with open(config_file, encoding='UTF-8') as f:
        return json.load(f) | 85bab8a60e3abb8af56b0ae7483f2afe992d84b4 | 5,173 |
def vehicle_emoji(veh):
"""Maps a vehicle type id to an emoji
:param veh: vehicle type id
:return: vehicle type emoji
"""
if veh == 2:
return u"\U0001F68B"
elif veh == 6:
return u"\U0001f687"
elif veh == 7:
return u"\U000026F4"
elif veh == 12:
return u"\U0001F686"
else:
return u"\U0001F68C" | 8068ce68e0cdf7f220c37247ba2d03c6505a00fe | 5,174 |
import os
def ifFileExists(filePath):
"""
    Checks if the file exists; returns True/False
filePath File Path
"""
return os.path.isfile(filePath) | 2c4d6c332cff980a38d147ad0eafd1d0c3d902fc | 5,175 |
def scroll_down(driver):
"""
This function will simulate the scroll down of the webpage
:param driver: webdriver
:type driver: webdriver
:return: webdriver
"""
# Selenium supports execute JavaScript commands in current window / frame
# get scroll height
last_height = driver.execute_script("return document.body.scrollHeight")
# scroll to the end of the page
driver.execute_script("window.scrollTo(0, {});".format(last_height))
return driver | 7d68201f3a49950e509a7e389394915475ed8c94 | 5,176 |
def parent_path(xpath):
"""
Removes the last element in an xpath, effectively yielding the xpath to the parent element
:param xpath: An xpath with at least one '/'
"""
return xpath[:xpath.rfind('/')] | b435375b9d5e57c6668536ab819f40ae7e169b8e | 5,179 |