content
stringlengths 35
416k
| sha1
stringlengths 40
40
| id
int64 0
710k
|
---|---|---|
def _infer_title(ntbk, strip_title_header=True):
"""Infer a title from notebook metadata.
First looks in metadata['title'] and if nothing is found,
looks for whether the first line of the first cell is an H1
header. Optionally it strips this header from the notebook content.
"""
# First try the notebook metadata, if not found try the first line
title = ntbk.metadata.get('title')
# If the first line of the ontebook is H1 header, assume it's the title.
if title is None:
first_cell_lines = ntbk.cells[0].source.lstrip().split('\n')
if first_cell_lines[0].startswith('# '):
title = first_cell_lines.pop(0).strip('# ')
if strip_title_header is True:
ntbk.cells[0].source = '\n'.join(first_cell_lines)
return title | e8152f0c160d2cb7af66b1a20f4d95d4ea16c703 | 4,349 |
import hashlib
def stable_hash(value):
    """Return a stable integer hash of *value*.

    Unlike the builtin ``hash``, the result does not vary between
    interpreter runs: it is the MD5 digest of ``str(value)`` read as a
    hexadecimal integer.
    """
    digest = hashlib.md5(str(value).encode('utf-8')).hexdigest()
    return int(digest, 16)
def get_recommendations(commands_fields, app_pending_changes):
    """
    :param commands_fields: list of ``[command_name, [field, ...]]`` pairs
    :param app_pending_changes: mapping of field name -> pending change info
    :return: List of object describing command to run
    >>> cmd_fields = [
    ...     ['cmd1', ['f1', 'f2']],
    ...     ['cmd2', ['prop']],
    ... ]
    >>> app_fields = {
    ...     'f2': {'field': 'f2', 'user': 'api', 'updated': '00:00'}
    ... }
    >>> from pprint import pprint
    >>> pprint(get_recommendations(cmd_fields, app_fields))
    [{'command': 'cmd1', 'field': 'f2', 'updated': '00:00', 'user': 'api'}]
    """
    recommendations = []
    for cmd_name, fields in commands_fields:
        # Each command is recommended at most once: stop at the first
        # field with a pending change.
        for field in fields:
            if field in app_pending_changes:
                change = app_pending_changes[field]
                recommendations.append({
                    'command': cmd_name,
                    'field': field,
                    'user': change['user'],
                    'updated': change['updated'],
                })
                break
    return recommendations
import subprocess
def get_git_version():
    """
    Get the version from git.

    Runs ``git describe --tags`` in the current working directory and
    returns its stripped output.

    Returns:
        bytes: the command output (note: NOT decoded to str).

    Raises:
        subprocess.CalledProcessError: if git exits with non-zero status.
        FileNotFoundError: if the git executable is not available.
    """
    return subprocess.check_output('git describe --tags'.split()).strip()
def rename_dict_key(_old_key, _new_key, _dict):
    """
    Return a copy of ``_dict`` with ``_old_key`` renamed to ``_new_key``,
    preserving the original insertion order of all keys.
    """
    renamed = {}
    for key, value in _dict.items():
        renamed[_new_key if key == _old_key else key] = value
    return renamed
import os
import sys
import re
from bs4 import BeautifulSoup
def load_html_file(file_dir):
    """ Uses BeautifulSoup to load an html

    Reads the file as bytes, decodes it to text, replaces runs of spaces
    adjacent to tag brackets with one '!space!' marker per space (so the
    whitespace survives HTML parsing and can be restored later), then
    re-encodes and parses the document with the 'html.parser' backend.

    :param file_dir: path to the HTML file to load
    :return: BeautifulSoup object for the space-marked document
    """
    with open(file_dir, 'rb') as fp:
        data = fp.read()
    # Decode bytes -> str so the regexes below work on text; the version
    # check is historical (always true on Python 3).
    if os.name == 'nt' or sys.version_info[0] == 3:
        data = data.decode(encoding='utf-8', errors='strict')
    # Mark each space that follows a closing '>'.
    data = re.sub(r'(\>)([ ]+)', lambda match: match.group(1) + ('!space!' * len(match.group(2))), data)
    # Mark each space that precedes an opening '<'.
    data = re.sub(r'([ ]+)(\<)', lambda match: ('!space!' * len(match.group(1))) + match.group(2), data)
    # Re-encode; errors='ignore' drops characters utf-8 cannot represent.
    if os.name == 'nt' or sys.version_info[0] == 3:
        data = data.encode('utf-8', 'ignore')
    soup = BeautifulSoup(data, 'html.parser')
    return soup
import re
import collections
def group_files(config_files, group_regex, group_alias="\\1"):
    """group input files by regular expression

    Each list-valued entry of ``config_files`` is replaced (in place) by a
    list of single-key dicts mapping a group name (the ``group_alias``
    expansion of the regex match) to the files in that group. Entries
    where no file matches the regex are left untouched; a mix of matching
    and non-matching files raises ValueError.
    """
    rx = re.compile(group_regex)
    for key, files in list(config_files.items()):
        if not isinstance(files, list):
            continue
        groups = collections.defaultdict(list)
        unmatched = []
        for filename in sorted(files):
            match = rx.search(filename)
            if match is None:
                unmatched.append(filename)
            else:
                groups[match.expand(group_alias)].append(filename)
        if not unmatched:
            config_files[key] = [{name: members}
                                 for name, members in groups.items()]
        elif len(unmatched) != len(files):
            raise ValueError(
                "input files not matching regular expression {}: {}"
                .format(group_regex, str(unmatched)))
    return config_files
import math
def haversine(phi1, lambda1, phi2, lambda2):
    """
    calculate angular great circle distance with haversine formula
    see parameters in spherical_law_of_cosines

    All angles are in radians; the returned value is the central angle.
    """
    sin_half_dphi = math.sin((phi2 - phi1) / 2)
    sin_half_dlambda = math.sin((lambda2 - lambda1) / 2)
    a = sin_half_dphi ** 2 + \
        math.cos(phi1) * math.cos(phi2) * sin_half_dlambda ** 2
    return 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
def _frac_scorer(matched_hs_ions_df, all_hyp_ions_df, N_spectra):
"""Fraction ion observed scorer.
Provides a score based off of the fraction of hypothetical ions that were observed
for a given hypothetical structure.
Parameters
----------
matched_hs_ions_df : pd.DataFrame
Dataframe of observed ions that matched a specific hypothetical structure
all_hyp_ions_df : pd.DataFrame
Dataframe of all possible ions for a given hypothetical structure.
N_spectra : int
Number of spectra provided.
Returns
-------
float
Score for a given hypothetical structure.
"""
# Calculate the number of matched ions observed and total possible
N_matched_hs_ions = matched_hs_ions_df.shape[0]
N_tot_hyp_ions = all_hyp_ions_df.shape[0]
score = N_matched_hs_ions / (N_tot_hyp_ions*N_spectra)
return score | a341b02b7ba64eb3b29032b4fe681267c5d36a00 | 4,367 |
def role_in(roles_allowed):
    """
    Build a permission checker that passes when any role possessed by the
    user appears in ``roles_allowed``.
    """
    def _check_with_authuser(authuser):
        for role in roles_allowed:
            if role in authuser.roles:
                return True
        return False
    return _check_with_authuser
import requests
def get_raw_img(url):
    """
    Download input image from url.

    Streams the response body to the hard-coded path './imgs/img.png'
    (the './imgs' directory must already exist).

    :param url: URL of the image to fetch
    :return: True once the file has been written
    """
    pic = False
    response = requests.get(url, stream=True)
    # Write the body to disk chunk by chunk instead of buffering it all.
    with open('./imgs/img.png', 'wb') as file:
        for chunk in response.iter_content():
            file.write(chunk)
        pic = True
    response.close()
    return pic
def is_in_cell(point:list, corners:list) -> bool:
    """
    Checks if a point is within a cell.
    :param point: Tuple of lat/Y,lon/X-coordinates
    :param corners: List of corner coordinates; assumes corners[0] and
        corners[2] are opposite corners of the cell (TODO confirm order)
    :returns: Boolean whether point is within cell
    :Example:
    """
    y_min, y_max = corners[2][0], corners[0][0]
    x_min, x_max = corners[0][1], corners[2][1]
    return y_min <= point[0] <= y_max and x_min <= point[1] <= x_max
def format_user_id(user_id):
    """
    Wrap a Slack user id in the ``<@...>`` form so Slack renders it as a
    mention tag.

    Args:
        user_id (str): A slack user id
    Returns:
        str: A user id in a Slack tag
    """
    return "<@" + user_id + ">"
def calculate_percent(partial, total):
    """Calculate percent value as a string like '50.0%'.

    A zero (or falsy) total yields '0%' instead of dividing by zero.
    """
    percent = round(partial / total * 100, 2) if total else 0
    return f'{percent}%'
import re
def extract_sentences(modifier, split_text):
    """
    Extracts the sentences that contain the modifier references.

    Matching is case-insensitive and requires the modifier to appear as a
    whole word (not embedded inside a longer word).
    """
    pattern = re.compile(r"\b(?=\w)%s\b(?!\w)" % re.escape(modifier),
                         re.IGNORECASE)
    return [sentence for sentence in split_text if pattern.search(sentence)]
def parse_acs_metadata(acs_metadata, groups):
    """Returns a map of variable ids to metadata for that variable, filtered to
    specified groups.

    Only variables whose label starts with "Estimate!!Total" are kept.

    acs_metadata: The ACS metadata as json.
    groups: The list of group ids to include."""
    return {
        variable_id: metadata
        for variable_id, metadata in acs_metadata["variables"].items()
        if metadata.get("group") in groups
        and metadata["label"].startswith("Estimate!!Total")
    }
import random
def randomlyInfectRegions(network, regions, age_groups, infected):
    """Randomly infect regions to initialize the random simulation

    :param network: object representing the network of populations
    :type network: A NetworkOfPopulation object
    :param regions: The number of regions to expose.
    :type regions: int
    :param age_groups: Age groups to infect
    :type age_groups: list
    :param infected: People to infect
    :type infected: int
    :return: Structure of initially infected regions with number
    :rtype: dict
    """
    infections = {}
    # random.choices samples WITH replacement, so the same region can be
    # drawn more than once and fewer than `regions` distinct regions may
    # end up in the result.
    for regionID in random.choices(list(network.graph.nodes()), k=regions):
        infections[regionID] = {}
        for age in age_groups:
            infections[regionID][age] = infected
    return infections
def timestamp_to_uint64(timestamp):
    """Convert a datetime to integer milliseconds since the Unix epoch."""
    return int(timestamp.timestamp() * 1000)
import torch
def divide_and_conquer(x, k, mul):
    """
    Divide and conquer method for polynomial expansion
    x is a 2d tensor of size (n_classes, n_roots)
    The objective is to obtain the k first coefficients of the expanded
    polynomial

    `mul` is a callable combining two lists of coefficient tensors into
    the coefficients of the product polynomial. NOTE(review): `k` is not
    referenced in this function — presumably the truncation to k
    coefficients happens inside `mul`; confirm.
    """
    to_merge = []
    # Repeatedly split the factors in half and multiply the halves.
    while x[0].dim() > 1 and x[0].size(0) > 1:
        size = x[0].size(0)
        half = size // 2
        if 2 * half < size:
            # Odd number of factors: set the last one aside, merge later.
            to_merge.append([t[-1] for t in x])
        x = mul([t[:half] for t in x],
                [t[half: 2 * half] for t in x])
    # Fold the factors that were set aside back into the product.
    for row in to_merge:
        x = mul(x, row)
    x = torch.cat(x)
    return x
from bs4 import BeautifulSoup
def parse_pypi_index(text):
    """Parses the text and returns all the packages

    Parameters
    ----------
    text : str
        the html of the website (https://pypi.org/simple/)

    Returns
    -------
    List[str]
        the list of packages (the text of every <a> anchor on the page)
    """
    # Requires the third-party 'lxml' parser to be installed.
    soup = BeautifulSoup(text, "lxml")
    return [i.get_text() for i in soup.find_all("a")]
def clean(expr):
    """
    cleans up an expression string

    Replaces caret exponentiation ('^') with Python's '**' operator.

    Arguments:
        expr: string, expression
    """
    return expr.replace("^", "**")
def xor(text, key):
    """Returns the given string XORed with given key.

    The key is repeated cyclically to cover the whole text.

    Raises:
        ValueError: if ``key`` is empty while ``text`` is not — the
            original implementation looped forever in that case.
    """
    if not text:
        return ""
    if not key:
        raise ValueError("key must be non-empty")
    # Tile the key to at least len(text), then truncate.
    full_key = (key * (len(text) // len(key) + 1))[:len(text)]
    return "".join(chr(ord(a) ^ ord(b)) for (a, b) in zip(text, full_key))
from typing import List
from typing import Union
def is_prefix(a: List[Union[int, str]], b: List[Union[int, str]]):
    """Check if `a` is a strict prefix of `b` (equal lists do not count)."""
    return len(a) < len(b) and b[:len(a)] == a
import os
def _sanitize_filename(dfile, no_symlink=True):
    """Check and sanitize 'dfile' for use as a target file.

    Normalises the path to an absolute one, refuses symlinks (when
    ``no_symlink``) and non-regular files, and creates the parent
    directory if it does not exist.

    :param dfile: candidate target file path
    :param no_symlink: if True, reject symlink targets
    :return: the absolute, validated path
    :raises FileExistsError: if the target is a symlink or other
        non-regular file that a compiler write would replace
    """
    dirname, basename = os.path.split(dfile)
    dirname = os.path.abspath(dirname)
    dfile = os.path.join(dirname, basename)
    if no_symlink:
        if os.path.islink(dfile):
            msg = ('{} is a symlink and will be changed into a regular file if '
                   'the compiler writes a compiled file to it')
            raise FileExistsError(msg.format(dfile))
    # Sockets, FIFOs, devices etc. are never acceptable targets.
    elif os.path.exists(dfile) and not os.path.isfile(dfile):
        msg = ('{} is a non-regular file and will be changed into a regular '
               'one if the compiler writes a compiled file to it')
        raise FileExistsError(msg.format(dfile))
    # Ensure the parent directory exists (side effect on the filesystem).
    os.makedirs(dirname, exist_ok=True)
    return dfile
import sys
import os
import subprocess
import json
def get_routes(config, prefix=None, group_by=None):
    """Executes the helper script that extracts the routes out of the
    pyramid app.

    Runs 'extract.py' (located next to this module) in a subprocess with
    the current interpreter and parses its JSON stdout.

    :param config: path to the pyramid config (user-expanded)
    :param prefix: optional route-prefix filter passed to the script
    :param group_by: optional grouping key passed to the script
    :return: the decoded JSON output of the helper script
    """
    python = sys.executable
    script = os.path.join(os.path.dirname(__file__), "extract.py")
    config = os.path.expanduser(config)
    args = [python, script, config]
    if group_by:
        args.append("--group=" + group_by)
    if prefix:
        args.append("--prefix=" + prefix)
    # Capture stdout only; stderr passes through to the caller's stderr.
    p = subprocess.Popen(args=args, stdout=subprocess.PIPE)
    (stdout, _) = p.communicate()
    return json.loads(stdout.decode("utf-8"))
def square_root(s):
    """ Function to compute square roots using the Babylonian method

    Iterates x <- (x + s/x) / 2 until the value stops changing. Since
    convergence is quadratic, the error soon drops below machine
    precision and the value no longer changes from one step to the next.

    Raises:
        ValueError: if ``s`` is negative.
    """
    if s < 0:
        raise ValueError("cannot take the square root of a negative number")
    if s == 0:
        # Avoid the division by zero the original hit for s == 0.
        return 0
    x = s/2
    while True:
        temp = x
        x = (1/2) * ( x + (s/x) )
        if temp == x:
            return x
def quisort(uslist, lo=None, hi=None):
    """Sort in-place an unsorted list or slice of a list

    lo and hi correspond to the start and stop indices for the list slice
    (both inclusive); they default to the whole list.
    """
    if lo is None:
        lo = 0
    if hi is None:
        hi = len(uslist) - 1

    def _partition(seq, low, high):
        """Lomuto partition around the last element; return pivot index."""
        pivot = seq[high]
        boundary = low
        for idx in range(low, high):
            if seq[idx] <= pivot:
                seq[boundary], seq[idx] = seq[idx], seq[boundary]
                boundary += 1
        seq[boundary], seq[high] = seq[high], seq[boundary]
        return boundary

    if lo < hi:
        pivot_index = _partition(uslist, lo, hi)
        quisort(uslist, lo, pivot_index - 1)
        quisort(uslist, pivot_index + 1, hi)
def check_movement(pagination):
    """Check for ability to navigate backward or forward between pages.

    :param pagination: selenium WebElement for the pagination widget
    :return: [can_move_back, can_move_forward] booleans

    Prints progress to stdout as a side effect. A navigation direction is
    considered available when the first/last page button's text is the
    expected '<' / '>' arrow; any lookup or assertion failure counts as
    "cannot move".
    """
    pagination_movements = pagination.find_element_by_xpath(
        './/div[@class="search_pagination_right"]'
    ).find_elements_by_class_name("pagebtn")
    # Check for ability to move back
    try:
        move_back_a = pagination_movements[0]
        assert move_back_a.text == "<"
        can_move_back = True
        print("Can move back, ", end="")
    except Exception:
        # IndexError (no buttons) or AssertionError (wrong label).
        can_move_back = False
        print("Can not move back, ", end="")
    # Check for ability to move forward
    try:
        move_forward_a = pagination_movements[-1]
        assert move_forward_a.text == ">"
        can_move_forward = True
        print("Can move forward")
    except Exception:
        can_move_forward = False
        print("Can not move forward, ", end="")
    return [can_move_back, can_move_forward]
def _scale_annots_dict(annot, new_sz, ann_im_sz):
"""Scale annotations to the new_sz, provided the original ann_im_sz.
:param annot: bounding box in dict format
:param new_sz: new size of image (after linear transforms like resize)
:param ann_im_sz: original size of image for which the bounding boxes were given.
:return:
"""
d = {}
for k, v in annot.items():
if k.startswith('x'):
v_ = new_sz[0] * v / ann_im_sz[0]
elif k.startswith('y'):
v_ = new_sz[1] * v / ann_im_sz[1]
else:
# don't destroy other keys
v_ = v
d.update({k: v_})
return d | 44a0f9bf0b1a9befbaea95fd6b6fd5d9440178a4 | 4,390 |
def text_cleaning(any_text, nlp):
    """
    The function filters out stop words from any text and returns tokenized and lemmatized words

    :param any_text: raw text to clean (lower-cased before processing)
    :param nlp: a loaded spaCy language pipeline (provides tokenisation,
        lemmas and the stop-word list)
    :return: single string of space-joined lemmas with stop words removed
    """
    doc = nlp(any_text.lower())
    result = []
    for token in doc:
        # Drop stop words; punctuation is deliberately kept (see below).
        if token.text in nlp.Defaults.stop_words:
            continue
        # if token.is_punct:
        # continue
        result.append(token.lemma_)
    clean_text = " ".join(result)
    return clean_text
def get_mwa_eor_spec(nu_obs=150.0, nu_emit=1420.40575, bw=8.0, tint=1000.0,
                     area_eff=21.5, n_stations=50, bmax=100.0):
    """
    Return the default MWA EoR observation parameters, passing any
    overrides straight through unchanged.

    Parameters
    ----------
    nu_obs : float or array-like, optional
        observed frequency [MHz]
    nu_emit : float or array-like, optional
        rest frequency [MHz]
    bw : float or array-like, optional
        frequency bandwidth [MHz]
    tint : float or array-like, optional
        integration time [hour]
    area_eff : float or array-like, optional
        effective area per station [m ** 2]
    n_stations : int or array-like, optional
        number of stations
    bmax : float or array-like, optional
        maximum baseline [wavelength]

    Returns
    -------
    nu_obs, nu_emit, bw, tint, area_eff, n_stations, bmax
    """
    return nu_obs, nu_emit, bw, tint, area_eff, n_stations, bmax
def count_cells(notebook):
    """
    Return the number of cells in the given notebook.

    Args:
        notebook(Notebook): python object representing the notebook,
            exposing the raw dict as ``nb_dict``
    Returns:
        int: the number of cells in the notebook
    A way you might use me is
    cells_count = count_cells(nb)
    """
    return len(notebook.nb_dict["cells"])
def str2bool(value):
    """
    Convert a text value to a boolean.

    Args:
        value - text to be converted to boolean
        True values: y, yes, true, t, on, 1
        False values: everything else (n, no, false, off, 0, ...)
    """
    # Case-insensitive, and includes 'on', which the docstring promised
    # but the original implementation omitted.
    return str(value).lower() in {'y', 'yes', 'true', 't', 'on', '1'}
def add2dict(dict, parent_list, key, value):
    """ Add a key/value pair to a dictionary; the pair is added following the
        hierarchy of 'parents' as define in the parent_list list. That is
        if parent list is: ['5', '1'], and key='k', value='v', then the new,
        returned dictionary will have a value:
        dict['5']['1'][k] = v
    """
    node = dict
    # Walk (and create as needed) the nested parent dictionaries.
    for parent in parent_list:
        node = node.setdefault(parent, {})
    node[key] = value
    return dict
def remove_keys(d, to_remove):
    """ This function removes the given keys from the dictionary d. N.B.,
    "not in" is used to match the keys.

    Args:
        d (dict): a dictionary
        to_remove (list): a list of keys to remove from d
    Returns:
        dict: a copy of d, excluding keys in to_remove
    """
    filtered = {}
    for key, value in d.items():
        if key not in to_remove:
            filtered[key] = value
    return filtered
import os
def parse_line(line_str):
    """
    Parse a line from sha1sum output into tuple of hash, directory path and
    file name.
    Eg. line '3af30443352a5760cb0f88e619819cee1b1599e0 foo/bar/baz' would
    be parsed into tuple
    ('3af30443352a5760cb0f88e619819cee1b1599e0', 'foo/bar', 'baz').
    """
    hash_str, path_str = line_str.rstrip().split(' ', maxsplit=1)
    directory, filename = os.path.split(path_str)
    return hash_str, directory, filename
def mutual_information(y_true, y_pred):
    """Mutual information score.

    :param y_true: ignored (kept for a scorer-style signature)
    :param y_pred: precomputed score to pass through
    :return: ``y_pred`` unchanged
    """
    # This is a simple wrapper for returning the score as given in y_pred
    return y_pred
def comp_height_wire(self):
    """Return bar height

    Simply exposes the conductor's stored bar height attribute.

    Parameters
    ----------
    self : CondType21
        A CondType21 object

    Returns
    -------
    H: float
        Height of the bar [m]
    """
    return self.Hbar
def sanitise_text(text):
    """Normalise line endings and guarantee a trailing newline.

    CR/LF pairs become LF and any remaining CRs are removed, so saved
    files get consistent, OS-appropriate line endings. A final empty
    line is also ensured: code that ends inside an indentation level
    (e.g. the last line of a method body) is otherwise not completely
    defined when executed, so a trailing '\\n' is appended if missing.
    """
    normalised = text.replace('\r\n', '\n').replace('\r', '')
    if normalised and not normalised.endswith('\n'):
        return normalised + '\n'
    return normalised
def host_is_local(host: str) -> bool:
    """
    Tells whether given host is local.

    Uses exact comparison: the original substring check wrongly
    classified hosts such as '127.0.0.10' or 'notlocalhost.example.com'
    as local.

    :param host: host name or address
    :return: True if host is local otherwise False
    """
    local_names = {
        "localhost",
        "127.0.0.1",
        "::1",
    }
    return host in local_names
import os
def find_in_path(name, path):
    """Search PATH for a binary.

    Args:
        name: the filename to search for
        path: the path ['./', './path/to/stuff']
    Returns:
        The abspath of the first match, or None if not found.
    """
    for directory in path:
        candidate = os.path.join(directory, name)
        if os.path.exists(candidate):
            return os.path.abspath(candidate)
    return None
def format_formula(formula):
    """Converts str of chemical formula into latex format for labelling purposes

    Runs of digits become LaTeX subscripts, e.g. 'H2O' -> '$H_{2}O$'.

    Parameters
    ----------
    formula: str
        Chemical formula
    """
    pieces = []
    digits = ""
    for char in formula:
        if char.isdigit():
            # Accumulate consecutive digits into one subscript.
            digits += char
        else:
            if digits:
                pieces.append("_{%s}" % digits)
                digits = ""
            pieces.append(char)
    # Flush a trailing digit run (formula ends in a number).
    if digits:
        pieces.append("_{%s}" % digits)
    return r"$%s$" % "".join(pieces)
def markContinuing(key, idea, oldest_idea_id, oldest_idea_detect_time, accum):
    """
    Mark IDEA as continuing event.

    :param key: tuple key; only key[0:3] is propagated downstream
    :param idea: IDEA event object (may be None/falsy)
    :param oldest_idea_id: id of the oldest related IDEA event
    :param oldest_idea_detect_time: detect time of that oldest event
    :param accum: accumulator with an ``add`` method
    :return: marked key, IDEA
        NOTE(review): when ``idea`` is falsy the function implicitly
        returns None instead of a tuple — confirm callers expect that.
    """
    # If idea is present
    if idea:
        # Equality of ID's in tuple and idea, if true mark will be added
        if oldest_idea_id != idea.id:
            # Add {key: (ID, DetectTime)} to accumulator
            accum.add(dict([(key, (oldest_idea_id, oldest_idea_detect_time))]))
            # Add id mark for continuing event
            idea.aida_continuing=oldest_idea_id
        # Return tuple: key for next deduplication phase and IDEA
        return (key[0:3], idea)
def configure_pseudolabeler(pseudolabel: bool, pseudolabeler_builder, pseudolabeler_builder_args):
    """Pass in a class that can build a pseudolabeler (implementing __call__) or a builder function
    that returns a pseudolabeling function.

    ``pseudolabeler_builder`` is a NAME looked up in this module's
    globals, so the builder must be defined or imported in this module.

    Returns the built pseudolabeler, or None when ``pseudolabel`` is falsy.
    """
    if pseudolabel:
        return globals()[pseudolabeler_builder](*pseudolabeler_builder_args)
    return None
def flip_dict(dict, unique_items=False, force_list_values=False):
    """Swap keys and values in a dictionary

    Parameters
    ----------
    dict: dictionary
        dictionary object to flip
    unique_items: bool
        whether to assume that all items in dict are unique, potential speedup but repeated items will be lost
    force_list_values: bool
        whether to force all items in the result to be lists or to let unique items have unwrapped values. Doesn't apply if unique_items is true.
    """
    if unique_items:
        return {value: key for key, value in dict.items()}
    if force_list_values:
        flipped = {}
        for key, value in dict.items():
            flipped.setdefault(value, []).append(key)
        return flipped
    # Mixed mode: single keys stay unwrapped, duplicates become lists.
    flipped = {}
    for key, value in dict.items():
        if value not in flipped:
            flipped[value] = key
        elif isinstance(flipped[value], list):
            flipped[value].append(key)
        else:
            flipped[value] = [flipped[value], key]
    return flipped
def getHRLanguages(fname, hrthreshold=0):
    """
    :param fname: the name of the file containing filesizes. Created using wc -l in the wikidata folder
    :param hrthreshold: how big a set of transliteration pairs needs to be considered high resource
    :return: a list of language names (in ISO 639-3 format?)
    """
    hrlangs = set()
    with open(fname) as fs:
        for line in fs:
            # Each line: long name, ISO 639-3 code, ISO 639-1 code, size
            # (whitespace-separated); only the 639-3 code and size are used.
            long,iso639_3,iso639_1,size = line.strip().split()
            if int(size) > hrthreshold:
                hrlangs.add(iso639_3)
    # NOTE: despite the docstring, the return value is a set, not a list.
    return hrlangs
import requests
def query(params, lang='en'):
    """
    Simple Mediawiki API wrapper

    Issues an 'action=query' request (JSON format) to the Wikipedia API
    for the given language, merging ``params`` into the request.

    :param params: extra API parameters (may override action/format)
    :param lang: Wikipedia language subdomain, default 'en'
    :return: the 'query' part of the JSON response; None on a non-2xx
        HTTP status. NOTE: implicitly returns None as well when the
        response has no 'query' key.
    """
    url = 'https://%s.wikipedia.org/w/api.php' % lang
    finalparams = {
        'action': 'query',
        'format': 'json',
    }
    finalparams.update(params)
    resp = requests.get(url, params=finalparams)
    if not resp.ok:
        return None
    data = resp.json()
    if 'query' in data:
        return data['query']
from typing import Dict
from typing import Any
import os
import json
def load_json(path: str) -> Dict[str, Any]:
    """Loads a `.json` file from `path`.

    Args:
        path (str): Path to file.

    Returns:
        Dict[str, Any]: Returns the loaded json.

    Raises:
        FileNotFoundError: if `path` does not point to an existing file.

    Example:
        >>> # Load a json file
        >>> load_json('mlnext.json')
        {'name': 'mlnext'}
    """
    if not os.path.isfile(path):
        raise FileNotFoundError(f'Path {path} invalid.')
    with open(path, 'r') as fp:
        return json.load(fp)
import torch
def get_edge_lengths(vertices, edge_points):
    """
    get edge squared length using edge_points from get_edge_points(mesh) or edge_vertex_indices(faces)

    :params
        vertices (N,3): vertex positions
        edge_points (E,4): per-edge vertex indices; only the first two
            columns (the edge's endpoint indices) are used here
    :return
        (E,) tensor of SQUARED edge lengths (no sqrt is taken)
    """
    # (E,2,D): gather both endpoints of every edge in one indexing op.
    # (Removed the unused N, D, E shape locals of the original.)
    edge_vertices = vertices[edge_points[:, :2]]
    deltas = edge_vertices[:, 0, :] - edge_vertices[:, 1, :]
    return torch.sum(deltas * deltas, dim=-1)
def fac(num):
    """Return num! (the factorial of a non-negative integer).

    Implemented iteratively to avoid recursion-depth limits for large
    inputs.

    Raises:
        ValueError: if ``num`` is negative. (The original used ``assert``,
            which is stripped under ``python -O``.)
    """
    if num < 0:
        raise ValueError("num must be non-negative")
    result = 1
    for factor in range(2, num + 1):
        result *= factor
    return result
def entropy(logp, p):
    """Compute the entropy of `p` - probability density function approximation.
    We need this in order to compute the entropy-bonus.

    :param logp: log-probabilities tensor
    :param p: probabilities tensor (same shape as logp)
    :return: scalar tensor, mean over dim 0 of -(sum over dim 1 of p*logp)
        # assumes 2-D (batch, n) inputs given the dim=1 sum — TODO confirm
    """
    H = -(logp * p).sum(dim=1).mean()
    return H
import random
def permuteregulations(graph):
    """Randomly change which regulations are repressions, maintaining activation and repression counts and directions.

    Returns a modified copy of ``graph``; the input graph is unchanged.
    Each edge is expected to carry a boolean 'repress' attribute.
    """
    edges = list(graph.edges)
    copy = graph.copy()
    repressions = 0
    # First pass: count repressions and clear every repress flag.
    for edge in edges:
        edge_data = copy.edges[edge]
        if edge_data['repress']:
            repressions += 1
            edge_data['repress'] = False
    # Re-assign the same number of repressions to randomly chosen edges
    # (random.sample: without replacement, so the count is preserved).
    for new_repression in random.sample(edges, repressions):
        copy.edges[new_repression]['repress'] = True
    return copy
def create_incident_field_context(incident):
    """Parses the 'incident_field_values' entry of the incident and returns it

    Spaces in field names are replaced with underscores so the names are
    usable as context keys.

    Args:
        incident (dict): The incident to parse

    Returns:
        dict. Mapping of sanitised field name to field value
    """
    return {
        field['name'].replace(" ", "_"): field['value']
        for field in incident.get('incident_field_values', [])
    }
import os
from datetime import datetime
def create_warning_path(paths_=None):
    """It Creates the files names for both files ( strangers and spoofing )

    :param paths_: optional list of directories to place the files in;
        defaults to the arp_guard warning directory (created if missing)
    :return: (spoofs_path, strangers_paths) — two parallel lists of
        date-stamped file paths, one pair per directory in ``paths_``
    """
    if not paths_:
        # NOTE(review): the existence check uses /opt/arp_warnings/ but
        # the mkdir and the default path use /opt/arp_guard/arp_warnings
        # — these look inconsistent; confirm the intended directory.
        if not os.path.isdir('/opt/arp_warnings/'):
            os.system('mkdir /opt/arp_guard/arp_warnings')
        paths_ = ['/opt/arp_guard/arp_warnings/'] # default warning dir
    spoofs_path = []
    strangers_paths = []
    # Date suffix like '2024_1_31' (no zero padding).
    date_path = str(datetime.now().year) + "_" + str(datetime.now().month) + "_" + str(datetime.now().day)
    for i in paths_:
        spoofs_path.append(i + "MacSpoof_warning_" + date_path)
        strangers_paths.append(i + "strangers_warning_" + date_path)
    return spoofs_path, strangers_paths
def num_of_visited_nodes(driver_matrix):
    """ Calculate the total number of visited nodes for multiple paths.

    Args:
        driver_matrix (list of lists): A list whose members are lists that
        contain paths that are represented by consecutively visited nodes.
    Returns:
        int: Number of visited nodes
    """
    total = 0
    for path in driver_matrix:
        total += len(path)
    return total
def gen_custom_item_windows_file(description, info, value_type, value_data,
                                 regex, expect):
    """Generates a custom item stanza for windows file contents audit

    Newlines in description/info are flattened to spaces.

    Args:
        description: string, a description of the audit
        info: string, info about the audit
        value_type: string, "POLICY_TEXT" -- included for parity with other
            gen_* modules.
        value_data: string, location of remote file to check
        regex: string, regular expression to check file for
        expect: string, regular expression to match for a pass
    Returns:
        A list of strings to put in the main body of a Windows file audit file.
    """
    return [
        '',
        '<custom_item>',
        ' type: FILE_CONTENT_CHECK',
        ' description: "%s"' % description.replace("\n", " "),
        ' info: "%s"' % info.replace("\n", " "),
        ' value_type: %s' % value_type,
        ' value_data: "%s"' % value_data,
        ' regex: "%s"' % regex,
        ' expect: "%s"' % expect,
        '</custom_item>',
        ' ',
    ]
import inspect
def create_signature(args=None, kwargs=None):
    """Create a inspect.Signature object based on args and kwargs.

    Args:
        args (list or None): The names of positional or keyword arguments.
        kwargs (list or None): The keyword only arguments.

    Returns:
        inspect.Signature
    """
    positional = [
        inspect.Parameter(name=name,
                          kind=inspect.Parameter.POSITIONAL_OR_KEYWORD)
        for name in (args or [])
    ]
    keyword_only = [
        inspect.Parameter(name=name,
                          kind=inspect.Parameter.KEYWORD_ONLY)
        for name in (kwargs or [])
    ]
    return inspect.Signature(parameters=positional + keyword_only)
import json
def select(type, name, optional):
    """Select data from data.json file

    Reads 'data.json' from the current working directory; for the first
    element of data[type] that equals data[name], returns data[optional].
    Implicitly returns None when no element matches.

    NOTE(review): the comparison of data[type] members against data[name]
    followed by returning data[optional] (independent of the matched
    element) looks suspicious — confirm the intended lookup logic.
    """
    with open('data.json', 'r') as f:
        data = json.load(f)
        for i in data[type]:
            if i == data[name]:
                return data[optional]
def perform_step(polymer: str, rules: dict) -> str:
    """
    Performs a single step of polymerization by performing all applicable insertions; returns new polymer template string
    """
    grown = []
    # For each adjacent pair, keep the left element and insert the
    # rule-mapped element between the pair.
    for left, right in zip(polymer, polymer[1:]):
        grown.append(left + rules[left + right])
    grown.append(polymer[-1])
    return "".join(grown)
def user_city_country(obj):
    """Get the location (city, country) of the user

    Args:
        obj (object): The user profile
    Returns:
        str: "city, country" using whichever parts are set, or
        'Not available' when neither is.
    """
    parts = [str(part) for part in (obj.city, obj.country) if part]
    return ", ".join(parts) if parts else 'Not available'
def remove_app(INSTALLED_APPS, app):
    """ remove app from installed_apps

    Returns a tuple without the FIRST occurrence of ``app``; when the app
    is not installed, ``INSTALLED_APPS`` is returned unchanged.
    """
    if app not in INSTALLED_APPS:
        return INSTALLED_APPS
    remaining = list(INSTALLED_APPS)
    remaining.remove(app)
    return tuple(remaining)
def pprint(matrix: list) -> str:
    """
    Preety print matrix string

    Renders the matrix with one row per line by inserting a newline after
    every row separator.

    Parameters
    ----------
    matrix : list
        Square matrix.

    Returns
    -------
    str
        Preety string form of matrix.
    """
    return str(matrix).replace('],', '],\n')
def dashed_word(answer):
    """
    :param answer: str, from random_word
    :return: str, one '-' for every character of answer
    """
    return '-' * len(answer)
def merge_overpass_jsons(jsons):
    """Merge a list of overpass JSONs into a single JSON.

    Parameters
    ----------
    jsons : :obj:`list`
        List of dictionaries representing Overpass JSONs.

    Returns
    -------
    :obj:`dict`
        Dictionary containing all elements from input JSONS.
    """
    merged = []
    for osm_json in jsons:
        merged += osm_json['elements']
    return {'elements': merged}
def _serialize_account(project):
    """Generate several useful fields related to a project's account

    :param project: object with an ``account`` attribute; the account
        exposes goal/community_contribution values and the total_*,
        percent_*, funded and remaining methods used below
    :return: dict of plain values suitable for serialisation
    """
    account = project.account
    return {'goal': account.goal,
            'community_contribution': account.community_contribution,
            'total_donated': account.total_donated(),
            'total_raised': account.total_raised(),
            'total_cost': account.total_cost(),
            'percent_raised': account.percent_raised(),
            'percent_community': account.percent_community(),
            'funded': account.funded(),
            'remaining': account.remaining()}
def get_wcets(utils, periods):
    """ Returns WCET values, one per task: each utilisation multiplied by
    the corresponding period. """
    return [u * t for u, t in zip(utils, periods)]
from pathlib import Path
def get_dir(path):
    """
    Return the directory of ``path``: the parent when it is an existing
    file, otherwise the path itself as a ``Path`` object.
    """
    if not isinstance(path, Path):
        path = Path(path)
    if path.is_file():
        return path.parent
    return path
import pickle
def cache_fun(fname_cache, fun):
    """Check whether cached data exists, otherwise call fun and return

    Parameters
    ----------
    fname_cache: string
        name of cache to look for
    fun: function
        function to call in case cache doesn't exist
        probably a lambda function

    Returns
    -------
    The unpickled cache contents, or the fresh result of ``fun()``
    (which is then pickled to ``fname_cache`` as a side effect).

    Notes
    -----
    Prints progress to stdout. Only unpickle caches you trust:
    ``pickle.load`` can execute arbitrary code from a crafted file.
    """
    try:
        print("checking cache for", fname_cache)
        with open(fname_cache, 'rb') as fhandle:
            print("found cache")
            ret = pickle.load(fhandle)
    # EOFError covers a truncated/empty cache file.
    except (FileNotFoundError, EOFError):
        print("cache not found, running function")
        ret = fun()
        with open(fname_cache, 'wb') as fhandle:
            pickle.dump(ret, fhandle)
    return ret
import torch
def unpack_bidirectional_lstm_state(state, num_directions=2):
    """
    Unpack the packed hidden state of a BiLSTM s.t. the first dimension equals to
    the number of layers multiplied by the number of directions.
    """
    batch_size = state.size(1)
    per_direction = state.size(2) // num_directions
    # Split the direction-concatenated hidden dim, stack per direction,
    # then flatten (layers, directions) into the leading dimension.
    direction_chunks = torch.split(state, per_direction, dim=2)
    stacked = torch.stack(direction_chunks, dim=1)
    return stacked.view(-1, batch_size, per_direction)
import re
def read(line_str, line_pos, pattern='[0-9a-zA-Z_:?!><=&]'):
    """
    Read all tokens from a code line matching specific characters,
    starting at a specified position.

    Args:
        line_str (str): The code line.
        line_pos (int): The code line position to start reading.
        pattern (str): Regular expression for a single character. All matching
            characters will be read.

    Returns:
        literal (str): The literal that was read, including only characters
            that were defined in the pattern argument.
        line_pos (int): The updated line position.
    """
    chars = []
    end = len(line_str)
    # Consume characters one at a time until the pattern stops matching.
    while line_pos < end and re.match(pattern, line_str[line_pos]):
        chars.append(line_str[line_pos])
        line_pos += 1
    return ''.join(chars), line_pos
import random
def fully_random(entries, count):
    """Choose completely at random from all entries"""
    # random.sample picks `count` distinct entries without replacement.
    return random.sample(population=entries, k=count)
import yaml
def _yaml_to_dict(yaml_string):
    """
    Converts a yaml string to dictionary

    Args:
        yaml_string: String containing YAML

    Returns:
        Dictionary containing the same object
    """
    # safe_load refuses arbitrary Python object construction.
    parsed = yaml.safe_load(yaml_string)
    return parsed
from typing import Dict
def encode_address(address: Dict) -> bytes:
    """
    Creates bytes representation of address data.

    args:
        address: Dictionary containing the address data.

    returns:
        Bytes to be saved as address value in DB.
    """
    # Field order matters; each field is followed by a NUL terminator.
    fields = (
        address['balance'],
        address['code'],
        str(address['inputTxIndex']),
        str(address['outputTxIndex']),
        str(address['minedIndex']),
        address['tokenContract'],
        str(address['inputTokenTxIndex']),
        str(address['outputTokenTxIndex']),
        str(address['inputIntTxIndex']),
        str(address['outputIntTxIndex']),
    )
    return ''.join(field + '\0' for field in fields).encode()
def has_substr(line, chars):
    """Return True if *line* contains any of the substrings in *chars*."""
    return any(sub in line for sub in chars)
def aq_name(path_to_shp_file):
    """
    Computes the name of a given aquifer given it's shape file

    :param path_to_shp_file: path to the .shp file for the given aquifer
    :return: a string (name of the aquifer)
    """
    parts = path_to_shp_file.split('/')
    # The aquifer name is taken from the second path component, if any.
    name = str(parts[1]) if len(parts) >= 2 else ""
    print(name)
    return name
def T0_T0star(M, gamma):
    """Total temperature ratio for flow with heat addition (eq. 3.89)

    :param <float> M: Initial Mach #
    :param <float> gamma: Specific heat ratio
    :return <float> Total temperature ratio T0/T0star
    """
    m_sq = M ** 2
    # Same evaluation order as the textbook form: (t1 / t2) * t3.
    ratio = ((gamma + 1) * m_sq) / ((1.0 + gamma * m_sq) ** 2)
    return ratio * (2.0 + (gamma - 1.0) * m_sq)
def flip_channels(img):
    """Flips the order of channels in an image; eg, BGR <-> RGB.

    This function assumes the image is a numpy.array (what's returned by cv2
    function calls) and uses the numpy re-ordering methods. The number of
    channels does not matter.

    If the image array is strictly 2D, no re-ordering is possible and the
    original data is returned untouched.
    """
    # 2D arrays have no channel axis to reverse.
    if len(img.shape) == 2:
        return img
    return img[:, :, ::-1]
import os
def autodiscover_datafiles(varmap):
    """Return list of (dist directory, data file list) 2-tuples.

    The ``data_dirs`` setup var is used to give a list of
    subdirectories in your source distro that contain data
    files. It is assumed that all such files will go in the
    ``share`` subdirectory of the prefix where distutils is
    installing your distro (see the distutils docs); within
    that directory, a subdirectory with the same name as
    your program (i.e., the ``name`` setup var) will be
    created, and each directory in ``data_dirs`` will be a
    subdirectory of that. So, for example, if you have example
    programs using your distro in the ``"examples"`` directory
    in your distro, you would declare ``data_dirs = "examples"``
    in your setup vars, and everything under that source
    directory would be installed into ``share/myprog/examples``.

    :param varmap: mapping of setup vars; must contain ``name``, and
        may contain ``data_dirs``.
    :return: list of ``(install_dir, [file, ...])`` tuples suitable for
        the distutils ``data_files`` argument; empty if ``data_dirs``
        is absent.
    """
    result = []
    datadirs = varmap.get('data_dirs')
    if not datadirs:
        return result
    pathprefix = "share/{}".format(varmap['name'])
    for datadir in datadirs:
        for dirname, subdirs, filenames in os.walk(datadir):
            # Skip directories without files and any path containing a dot
            # (hidden or generated directories).
            if not filenames or "." in dirname:
                continue
            # Normalize to forward slashes for distutils, once per directory
            # (the original computed this twice).
            distdir = dirname.replace(os.sep, '/')
            distfiles = [
                "{}/{}".format(distdir, filename)
                for filename in filenames
                if not filename.startswith(".")
            ]
            # All files may have been hidden (dot-files); skip if so.
            if distfiles:
                result.append(
                    ("{}/{}".format(pathprefix, distdir), distfiles)
                )
    return result
import json
def load_data(in_file):
    """load json file from seqcluster cluster"""
    with open(in_file) as handle:
        data = json.load(handle)
    return data
import re
def expand_at_linestart(P, tablen):
    """Expand tab characters only in the leading whitespace of each line."""
    def _expand(match):
        return match.group().expandtabs(tablen)
    # splitlines(True) keeps line endings so the join reassembles P exactly.
    lines = P.splitlines(True)
    return ''.join(re.sub(r'^\s+', _expand, line) for line in lines)
def for_in_pyiter(it):
    """
    Collect the items yielded by iterating *it* into a list.

    >>> for_in_pyiter(Iterable(5))
    [0, 1, 2, 3, 4]
    """
    collected = []
    # Explicit for-loop: this exercises the iteration protocol directly.
    for element in it:
        collected.append(element)
    return collected
def is_comment(txt_row):
    """ Tries to determine if the current line of text is a comment line.

    Args:
        txt_row (string): text line to check.

    Returns:
        True when the text line is considered a comment line, False if not.
    """
    # Empty lines are treated as comments.
    if not txt_row:
        return True
    # A comment line is fully parenthesized: "(...)".
    return txt_row.startswith('(') and txt_row.endswith(')')
import os
import json
def read_config():
    """Read configuration file."""
    # Fall back to config.json next to this module when the env var is unset.
    default_path = os.path.join(
        os.path.dirname(os.path.realpath(__file__)), 'config.json')
    config_file = os.getenv('CONFIG_FILE_PATH') or default_path
    with open(config_file) as handle:
        return json.load(handle)
def get_projects(config):
    """Find all XNAT projects and the list of scan sites uploaded to each one.

    Args:
        config (:obj:`datman.config.config`): The config for a study

    Returns:
        dict: A map of XNAT project names to the URL(s) of the server holding
            that project.
    """
    projects = {}
    for site in config.get_sites():
        project = config.get_key("XnatArchive", site=site)
        # Several sites may upload to the same XNAT project.
        if project not in projects:
            projects[project] = set()
        projects[project].add(site)
    return projects
import pandas
def load_labeled_data(filename):
    """ Loads data from a csv, where the last column is the label of the data in that row

    :param filename: name of the file to load
    :return: data frames and labels in separate arrays
    """
    values = pandas.read_csv(filename, header=None).values
    # All columns but the last are features; the last column is the label.
    features = values[:, 0:-1].astype(float)
    targets = values[:, -1]
    return features, targets
import numpy
def relative_error(estimate, exact):
    """
    Compute the relative error of an estimate, in percent.
    """
    tol = 1e-15
    # Normal case: exact value is distinguishable from zero.
    if numpy.abs(exact) >= tol:
        return numpy.abs((estimate - exact) / exact) * 100.0
    # exact ~ 0: error is zero if estimate also ~ 0, else infinite.
    if numpy.abs(estimate - exact) < tol:
        return 0.0
    return numpy.inf
def get_missing_ids(raw, results):
    """
    Compare cached results with overall expected IDs, return missing ones.
    Returns a set.
    """
    all_ids = set(raw)
    cached_ids = set(results)
    missing = all_ids - cached_ids
    print("There are {0} IDs in the dataset, we already have {1}. {2} are missing.".format(len(all_ids), len(cached_ids), len(all_ids) - len(cached_ids)))
    return missing
def resultcallback(group):
    """Compatibility layer for Click 7 and 8."""
    # Click >= 8 exposes result_callback(); older versions set it to None
    # or omit it entirely.
    if getattr(group, "result_callback", None) is not None:
        return group.result_callback()
    # Click < 8.0
    return group.resultcallback()
def dec2hms(dec):
    """Convert decimal degrees to an (hour, minute, second) tuple.

    ADW: This should really be replaced by astropy

    :param dec: angle in decimal degrees (anything castable to float)
    :return: tuple ``(hour, minute, second)`` where hour and minute are
        ints and second is a float
    """
    DEGREE = 360.
    HOUR = 24.
    MINUTE = 60.
    # (Removed unused SECOND = 3600. constant.)
    dec = float(dec)
    fhour = dec * (HOUR / DEGREE)
    hour = int(fhour)
    fminute = (fhour - hour) * MINUTE
    minute = int(fminute)
    # Fractional minutes converted to seconds.
    second = (fminute - minute) * MINUTE
    return (hour, minute, second)
def str_input(prompt: str) -> str:
    """Prompt user for string value.

    Args:
        prompt (str): Prompt to display.

    Returns:
        str: User string response.
    """
    # A trailing space separates the prompt from the user's typing.
    return input(prompt + " ")
from typing import List
def get_nodes_for_homek8s_group(inventory, group_name) -> List[str]:
    """Return the nodes' names of the given group from the inventory as a list."""
    group = inventory['all']['children']['homek8s']['children'][group_name]
    hosts = group['hosts']
    # 'hosts' may be None (or empty) when the group has no nodes.
    return list(hosts) if hosts else []
def getrinputs(rtyper, graph):
    """Return the list of reprs of the input arguments to the 'graph'."""
    reprs = []
    for arg in graph.getargs():
        reprs.append(rtyper.bindingrepr(arg))
    return reprs
import os
def get_conf_path(run_id):
    """
    Generate path for storing/loading configuration file

    :param run_id (str): run ID to be used
    :return: full file path for storing/loading config file
    """
    filename = '{}.ini'.format(run_id)
    return os.path.join('conf', filename)
def string_with_fixed_length(s="", l=30):
    """
    Return a string with the contents of s plus white spaces until length l.

    :param s: input string
    :param l: total length of the string (will crop original string if longer than l)
    :return: string of exactly length l (empty string when l <= 0)
    """
    # Guard matches the original loop, which produced "" for negative l.
    if l < 0:
        return ""
    # Truncate to l, then right-pad with spaces: equivalent to the former
    # character-by-character loop, without the quadratic concatenation.
    return s[:l].ljust(l)
def ele_types(eles):
"""
Returns a list of unique types in eles
"""
return list(set([e['type'] for e in eles] )) | e87ea4c6256c2520f9f714dd065a9e8642f77555 | 4,484 |
def calculate_label_counts(examples):
    """Assumes that the examples each have ONE label, and not a distribution over labels"""
    counts = {}
    for example in examples:
        counts.setdefault(example.label, 0)
        counts[example.label] += 1
    return counts
def printer(arg1):
    """
    Closure demo: the returned 'inner' function captures both the local
    'times' and the argument 'arg1', even after printer() has returned.
    """
    times = 3
    def inner():
        for _ in range(times):
            print(arg1)
    return inner
def get_listing_panel(tool, ghidra):
""" Get the code listing UI element, so we can get up-to-date location/highlight/selection """
cvs = tool.getService(ghidra.app.services.CodeViewerService)
return cvs.getListingPanel() | f14477cf13cb7eb4e7ede82b0c2068ca53a30723 | 4,488 |
from pathlib import Path
from typing import Set
def get_files_recurse(path: Path) -> Set:
    """Get all files recursively from given :param:`path`."""
    # rglob("*") walks the whole tree; directories are filtered out.
    return {entry for entry in path.rglob("*") if not entry.is_dir()}
def trim_spectrum(self, scouse, flux):
    """
    Trims a spectrum according to the user inputs
    """
    keep = scouse.trimids
    return flux[keep]
Subsets and Splits
No saved queries yet
Save your SQL queries to embed, download, and access them later. Queries will appear here once saved.