content (stringlengths 39 to 14.9k) | sha1 (stringlengths 40) | id (int64 0 to 710k) |
---|---|---|
def p_list_formatter(primer_list):
"""
Reformat the primer list (remove unnecessary characters from biopython2 output).
Args:
primer_list (list): list from list_from_gen output
Returns:
primer_dimers (list): list with unnecessary chars removed.
"""
primer_dimers = []
reformat_p_list = [each_item.replace('\\n', ' ').split() for each_item in primer_list]
for each_item in reformat_p_list:
primer_dimers.append((each_item[0].replace('(', '').replace('\'', ''),
each_item[2].replace('\'', ''), each_item[4],
each_item[5].replace('\')', '')))
return primer_dimers | 0b1585221a13c3560d511127af782875fcd71732 | 9,466 |
def _repr_rule(iptc_rule, ipv6=False):
""" Return a string representation of an iptc_rule """
s = ''
if not ipv6 and iptc_rule.src != '0.0.0.0/0.0.0.0':
s += 'src {} '.format(iptc_rule.src)
elif ipv6 and iptc_rule.src != '::/0':
s += 'src {} '.format(iptc_rule.src)
if not ipv6 and iptc_rule.dst != '0.0.0.0/0.0.0.0':
s += 'dst {} '.format(iptc_rule.dst)
elif ipv6 and iptc_rule.dst != '::/0':
s += 'dst {} '.format(iptc_rule.dst)
if iptc_rule.protocol != 'ip':
s += 'protocol {} '.format(iptc_rule.protocol)
if iptc_rule.in_interface is not None:
s += 'in {} '.format(iptc_rule.in_interface)
if iptc_rule.out_interface is not None:
s += 'out {} '.format(iptc_rule.out_interface)
if not ipv6 and iptc_rule.fragment:
s += 'fragment '
for m in iptc_rule.matches:
s += '{} {} '.format(m.name, m.get_all_parameters())
if iptc_rule.target and iptc_rule.target.name and len(iptc_rule.target.get_all_parameters()):
s += '-j {} {} '.format(iptc_rule.target.name, iptc_rule.target.get_all_parameters())
elif iptc_rule.target and iptc_rule.target.name:
s += '-j {} '.format(iptc_rule.target.name)
return s | e01c3b27ec6ee831a7d88fc87e69e707639ef0b6 | 9,470 |
def add_padding_0_bits(bits_string: str, required_length: int) -> tuple:
"""
Adds 0 to bits string.
Returns tuple - (bits string with padding, number of added 0s)
"""
extra_0_bits_count = 0
while len(bits_string) < required_length:
bits_string += '0'
extra_0_bits_count += 1
return bits_string, extra_0_bits_count | fd3eee071821d087b710c33a0beef72431836505 | 9,471 |
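A quick usage sketch for add_padding_0_bits with an illustrative 5-bit string padded to 8 bits:
print(add_padding_0_bits("10101", 8))  # ('10101000', 3)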
def IsInstance(type_):
"""Returns a function which can be used to check whether or not a value is of the given type."""
def Do(item):
return isinstance(item, type_)
return Do | 06f916e8658761a03619834692d78a44d145b514 | 9,472 |
def seq_type(seq):
"""
Determines whether a sequence consists of 'N's only
(i.e., represents a gap)
"""
return 'gap' if set(seq.upper()) == {'N'} else 'bases' | 5555e5cd0ccdbf8f5e7b475c5c983ab54a17fb07 | 9,475 |
def get_ar(bbox):
"""
:param bbox: [x1, y1, x2, y2] coordinates of the top-left and bottom-right corners
:return: aspect ratio
"""
[x1, y1, x2, y2] = bbox
return (y2 - y1) / (x2 - x1) | d79eb51eafec917b1754558a9a87307734fd8ac4 | 9,482 |
def validate_image_pull_credentials(image_pull_credentials):
"""
Validate ImagePullCredentialsType for Project
Property: Environment.ImagePullCredentialsType
"""
VALID_IMAGE_PULL_CREDENTIALS = ("CODEBUILD", "SERVICE_ROLE")
if image_pull_credentials not in VALID_IMAGE_PULL_CREDENTIALS:
raise ValueError(
"Project ImagePullCredentialsType must be one of: %s"
% ", ".join(VALID_IMAGE_PULL_CREDENTIALS)
)
return image_pull_credentials | f4953fefbca3ca5906ca58497152b25a07247c9a | 9,485 |
def num_bits(i):
"""Returns the number of bits in an unsigned integer."""
n = 0
while i:
n += 1
i &= i - 1
return n | 3eb664bd642717556af0b2c09314000d70209b44 | 9,486 |
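A small check of the Kernighan bit-count loop above, using an illustrative value:
print(num_bits(0b1011))  # 3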
def get_index_of_feature(feature_list, item):
"""
Gets the index of the feature in the provided feature list
:rtype : int
:param feature_list: List of features to search from
:param item: The feature to search
:return: The index where the feature was found, -1 otherwise
"""
# getting the indexes where 'item' occurs
idxs = [k for k in range(len(feature_list)) if feature_list[k][0] == item]
# counting the indexes
idxs_count = len(idxs)
# if the feature appears more than one time
if idxs_count > 1:
raise Exception("""
There was a problem in the feature extraction process.\r\n
The feature is counted more than one time.""")
# the index if any, -1 if the feature doesn't appear
return idxs[0] if idxs_count == 1 else -1 | 2f2d79d4caf953b60ecf841a23d86e8b4a00b937 | 9,491 |
import math
def sol_rad_from_t(et_radiation, cs_radiation, temperature_min, temperature_max, coastal):
"""
Estimate incoming solar (or shortwave) radiation, *Rs*, (radiation hitting
a horizontal plane after scattering by the atmosphere) from min and max
temperature together with an empirical adjustment coefficient for
'interior' and 'coastal' regions.
The formula is based on equation 50 in Allen et al (1998) which is the
Hargreaves radiation formula (Hargreaves and Samani, 1982, 1985). This
method should be used only when solar radiation or sunshine hours data are
not available. It is only recommended for locations where it is not
possible to use radiation data from a regional station (either because
climate conditions are heterogeneous or data are lacking).
**NOTE**: this method is not suitable for island locations due to the
moderating effects of the surrounding water.
:param et_radiation: Extraterrestrial radiation [MJ m-2 day-1]. Can be
estimated using ``et_rad()``.
:param cs_radiation: Clear sky radiation [MJ m-2 day-1]. Can be estimated
using ``cs_rad()``.
:param temperature_min: Daily minimum temperature [deg C].
:param temperature_max: Daily maximum temperature [deg C].
:param coastal: ``True`` if site is a coastal location, situated on or
adjacent to coast of a large land mass and where air masses are
influenced by a nearby water body, ``False`` if interior location
where land mass dominates and air masses are not strongly influenced
by a large water body.
:return: Incoming solar (or shortwave) radiation (Rs) [MJ m-2 day-1].
:rtype: float
"""
# Determine value of adjustment coefficient [deg C-0.5] for
# coastal/interior locations
if coastal:
adj = 0.19
else:
adj = 0.16
sol_rad = adj * math.sqrt(temperature_max - temperature_min) * et_radiation
# The solar radiation value is constrained by the clear sky radiation
return min(sol_rad, cs_radiation) | 6952aa6509897494551839e412d5a15e51b5e30c | 9,495 |
def moving_avg(v, N):
"""
simple moving average.
Parameters
----------
v : list
data to average
N : integer
number of samples per average.
Returns
-------
m_avg : list
averaged data.
"""
s, m_avg = [0], []
for i, x in enumerate(v, 1):
s.append(s[i - 1] + x)
if i >= N:
avg = (s[i] - s[i - N]) / N
m_avg.append(avg)
return m_avg | 2e71eefb91ac694eaf06c2167e38ef497671145e | 9,496 |
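An illustrative call of moving_avg with a 3-sample window (values chosen for demonstration):
print(moving_avg([1, 2, 3, 4, 5], 3))  # [2.0, 3.0, 4.0]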
from typing import Union
import re
def get_brackets(title: str) -> Union[str, None]:
"""
Return the substring of the first instance of bracketed text.
"""
regex_brackets = re.search(r"\[(.*?)\]", title)
if regex_brackets is None:
return None
else:
return regex_brackets.group() | f1d985cf79ae881e8aca168c065d40e640a9c1ff | 9,500 |
def _dof(mean_tau, sd_tau2):
"""
Returns the degrees of freedom for the chi-2 distribution from the mean and
variance of the uncertainty model, as reported in equation 5.5 of Al Atik
(2015)
"""
return (2.0 * mean_tau ** 4.) / (sd_tau2 ** 2.) | 9a4a395c9aea7b965a477550c7f254bf744cadc5 | 9,501 |
from typing import List
import json
def compare_apache_profiles(baseline_file, test_file, threshold=0.5) -> List:
"""
Compare baseline Apache access log profile against test profile.
:param baseline_file: file containing baseline profile
:param test_file: file containing test profile
:param threshold: percent (in decimal format) difference test profile must be from baseline to be considered malicious
:return: list of results
"""
results = []
with open(baseline_file, 'r') as f:
baseline_profile = json.load(f)
with open(test_file, 'r') as f:
test_profile = json.load(f)
baseline_all_avg_per_min = baseline_profile['requests']['all']['avg_per_min']
test_all_avg_per_min = test_profile['requests']['all']['avg_per_min']
baseline_limit_avg_per_min = baseline_all_avg_per_min * (1 + threshold)
if test_all_avg_per_min > baseline_limit_avg_per_min:
result = {'category': 'Potential DoS Attack',
'details': {'baseline_profile_avg_per_min': baseline_all_avg_per_min,
'test_profile_avg_per_min': test_all_avg_per_min,
'baseline_profile_upper_limit': baseline_limit_avg_per_min,
'baseline_profile_threshold_percent': threshold * 100}}
results.append(result)
return results | 0b94ad318fcb61be559767cbdba51def1b6db61f | 9,505 |
def uuid_mole_index(moles, mole_uuid):
"""Return the index of the first mole with the specified uuid."""
for i, mole in enumerate(moles):
if mole["uuid"] == mole_uuid:
return i
return None | 851877da59f6a6dd8c06b9bb2d462f6239d512e7 | 9,507 |
def find_average_record(sen_set, voting_dict):
    """
    Input: a set of last names, a voting dictionary
    Output: a vector containing the average components of the voting records
            of the senators in the input set
    Example:
        >>> voting_dict = {'Klein': [-1,0,1], 'Fox-Epstein': [-1,-1,-1], 'Ravella': [0,0,1]}
        >>> find_average_record({'Fox-Epstein','Ravella'}, voting_dict)
        [-0.5, -0.5, 0.0]
    """
    avg_record = list()
    for key, val in voting_dict.items():
        if key in sen_set:
            if avg_record:
                for i in range(len(val)):
                    avg_record[i] += val[i]
            else:
                # Copy the first matching record so the caller's dict is not mutated
                avg_record = list(val)
    return [a / len(sen_set) for a in avg_record] | ef0c0aeb5a75c0335de57ae6cca1c013fe59b8b0 | 9,514 |
def get_commit_link(repo_name: str, commit_sha: str) -> str:
"""
Build a commit URL for manual browser access using full repository name and commit SHA1
:param repo_name: full repository name (i.e. `{username}/{reponame}`)
:param commit_sha: 40 byte SHA1 for a commit
:return: A commit URL
"""
return "https://github.com/{}/commit/{}".format(repo_name, commit_sha) | a1cf8d30f3e5c5ce3c5fefc719cca7c1c4d92331 | 9,524 |
def msi_file_finder(pth):
"""
Return True if pth represents an msi file entry (a path ending in '.msi.txt').
"""
return bool(pth.fname.endswith('.msi.txt')) | 35c0d0dac72d44cbdd6f87b280a70917d264db0c | 9,526 |
def get_accuracy(y_true, y_predicted):
"""Compute the accuracy for given predicted class labels.
Parameters
----------
y_true: numpy array
The true class labels of shape=(number_points,).
y_predicted: numpy array
The predicted class labels of shape=(number_points,).
Returns
---------
accuracy: float
The accuracy of the predictions.
"""
correct_answers = (y_predicted == y_true).astype(int) # will have 1 when the prediction and the actual label match
accuracy = float(sum(correct_answers)) / float(len(correct_answers))
return accuracy | c88a6b51021c9e01133852fa84c6f434043391a5 | 9,532 |
def get_connections(network):
"""
Function creates a dictionary with agent id's as a key and adjacent nodes as a value.
:param network: object of graph
:type network: networkx.classes.graph.Graph
"""
return {agent_id: list(adj_agents.keys()) for (agent_id, adj_agents) in network.adjacency()} | fa614b599c577de5c2554818366159e918c1335b | 9,533 |
def meta_name(file_name):
"""Generate the name of the meta file"""
return "{}.json".format(file_name) | fc168d19c145c4f93fb8d92e3c0daa109aad31b6 | 9,534 |
def concat_body_paragraphs(body_candidates):
"""
Concatenate paragraphs constituting the question body.
:param body_candidates:
:return:
"""
return ' '.join(' '.join(body_candidates).split()) | a0faaa0ae0be0cda007c2af1f6e47f3b745862b3 | 9,536 |
import re
def matchNumbersOnly(value):
"""Match strings with numbers and '.' only."""
if re.match('^[0-9.]+$', value):
return True
return False | 04d782431b79e78f93269c662c747d1f7348c9ec | 9,538 |
def get_sample_mean(values: list) -> float:
"""
Calculates the sample mean (overline x) of the elements in a list
:param values: list of values
:return: sample mean
"""
sample_mean = sum(values) / len(values)
return sample_mean | 182befe514f406340f0b1f37e892ad1add1f0ed2 | 9,543 |
def polyfill_integers(generator, low, high=None, size=None, dtype="int32",
endpoint=False):
"""Sample integers from a generator in different numpy versions.
Parameters
----------
generator : numpy.random.Generator or numpy.random.RandomState
The generator to sample from. If it is a ``RandomState``,
:func:`numpy.random.RandomState.randint` will be called,
otherwise :func:`numpy.random.Generator.integers`.
low : int or array-like of ints
See :func:`numpy.random.Generator.integers`.
high : int or array-like of ints, optional
See :func:`numpy.random.Generator.integers`.
size : int or tuple of ints, optional
See :func:`numpy.random.Generator.integers`.
dtype : {str, dtype}, optional
See :func:`numpy.random.Generator.integers`.
endpoint : bool, optional
See :func:`numpy.random.Generator.integers`.
Returns
-------
int or ndarray of ints
See :func:`numpy.random.Generator.integers`.
"""
if hasattr(generator, "randint"):
if endpoint:
if high is None:
high = low + 1
low = 0
else:
high = high + 1
return generator.randint(low=low, high=high, size=size, dtype=dtype)
return generator.integers(low=low, high=high, size=size, dtype=dtype,
endpoint=endpoint) | b4061e8ec7cb9927bbe4fcce1c847aecdc10052b | 9,544 |
import difflib
import pprint
def pprint_diff(first, second, first_name='first', second_name='second'):
"""Compare the pprint representation of two objects and yield diff lines."""
return difflib.unified_diff(
pprint.pformat(first).splitlines(),
pprint.pformat(second).splitlines(),
fromfile=first_name, tofile=second_name, lineterm='') | 5c88916b47cfa970d6ab15caa650540d2dab3c3b | 9,548 |
def ascii(value):
"""Return the string of value
:param mixed value: The value to return
:rtype: str
"""
return '{0}'.format(value) | 11cf1af6567c53a5583d8bdcb6da2431f6b79ba9 | 9,551 |
def find_closest_date(date, list_of_dates):
"""
This is a helper function that works on Python datetimes. It returns the closest date value,
and the timedelta from the provided date.
"""
match = min(list_of_dates, key = lambda x: abs(x - date))
delta = match - date
return match, delta | 57f9ecbf764539fcea495057ba4b908df700b8db | 9,552 |
from collections import Counter
def solve_part_one(id_list: list) -> int:
"""
Calculates the checksum for a list of IDs
:param id_list: Python list containing a list of ID strings
:return: Checksum as defined by the problem
"""
twos, threes = 0, 0
for id in id_list:
id_counter = Counter(id)
if 2 in id_counter.values():
twos += 1
if 3 in id_counter.values():
threes += 1
checksum = twos * threes
return checksum | a4fe4d7b8205492e132175199121f3ed5a58b7b9 | 9,554 |
def is_prerelease(version_str):
"""
Checks if the given version_str represents a prerelease version.
"""
return any([c.isalpha() for c in version_str]) | c6454bb350b2c4e55dbc271f23253aa2e3472802 | 9,556 |
import re
def class_name_to_resource_name(class_name: str) -> str:
"""Converts a camel case class name to a resource name with spaces.
>>> class_name_to_resource_name('FooBarObject')
'Foo Bar Object'
:param class_name: The name to convert.
:returns: The resource name.
"""
s = re.sub('(.)([A-Z][a-z]+)', r'\1 \2', class_name)
return re.sub('([a-z0-9])([A-Z])', r'\1 \2', s) | b0ac6692c441b0f4cfca4a9b680dc612552795f4 | 9,557 |
def datetime_to_date(dt, org):
"""
Convert a datetime to a date using the given org's timezone
"""
return dt.astimezone(org.timezone).date() | 92565cf65b0c485e6f8649a9a47619f516d0fd35 | 9,558 |
import requests
def current_server_id() -> str:
"""Helper to get the current server id"""
rsp = requests.get("http://localhost:10000/api/servers")
if rsp.status_code != 200:
raise ValueError("Failed to fetch current server id")
return rsp.json()['current_server'] | 8ec4efcc0eeea0b5b62ce5446aece5abdf6fbd66 | 9,560 |
def get_class_name(obj):
"""
Returns the name of the class of the given object
:param obj: the object whose class is to be determined
:return: the name of the class as a string
"""
return obj.__class__.__name__ | 93be3acf545376dc1df43684da75a59e967d2b2f | 9,565 |
def dataset(client):
"""Create a dataset."""
with client.with_dataset(name='dataset') as dataset:
dataset.authors = {
'name': 'me',
'email': '[email protected]',
}
return dataset | 4f77cd30c58ad74e48280be193f4dd30b0fb5584 | 9,567 |
import grp
def is_existing_group(group_name):
"""Asserts the group exists on the host.
Returns:
bool, True if group exists on the box, False otherwise
"""
try:
grp.getgrnam(group_name)
return True
except KeyError:
return False | 8831281684107d9f4c4511cb4cf3493494269650 | 9,570 |
def median(lst):
"""
Get the median value of a list
Arguments:
lst (list) -- list of ints or floats
Returns:
(int or float) -- median value in the list
"""
n = len(lst)
if n < 1:
return None
if n % 2 == 1:
return sorted(lst)[n//2]
else:
return sum(sorted(lst)[n//2-1:n//2+1])/2.0 | b6b7eefdf63490e35e74063995cabd38f4c12089 | 9,573 |
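Illustrative calls of median covering the odd- and even-length branches:
print(median([3, 1, 2]))     # 2
print(median([4, 1, 3, 2]))  # 2.5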
import weakref
def weakref_props(*properties):
"""A class decorator to assign properties that hold weakrefs to objects.
This decorator will not overwrite existing attributes and methods.
Parameters
----------
properties : list of str
A list of property attributes to assign to weakrefs.
Examples
--------
>>> @weakref_props('a', 'b')
... class Test(object):
... pass
>>> test = Test()
>>> test2 = Test()
>>> test.a = test2
>>> test.b = Test()
>>> test.c = 1
>>> sorted(test.__dict__.keys())
['_a', '_b', 'c']
>>> test.a == test2
True
>>> test.b is None # Dead link
True
>>> test.c == 1
True
>>> del test.a
>>> test.a is None
True
"""
def func(cls):
def property_func(attr):
def _set_attr(self, value):
name = '_' + attr if not attr.startswith('_') else attr
setattr(self, name, weakref.ref(value))
def _get_attr(self):
name = '_' + attr if not attr.startswith('_') else attr
value = getattr(self, name, None)
return value() if value is not None else None
def _del_attr(self):
name = '_' + attr if not attr.startswith('_') else attr
delattr(self, name)
docstr = "A weakref to the object stored in '{}'".format(attr)
return _get_attr, _set_attr, _del_attr, docstr
for prop in properties:
if hasattr(cls, prop):
continue
fget, fset, fdel, docstr = property_func(prop)
setattr(cls, prop, property(fget=fget, fset=fset, fdel=fdel,
doc=docstr))
return cls
return func | 4ae42fc4e2dccbb7193a377e122f96c4f7d5112d | 9,575 |
from pathlib import Path
def lglob(self: Path, pattern="*"):
"""Like Path.glob, but returns a list rather than a generator"""
return list(self.glob(pattern)) | eba1b9d6300a1e1aca5c47bedd6ac456430e4d89 | 9,576 |
from typing import Sequence
def parse_attrs(attrs):
"""Parse an attrs sequence/dict to have tuples as keys/items."""
if isinstance(attrs, Sequence):
ret = [item.split(".") for item in attrs]
else:
ret = dict()
for key, value in attrs.items():
ret[tuple(key.split("."))] = value
return ret | 15779e14fbdb9783d91732aa3420ccb18ee6c620 | 9,580 |
def scanD(d, ck, minSupport):
    """
    Compute the support of each candidate itemset in Ck over the dataset D,
    and return the candidates whose support is at least the minimum support minSupport.
    Args:
        D: the dataset (a list of transactions)
        Ck: list of candidate itemsets
        minSupport: minimum support threshold
    Returns:
        retList: candidate itemsets with support >= minSupport
        supportData: support value for every candidate itemset
    """
    # ssCnt temporarily stores the occurrence counts of the candidates in Ck,
    # e.g. a -> 10, b -> 5, c -> 8
    ssCnt = {}
    for tid in d:
        for can in ck:
            if can.issubset(tid):
                ssCnt[can] = ssCnt.get(can, 0) + 1
    numItems = float(len(d))
    retList = []
    supportData = {}
    for key in ssCnt:
        # support = occurrences of the candidate (key) / total number of transactions
        support = ssCnt[key] / numItems
        # Insert at the front of retList; only keep itemsets whose support meets the threshold
        if support >= minSupport:
            retList.insert(0, key)
        # Store every candidate (key) together with its support
        supportData[key] = support
    return retList, supportData | a37fccca461774777bf082ca1b1e2bf3528cc220 | 9,582 |
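A minimal usage sketch for scanD, assuming transactions are plain sets and candidate itemsets are frozensets (illustrative data):
transactions = [{1, 3, 4}, {2, 3, 5}, {1, 2, 3, 5}, {2, 5}]
candidates = [frozenset([1]), frozenset([2]), frozenset([3]), frozenset([5])]
frequent, support_data = scanD(transactions, candidates, 0.5)
print(frequent)      # candidate itemsets with support >= 0.5
print(support_data)  # support value for every candidate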
import codecs
def _readfile(fname, strip="\n"):
"""Shortcut for reading a text file."""
with codecs.open(fname, 'r', 'UTF8') as fp:
content = fp.read()
return content.strip(strip) if strip else content | 5708a91ed7ceb8743bf0e6a40962c80e74996368 | 9,583 |
def eval_request_bool(val, default=False):
"""
Evaluates the boolean value of a request parameter.
:param val: the value to check
:param default: bool to return by default
:return: Boolean
"""
assert isinstance(default, bool)
if val is not None:
val = val.lower()
if val in ['false', '0', 'n', 'no', 'off']:
return False
if val in ['true', '1', 'y', 'yes', 'on']:
return True
return default | 99909909846f3194abc8c83ad84411c3ccd1245c | 9,586 |
def get_category_index_from_categories(categories, category):
"""
Gets the index of a category from the categories dictionary. If the category
doesn't exist, it creates a new entry for that category name and returns
the new index number.
"""
if category not in categories:
categories[category] = len(categories)
return categories[category] | 94ce8e2926c1de55383d5fd11e531d9c81792f9c | 9,589 |
def intervalLength(aa, wrapAt=360.):
"""Returns the length of an interval."""
if wrapAt is None:
return (aa[1] - aa[0])
else:
return (aa[1] - aa[0]) % wrapAt | dbceac2d1606d1bedf7c12b4114c17b70db78a86 | 9,591 |
import json
def read_json(file_path: str) -> dict:
"""Reads json file from the given path.
Args:
file_path (str): Location of the file
Returns:
dict: Json content formatted as python dictionary in most cases
"""
with open(file_path, "r") as f:
return json.load(f) | 251c0ad8597ca2819727f95e7e52aa062444cba2 | 9,595 |
def get_weather_units(units):
"""returns a str representation of units of measurement that corresponds to given system of units.
if units is 'metric' return °C
if units is 'kelvin' return K
by default if units is 'imperial' return °F
Parameters
----------
:param str units: the system of units.
:return: a str representation of the units of measurement.
"""
degree_symbol = '\u00b0'
if units.lower() == 'metric':
return f'{degree_symbol}C'
if units.lower() == 'kelvin':
return 'K'
return f'{degree_symbol}F' | 6a12cb96e98f6ccf95927a79a5e9ffaa0a31d4ab | 9,598 |
from typing import Callable
from typing import Iterable
from typing import Dict
from typing import Any
def groupby_many_reduce(key: Callable, reducer: Callable, seq: Iterable):
"""Group a collection by a key function, when the value is given by a reducer function.
Parameters:
key (Callable): Key function (given object in collection outputs key).
reducer (Callable): Reducer function (given object in collection outputs new value).
seq (Iterable): Collection.
Returns:
Dict[Text, Any]: Dictionary where key has been computed by the `key` function
and value by the `reducer` function.
>>> groupby_many_reduce(head, lambda x, y: x + len(y) if x else len(y), ["hello", "hi", "test", "to"])
{'h': 7, 't': 6}
"""
result: Dict[Any, Any] = {}
for element in seq:
for key_result in key(element):
result[key_result] = reducer(result.get(key_result, None), element)
return result | 7d08325206bfb78cfe421244af6d91b4cd3ceb56 | 9,600 |
def store(src, rel, dst):
"""
Returns an SQL statement to store an edge into
the SQL backing store.
:param src: The source node.
:param rel: The relation.
:param dst: The destination node.
"""
smt = 'INSERT INTO %s (src, dst) VALUES (?, ?)'
return smt % rel, (src, dst) | 1fcb76ff722fbf0a43c125a4ff42405b12d54ec6 | 9,603 |
def get_output_metadata(packer, sample_dim_name):
"""
Retrieve xarray metadata for a packer's values, assuming arrays are [sample(, z)].
"""
metadata = []
for name in packer.pack_names:
n_features = packer.feature_counts[name]
if n_features == 1:
dims = [sample_dim_name]
else:
dims = [sample_dim_name, "z"]
metadata.append({"dims": dims, "units": "unknown"})
return tuple(metadata) | 7a16ed6878d58be45a3cd63b0a5bec515ab6475e | 9,613 |
def get_sample_interval(info, chan_info):
"""
Get sample interval for one channel
"""
if info['system_id'] in [1, 2, 3, 4, 5]: # Before version 5
sample_interval = (chan_info['divide'] * info['us_per_time'] *
info['time_per_adc']) * 1e-6
else:
sample_interval = (chan_info['l_chan_dvd'] *
info['us_per_time'] * info['dtime_base'])
return sample_interval | e1a397ad9221c30b70997f0cfb296305e0ca7355 | 9,625 |
def get_input_artifact_location(job):
"""
Returns the S3 location of the input artifact.
"""
input_artifact = job["data"]["inputArtifacts"][0]
input_location = input_artifact["location"]["s3Location"]
input_bucket = input_location["bucketName"]
input_key = input_location["objectKey"]
return (input_bucket, input_key) | 282881315313b88882f1df8019f60ae88f654cab | 9,626 |
import hashlib
def getImageHash(img):
""" Calculates md5 hash for a given Pillow image. """
md5hash = hashlib.md5(img.tobytes())
return md5hash.hexdigest() | d7bd7e1857f6849143f07063c045ae206985d4a3 | 9,634 |
import ssl
def create_ssl_context(verify=True, cafile=None, capath=None):
"""Set up the SSL context.
"""
# This is somewhat tricky to do it right and still keep it
# compatible across various Python versions.
try:
# The easiest and most secure way.
# Requires either Python 2.7.9 or 3.4 or newer.
context = ssl.create_default_context(cafile=cafile, capath=capath)
if not verify:
context.check_hostname = False
context.verify_mode = ssl.CERT_NONE
except AttributeError:
# ssl.create_default_context() is not available.
try:
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
except AttributeError:
# We don't even have the SSLContext class. This smells
# Python 2.7.8 or 3.1 or older. Bad luck.
return None
context.options |= ssl.OP_NO_SSLv2
context.options |= ssl.OP_NO_SSLv3
if verify:
context.verify_mode = ssl.CERT_REQUIRED
if cafile or capath:
context.load_verify_locations(cafile, capath)
else:
context.set_default_verify_paths()
else:
context.verify_mode = ssl.CERT_NONE
return context | fe8db07f3d0043224cb3ca739fa43a1e3e69fdae | 9,637 |
import csv
def LoadVNSIM(nsim_csv):
"""Returns dictionary with degraded file key and mean nsim value.
The CSV should have three values: reference path, degraded path, nsim value
Args:
nsim_csv: Path to CSV file with NSIM values, format described above.
Returns:
Dictionary with degraded file key and NSIM value.
"""
nsim_dict = {}
with open(nsim_csv, 'r') as csvfile:
nsim_reader = csv.reader(csvfile)
for row in nsim_reader:
# Skip header
if row[2] == 'vnsim':
continue
# Keep the degraded file without the directory info as key.
# This will match what the mos dictionary has.
deg_file = row[1].split('/')[-1]
nsim_dict[deg_file] = row[2]
return nsim_dict | 919d1dcffab7e4a78e0ced2cbeee01126d586c27 | 9,641 |
def getCharOverlapCount(from1, to1, from2, to2):
"""Calculates the number of overlapping characters of the two given areas."""
#order such that from1 is always prior from2
if from1 > from2:
tmp = from1
from1 = from2
from2 = tmp
tmp = to1
to1 = to2
to2 = tmp
if from2 >= from1 and from2 <= to1:
if to2 > to1:
return to1 - from2 + 1
else:
return to2 - from2 + 1
else:
return 0 | 66ea7cbc9408d41de002c96e40705d4dd45f9ad5 | 9,642 |
import re
def clean_python_name(s):
"""Method to convert string to Python 2 object name.
Intended for use in dataframe column names such that:
i) it complies with the python 2.x object name standard:
(letter|'_')(letter|digit|'_')
ii) my preference to use lowercase and adhere
to practice of case-insensitive column names for data
Based on
https://stackoverflow.com/questions/3303312/how-do-i-convert-a-string-to-a-valid-variable-name-in-python
Example:
.. code:: python
df.rename(columns=clean_python_name)
Args:
- s (str): string to be converted
Returns:
str: cleaned string
"""
# Remove leading characters until we find a letter or underscore, and remove trailing spaces
s = re.sub('^[^a-zA-Z_]+', '', s.strip())
# Replace invalid characters with underscores
s = re.sub('[^0-9a-zA-Z_]', '_', s)
return s.lower() | d77eaa81607aabf8cae62e2a9c36a51e8428aac4 | 9,646 |
def count_distinct_col(curs, table_name, col='y'):
"""Queries to find number of distinct values of col column in table in
database.
Args:
curs (sqlite3.Cursor): cursor to database
table_name (str): name of table to query
col (str): name of column to find number of distinct values for
Returns:
(int) number of distinct values for col"""
return curs.execute("""SELECT COUNT(DISTINCT {})
FROM {};""".format(col, table_name)).fetchone()[0] | c346b8463eeb4faec645917831f7bde8f42ed5e1 | 9,650 |
import struct
def encode_string(input_string):
"""Encode the string value in binary using utf-8
as well as its length (valuable info when decoding
later on). Length will be encoded as an unsigned
short (max 65535).
"""
input_string_encoded = input_string.encode("utf-8")
length = len(input_string_encoded)
length_encoded = struct.pack("<H", length)
return length_encoded + input_string_encoded | ecb26ce97cbebfe79b694e96b6e16d50069858b4 | 9,656 |
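For example, the two little-endian length bytes precede the UTF-8 payload:
print(encode_string("hi"))  # b'\x02\x00hi'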
def parse_vmin_vmax(container, field, vmin, vmax):
""" Parse and return vmin and vmax parameters. """
field_dict = container.fields[field]
if vmin is None:
if 'valid_min' in field_dict:
vmin = field_dict['valid_min']
else:
vmin = -6 # default value
if vmax is None:
if 'valid_max' in field_dict:
vmax = field_dict['valid_max']
else:
vmax = 100
return vmin, vmax | a7c096e4648662a5efe59c38de016e586c718ffb | 9,658 |
def fib_mem(n, computed={0:0,1:1}):
"""find fibonacci number using memoization"""
if n not in computed:
computed[n] = fib_mem(n-1, computed) + fib_mem (n-2, computed)
return computed[n] | 5d25c22ccdc5ea41fbd0faf21a8b35ac535acaef | 9,661 |
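A quick check of the memoized recursion:
print(fib_mem(10))  # 55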
def get_hash_tuple(a_dict, included_keys=None):
""" Helps in hashing a dictionary by flattening it to a flat list of its keys and values, and then converting it into a tuple (which is what the hash function expects). """
if included_keys is not None:
a_dict = {included_key:a_dict[included_key] for included_key in included_keys} # filter the dictionary for only the keys specified
member_names_tuple = list(a_dict.keys())
values_tuple = list(a_dict.values())
combined_tuple = tuple(member_names_tuple + values_tuple)
return combined_tuple | b35feea54e6e4446ac1097487445027a07751910 | 9,662 |
def get_cfg_option(cfg, sec, opt, verbose=False):
"""
Retrieve value of a specific option of a configuration.
Parameters
----------
cfg : configparser.ConfigParser()
Configuration as retrieved by the function read_cfg_file().
sec : str
The section in which the option is located.
opt : str
The option that should be retrieved.
verbose : bool, optional
Print info, if either section or option could not be found in cfg.
Returns
-------
str
Value of the option
"""
if sec not in cfg:
verbose and print("Section '%s' is not in configuration '%s'"
% (sec, cfg))
return None
if opt not in cfg[sec]:
verbose and print("Option '%s' is not in section '%s'"
% (opt, sec))
return None
option = cfg[sec][opt]
return option | 1f387c63d241f1364aa17caec76efe3b33f41b88 | 9,664 |
from typing import Callable
def check_callback(callback):
"""
Check if callback is a callable or a list of callables.
"""
if callback is not None:
if isinstance(callback, Callable):
return [callback]
elif (isinstance(callback, list) and
all([isinstance(c, Callable) for c in callback])):
return callback
else:
raise ValueError("callback should be either a callable or "
"a list of callables.")
else:
return [] | 1e0be3680c934a79777dbe99a47ecc406df19d2a | 9,671 |
def fasta_name_seq(s):
"""
Interprets a string as a FASTA record. Does not make any
assumptions about wrapping of the sequence string.
"""
DELIMITER = ">"
try:
lines = s.splitlines()
assert len(lines) > 1
assert lines[0][0] == DELIMITER
name = lines[0][1:]
sequence = "".join(lines[1:])
return (name, sequence)
except AssertionError:
raise ValueError("String not recognized as a valid FASTA record") | e450455710a945df25ad2cf4cc3c7f9ad662e7d3 | 9,676 |
def get_commit_timestamps(commits):
"""Get all commit timestamps for the given ebuild.
Args:
commits (list[Commit]): The commits in question.
Returns:
list[int]: The uprev commit unix timestamps, in order.
"""
return [int(commit.timestamp) for commit in commits] | 72694219664d0d6cf83d793e9ccce2b0642ec89f | 9,678 |
def commonprefix(m):
"""Given a list of pathnames, returns the longest common leading component without trailing /"""
if not m:
return ""
m = [p.rstrip("/").split("/") for p in m]
s1 = min(m)
s2 = max(m)
s = s1
for i, (c1, c2) in enumerate(zip(s1, s2)):
if c1 != c2:
s = s1[:i]
break
return "/".join(s) | 4d0bb25fd1eb1dbcd4ee28f67b61574751b6e091 | 9,681 |
def edgecolor_by_source(G, node_colors):
""" Returns a list of colors to set as edge colors based on the source node for each edge.
Parameters
----------
G : graph.
A networkx graph.
node_colors : list
List of node colors.
Example
--------
>>> colormap = {'male':'b','female':'r'}
>>> node_colors = set_node_color(G,"gender",colormap)
>>> edge_colors = edgecolor_by_source(G,node_colors)
Returns
-------
list
list of colors for each edge of the graph color set by the source node.
"""
edge_colormap = []
node_colormap = dict(zip(G.nodes(), node_colors))
for edge in G.edges():
edge_colormap.append(node_colormap[edge[0]])
return edge_colormap | 961205e100cf208f5471c08afdd8b3c7328713c0 | 9,682 |
def build_tuple_for_feet_structure(quantity):
"""
Builds the tuple required to create a FeetAndInches object
:param quantity: string containing the feet, inches, and fractional inches
:return: tuple containing feet, inches, and calculated fractional inches
"""
feet = float(quantity[0])
inches = float(quantity[1])
fractional_inches = quantity[2].split('/')
return feet, inches, int(fractional_inches[0])/int(fractional_inches[1]) | 2a66e7bf859e120d224c097a628445342a987067 | 9,685 |
def _all(itr):
"""Similar to Python's all, but returns the first value that doesn't match."""
any_iterations = False
val = None
for val in itr:
any_iterations = True
if not val:
return val
return val if any_iterations else True | bb1145abaaaa1c6910371178ca5ebe68600bb287 | 9,686 |
def no_init(_data, weights):
"""
Return the entered weights.
Parameters
----------
_data: ndarray
Data to pick to initialize weights.
weights: ndarray
Previous weight values.
Returns
-------
weights: ndarray
New weight values
Notes
-----
Useful when it is needed a function that accepts two parameters as all others
weight init functions, but it is no needed to calculate any new value.
"""
return weights | f120b49ab26fa1051360b4e4ae85dd07025ae5cc | 9,687 |
import uuid
def is_valid_uuid(val):
"""
Check if a string is a valid uuid
:param val: uuid String
:return: Returns true if is a string is a valid uuid else False
"""
try:
uuid.UUID(str(val))
return True
except ValueError:
return False | d04f658d3ae2fa85377e110b0a6716bc34ee9df0 | 9,689 |
def convert_to_hexadecimal(bits, padding):
"""
Converts bits to a hexadecimal character with padding.
E.g.
Converts [False, False, False, True], 0 to "1".
Converts [True, False, False, False], 2 to "08"
Args:
bits: List of boolean values.
padding: Integer of number of 0 padded places.
Returns:
string: Zero padded hexadecimal number.
"""
bits_as_strings = ["1" if bit else "0" for bit in bits]
bits_base_2 = int("".join(bits_as_strings), 2)
zero_padded_eight_digit_hexadecimal_with_prefix = "{0:#0{1}x}".format(bits_base_2, padding + 2)
zero_padded_eight_digit_hexadecimal_without_prefix = zero_padded_eight_digit_hexadecimal_with_prefix[2:]
return zero_padded_eight_digit_hexadecimal_without_prefix.upper() | b8cd1647a24072278aca65f7734934acd93d8f12 | 9,690 |
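Reproducing the second docstring example:
print(convert_to_hexadecimal([True, False, False, False], 2))  # 08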
import string
def _sanitize_title(title):
""" Remove all non alphanumeric characters from title and lowercase """
alphanumeric = string.ascii_lowercase + string.digits + ' '
title = title.lower()
title = "".join(filter(lambda x: x in alphanumeric, title))
return title | 6f0d1818140bc2a50b160f73b8b4590be8f31891 | 9,691 |
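An illustrative call with a hypothetical title:
print(_sanitize_title("Hello, World! 2024"))  # hello world 2024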
def IsBefore(version, major, minor, revision):
"""Decide if a given version is strictly before a given version.
@param version: (major, minor, revision) or None, with None being
before all versions
@type version: (int, int, int) or None
@param major: major version
@type major: int
@param minor: minor version
@type minor: int
@param revision: revision
@type revision: int
"""
if version is None:
return True
return version < (major, minor, revision) | 3fe9f995b90d7406d0b0366b0bbe5a940f975893 | 9,692 |
def ts_grismc_sim(pixels):
"""
Simple analytic wavelength calibration for Simulated GRISMC data
"""
disp = 0.0010035 ## microns per pixel (toward positive X in raw detector pixels, used in pynrc)
undevWav = 4.0 ## undeviated wavelength
undevPx = 1638.33
wavelengths = (pixels - undevPx) * disp + undevWav
return wavelengths | f3491fea1fa1e8833384711076e6187f0f6cb42b | 9,699 |
def get_profile_avatar_upload_to(instance, filename):
""" Returns a valid upload path for the avatar associated with a forum profile. """
return instance.get_avatar_upload_to(filename) | 840e7482b225c0a456dbc8cd967203aa542945f8 | 9,700 |
def parse_bool_token(token):
"""
Parses a string token to convert it to its equivalent boolean value ignoring
the case of the string token or leaves the token intact if it cannot.
:param token:
String to convert to ``True`` or ``False``.
:type token:
``str``
:return:
``True`` or ``False`` or the token itself if not converted.
Usage::
>>> parse_bool_token('FAlse')
False
>>> parse_bool_token('FalS')
'FalS'
>>> parse_bool_token('true')
True
>>> parse_bool_token('TRUE')
True
"""
return {'true': True, 'false': False}.get(token.lower(), token) | bd9de30ee85921ba72a46e83eb96a0af104f998d | 9,702 |
def predict_by_moving_avg_growth(stock, s, **_):
"""Returns predicted value of a stock
Predicts the next price of a stock by extrapolating the moving average and its growth.
Parameters
----------
stock : :obj:`stock`
Stock to be predicted.
s : int
Number of data points used to calculate a moving average.
Returns
-------
int
Predicted next price of the stock
Notes
_____
The moving average lags behind by n/2 + 0.5 periods when not centered around the mean.
"""
stockPriceHistory = len(stock.price_history)
if stockPriceHistory < s+1:
return None
else:
ma = sum(stock.price_history[-s:]) / s
growth = ma - sum(stock.price_history[-s-1:-1]) / s
predicted = ma + (s/2+0.5)*growth
if predicted > 0:
return predicted
else:
return 0 | c8e02b9c55bd339ff6f1793b697ba46647331326 | 9,706 |
def select_data(df, countries_list, regions_list, ages_list, genders_list):
"""Extracts from the dataset the data corresponding to many criterias.
Parameters:
-----------
df : Pandas DataFrame
dataset
countries_list : list of str
countries to be selected
regions_list : list of str
regions to be selected
ages_list : list of int
age ranges to be selected
genders_list : list of str
genders to be selected
Returns:
-----------
The corresponding dataset (pandas DataFrame)
"""
df0 = df[df['Country'].isin(countries_list)]
df1 = df0[df0['Region'].isin(regions_list)]
df2 = df1[df1['Age'].isin(ages_list)]
df3 = df2[df2['Sex'].isin(genders_list)]
return df3 | de6e24966f3060728657a4cc6685c8203bfa85e7 | 9,710 |
def percentage(value, precision=2):
"""Convert `float` to #.##% notation as `str`.
A value of 1 = `"100.00%"`; 0.5 = `"50.00%"`"""
return f"{value:.{precision}%}" | 7abd3fafa8fc6f8323ca448ff5022faa0f83aa60 | 9,717 |
import re
def findAlphanumeric(line):
"""Parse string to extract all non-numeric strings"""
return re.findall(r'^\w+', line) | 7ac210f35d347532ff9e68b4fd6f6f978b0c61ea | 9,718 |
def rep(data):
    """Checks if all labels are represented
    in the dataset `data`."""
    labels = [0, 1]
    # Iteratively check if all labels are represented
    for row in data:
        label = row["label"]
        if label in labels:
            # If the label is found, remove it from the list of missing labels
            labels.remove(label)
        if not labels:
            # List is empty, so all labels are represented
            return True
    return False | 803c561c48fec10c44154138e83e95405054dad5 | 9,720 |
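A small sketch against hypothetical rows, assuming each row is a dict with a 'label' key:
print(rep([{"label": 0}, {"label": 1}, {"label": 0}]))  # True
print(rep([{"label": 0}, {"label": 0}]))                # False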
import numbers
def torch_item(x):
"""
Like ``x.item()`` for a :class:`~torch.Tensor`, but also works with numbers.
"""
return x if isinstance(x, numbers.Number) else x.item() | dae9881c21a305b42e5c488723a88beb117bf90f | 9,724 |
def count(grid, c):
"""
Count the occurrences
of an object "c" in
the 2D list "grid".
"""
acc = 0
for row in grid:
for elem in row:
acc += c == elem
return acc | 6a497b5d052ce8e1d2619f2278010ecd41126a42 | 9,725 |
def config_retry_strategy(retry):
"""Generate retry strategy."""
if not isinstance(retry, int):
raise ValueError("Parameter retry should be a number")
return {"limit": retry, "retryPolicy": "Always"} | cb436261391e57e845ac5019c7906a56edc2db64 | 9,726 |
def interval(lower_, upper_):
"""Build an interval."""
if lower_ <= upper_:
return lower_, upper_
return None | 3dd1b0c04c9cad8e5f8a69e5c348f07a7542fe7b | 9,728 |
def fetch_links(html):
"""
Fetch all links on a given page and return their hrefs in a list
"""
elements = html.cssselect('a')
return [e.get('href') for e in elements if e.get('href') is not None] | f97f610e5baeb3e3b304c3093a5e27fc0b8de551 | 9,729 |
def echo_handler(ex, *args, **kwargs):
"""
Example error handler which echoes the exception and the arguments.
"""
argstring = ','.join(['%s' % arg for arg in args])
return '%s/%s/%s' % (ex, argstring, kwargs) | 29ca4f7663e9c3bf1893bf3f4ab84c8744827ec3 | 9,731 |
import json
def load_config(config_name):
"""
Loads a json config file and returns a config dictionary.
:param config_name: the path to the config json
"""
with open(config_name) as config_file:
config = json.load(config_file)
return config | 5920d21c67133d2d106863910fdd8db95efc94e6 | 9,733 |
def get_by_name(opname, operators):
"""Return operator class instance by name.
Parameters
----------
opname: str
Name of the sklearn class that belongs to a TPOT operator
operators: list
List of operator classes from operator library
Returns
-------
ret_op_class: class
An operator class
"""
ret_op_classes = [op for op in operators if op.__name__ == opname]
if len(ret_op_classes) == 0:
raise TypeError('Cannot find operator {} in operator dictionary'.format(opname))
elif len(ret_op_classes) > 1:
raise ValueError(
'Found duplicate operators {} in operator dictionary. Please check '
'your dictionary file.'.format(opname)
)
ret_op_class = ret_op_classes[0]
return ret_op_class | 29692c00ae034c391582ab7dd40a1d728406e73f | 9,735 |
def get_sub_text_from_xml_node( xmlnode, _text=None ):
"""
Concatenates the content at and under the given ElementTree node, such as
text = get_sub_text_from_xml_node( xmlnode )
"""
if _text is None:
_text = []
if xmlnode.text:
_text.append( xmlnode.text )
for nd in xmlnode:
get_sub_text_from_xml_node( nd, _text )
if nd.tail:
_text.append( nd.tail )
return ''.join( _text ) | 620200d5ba782f5696c720ba4da722204783bd17 | 9,737 |
def cmp_char(a, b):
"""Returns '<', '=', '>' depending on whether a < b, a = b, or a > b
Examples
--------
>>> from misc_utils import cmp_char
>>> cmp_char(1, 2)
'<'
>>> print('%d %s %d' % (1, cmp_char(1,2), 2))
1 < 2
Parameters
----------
a
Value to be compared
b
Value to be compared
Returns
-------
{'<', '=', '>'}
Character denoting the result of comparing `a` and `b`.
"""
if a < b:
return '<'
elif a == b:
return '='
elif a > b:
return '>'
else:
return '?' | 7e8183564f888df3cce65f2bbbeb659aec43928c | 9,744 |
def get_command_name(cmd, default=''):
"""Extracts command name."""
# Check if command object exists.
# Return the expected name property or replace with default.
if cmd:
return cmd.name
return default | f77a73d1ff24ec74b1c7cf10f89c45fab41fed20 | 9,746 |
import hashlib
def get_str_md5(content):
"""
Calculate the MD5 hex digest of the given content (bytes)
:param content:
:return:
"""
m = hashlib.md5(content)  # create an md5 hash object
return m.hexdigest() | c0e864288d8d6af2fe31b5cb5afe54bfe83e2fb3 | 9,750 |
def make_test(row):
"""
Generate a test method
"""
def row_test(self):
actual = row.get("_actual")
if actual in ("P", "F"):
if actual == "P":
self.assertMeansTest("eligible", row)
else:
self.assertMeansTest("ineligible", row)
row_test.__doc__ = str(row.get("line_number")) + ": " + row.get("_description")
return row_test | 083117f44687c56a7a33cfa74776baea6b40048c | 9,751 |
def _find_last_larger_than(target, val_array):
"""
Takes an array and finds the last value larger than the target value.
Returns the index of that value, returns -1 if none exists in array.
"""
ind = -1
for j in range(len(val_array), 0, -1):
if val_array[j - 1] > target:
ind = j - 1
break
return ind | dbdba59ba35b502669082c8416159770843b7312 | 9,766 |
import struct
def readStruct(fb, structFormat, seek=False, cleanStrings=True):
"""
Return a structured value in an ABF file as a Python object.
If cleanStrings is enabled, ascii-safe strings are returned.
"""
if seek:
fb.seek(seek)
varSize = struct.calcsize(structFormat)
byteString = fb.read(varSize)
vals = struct.unpack(structFormat, byteString)
vals = list(vals)
if cleanStrings:
for i in range(len(vals)):
if isinstance(vals[i], bytes):
vals[i] = vals[i].decode("ascii", errors='ignore').strip()
if len(vals) == 1:
vals = vals[0]
return vals | 1920c69f1881698a3898774be95e8f10a462d936 | 9,767 |
def abspath(newpath, curpath):
"""Return the absolute path to the given 'newpath'.
The current directory string must be given by 'curpath' as an absolute
path.
"""
assert newpath
assert curpath
assert curpath.startswith('/')
subdirs = newpath.split('/')
if not subdirs[0] or curpath == '/':
# Absolute path (curpath is ignored)
# or we're in the root directory
dirs = [""]
else:
# Relative path; extract directory components from curpath
dirs = curpath.split('/')
for s in subdirs:
if not s or s == ".":
# Empty or 'current directory'
pass
elif s == "..":
dirs.pop()
if not dirs:
raise ValueError("Too many '..' in path '{}'".format(newpath))
else:
dirs.append(s)
if len(dirs) == 1:
# Special case for root: joining [] or [""] return "", but you can't
# set the first component to "/" since joining ["/","foo"] would
# return "//foo"
return '/'
return '/'.join(dirs) | 0b1416492891121f433ce3bfbf934601bfc96f06 | 9,770 |
def get_by_string(source_dict, search_string, default_if_not_found=None):
"""
Search a dictionary using keys provided by the search string.
The search string is made up of keywords separated by a '.'
Example: 'fee.fie.foe.fum'
:param source_dict: the dictionary to search
:param search_string: search string with keyword separated by '.'
:param default_if_not_found: Return value if search is un-successful
:return value, dictionary or default_if_not_found
"""
if not source_dict or not search_string:
return default_if_not_found
dict_obj = source_dict
for search_key in search_string.split("."):
try:
dict_obj = next(val for key, val in dict_obj.items() if key == search_key)
except StopIteration:
return default_if_not_found
return dict_obj | 59386f5777805f2e7c5a7c7204c56d3d5792c190 | 9,772 |