content | id
---|---|
def get_credentials(_globals: dict):
"""
Gets Credentials from Globals
Structure may be found in modules/ducktests/tests/checks/utils/check_get_credentials.py
    This function returns the default username and password; defaults may be overridden through globals.
"""
if USERNAME_KEY in _globals[AUTHENTICATION_KEY] and PASSWORD_KEY in _globals[AUTHENTICATION_KEY]:
return _globals[AUTHENTICATION_KEY][USERNAME_KEY], _globals[AUTHENTICATION_KEY][PASSWORD_KEY]
return DEFAULT_AUTH_USERNAME, DEFAULT_AUTH_PASSWORD | 600 |
def hideablerevs(repo):
"""Revision candidates to be hidden
This is a standalone function to allow extensions to wrap it.
Because we use the set of immutable changesets as a fallback subset in
branchmap (see mercurial.branchmap.subsettable), you cannot set "public"
changesets as "hideable". Doing so would break multiple code assertions and
lead to crashes."""
return obsolete.getrevs(repo, 'obsolete') | 601 |
def _find_matches(pattern_pieces, directory):
"""
Used by eglob.
"""
import glob
if not _os.path.isdir(directory):
return
piece = pattern_pieces[0]
last = len(pattern_pieces) == 1
remaining_pieces = []
if piece == '**':
if not last:
remaining_pieces = pattern_pieces[1:]
for root, dirs, files in _os.walk(directory):
if last:
# At the end of a pattern, "**" just recursively matches
# directories.
yield _os.path.normpath(root)
else:
# Recurse downward, trying to match the rest of the
# pattern.
sub_result = _find_matches(remaining_pieces, root)
for partial_path in sub_result:
yield _os.path.normpath(partial_path)
else:
# Regular glob pattern.
matches = glob.glob(_os.path.join(directory, piece))
if len(matches) > 0:
if last:
for match in matches:
yield _os.path.normpath(match)
else:
remaining_pieces = pattern_pieces[1:]
for match in matches:
sub_result = _find_matches(remaining_pieces, match)
for partial_path in sub_result:
yield _os.path.normpath(partial_path) | 602 |
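A minimal usage sketch of the recursive matcher above, assuming `_os` is the standard `os` module and that the caller (presumably `eglob`) has already split the pattern on path separators:

# Hypothetical call: find every .py file anywhere under the current directory.
for path in _find_matches(['**', '*.py'], '.'):
    print(path)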
def eject(force=False, unless_exists=False, verbose=False):
"""Write the generated files, without header warnings."""
docs_dir = Path('./docs')
write_template_files(docs_dir, force=force, include_generated_warning=False,
unless_exists=unless_exists, verbose=verbose) | 603 |
def start(queue, height):
""" Start finding pending intents """
results = []
global tree
global current_file
count = 0
# TODO - Look for use of fillIn method which can make this a much more exploitable condition
for j in common.java_files:
count = count + 1
        pub.sendMessage('progress', bar='Pending Intents', percent=round(count * 100 / len(common.java_files)))
current_file = j
try:
tree = parser.parse_file(j)
except ValueError as e:
continue
# TODO - Need to add scanning of the imports, to see if Intent or PendingIntent is extended, was working on it,
# but the one issue where it arose was non-trivial, so I gave up for now
try:
for import_decl in tree.import_declarations:
if 'PendingIntent' in import_decl.name.value:
for type_decl in tree.type_declarations:
if type(type_decl) is m.ClassDeclaration:
for t in type_decl.body:
try:
empty_intent(t, j, results)
except Exception as e:
common.logger.debug("Problem in empty intent function of findPending.py: " + str(e))
common.parsingerrors.add(str(current_file))
for f in t._fields:
                                    # dynamically parse each token, where f is the field and t is the token
try:
recurse(f, t, results)
except Exception as e:
common.logger.debug("Problem in recurse function of findPending.py: " + str(e))
common.parsingerrors.add(str(current_file))
except Exception:
common.logger.debug("No type declarations: " + str(j))
report.write("parsingerror-issues-list", str(current_file), "strong")
queue.put(results)
return | 604 |
def squeeze_features(protein):
"""Remove singleton and repeated dimensions in protein features."""
protein["aatype"] = torch.argmax(protein["aatype"], dim=-1)
for k in [
"domain_name",
"msa",
"num_alignments",
"seq_length",
"sequence",
"superfamily",
"deletion_matrix",
"resolution",
"between_segment_residues",
"residue_index",
"template_all_atom_mask",
]:
if k in protein:
final_dim = protein[k].shape[-1]
if isinstance(final_dim, int) and final_dim == 1:
if torch.is_tensor(protein[k]):
protein[k] = torch.squeeze(protein[k], dim=-1)
else:
protein[k] = np.squeeze(protein[k], axis=-1)
for k in ["seq_length", "num_alignments"]:
if k in protein:
protein[k] = protein[k][0]
return protein | 605 |
def get_cache_dir(app_name: str, suffix: str = None, create: bool = True):
"""Get a local cache directory for a given application name.
Args:
app_name: The name of the application.
suffix: A subdirectory appended to the cache dir.
create: Whether to create the directory and its parents if it does not
already exist.
"""
appdirs = _import_appdirs()
if appdirs is None:
raise ImportError(
"To use `dm.utils.fs.get_cache_dir()`, you must have `appdirs` "
"installed: `conda install appdirs`."
)
cache_dir = pathlib.Path(appdirs.user_cache_dir(appname=app_name))
if suffix is not None:
cache_dir /= suffix
if create:
cache_dir.mkdir(exist_ok=True, parents=True)
return cache_dir | 606 |
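A small usage sketch; the application name and suffix below are made up:

# Resolves to a platform-specific per-user cache folder, e.g. ~/.cache/my-app/models on Linux.
model_cache = get_cache_dir("my-app", suffix="models")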
def _dump_test_data(filename, num_per_type=10):
"""Get corpus of statements for testing that has a range of stmt types."""
sp = signor.process_from_web()
# Group statements by type
stmts_by_type = defaultdict(list)
for stmt in sp.statements:
stmts_by_type[stmt.__class__].append(stmt)
# Sample statements of each type (without replacement)
stmt_sample = []
for stmt_type, stmt_list in stmts_by_type.items():
if len(stmt_list) <= num_per_type:
stmt_sample.extend(stmt_list)
else:
stmt_sample.extend(random.sample(stmt_list, num_per_type))
# Make a random binary class vector for the stmt list
y_arr = [random.choice((0, 1)) for s in stmt_sample]
with open(test_stmt_path, 'wb') as f:
pickle.dump((stmt_sample, y_arr), f)
return stmt_sample | 607 |
def drawBoundingBoxes(imageData, boxes: Union[List[Section], List[Line]], color = (0, 120, 0, 120)):
"""Draw bounding boxes on an image.
imageData: image data in numpy array format
    boxes: list of Section or Line objects whose bounds are given as fractions of the image size
    color: a single RGBA tuple, or a list with one tuple per box
"""
if len(color) != len(boxes):
colors = [color for i in range(len(boxes))]
else:
colors = color
for res,c in zip(boxes, colors):
#rint(res)
#res = Line("", "", res, [])
imgHeight, imgWidth, _ = imageData.shape
left = int(res.bound.left * imgWidth)
top = int(res.bound.top * imgHeight)
right = int(res.bound.right() * imgWidth)
bottom = int(res.bound.bottom() * imgHeight)
thick = int((imgHeight + imgWidth) // 900)
cv2.rectangle(imageData,(left, top), (right, bottom), c, thick)
plt.figure(figsize=(20, 20))
RGB_img = cv2.cvtColor(imageData, cv2.COLOR_BGR2RGB)
plt.imshow(RGB_img, ) | 608 |
def gin_dict_parser(coll):
"""
Use for parsing collections that may contain a 'gin' key.
The 'gin' key is assumed to map to either a dict or str value that contains gin bindings.
e.g.
{'gin': {'Classifier.n_layers': 2, 'Classifier.width': 3}}
or
{'gin': 'Classifier.n_layers = 2\nClassifier.width = 3'}
"""
if 'gin' in coll:
if is_mapping(coll['gin']):
gin.parse_config("".join(map(lambda t: f'{t[0]} = {t[1]}\n', iteritems(coll['gin']))))
elif isinstance(coll['gin'], str):
gin.parse_config(coll['gin'])
return coll | 609 |
def ones(shape, dtype):
"""
Declare a new worker-local tensor with all elements initialized to one.
:param shape: the tensor shape
:param dtype: the tensor data type
:return: the tensor expression
"""
np_dtype = DType(dtype).as_numpy()
init = _ConstTensor(np.ones(shape, dtype=np_dtype))
return LocalTensor(init) | 610 |
def _valid_url(url):
"""Checks that the given URL is Discord embed friendly. Or at least, it tries."""
def _valid_string(segment, main=True):
if not len(segment):
return False
for c in [ord(it.lower()) for it in segment]:
if not (97 <= c <= 122 or (main and (48 <= c <= 57 or c == 45))):
return False
return True
test = urlparse(url)
if not (test.scheme and test.netloc and '.' in test.netloc):
return False
# Discord only accepts http or https
if test.scheme not in ('http', 'https'):
return False
# Test for valid netloc
netloc_split = test.netloc.split('.')
if (len(netloc_split) < 2):
return False # http://foo
tld = test.netloc.split('.')[-1]
if not (len(tld) >= 2 and _valid_string(tld, main=False)):
return False # http://foo.123
for segment in netloc_split[:-1]:
if not _valid_string(segment):
return False # http://foo..bar or http://fo*o.bar
for c in url:
if not 33 <= ord(c) <= 126:
            return False  # reject URLs containing non-printable or non-ASCII characters
return True | 611 |
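A few illustrative checks of the rules enforced above:

assert _valid_url("https://example.com/page")   # well-formed http(s) URL
assert not _valid_url("ftp://example.com")      # scheme must be http or https
assert not _valid_url("http://foo")             # netloc needs at least one dot
assert not _valid_url("http://foo.123")         # TLD must be alphabetic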
def generate_initialization_perturbation(
blk, bound_push=1e-2, bound_frac=1e-2, bound_relax_factor=1e-8, user_scaling=False
):
"""
Generate the initialization perturbations performed by IPOPT for a given Block
Args:
blk: Pyomo block
bound_push: bound_push to evaluate (same as IPOPT option) (default=1e-2)
bound_frac: bound_frac to evaluate (same as IPOPT option) (default=1e-2)
bound_relax_factor: bound_relax_factor to evaluate (same as IPOPT option) (default=1e-8)
user_scaling: If True, the variables are scaled as if `nlp_scaling_method = user-scaling`
is used. (default=False)
Yields:
tuple: (pyo.Var object, current_value, perturbed_value)
"""
kappa1 = bound_push
kappa2 = bound_frac
for v in blk.component_data_objects(Var):
if v.value is None:
_log.warning(f"Variable {v.name} has no initial value")
continue
if v.fixed:
continue
if user_scaling:
sf = get_scaling_factor(v, default=1.0)
else:
sf = 1.0
v_lb = __none_left_mult(v.lb, sf)
if v_lb is not None:
v_lb -= bound_relax_factor * max(1, abs(v_lb))
v_value = v.value * sf
v_ub = __none_left_mult(v.ub, sf)
if v_ub is not None:
v_ub += bound_relax_factor * max(1, abs(v_ub))
if v_lb is not None:
if v_ub is not None:
pl = min(kappa1 * max(1, abs(v_lb)), kappa2 * (v_ub - v_lb))
else:
pl = kappa1 * max(1, abs(v_lb))
if v_value < v_lb + pl:
yield (v, v_value, v_lb + pl)
if v_ub is not None:
if v_lb is not None:
pu = min(kappa1 * max(1, abs(v_ub)), kappa2 * (v_ub - v_lb))
else:
pu = kappa1 * max(1, abs(v_ub))
if v_value > v_ub - pu:
yield (v, v_value, v_ub - pu) | 612 |
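An illustrative sketch of driving the generator with a small Pyomo model (the variables and values below are made up):

import pyomo.environ as pyo

m = pyo.ConcreteModel()
m.x = pyo.Var(bounds=(0.0, 10.0), initialize=1e-6)   # sits almost on its lower bound
m.y = pyo.Var(bounds=(0.0, 1.0), initialize=0.5)     # comfortably inside its bounds

# Only m.x is reported: IPOPT's bound_push would move it away from the bound.
for var, value, perturbed in generate_initialization_perturbation(m):
    print(var.name, value, perturbed)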
def isText(node):
"""
Returns True if the supplied node is free text.
"""
return node.nodeType == node.TEXT_NODE | 613 |
def test_zscore(dir_seq, dir_targets):
"""Test that TETRA Z-score calculated correctly."""
tetra_z = calculate_tetra_zscore(dir_seq / "NC_002696.fna")
with (dir_targets / "tetra" / "zscore.json").open("r") as ifh:
target = json.load(ifh)
assert ordered(tetra_z) == ordered(target) | 614 |
def _parse_descriptor(desc: str, ctx: '_ParseDescriptorContext') -> 'Descriptor':
"""
:meta private:
Parse a descriptor given the context level we are in.
Used recursively to parse subdescriptors
:param desc: The descriptor string to parse
:param ctx: The :class:`_ParseDescriptorContext` indicating the level we are in
:return: The parsed descriptor
:raises: ValueError: if the descriptor is malformed
"""
func, expr = _get_func_expr(desc)
if func == "pk":
pubkey, expr = parse_pubkey(expr)
if expr:
raise ValueError("more than one pubkey in pk descriptor")
return PKDescriptor(pubkey)
if func == "pkh":
if not (ctx == _ParseDescriptorContext.TOP or ctx == _ParseDescriptorContext.P2SH or ctx == _ParseDescriptorContext.P2WSH):
raise ValueError("Can only have pkh at top level, in sh(), or in wsh()")
pubkey, expr = parse_pubkey(expr)
if expr:
raise ValueError("More than one pubkey in pkh descriptor")
return PKHDescriptor(pubkey)
if func == "sortedmulti" or func == "multi":
if not (ctx == _ParseDescriptorContext.TOP or ctx == _ParseDescriptorContext.P2SH or ctx == _ParseDescriptorContext.P2WSH):
raise ValueError("Can only have multi/sortedmulti at top level, in sh(), or in wsh()")
is_sorted = func == "sortedmulti"
comma_idx = expr.index(",")
thresh = int(expr[:comma_idx])
expr = expr[comma_idx + 1:]
pubkeys = []
while expr:
pubkey, expr = parse_pubkey(expr)
pubkeys.append(pubkey)
if len(pubkeys) == 0 or len(pubkeys) > 16:
raise ValueError("Cannot have {} keys in a multisig; must have between 1 and 16 keys, inclusive".format(len(pubkeys)))
elif thresh < 1:
raise ValueError("Multisig threshold cannot be {}, must be at least 1".format(thresh))
elif thresh > len(pubkeys):
raise ValueError("Multisig threshold cannot be larger than the number of keys; threshold is {} but only {} keys specified".format(thresh, len(pubkeys)))
if ctx == _ParseDescriptorContext.TOP and len(pubkeys) > 3:
raise ValueError("Cannot have {} pubkeys in bare multisig: only at most 3 pubkeys")
return MultisigDescriptor(pubkeys, thresh, is_sorted)
if func == "wpkh":
if not (ctx == _ParseDescriptorContext.TOP or ctx == _ParseDescriptorContext.P2SH):
raise ValueError("Can only have wpkh() at top level or inside sh()")
pubkey, expr = parse_pubkey(expr)
if expr:
raise ValueError("More than one pubkey in pkh descriptor")
return WPKHDescriptor(pubkey)
if func == "sh":
if ctx != _ParseDescriptorContext.TOP:
raise ValueError("Can only have sh() at top level")
subdesc = _parse_descriptor(expr, _ParseDescriptorContext.P2SH)
return SHDescriptor(subdesc)
if func == "wsh":
if not (ctx == _ParseDescriptorContext.TOP or ctx == _ParseDescriptorContext.P2SH):
raise ValueError("Can only have wsh() at top level or inside sh()")
subdesc = _parse_descriptor(expr, _ParseDescriptorContext.P2WSH)
return WSHDescriptor(subdesc)
if func == "tr":
if ctx != _ParseDescriptorContext.TOP:
raise ValueError("Can only have tr at top level")
internal_key, expr = parse_pubkey(expr)
subscripts = []
depths = []
if expr:
# Path from top of the tree to what we're currently processing.
# branches[i] == False: left branch in the i'th step from the top
# branches[i] == true: right branch
branches = []
while True:
# Process open braces
while True:
try:
expr = _get_const(expr, "{")
branches.append(False)
except ValueError:
break
if len(branches) > MAX_TAPROOT_NODES:
raise ValueError("tr() supports at most {MAX_TAPROOT_NODES} nesting levels")
# Process script expression
sarg, expr = _get_expr(expr)
subscripts.append(_parse_descriptor(sarg, _ParseDescriptorContext.P2TR))
depths.append(len(branches))
# Process closing braces
while len(branches) > 0 and branches[-1]:
expr = _get_const(expr, "}")
branches.pop()
# If we're at the end of a left branch, expect a comma
if len(branches) > 0 and not branches[-1]:
expr = _get_const(expr, ",")
branches[-1] = True
if len(branches) == 0:
break
return TRDescriptor(internal_key, subscripts, depths)
if ctx == _ParseDescriptorContext.P2SH:
raise ValueError("A function is needed within P2SH")
elif ctx == _ParseDescriptorContext.P2WSH:
raise ValueError("A function is needed within P2WSH")
raise ValueError("{} is not a valid descriptor function".format(func)) | 615 |
def get_editable_fields(cc_content, context):
"""
Return the set of fields that the requester can edit on the given content
"""
# For closed thread:
# no edits, except 'abuse_flagged' and 'read' are allowed for thread
# no edits, except 'abuse_flagged' is allowed for comment
ret = {"abuse_flagged"}
if cc_content["type"] == "thread" and cc_content["closed"]:
ret |= {"read"}
return ret
if cc_content["type"] == "comment" and context["thread"]["closed"]:
return ret
# Shared fields
ret |= {"voted"}
if _is_author_or_privileged(cc_content, context):
ret |= {"raw_body"}
# Thread fields
if cc_content["type"] == "thread":
ret |= {"following", "read"}
if _is_author_or_privileged(cc_content, context):
ret |= {"topic_id", "type", "title"}
if context["is_requester_privileged"] and context["discussion_division_enabled"]:
ret |= {"group_id"}
# Comment fields
if (
cc_content["type"] == "comment" and (
context["is_requester_privileged"] or (
_is_author(context["thread"], context) and
context["thread"]["thread_type"] == "question"
)
)
):
ret |= {"endorsed"}
return ret | 616 |
def rate_of_matrix_function(A, Adot, f, fprime):
"""Find the rate of the tensor A
Parameters
----------
A : ndarray (3,3)
A diagonalizable tensor
Adot : ndarray (3,3)
Rate of A
f : callable
fprime : callable
Derivative of f
Returns
-------
Ydot : ndarray (3,3)
Notes
-----
For a diagonalizable tensor A (the strain) which has a quasi-arbitrary
spectral expansion
.. math::
A = \sum_{i=1}^3 \lambda_i P_{i}
and if a second tensor Y is a principal function of A, defined by
.. math::
Y = \sum_{i=1}^3 f(\lambda_i) P_i,
compute the time rate \dot{Y}. Algorithm taken from Brannon's
Tensor book, from the highlighted box near Equation (28.404) on
page 550.
"""
# Compute the eigenvalues and eigenprojections.
eig_vals, eig_vecs = np.linalg.eig(A)
eig_projs = [np.outer(eig_vecs[:, i], eig_vecs[:, i]) for i in [0, 1, 2]]
# Assemble the rate of Y.
Ydot = np.zeros((3, 3))
for eigi, proji in zip(eig_vals, eig_projs):
for eigj, projj in zip(eig_vals, eig_projs):
if eigi == eigj:
gamma = fprime(eigi)
else:
gamma = (f(eigi) - f(eigj)) / (eigi - eigj)
Ydot += gamma * np.dot(proji, np.dot(Adot, projj))
return Ydot | 617 |
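A quick sanity check of the spectral formula: with f the identity (and f' = 1), the rate of Y = f(A) = A must reduce to Adot itself (the symmetric test values are arbitrary):

import numpy as np

A = np.diag([1.0, 2.0, 3.0])
Adot = np.array([[0.1, 0.2, 0.0],
                 [0.2, 0.3, 0.1],
                 [0.0, 0.1, 0.4]])
Ydot = rate_of_matrix_function(A, Adot, f=lambda x: x, fprime=lambda x: 1.0)
assert np.allclose(Ydot, Adot)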
def enumerate_changes(levels):
"""Assign a unique integer to each run of identical values.
Repeated but non-consecutive values will be assigned different integers.
"""
return levels.diff().fillna(0).abs().cumsum().astype(int) | 618 |
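An illustrative pandas example of the run enumeration; note that the non-consecutive repeat of 1 gets a new integer:

import pandas as pd

levels = pd.Series([1, 1, 2, 2, 1, 1])
print(enumerate_changes(levels).tolist())   # [0, 0, 1, 1, 2, 2]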
def TDataStd_ByteArray_Set(*args):
"""
* Finds or creates an attribute with the array. If <isDelta> == False, DefaultDeltaOnModification is used. If attribute is already set, all input parameters are refused and the found attribute is returned.
:param label:
:type label: TDF_Label &
:param lower:
:type lower: int
:param upper:
:type upper: int
:param isDelta: default value is Standard_False
:type isDelta: bool
:rtype: Handle_TDataStd_ByteArray
"""
return _TDataStd.TDataStd_ByteArray_Set(*args) | 619 |
def number_generator(doc):
"""Searches for occurrences of number patterns (cardinal, ordinal, quantity or percent) in text"""
i = 0
while i < len(doc):
tok = doc[i]
if tok.lower_ in ORDINALS:
yield i, i + 1, "ORDINAL"
elif re.search("\\d", tok.text):
j = i + 1
while j < len(doc) and (doc[j].norm_ in MAGNITUDES):
j += 1
if j < len(doc) and doc[j].lower_.rstrip(".") in UNITS:
j += 1
yield i, j, "QUANTITY"
elif j < len(doc) and doc[j].lower_ in ["%", "percent", "pc.", "pc", "pct", "pct.", "percents",
"percentage"]:
j += 1
yield i, j, "PERCENT"
else:
yield i, j, "CARDINAL"
i = j - 1
i += 1 | 620 |
def vis_verts(mean_shape, verts, face, mvs=None, textures=None):
"""
mean_shape: N x 3
verts: B x N x 3
face: numpy F x 3
textures: B x F x T x T (x T) x 3
"""
from psbody.mesh.mesh import Mesh
from psbody.mesh.meshviewer import MeshViewers
if mvs is None:
mvs = MeshViewers((2, 3))
num_row = len(mvs)
num_col = len(mvs[0])
mean_shape = convert2np(mean_shape)
verts = convert2np(verts)
num_show = min(num_row * num_col, verts.shape[0] + 1)
mvs[0][0].set_dynamic_meshes([Mesh(mean_shape, face)])
# 0th is mean shape:
if textures is not None:
tex = convert2np(textures)
for k in np.arange(1, num_show):
vert_here = verts[k - 1]
if textures is not None:
tex_here = tex[k - 1]
fc = tex_here.reshape(tex_here.shape[0], -1, 3).mean(axis=1)
mesh = Mesh(vert_here, face, fc=fc)
else:
mesh = Mesh(vert_here, face)
mvs[int(k % num_row)][int(k / num_row)].set_dynamic_meshes([mesh]) | 621 |
def _get_total_elements(viewer) -> int:
"""
We need to fetch a workflows listing to figure out how many entries we
have in the database, since the API does not contain a method to count
the DB entries.
:param viewer: CWL Viewer instance URL
:return: number of total elements in the CWL Viewer instance DB
"""
smallest_workflow_dataset: dict = _fetch_workflows_data(viewer, 0, 1).json()
return int(smallest_workflow_dataset['totalElements']) | 622 |
def entrepreneursIncubated(dateFrom=None, dateTo=None):
"""
    Returns report data (queryset, fields and labels) for entrepreneurs whose projects reached the incubation stage
"""
queryset = Stage.objects
output = {
'queryset': None,
'fields': [],
'values': [],
'fieldLabels': [],
}
queryset = queryset.filter(stage_type="IN") # check for duplicated
projects = Project.objects.filter(id__in=queryset.values('project_id'))
entrepreneurs = Entrepreneur.objects.filter(id__in=projects.values('entrepreneurs'))
output['queryset'] = entrepreneurs
fieldsDict = helperDictionaries.getModelReportFields('entrepreneurs')
output['fieldDict'] = fieldsDict
output['fields'] = [*fieldsDict.keys()]
output['fieldLabels'] = [*fieldsDict.values()]
return output | 623 |
def get_steps(x, shape):
"""
Convert a (vocab_size, steps * batch_size) array
into a [(vocab_size, batch_size)] * steps list of views
"""
steps = shape[1]
if x is None:
return [None for step in range(steps)]
xs = x.reshape(shape + (-1,))
return [xs[:, step, :] for step in range(steps)] | 624 |
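A shape-only sketch of the reshaping (the sizes are made up):

import numpy as np

vocab_size, steps, batch_size = 5, 3, 4
x = np.zeros((vocab_size, steps * batch_size))
views = get_steps(x, shape=(vocab_size, steps))
print(len(views), views[0].shape)   # 3 (5, 4)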
def LHS(
a: int,
operation1: str,
b: int,
operation2: str,
c: float
):
"""
E.g. LHS(a, 'plus', b, 'times', c) does
(a + b) * c
params:
a: int. First number in equation
operation1: str. Must be 'plus', 'minus', 'times', 'divide'
b : int. Second number in equation
operation2: str. Must be 'plus', 'minus', 'times', 'divide'
c: float. Third number in equation
return: int
"""
step_1 = word_function(a, operation1, b)
step_2 = word_function(step_1, operation2, c)
return step_2 | 625 |
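Following the docstring's own example, a worked call, assuming `word_function` maps the operation names to the usual arithmetic:

result = LHS(2, 'plus', 3, 'times', 4.0)
print(result)   # (2 + 3) * 4.0 == 20.0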
def _validate(api_indicator_matype, option, parameters:dict, **kwargs): # -> dict
"""Validates kwargs and attaches them to parameters."""
# APO, PPO, BBANDS
matype = int(math.fabs(kwargs["matype"])) if "matype" in kwargs else None
if option == "matype" and matype is not None and matype in api_indicator_matype:
parameters["matype"] = matype
# BBANDS
nbdevup = math.fabs(kwargs["nbdevup"]) if "nbdevup" in kwargs else None
nbdevdn = math.fabs(kwargs["nbdevdn"]) if "nbdevdn" in kwargs else None
if option == "nbdevup" and nbdevup is not None:
parameters["nbdevup"] = nbdevup
if option == "nbdevdn" and nbdevdn is not None:
parameters["nbdevdn"] = nbdevdn
# ULTOSC
timeperiod1 = int(math.fabs(kwargs["timeperiod1"])) if "timeperiod1" in kwargs else None
timeperiod2 = int(math.fabs(kwargs["timeperiod2"])) if "timeperiod2" in kwargs else None
timeperiod3 = int(math.fabs(kwargs["timeperiod3"])) if "timeperiod3" in kwargs else None
if option == "timeperiod1" and timeperiod1 is not None:
parameters["timeperiod1"] = timeperiod1
if option == "timeperiod2" and timeperiod2 is not None:
parameters["timeperiod2"] = timeperiod2
if option == "timeperiod3" and timeperiod3 is not None:
parameters["timeperiod3"] = timeperiod3
# SAR
acceleration = math.fabs(float(kwargs["acceleration"])) if "acceleration" in kwargs else None
maximum = math.fabs(float(kwargs["maximum"])) if "maximum" in kwargs else None
if option == "acceleration" and acceleration is not None:
parameters["acceleration"] = acceleration
if option == "maximum" and maximum is not None:
parameters["maximum"] = maximum
# MAMA
fastlimit = math.fabs(float(kwargs["fastlimit"])) if "fastlimit" in kwargs else None
slowlimit = math.fabs(float(kwargs["slowlimit"])) if "slowlimit" in kwargs else None
if option == "fastlimit" and fastlimit is not None and fastlimit > 0 and fastlimit < 1:
parameters["fastlimit"] = fastlimit
if option == "slowlimit" and slowlimit is not None and slowlimit > 0 and slowlimit < 1:
parameters["slowlimit"] = slowlimit
# MACD, APO, PPO, ADOSC
fastperiod = int(math.fabs(kwargs["fastperiod"])) if "fastperiod" in kwargs else None
slowperiod = int(math.fabs(kwargs["slowperiod"])) if "slowperiod" in kwargs else None
signalperiod = int(math.fabs(kwargs["signalperiod"])) if "signalperiod" in kwargs else None
if option == "fastperiod" and fastperiod is not None:
parameters["fastperiod"] = fastperiod
if option == "slowperiod" and slowperiod is not None:
parameters["slowperiod"] = slowperiod
if option == "signalperiod" and signalperiod is not None:
parameters["signalperiod"] = signalperiod
# MACDEXT
fastmatype = int(math.fabs(kwargs["fastmatype"])) if "fastmatype" in kwargs else None
slowmatype = int(math.fabs(kwargs["slowmatype"])) if "slowmatype" in kwargs else None
signalmatype = int(math.fabs(kwargs["signalmatype"])) if "signalmatype" in kwargs else None
if option == "fastmatype" and fastmatype is not None and fastmatype in api_indicator_matype:
parameters["fastmatype"] = fastmatype
if option == "slowmatype" and slowmatype is not None and slowmatype in api_indicator_matype:
parameters["slowmatype"] = slowmatype
if option == "signalmatype" and signalmatype is not None and signalmatype in api_indicator_matype:
parameters["signalmatype"] = signalmatype
# STOCH(F), STOCHRSI
fastkperiod = int(math.fabs(kwargs["fastkperiod"])) if "fastkperiod" in kwargs else None
fastdperiod = int(math.fabs(kwargs["fastdperiod"])) if "fastdperiod" in kwargs else None
fastdmatype = int(math.fabs(kwargs["fastdmatype"])) if "fastdmatype" in kwargs else None
if option == "fastkperiod" and fastkperiod is not None:
parameters["fastkperiod"] = fastkperiod
if option == "fastdperiod" and fastdperiod is not None:
parameters["fastdperiod"] = fastdperiod
if option == "fastdmatype" and fastdmatype is not None and fastdmatype in api_indicator_matype:
parameters["fastdmatype"] = fastdmatype
# STOCH(F), STOCHRSI
slowkperiod = int(math.fabs(kwargs["slowkperiod"])) if "slowkperiod" in kwargs else None
slowdperiod = int(math.fabs(kwargs["slowdperiod"])) if "slowdperiod" in kwargs else None
slowkmatype = int(math.fabs(kwargs["slowkmatype"])) if "slowkmatype" in kwargs else None
slowdmatype = int(math.fabs(kwargs["slowdmatype"])) if "slowdmatype" in kwargs else None
if option == "slowkperiod" and slowkperiod is not None:
parameters["slowkperiod"] = slowkperiod
if option == "slowdperiod" and slowdperiod is not None:
parameters["slowdperiod"] = slowdperiod
if option == "slowkmatype" and slowkmatype is not None and slowkmatype in api_indicator_matype:
parameters["slowkmatype"] = slowkmatype
if option == "slowdmatype" and slowdmatype is not None and slowdmatype in api_indicator_matype:
parameters["slowdmatype"] = slowdmatype
return parameters | 626 |
def scale_y_values(y_data, y_reference, y_max):
"""
Scale the plot in y direction, to prevent extreme values.
:param y_data: the y data of the plot
:param y_reference: the maximum value of the plot series (e.g. Normal force), which will be scaled to y_max
:param y_max: the maximum y value for the plot (e.g. if y_max=1, no y value in the plot will be greater than 1)
"""
multipl_factor = y_max / y_reference
for i in range(len(y_data)):
y_data[i] = y_data[i] * multipl_factor
return y_data, multipl_factor | 627 |
def set_heating_contribution(agent, pv_power):
""" If the water tank is currently in use, compute and return the part of the pv_power used for heating the water"""
pv_power_to_heating = 0
if agent.water_tank.is_active():
pv_power_to_heating = pv_power * agent.pv_panel.heating_contribution
return pv_power_to_heating | 628 |
def test_melt_columns() -> None:
"""Melt selected columns to rows."""
before_melt = pd.DataFrame(
{
"GEOGID": ["SA2017_017001001"],
"Pre 1919 (No. of households)": [10],
"Pre 1919 (No. of persons)": [25],
},
)
expected_output = pd.DataFrame(
{
"GEOGID": ["SA2017_017001001", "SA2017_017001001"],
"variable": ["Pre 1919 (No. of households)", "Pre 1919 (No. of persons)"],
"value": [10, 25],
},
)
output = _melt_columns.run(before_melt, id_vars="GEOGID")
assert_frame_equal(output, expected_output) | 629 |
def test_put_vector_mixed_dtypes():
"""
Passing a numpy array of mixed dtypes to a dataset.
See https://github.com/GenericMappingTools/pygmt/issues/255
"""
dtypes = "float32 float64 int32 int64 uint32 uint64".split()
for dtypex, dtypey in itertools.permutations(dtypes, r=2):
with clib.Session() as lib:
dataset = lib.create_data(
family="GMT_IS_DATASET|GMT_VIA_VECTOR",
geometry="GMT_IS_POINT",
mode="GMT_CONTAINER_ONLY",
dim=[2, 5, 1, 0], # columns, rows, layers, dtype
)
x = np.array([1, 2, 3, 4, 5], dtype=dtypex)
y = np.array([6, 7, 8, 9, 10], dtype=dtypey)
lib.put_vector(dataset, column=lib["GMT_X"], vector=x)
lib.put_vector(dataset, column=lib["GMT_Y"], vector=y)
# Turns out wesn doesn't matter for Datasets
wesn = [0] * 6
# Save the data to a file to see if it's being accessed correctly
with GMTTempFile() as tmp_file:
lib.write_data(
"GMT_IS_VECTOR",
"GMT_IS_POINT",
"GMT_WRITE_SET",
wesn,
tmp_file.name,
dataset,
)
# Load the data and check that it's correct
newx, newy = tmp_file.loadtxt(
unpack=True, dtype=[("x", dtypex), ("y", dtypey)]
)
assert x.dtype == newx.dtype
assert y.dtype == newy.dtype
npt.assert_allclose(newx, x)
npt.assert_allclose(newy, y) | 630 |
def predict_direction(clf, tickers, **kwargs):
"""
Use clf (an untrained classifier) to predict direction of change for validation
data for each stock in 'tickers'. Pass additional keyword arguments to be
used in building the stock datasets.
Args:
--clf: An untrained sklearn classifier
--tickers: A list of tickers to use
--kwargs: Additional arguments for the StockDataset class
Returns:
A dictionary where each key is a ticker in 'tickers' and each value is the
accuracy for the predictions for that ticker.
"""
results = {}
for ticker in tqdm(tickers):
# Build and split dataset
ds = StockDataset(tickers=ticker, quiet=True, **kwargs)
t_data, v_data, t_label, v_label = ds.split(label_field='Direction')
# Clone classifier
clf_clone = sklearn.base.clone(clf)
# Fit classifier to data
clf_clone.fit(t_data, t_label)
# Predict and store results
v_pred = clf_clone.predict(v_data)
results[ticker] = mymetrics.direction_accuracy(v_label, v_pred)
return results | 631 |
def rename(node_name, new_name):
"""Change the name of a storage NODE to NEW-NAME."""
config_connect()
try:
node = st.StorageNode.get(name=node_name)
try:
st.StorageNode.get(name=new_name)
print('Node "%s" already exists.' % new_name)
exit(1)
except pw.DoesNotExist:
node.name = new_name
node.save()
print("Updated.")
except pw.DoesNotExist:
print('Node "%s" does not exist!' % node_name)
exit(1) | 632 |
def plot_simulation_results(df_plot, week, year):
"""Make wildcard and division winner plots by simulation number
:param df_plot: data frame with summarised simulation information
:param week: current week
:param year: current season
:return: None
"""
# Calculate label positions
df_plot_label_pos = (
df_plot
.query('x_vals==x_vals.max()')[['team_id', 'firstName', 'wc_pct', 'div_pct', 'x_vals']]
.reset_index(drop=True))
x_scale_factor = df_plot_label_pos.x_vals.max() / df_plot_label_pos.team_id.size
df_plot_label_pos['wc_pct_pos'] = df_plot_label_pos.wc_pct.rank(method='first') * x_scale_factor
df_plot_label_pos['div_pct_pos'] = df_plot_label_pos.div_pct.rank(method='first') * x_scale_factor
    # Create wildcard plot
p_wc = (
ggplot(aes(x='x_vals',
y='wc_pct',
color='factor(team_id)',
group='team_id'),
data=df_plot) +
geom_line() +
geom_label(aes(label='firstName',
x='wc_pct_pos',
y='wc_pct',
color='factor(team_id)'),
data=df_plot_label_pos,
size=10) +
labs(x='Simulation', y='Simulations Team is Wildcard (%)') +
theme_bw() +
guides(color=False) +
ylim(0, 100)
)
# Create division winner plot
p_div = (
ggplot(aes(x='x_vals',
y='div_pct',
color='factor(team_id)',
group='team_id'),
data=df_plot) +
geom_line() +
geom_label(aes(label='firstName',
x='div_pct_pos',
y='div_pct',
color='factor(team_id)'),
data=df_plot_label_pos,
size=10) +
labs(x='Simulation', y='Simulations Team is Div. Winner (%)') +
theme_bw() +
guides(color=False) +
ylim(0, 100)
)
# Create directory to save plots
out_dir = Path(f'output/{year}/week{week}')
out_dir.mkdir(parents=True, exist_ok=True)
# Create file names
out_file_wc = out_dir / 'playoffs_wildcard_pct_by_simulation.png'
out_file_div = out_dir / 'playoffs_division_pct_by_simulation.png'
# Save plots
warnings.filterwarnings('ignore')
p_wc.save(out_file_wc, width=10, height=6, dpi=300)
p_div.save(out_file_div, width=10, height=6, dpi=300)
warnings.filterwarnings('default')
logger.info(f'Playoff simulation plots saved to: \n\t>{out_file_wc}\n\t>{out_file_div}') | 633 |
def load_from_csv():
""" Loads a list of Currency objects from CSV """
file = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'currencies.csv')
currencies = []
with open(file) as csvfile:
reader = csv.reader(csvfile)
headers = next(reader)
for row in reader:
currencies.append(Currency(*row))
return currencies | 634 |
def parse_primary(index):
"""Parse primary expression."""
if token_is(index, token_kinds.open_paren):
node, index = parse_expression(index + 1)
index = match_token(index, token_kinds.close_paren, ParserError.GOT)
return expr_nodes.ParenExpr(node), index
elif token_is(index, token_kinds.number):
return expr_nodes.Number(p.tokens[index]), index + 1
elif (token_is(index, token_kinds.identifier)
and not p.symbols.is_typedef(p.tokens[index])):
return expr_nodes.Identifier(p.tokens[index]), index + 1
elif token_is(index, token_kinds.string):
return expr_nodes.String(p.tokens[index].content), index + 1
elif token_is(index, token_kinds.char_string):
chars = p.tokens[index].content
return expr_nodes.Number(chars[0]), index + 1
else:
raise_error("expected expression", index, ParserError.GOT) | 635 |
def chopper_pulses_of_mode(i):
"""How many single pulses the chopper transmits per opening,
    or in hybrid mode, how many single bunches the transmitted intensity
corresponds to, based on the current settings of the chopper.
i: 0-based integer"""
if isnan(i) or i<0 or i>=len(chopper.pulses): return nan
return chopper.pulses[int(i)] | 636 |
def _get_content_from_tag(tag):
"""Gets the content from tag till before a new section."""
contents = []
next_tag = tag
while next_tag and not _is_section(next_tag):
content = parse_content(next_tag.text())
if content:
contents.append(content)
next_tag = next_tag.next
return ' '.join(contents) | 637 |
def get_ngrams(corpus, n):
"""
Get ngrams from provided corpus according to provided value of n.
"""
    ngrams = {}
    for word_list in [elt.split(' ') for elt in corpus]:
        words = [' '.join(name) for name in zip(*[word_list[i:] for i in range(n)])]
        for ngram in words:
            if ngram in ngrams:
                ngrams[ngram] += 1
            else:
                ngrams[ngram] = 1
    # Sort by frequency, most common first.
    ngrams = collections.OrderedDict(sorted(ngrams.items(), key=lambda t: t[1], reverse=True))
    pprint(ngrams)
    return ngrams | 638 |
async def get_connections(request: data_models.ConnectionsRequest):
"""Get connections *from* and *to* each entity in the request.
Connections *to* are all the subject-predicate pairs where the entity is the object, and connections *from* are all the predicate-object pairs where the entity is the subject."""
response = {}
for ent in request.entities:
ent_normalised = utils.normaliseURI(ent)
connections_from = sparql_connector.get_sparql_results(
sparql.get_p_o(ent_normalised, labels=request.labels, limit=request.limit)
)["results"]["bindings"]
connections_to = sparql_connector.get_sparql_results(
sparql.get_s_p(ent_normalised, labels=request.labels, limit=request.limit)
)["results"]["bindings"]
for predicate_object_dict in connections_from:
if (
"collections.vam.ac.uk" in predicate_object_dict["object"]["value"]
) and "objectLabel" not in predicate_object_dict:
object_label = utils.get_vam_object_title(
predicate_object_dict["object"]["value"]
)
if object_label is not None:
predicate_object_dict["objectLabel"] = dict()
predicate_object_dict["objectLabel"]["type"] = "literal"
predicate_object_dict["objectLabel"]["value"] = object_label
for subject_predicate_dict in connections_to:
if (
"collections.vam.ac.uk" in subject_predicate_dict["subject"]["value"]
) and "subjectLabel" not in subject_predicate_dict:
subject_label = utils.get_vam_object_title(
subject_predicate_dict["subject"]["value"]
)
if subject_label is not None:
subject_predicate_dict["subjectLabel"] = dict()
subject_predicate_dict["subjectLabel"]["type"] = "literal"
subject_predicate_dict["subjectLabel"]["value"] = subject_label
response.update(
{
ent: {
"from": connections_from,
"to": connections_to,
}
}
)
return response | 639 |
def partida_26():
"""partida_26"""
check50.run("python3 volleyball.py").stdin("B\nA\nB\nB\nB\nA\nA\nA\nB\nA\nB\nA\nA\nB\nB\nB\nA\nB\nB", prompt=False).stdout("EMPIEZA\nSACA A\nGANA B\nA 0 B 0\nSACA B\nGANA A\nA 0 B 0\nSACA A\nGANA B\nA 0 B 0\nSACA B\nGANA B\nA 0 B 1\nSACA B\nGANA B\nA 0 B 2\nSACA B\nGANA A\nA 0 B 2\nSACA A\nGANA A\nA 1 B 2\nSACA A\nGANA A\nA 2 B 2\nSACA A\nGANA B\nA 2 B 2\nSACA B\nGANA A\nA 2 B 2\nSACA A\nGANA B\nA 2 B 2\nSACA B\nGANA A\nA 2 B 2\nSACA A\nGANA A\nA 3 B 2\nSACA A\nGANA B\nA 3 B 2\nSACA B\nGANA B\nA 3 B 3\nSACA B\nGANA B\nA 3 B 4\nSACA B\nGANA A\nA 3 B 4\nSACA A\nGANA B\nA 3 B 4\nSACA B\nGANA B\nA 3 B 5\nFINAL", regex=False).exit(0) | 640 |
def sunlight_duration(hour_angle_sunrise):
"""Returns the duration of Sunlight, in minutes, with Hour Angle in degrees,
hour_angle."""
    # 4 minutes of clock time per degree of hour angle, times 2 (sunrise to sunset)
    sunlight_duration_minutes = 8 * hour_angle_sunrise
    return sunlight_duration_minutes | 641 |
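A quick numeric check: at an equinox the sunrise hour angle is about 90 degrees, which should give 12 hours of daylight:

print(sunlight_duration(90.0))   # 720.0 minutes, i.e. 12 hours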
def add_particle_bunch_gaussian(sim, q, m, sig_r, sig_z, n_emit, gamma0,
sig_gamma, n_physical_particles,
n_macroparticles, tf=0., zf=0., boost=None,
save_beam=None, z_injection_plane=None,
initialize_self_field=True):
"""
Introduce a relativistic Gaussian particle bunch in the simulation,
along with its space charge field.
The bunch is initialized with a normalized emittance `n_emit`,
in such a way that it will be focused at time `tf`, at the position `zf`.
Thus if `tf` is not 0, the bunch will be initially out of focus.
(This does not take space charge effects into account.)
Parameters
----------
sim : a Simulation object
The structure that contains the simulation.
q : float (in Coulomb)
Charge of the particle species
m : float (in kg)
Mass of the particle species
sig_r : float (in meters)
The transverse RMS bunch size.
sig_z : float (in meters)
The longitudinal RMS bunch size.
n_emit : float (in meters)
The normalized emittance of the bunch.
gamma0 : float
The Lorentz factor of the electrons.
sig_gamma : float
The absolute energy spread of the bunch.
n_physical_particles : float
The number of physical particles (e.g. electrons) the bunch should
consist of.
n_macroparticles : int
The number of macroparticles the bunch should consist of.
zf: float (in meters), optional
Position of the focus.
tf : float (in seconds), optional
Time at which the bunch reaches focus.
boost : a BoostConverter object, optional
A BoostConverter object defining the Lorentz boost of
the simulation.
save_beam : string, optional
Saves the generated beam distribution as an .npz file "string".npz
z_injection_plane: float (in meters) or None
When `z_injection_plane` is not None, then particles have a ballistic
motion for z<z_injection_plane. This is sometimes useful in
boosted-frame simulations.
`z_injection_plane` is always given in the lab frame.
initialize_self_field: bool, optional
Whether to calculate the initial space charge fields of the bunch
and add these fields to the fields on the grid (Default: True)
"""
# Generate Gaussian gamma distribution of the beam
if sig_gamma > 0.:
gamma = np.random.normal(gamma0, sig_gamma, n_macroparticles)
else:
# Zero energy spread beam
gamma = np.full(n_macroparticles, gamma0)
if sig_gamma < 0.:
warnings.warn(
"Negative energy spread sig_gamma detected."
" sig_gamma will be set to zero. \n")
# Get inverse gamma
inv_gamma = 1. / gamma
# Get Gaussian particle distribution in x,y,z
x = sig_r * np.random.normal(0., 1., n_macroparticles)
y = sig_r * np.random.normal(0., 1., n_macroparticles)
z = zf + sig_z * np.random.normal(0., 1., n_macroparticles)
# Define sigma of ux and uy based on normalized emittance
sig_ur = (n_emit / sig_r)
# Get Gaussian distribution of transverse normalized momenta ux, uy
ux = sig_ur * np.random.normal(0., 1., n_macroparticles)
uy = sig_ur * np.random.normal(0., 1., n_macroparticles)
# Finally we calculate the uz of each particle
# from the gamma and the transverse momenta ux, uy
uz_sqr = (gamma ** 2 - 1) - ux ** 2 - uy ** 2
# Check for unphysical particles with uz**2 < 0
mask = uz_sqr >= 0
N_new = np.count_nonzero(mask)
if N_new < n_macroparticles:
warnings.warn(
"Particles with uz**2<0 detected."
" %d Particles will be removed from the beam. \n"
"This will truncate the distribution of the beam"
" at gamma ~= 1. \n"
"However, the charge will be kept constant. \n"%(n_macroparticles
- N_new))
# Remove unphysical particles with uz**2 < 0
x = x[mask]
y = y[mask]
z = z[mask]
ux = ux[mask]
uy = uy[mask]
inv_gamma = inv_gamma[mask]
uz_sqr = uz_sqr[mask]
# Calculate longitudinal momentum of the bunch
uz = np.sqrt(uz_sqr)
# Get weight of each particle
w = n_physical_particles / N_new * np.ones_like(x)
# Propagate distribution to an out-of-focus position tf.
# (without taking space charge effects into account)
if tf != 0.:
x = x - ux * inv_gamma * c * tf
y = y - uy * inv_gamma * c * tf
z = z - uz * inv_gamma * c * tf
# Save beam distribution to an .npz file
if save_beam is not None:
np.savez(save_beam, x=x, y=y, z=z, ux=ux, uy=uy, uz=uz,
inv_gamma=inv_gamma, w=w)
# Add the electrons to the simulation
ptcl_bunch = add_particle_bunch_from_arrays(sim, q, m, x, y, z, ux, uy, uz,
w, boost=boost, z_injection_plane=z_injection_plane,
initialize_self_field=initialize_self_field)
return ptcl_bunch | 642 |
def test_frame_seq_caching(frame_sequence: FrameSequence):
"""Test that we only interpolate on demand, and cache results."""
fs = frame_sequence
# index into the sequence and watch whether interpolate is called
with patch.object(
fs, "_interpolate_state", wraps=fs._interpolate_state
) as mock:
frame_5 = fs[5]
        # it should have been called once, and 2 frames cached (the initial one too)
mock.assert_called_once()
assert isinstance(frame_5, ViewerState)
assert len(fs._cache) == 2
# indexing the same frame again will not require re-interpolation
with patch.object(
fs, "_interpolate_state", wraps=fs._interpolate_state
) as mock:
frame_5 = fs[5]
mock.assert_not_called()
fs._rebuild_frame_index()
assert len(fs._cache) == 0 | 643 |
def send_tweets_to_twitter(tweets: List[str], reference: str, api: tweepy.API):
"""Post tweets to the twitter account. If more than one tweet divides into
a tweet string that is enumerated.
Args:
tweets (List[str]): list containing tweet length text strings.
reference (str): reference to the source of the quote.
api (tweepy.API): Authenticated twitter api object.
"""
first_tweet = None
number_of_tweets = len(tweets)
if len(tweets) == 1: # if only one tweet do not add numbering only reference
first_tweet = api.update_status(f"{tweets[0]}")
api.update_status(f"{reference}", in_reply_to_status_id=first_tweet.id)
else: # if more than one tweet add numbering, then add reference last
counter = 0
for tweet in tweets:
counter = counter + 1
if first_tweet is None:
first_tweet = api.update_status(
f"{tweet} ({counter}/{number_of_tweets})"
)
else:
api.update_status(
f"{tweet} ({counter}/{number_of_tweets})",
in_reply_to_status_id=first_tweet.id,
)
api.update_status(f"{reference}", in_reply_to_status_id=first_tweet.id) | 644 |
def sell_shares_nb(cash_now, shares_now, size, direction, price, fees, fixed_fees, slippage,
min_size, allow_partial, raise_reject, log_record, log):
"""Sell shares."""
# Get optimal order size
if direction == Direction.LongOnly:
final_size = min(shares_now, size)
else:
final_size = size
# Check against minimum size
if abs(final_size) < min_size:
if raise_reject:
raise RejectedOrderError("Order rejected: Final size is less than minimum allowed")
return order_not_filled_nb(
cash_now, shares_now,
OrderStatus.Rejected, StatusInfo.MinSizeNotReached,
log_record, log)
# Check against partial fill
if np.isfinite(size) and is_less_nb(final_size, size) and not allow_partial:
# np.inf doesn't count
if raise_reject:
raise RejectedOrderError("Order rejected: Final size is less than requested")
return order_not_filled_nb(
cash_now, shares_now,
OrderStatus.Rejected, StatusInfo.PartialFill,
log_record, log)
# Get price adjusted with slippage
adj_price = price * (1 - slippage)
# Compute acquired cash
acq_cash = final_size * adj_price
# Update fees
fees_paid = acq_cash * fees + fixed_fees
# Get final cash by subtracting costs
if is_less_nb(acq_cash, fees_paid):
# Can't fill
if raise_reject:
raise RejectedOrderError("Order rejected: Fees cannot be covered")
return order_not_filled_nb(
cash_now, shares_now,
OrderStatus.Rejected, StatusInfo.CantCoverFees,
log_record, log)
final_cash = acq_cash - fees_paid
# Update current cash and shares
new_cash = cash_now + final_cash
new_shares = add_nb(shares_now, -final_size)
# Return filled order
order_result = OrderResult(
final_size,
adj_price,
fees_paid,
OrderSide.Sell,
OrderStatus.Filled,
-1
)
if log:
fill_res_log_nb(new_cash, new_shares, order_result, log_record)
return new_cash, new_shares, order_result | 645 |
def radec2altaz(ra, dec, obstime, lat=None, lon=None, debug=False):
"""
calculates the altitude and azimuth, given an ra, dec, time, and observatory location
Parameters:
===========
- ra: float
The right ascension of the target (in degrees)
- dec: float
The declination of the target (in degrees)
- obstime: astropy.time.Time object
Contains the time of the observation.
Can also contain the observatory location if
lat and lon are not given.
- lat: float
The latitude of the observatory, in degrees.
Not needed if given in the obstime object
- lon: float
The longitude of the observatory, in degrees.
Not needed if given in the obstime object
Returns:
========
The altitude and azimuth of the object, both in degrees.
"""
if lat is None:
lat = obstime.lat.degree
if lon is None:
lon = obstime.lon.degree
obstime = Time(obstime.isot, format='isot', scale='utc', location=(lon, lat))
# Find the number of days since J2000
j2000 = Time("2000-01-01T12:00:00.0", format='isot', scale='utc')
dt = (obstime - j2000).value # number of days since J2000 epoch
# get the UT time
tstring = obstime.isot.split("T")[-1]
segments = tstring.split(":")
ut = float(segments[0]) + float(segments[1]) / 60.0 + float(segments[2]) / 3600
# Calculate Local Sidereal Time
lst = obstime.sidereal_time('mean').deg
# Calculate the hour angle
HA = lst - ra
while HA < 0.0 or HA > 360.0:
s = -np.sign(HA)
HA += s * 360.0
# convert everything to radians
dec *= np.pi / 180.0
lat *= np.pi / 180.0
HA *= np.pi / 180.0
# Calculate the altitude
alt = np.arcsin(np.sin(dec) * np.sin(lat) + np.cos(dec) * np.cos(lat) * np.cos(HA))
# calculate the azimuth
az = np.arccos((np.sin(dec) - np.sin(alt) * np.sin(lat)) / (np.cos(alt) * np.cos(lat)))
if np.sin(HA) > 0:
az = 2.0 * np.pi - az
if debug:
print( "UT: ", ut)
print( "LST: ", lst)
print( "HA: ", HA * 180.0 / np.pi)
return alt * 180.0 / np.pi, az * 180.0 / np.pi | 646 |
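A hypothetical call; the target and site coordinates are made up for illustration, and `Time` is `astropy.time.Time`:

from astropy.time import Time

t = Time("2015-06-16T07:00:00.0", format='isot', scale='utc')
alt, az = radec2altaz(ra=279.23, dec=38.78, obstime=t, lat=30.7, lon=-104.0)
print(alt, az)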
def handler_no_answer(f):
"""Decorator that creates message handlers that don't reply."""
def handle_wrapper(*args, **kwds):
try:
f(*args, **kwds)
except Exception:
return MSG_STATUS_ERROR, [
'Calling the cmd handler caused an error:\n{}'.format(traceback.format_exc())
], {}
return handle_wrapper | 647 |
def wrapper_handle_attrs(func):
"""转化html的标签属性为字典"""
# 这是一个装饰Parsing.handle_attrs_tmp、Parsing.handle_attrs_tag的装饰器
def handle_attrs(self, attrs_str):
attrs = dict()
if attrs_str == '/':
return attrs
attrs_list = re.findall(self.attr_reg, attrs_str)
for attr in attrs_list:
attrs[attr[0]] = func(self, attr)
return attrs
return handle_attrs | 648 |
def transfer_weights(model, weights=None):
"""
Always trains from scratch; never transfers weights
:param model:
:param weights:
:return:
"""
print('ENet has found no compatible pretrained weights! Skipping weight transfer...')
return model | 649 |
def status():
"""Print downloader's status to screen.
"""
used = get_space_used()
avail = get_space_available()
allowed = config.download.space_to_use
print "Space used by downloaded files: %.2f GB of %.2f GB (%.2f%%)" % \
(used/1024.0**3, allowed/1024.0**3, 100.0*used/allowed)
print "Space available on file system: %.2f GB" % (avail/1024.0**3)
numwait = jobtracker.query("SELECT COUNT(*) FROM requests " \
"WHERE status='waiting'", \
fetchone=True)[0]
numfail = jobtracker.query("SELECT COUNT(*) FROM requests " \
"WHERE status='failed'", \
fetchone=True)[0]
print "Number of requests waiting: %d" % numwait
print "Number of failed requests: %d" % numfail
numdlactive = jobtracker.query("SELECT COUNT(*) FROM files " \
"WHERE status='downloading'", \
fetchone=True)[0]
numdlfail = jobtracker.query("SELECT COUNT(*) FROM files " \
"WHERE status='failed'", \
fetchone=True)[0]
print "Number of active downloads: %d" % numdlactive
print "Number of failed downloads: %d" % numdlfail | 650 |
def collapse_json(text, indent=4):
"""Compacts a string of json data by collapsing whitespace after the
specified indent level
NOTE: will not produce correct results when indent level is not a multiple
of the json indent level
"""
initial = " " * indent
out = [] # final json output
sublevel = [] # accumulation list for sublevel entries
pending = None # holder for consecutive entries at exact indent level
for line in text.splitlines():
if line.startswith(initial):
if line[indent] == " ":
# found a line indented further than the indent level, so add
# it to the sublevel list
if pending:
# the first item in the sublevel will be the pending item
# that was the previous line in the json
sublevel.append(pending)
pending = None
item = line.strip()
sublevel.append(item)
if item.endswith(","):
sublevel.append(" ")
elif sublevel:
# found a line at the exact indent level *and* we have sublevel
# items. This means the sublevel items have come to an end
sublevel.append(line.strip())
out.append("".join(sublevel))
sublevel = []
else:
# found a line at the exact indent level but no items indented
# further, so possibly start a new sub-level
if pending:
# if there is already a pending item, it means that
# consecutive entries in the json had the exact same
# indentation and that last pending item was not the start
# of a new sublevel.
out.append(pending)
pending = line.rstrip()
else:
if pending:
# it's possible that an item will be pending but not added to
# the output yet, so make sure it's not forgotten.
out.append(pending)
pending = None
if sublevel:
out.append("".join(sublevel))
out.append(line)
return "\n".join(out) | 651 |
def get_skills_v1():
"""
READING THE FIRST SKILLSET
"""
    with open('skills_v1.json', 'rb') as f:
        for a in f:
            skills_v1 = ast.literal_eval(a)
return skills_v1 | 652 |
def create_mask(board: np.ndarray, dimensions: Tuple[int, int]) -> List[List[int]]:
""" Function to create Mask of possible valid values based on the initial sudoku Board. """
mask = list(board.tolist())
counts = Counter(board.flatten())
del counts[0]
counts = [number[0] for number in counts.most_common()]
most_common_clues = counts
for clue in range(dimensions[0], dimensions[1]):
if clue not in most_common_clues:
most_common_clues.append(clue)
for i, row in enumerate(mask):
if 0 in row:
while 0 in row:
zero_index = row.index(0)
mask[i][zero_index] = []
for number in most_common_clues:
if valid(board, number, (i, zero_index), box_size):
mask[i][zero_index].append(number)
else:
for number in row:
if number != 0:
mask[i][row.index(number)] = {number}
return mask | 653 |
def telegram(text: str, token: str, chat_id: int) -> str:
"""Send a telegram message"""
webhookAddress = f"https://api.telegram.org/bot{token}/sendMessage?" + urlencode({"text":text, "chat_id":chat_id})
handler = urlopen(webhookAddress)
return handler.read().decode('utf-8') | 654 |
def match_term(term, dictionary, case_sensitive, lemmatize=True):
"""
    Parameters
    ----------
    term : str
        The token to look up.
    dictionary : collection
        The set (or dict) of known terms to match against.
    case_sensitive : bool
        Whether matching should respect case.
    lemmatize : bool
        Also try a naive singular form (trailing 's' stripped); including
        lemmas improves performance slightly.
    Returns
    -------
    bool
        True if the term (or its naive lemma) is in the dictionary.
    """
if (not case_sensitive and term.lower() in dictionary) or term in dictionary:
return True
if (case_sensitive and lemmatize) and term.rstrip('s').lower() in dictionary:
return True
elif (not case_sensitive and lemmatize) and term.rstrip('s') in dictionary:
return True
return False | 655 |
def fill_user(user_ids, filename='user', write=True):
"""
Input: user_ids dictionary (user ids: task values)
Output: csv file with user id, name, email
"""
emails = {}
for user in user_ids:
r = requests.get('https://pe.goodlylabs.org'
'/api/user/{}?api_key={}&limit=100'
.format(user, PYBOSSA_API_KEY), headers=headers)
user_info = json.loads(r.text)
emails[user] = [user_info['fullname'], user_info['email_addr']]
if write:
with open('{}.csv'.format(filename), 'w') as f:
writer = csv.writer(f, delimiter=',', quotechar='"',
quoting=csv.QUOTE_MINIMAL)
writer.writerow(["id", "name", "email"])
for i in emails:
writer.writerow([i, emails[i][0], emails[i][1]])
return emails | 656 |
def test_update_transaction(
lunch_money_obj: LunchMoney, test_transactions: List[TransactionObject]
):
"""
Update a Transaction in Lunch Money
"""
transaction_note = f"Updated on {datetime.datetime.now()}"
transaction_update_obj = TransactionUpdateObject(notes=transaction_note)
response = lunch_money_obj.update_transaction(
transaction_id=test_transactions[1].id, transaction=transaction_update_obj
)
assert response["updated"] is True | 657 |
def project_along_flow(dX_raw,dY_raw,dX_prio,dY_prio,e_perp):
"""
Parameters
----------
dX_raw : np.array, size=(m,n), dtype=float
raw horizontal displacement with mixed signal
dY_raw : np.array, size=(m,n), dtype=float
raw vertical displacement with mixed signal
dX_prio : np.array, size=(m,n), dtype=float
reference of horizontal displacement (a-priori knowledge)
dY_prio : np.array, size=(m,n), dtype=float
reference of vertical displacement (a-priori knowledge)
e_perp : np.array, size=(2,1), float
vector in the perpendicular direction to the flightline (bearing).
Returns
-------
dX_proj : np.array, size=(m,n), dtype=float
projected horizontal displacement in the same direction as reference.
dY_proj : np.array, size=(m,n), dtype=float
projected vertical displacement in the same direction as reference.
Notes
-----
The projection function is as follows:
.. math:: P = ({d_{x}}e^{\perp}_{x} - {d_{y}}e^{\perp}_{y}) / ({\hat{d}_{x}}e^{\perp}_{x} - {\hat{d}_{y}}e^{\perp}_{y})
See also Equation 10 and Figure 2 in [1].
Furthermore, two different coordinate system are used here:
.. code-block:: text
indexing | indexing ^ y
system 'ij'| system 'xy' |
| |
| i | x
--------+--------> --------+-------->
| |
| |
image | j map |
based v based |
References
----------
.. [1] Altena & Kääb. "Elevation change and improved velocity retrieval
using orthorectified optical satellite data from different orbits"
Remote Sensing vol.9(3) pp.300 2017.
"""
# e_{\para} = bearing satellite...
assert(dX_raw.size == dY_raw.size) # all should be of the same size
assert(dX_prio.size == dY_prio.size)
assert(dX_raw.size == dX_prio.size)
d_proj = ((dX_raw*e_perp[0])-(dY_raw*e_perp[1])) /\
((dX_prio*e_perp[0])-(dY_prio*e_perp[1]))
dX_proj = d_proj * dX_raw
dY_proj = d_proj * dY_raw
return dX_proj,dY_proj | 658 |
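A tiny numeric sketch of the projection with single-pixel arrays and made-up values:

import numpy as np

dX_raw, dY_raw = np.array([[1.0]]), np.array([[1.0]])
dX_prio, dY_prio = np.array([[2.0]]), np.array([[0.0]])
e_perp = np.array([1.0, 0.0])
dX_proj, dY_proj = project_along_flow(dX_raw, dY_raw, dX_prio, dY_prio, e_perp)
print(dX_proj, dY_proj)   # [[0.5]] [[0.5]]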
def get_existing_pks(engine: Engine, table: Table) -> Mapping[int, dict]:
"""
Creates an index of hashes of the values of the primary keys in the table provided.
:param engine:
:param table:
:return:
"""
with engine.connect() as conn:
pk_cols = [table.c[col.name] for col in table.columns if col.primary_key]
query = select(pk_cols)
result = conn.execute(query)
return {hash_row_els(dict(row), [col.name for col in pk_cols]): dict(row) for row in result} | 659 |
def create_transform_parameters(
fill_mode = 'nearest',
interpolation = 'linear',
cval = 0,
data_format = None,
relative_translation = True,
):
""" Creates a dictionary to store parameters containing information on
method to apply transformation to an image
# Arguments
fill_mode: One of: 'constant', 'nearest', 'reflect', 'wrap'
interpolation: One of: 'nearest', 'linear', 'cubic', 'area', 'lanczos4'
cval: Fill value to use with fill_mode='constant'
data_format: Same as for keras.preprocessing.image_transform.apply_transform
relative_translation: If true (the default), interpret translation as a factor of the image size.
If false, interpret it as absolute pixels.
"""
# Apply processing to input arguments
if data_format is None:
data_format = 'channels_last'
if data_format == 'channels_first':
channel_axis = 0
elif data_format == 'channels_last':
channel_axis = 2
else:
raise ValueError("invalid data_format, expected 'channels_first' or 'channels_last', got '{}'".format(data_format))
if fill_mode == 'constant':
cv_border_mode = cv2.BORDER_CONSTANT
if fill_mode == 'nearest':
cv_border_mode = cv2.BORDER_REPLICATE
if fill_mode == 'reflect':
cv_border_mode = cv2.BORDER_REFLECT_101
if fill_mode == 'wrap':
cv_border_mode = cv2.BORDER_WRAP
if interpolation == 'nearest':
cv_interpolation = cv2.INTER_NEAREST
if interpolation == 'linear':
cv_interpolation = cv2.INTER_LINEAR
if interpolation == 'cubic':
cv_interpolation = cv2.INTER_CUBIC
if interpolation == 'area':
cv_interpolation = cv2.INTER_AREA
if interpolation == 'lanczos4':
cv_interpolation = cv2.INTER_LANCZOS4
# Create attribute dict to store parameters
_p = AttrDict(
fill_mode=fill_mode,
interpolation=interpolation,
cval=cval,
relative_translation=relative_translation,
data_format=data_format,
channel_axis=channel_axis,
cv_border_mode=cv_border_mode,
cv_interpolation=cv_interpolation
)
_p.immutable(True)
return _p | 660 |
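# Hedged usage sketch: exercising the factory above with non-default arguments.
# It assumes cv2 and the project's AttrDict are importable in this module, as
# the function itself requires.
_demo_params = create_transform_parameters(fill_mode='constant', interpolation='nearest', cval=0.5)
print(_demo_params.cv_border_mode == cv2.BORDER_CONSTANT)   # True
print(_demo_params.cv_interpolation == cv2.INTER_NEAREST)   # True
print(_demo_params.channel_axis)                            # 2, since data_format defaults to 'channels_last'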
def main():
"""Test sampler."""
nx = 40
ny = 40
nchan = 8
C = np.random.rand(ny,nx,nchan)
csum = np.sum(C,2);
for ix in range(nx):
for iy in range(ny):
C[iy,ix,:] /= csum[iy,ix]
print("C = ",C)
iterlist = [10 , 100, 1000, 10000, 100000]
#iterlist = [10000 , 100000]
for niter in iterlist:
G0 = np.zeros([ny,nx,nchan])
for iter in range(niter):
G = pdcglob(nx,ny,nchan,C)
G0 = G0 + G
G0 /= niter
#print G0 - C
print("niter = %8d, dev = %12.4e, relerr = %12.4e" % \
(niter,np.max(np.abs(G0 - C)),np.max(np.abs(G0-C)/C))) | 661 |
def error_rate(model, dataset):
"""Returns error rate for Keras model on dataset."""
d = dataset['dimension']
scores = np.squeeze(model.predict(dataset['features'][:, :, 0:d]), axis=-1)
diff = scores[:, 0] - scores[:, 1]
return np.mean(diff.reshape((-1)) <= 0) | 662 |
def diffs(**kwargs):
"""Log Datadog resources diffs."""
cfg = build_config(**kwargs)
check_diffs(cfg)
if cfg.logger.exception_logged:
exit(1) | 663 |
def test_kovasznay_0():
"""
test kovasznay
"""
standard_value = np.load("./standard/kovasznay.npz", allow_pickle=True)
solution = standard_value['solution'].tolist()
dynamic_rslt = kovasznay(static=False)
static_rslt = kovasznay()
compare(dynamic_rslt, static_rslt)
compare(solution, static_rslt) | 664 |
def k8s_cr_callback(func: Callable) -> Callable:
"""
Decorate a method as a K8s CR callback.
    It works only for K8sCRHandler and child classes.
"""
@functools.wraps(func)
def decorated_func(self, *args, **kwargs):
"""Provide automatic locking of CRs in process by this method."""
# Ensure that signature of K8sCRHandler._callback stays the same
name = args[0]
labels = args[1]
operation = args[2]
blocking = bool(operation != 'REPROCESS')
locked = self.cr_locks[name].acquire(blocking=blocking)
if locked:
_LOGGER.debug(
'CR "%s" locked by operation "%s" with label "%s"', name, operation, labels)
try:
return func(self, *args, **kwargs)
finally:
self.cr_locks[name].release()
_LOGGER.debug(
'CR "%s" unlocked by operation "%s" with label "%s"', name, operation, labels)
# Cleanup lock objects dictionary when CR was deleted
if operation == 'DELETED':
self.cr_locks.pop(name, None)
else:
_LOGGER.debug(
'CR "%s" in process - skipping operation "%s" this run', name, operation)
_LOGGER.debug('Method "%s" is decorated as K8s callback method', func)
return decorated_func | 665 |
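# Hypothetical usage sketch (not from the original project): a minimal class
# mimicking the part of K8sCRHandler the decorator relies on, namely a
# `cr_locks` mapping of per-CR locks. The callback signature below is assumed.
import collections
import threading

class _DemoHandler:
    def __init__(self):
        self.cr_locks = collections.defaultdict(threading.RLock)

    @k8s_cr_callback
    def _callback(self, name, labels, operation, custom_res):
        print('processing CR', name, 'operation', operation)

_DemoHandler()._callback('my-cr', {}, 'ADDED', {})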
def account():
"""Update the user's account"""
return _templates.account(UserContext.user()) | 666 |
def run_synchronously(computation: Awaitable[TSource]) -> TSource:
"""Runs the asynchronous computation and await its result."""
return asyncio.run(computation) | 667 |
def create_running_command(
command_id: str = "command-id",
command_key: str = "command-key",
command_type: str = "command-type",
created_at: datetime = datetime(year=2021, month=1, day=1),
params: Optional[BaseModel] = None,
) -> cmd.Command:
"""Given command data, build a running command model."""
return cast(
cmd.Command,
cmd.BaseCommand(
id=command_id,
key=command_key,
createdAt=created_at,
commandType=command_type,
status=cmd.CommandStatus.RUNNING,
params=params or BaseModel(),
),
) | 668 |
def x11_linux_stop_record():
"""
stop test_record action
"""
return xwindows_listener.stop_record() | 669 |
def yxy_intrinsic(mat: np.ndarray) -> np.ndarray:
"""Return yxy intrinsic Euler angle decomposition of mat (.., 4, 4))"""
# extract components
not_nan, r00, r01, r02, r10, r11, r12, _, r21, _ = extract_mat_components(mat)
# pre-initialize results
theta_y0 = np.full(not_nan.shape, np.nan)
theta_x = np.full(not_nan.shape, np.nan)
theta_y1 = np.full(not_nan.shape, np.nan)
# compute Euler angles
theta_y0[not_nan] = np.where(r11 < 1, np.where(r11 > -1, np.arctan2(-r01, -r21), 0), 0)
theta_x[not_nan] = np.where(r11 < 1, np.where(r11 > -1, -np.arccos(r11), -np.pi), 0)
theta_y1[not_nan] = np.where(r11 < 1, np.where(r11 > -1, np.arctan2(-r10, r12), np.arctan2(r02, r00)),
np.arctan2(r02, r00))
return np.stack((theta_y0, theta_x, theta_y1), -1) | 670 |
def mnemonic_and_path_to_key(*, mnemonic: str, path: str, password: str) -> int:
"""
Return the SK at position `path`, derived from `mnemonic`. The password is to be
compliant with BIP39 mnemonics that use passwords, but is not used by this CLI outside of tests.
"""
seed = get_seed(mnemonic=mnemonic, password=password)
sk = derive_master_SK(seed)
for node in path_to_nodes(path):
sk = derive_child_SK(parent_SK=sk, index=node)
return sk | 671 |
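# Hedged usage sketch: the path follows the EIP-2334 layout; the mnemonic below
# is a placeholder and must be replaced by a real BIP-39 phrase for the derived
# key to be meaningful.
_demo_mnemonic = "<replace with a valid 24-word BIP-39 mnemonic>"
_demo_sk = mnemonic_and_path_to_key(
    mnemonic=_demo_mnemonic, path="m/12381/3600/0/0/0", password="")
print(_demo_sk)  # integer secret key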
def test_function_with_annotations():
"""Parse a function docstring with signature annotations."""
def f(x: int, y: int, *, z: int) -> int:
"""
This function has annotations.
Parameters:
x: X value.
y: Y value.
Keyword Arguments:
z: Z value.
Returns:
Sum X + Y.
"""
return x + y
sections, errors = parse(inspect.getdoc(f), inspect.signature(f))
assert len(sections) == 4
assert not errors | 672 |
def acceleration(bodies, i, j):
"""
Calculer l'acceleration relative à un objet bodies[i]
bodies: tous les objets
i: index of concerned body which undergoes the gravitation of other objects.
j: index of the step
"""
N = len(bodies)
    ax = 0; ay = 0; az = 0  # acceleration components
    for ip in range(N):
        # each body bodies[ip] exerts a gravitational force on bodies[i]
        if ip == i:  # skip the body itself
            continue
# print(fx(bodies[ip].masse, bodies[i].x[j], bodies[i].y[j]-bodies[ip].y[j], bodies[i].z[j]))
ax += fx(bodies[ip].masse, bodies[i].x[j]-bodies[ip].x[j], bodies[i].y[j]-bodies[ip].y[j], bodies[i].z[j]-bodies[ip].z[j])
ay += fy(bodies[ip].masse, bodies[i].x[j]-bodies[ip].x[j], bodies[i].y[j]-bodies[ip].y[j], bodies[i].z[j]-bodies[ip].z[j])
az += fz(bodies[ip].masse, bodies[i].x[j]-bodies[ip].x[j], bodies[i].y[j]-bodies[ip].y[j], bodies[i].z[j]-bodies[ip].z[j])
return (ax, ay, az) | 673 |
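# `fx`, `fy` and `fz` are defined elsewhere in the module; a minimal sketch of
# what `fx` is assumed to compute (Newtonian gravity, acceleration component
# along x for a separation vector (dx, dy, dz) pointing from the attracting
# body of mass `masse` towards body i):
import math

G_DEMO = 6.674e-11  # gravitational constant in SI units

def fx_sketch(masse, dx, dy, dz):
    r = math.sqrt(dx**2 + dy**2 + dz**2)
    return -G_DEMO * masse * dx / r**3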
def formatted(s):
"""If s contains substrings of form '#'<txt>'#', '(('<txt>'))',
"''"<txt>"''", returns list of tuples (FORMAT_x, txt).
Otherwise, returns s.
"""
matches = re.findall(_format_re, normalize(s))
if len(matches) == 1 and matches[0][0] != '':
return matches[0][0]
def to_fmt(txt_none, txt_sw, txt_rem, txt_em, txt_a):
if txt_none != '':
return FORMAT_NONE, txt_none
elif txt_sw != '':
return FORMAT_SW, txt_sw
elif txt_rem != '':
return FORMAT_REM, txt_rem
elif txt_em != '':
return FORMAT_EM, txt_em
elif txt_a != '':
return FORMAT_A, txt_a
return [to_fmt(*m) for m in matches] | 674 |
def get_ftp_creds(repo, options):
"""
Retrieves the data to connect to the FTP from .git/ftpdata
or interactively.
ftpdata format example:
[branch]
username=me
password=s00perP4zzw0rd
hostname=ftp.hostname.com
remotepath=/htdocs
ssl=yes
gitftpignore=.gitftpignore
Please note that it isn't necessary to have this file,
you'll be asked for the data every time you upload something.
"""
ftpdata = os.path.join(repo.git_dir, "ftpdata")
options.ftp = FtpData()
cfg = config_parser.ConfigParser()
if os.path.isfile(ftpdata):
get_ftp_creds_from_file(cfg, ftpdata, options, repo)
else:
print("Please configure settings for branch '{0!s}'".format(options.section))
options.ftp.username = input('FTP Username: ')
options.ftp.password = getpass.getpass('FTP Password: ')
options.ftp.hostname = input('FTP Hostname: ')
options.ftp.remotepath = input('Remote Path: ')
if hasattr(ftplib, 'FTP_TLS'):
options.ftp.ssl = ask_ok('Use SSL? ')
else:
logging.warning("SSL not supported, defaulting to no")
# set default branch
if ask_ok("Should I write ftp details to .git/ftpdata? "):
cfg.add_section(options.section)
cfg.set(options.section, 'username', options.ftp.username)
cfg.set(options.section, 'password', options.ftp.password)
cfg.set(options.section, 'hostname', options.ftp.hostname)
cfg.set(options.section, 'remotepath', options.ftp.remotepath)
cfg.set(options.section, 'ssl', options.ftp.ssl)
            with open(ftpdata, 'w') as f:
                cfg.write(f)
def deep_len(lnk):
""" Returns the deep length of a possibly deep linked list.
>>> deep_len(Link(1, Link(2, Link(3))))
3
>>> deep_len(Link(Link(1, Link(2)), Link(3, Link(4))))
4
>>> levels = Link(Link(Link(1, Link(2)), \
Link(3)), Link(Link(4), Link(5)))
>>> print(levels)
<<<1 2> 3> <4> 5>
>>> deep_len(levels)
5
"""
if not lnk:
return 0
if type(lnk.first) == int:
return 1 + deep_len(lnk.rest)
return deep_len(lnk.first) + deep_len(lnk.rest) | 676 |
def gsearch_node(command,comm):
"""
This function is used to search the string that is obtained from the user's
comment. This function requires the 'googlesearch' module.
"""
print "Currently at the gsearch node"
print command
m = re.search('!gsearch(.+)',command)
print m
if m:
mod_comm = m.group(1)
for x in mod_comm:
if(x == '(' or x == ')'):
mod_comm = mod_comm.replace(x,'')
print mod_comm
#Now mod_comm should be used for the search string on google
url_list = []
for url in search(mod_comm, stop=5):
url_list.append(url)
comm.reply("Here are the top 5 search results for the string '"+mod_comm+"': \n"+
'1.'+url_list[0]+'\n'
'2.'+url_list[1]+'\n'
'3.'+url_list[2]+'\n'
'4.'+url_list[3]+'\n'
'5.'+url_list[4]+'\n') | 677 |
def union(A, B):
""" Add two subspaces (A, B) together.
Args:
- A: a matrix whose columns span subspace A [ndarray].
- B: a matrix whose columns span subspace B [ndarray].
Returns:
- union: a matrix whose columns form the orthogonal basis for subspace
addition A+B [ndarray].
"""
m,n = A.shape
x,y = B.shape
if m != x:
raise Exception('input matrices need to be of same height');
T = np.hstack((A, B))
return image(T) | 678 |
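# `image` is defined elsewhere in the module; a minimal sketch of what it is
# assumed to do (return an orthonormal basis for the column space of T), using
# an SVD with a small rank cut-off:
import numpy as np

def image_sketch(T, tol=1e-10):
    U, s, _ = np.linalg.svd(T, full_matrices=False)
    rank = int(np.sum(s > tol * s.max())) if s.size else 0
    return U[:, :rank]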
def ldns_create_nsec(*args):
"""LDNS buffer."""
return _ldns.ldns_create_nsec(*args) | 679 |
def str_to_seconds(time):
"""
Returns the number of seconds since midnight in the string time (as an int).
The value time is a string in extended ISO 8601 format. That is, it has the form
'hh:mm:ss' where h, m, and s are digits. There must be exactly two digits each for
hours, minutes, and seconds, so they are padded with 0s when necessary. So
seconds, minutes, and hours may have leading 0s if they are only one digit. For
more information, see
https://en.wikipedia.org/wiki/ISO_8601#Times
This function does not support time zones, abbreviated formats, or decimals
Example: str_to_seconds('12:35:15') returns 45315
Example: str_to_seconds('03:02:05') returns 10925
Example: str_to_seconds('00:00:00') returns 0
Parameter time: The string representation of the time
Precondition: time is a string in extended ISO 8601 format 'hh:mm:ss'
"""
assert type(time) == str
assert len(time) == 8
assert iso_8601(time) == True
result = get_hours(time)*60*60 + get_minutes(time)*60 + get_seconds(time)
return result
# assert iso_8601(time) == True | works but not whats needed
# assert type(time[get_hours(time):get_seconds(time)]) == str | works but not whats needed
# ¬ assert time == str
# change params in fn from time to hr, min, sec | str concatination?
#assert introcs.isdigit(time[0:1+1]) and introcs.isdigit(time[3:4+1]) and introcs.isdigit(time[6:7+1]) == True
#assert type(time[get_hours(time):get_seconds(time)]) == str
#print(time[0:1+1], time[3:4+1], time[6:7+1]) | 680 |
def conv_variance_scaling_initializer(in_channel, out_channel, kernel_size):
"""conv init"""
fan_in = in_channel * kernel_size * kernel_size
scale = 1.0
scale /= max(1., fan_in)
stddev = (scale ** 0.5) / .87962566103423978
mu, sigma = 0, stddev
weight = truncnorm(-2, 2, loc=mu, scale=sigma).rvs(out_channel * in_channel * kernel_size * kernel_size)
weight = np.reshape(weight, (out_channel, in_channel, kernel_size, kernel_size))
return Tensor(weight, dtype=mstype.float32) | 681 |
def vgg8(**kwargs):
"""VGG 8-layer model (configuration "S")
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = VGG(cfg['S'], **kwargs)
return model | 682 |
def get_char_from_ascii(key_num):
"""Function that converts a character to an ascii code
Parameters
----------
ascii_code : int
Ascii code of character
Returns
-------
char : character
character converted from ascii
"""
return chr(key_num) | 683 |
def testTrade():
"""测试交易"""
try:
f = file('vnpy/trader/gateway/huobiGateway/HUOBI_connect.json')
except IOError:
return
# 解析json文件
setting = json.load(f)
try:
accessKey = str(setting['accessKey'])
secretKey = str(setting['secretKey'])
accountId = str(setting['accountId'])
except KeyError:
return
    # create the API object and initialise it
api = TradeApi()
# api.init(api.HADAX, accessKey, secretKey, mode=api.SYNC_MODE)
api.init(api.HUOBI, accessKey, secretKey, mode=api.SYNC_MODE)
api.start()
    # queries
# print (api.getSymbols())
print (api.getCurrencys())
print (api.getTimestamp())
#online unicode converter
symbol = str(setting['symbols'][0])
# symbol = str(symbols[0]) # 'eop':eos to udtc
print (api.getAccounts())
print (api.getAccountBalance(accountId))
print (api.getOpenOrders(accountId, symbol, 'sell'))
# print (api.getOrders(symbol, 'pre-submitted,submitted,partial-filled,partial-canceled,filled,canceled'))
# print (api.getOrders(symbol, 'filled'))
print (api.getMatchResults(symbol))
print (api.getOrder('2440401255'))
#api.getMatchResult('2440401255')
#api.placeOrder(accountid, '2', symbol, 'sell-market', source='api')
#api.cancelOrder('2440451757')
#api.batchCancel(['2440538580', '2440537853', '2440536765'])
# input() | 684 |
def get_files_path(file_path: str) -> list:
"""Get all files path
Args:
file_path: root folder path
Returns:
list: list of string containing all files paths
"""
    all_files = []
    for root, dirs, files in os.walk(file_path):
files = glob.glob(os.path.join(root,'*.json'))
for f in files:
all_files.append(f)
return all_files | 685 |
def start():
"""
view for data entry for optimisation
"""
form = LocationForm()
if form.validate_on_submit():
return optimise(form.data)
return flask.render_template("start.html",
title="Start", form=form) | 686 |
def add_lldp_filter_by_host(query, hostid):
"""Adds a lldp-specific ihost filter to a query.
Filters results by host id if supplied value is an integer,
otherwise attempts to filter results by host uuid.
:param query: Initial query to add filter to.
:param hostid: host id or uuid to filter results by.
:return: Modified query.
"""
if utils.is_int_like(hostid):
return query.filter_by(host_id=hostid)
elif utils.is_uuid_like(hostid):
query = query.join(models.Hosts)
return query.filter(models.Hosts.uuid == hostid)
LOG.debug("lldp_filter_by_host: "
"No match for supplied filter id (%s)" % str(hostid)) | 687 |
def read_shakemap_data_from_str(grid_data_text):
"""
Helper to work with the tokens.
Can work with both strings and floats.
"""
# it must be tokenized (because of xml processing the newlines
# may not be consistent)
tokens = tokenize.tokenize(
io.BytesIO(grid_data_text.encode("utf-8")).readline
)
token_before = None
for token in tokens:
# 2 is number
if token.type == 2:
value = float(token.string)
if token_before is not None and token_before.string == "-":
value = -1 * value
yield value
# 3 is str
elif token.type == 3:
raw_value = token.string
# remove quotes around
value = raw_value[1:-1]
yield value
# take care about the before token for negative numbers
token_before = token | 688 |
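# Hedged usage example: numeric tokens become floats (a leading minus sign is
# folded into the value) and string tokens lose their surrounding quotes.
print(list(read_shakemap_data_from_str('1.0 -2.5 "PGA"')))   # [1.0, -2.5, 'PGA']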
def c_flag(opt, test_not=False):
""" convert a test parameter into t if true for the Fortran build system """
if test_not:
if opt: return "FALSE"
else: return "TRUE"
else:
if opt: return "TRUE"
else: return "FALSE" | 689 |
def create_menu(*args):
"""
Add an item into the Maya interface.
"""
maya_window = get_maya_main_window()
menu = mc.menu("Kitsu", parent=maya_window)
mc.menuItem(label="Launch Kitsu", command=launch_kitsu, parent=menu) | 690 |
def test_uniform_simplex_homotopy(sarr):
"""Test homotopy"""
uniform = sarr.uniform_mixture()
simp = sarr.mixture_to_simplex(uniform)
assert np.allclose(simp[0], simp[1:])
assert np.allclose(uniform, sarr.mixture_from_simplex(simp)) | 691 |
def false_function():
"""Sample function to test unit testing."""
return False | 692 |
def broker_task_send(task_uuid, request, broker_point, reply_to=None):
"""Command to publish `primitives.Request` to customer
Args:
task_uuid(str): task identification
request: Serialized request
broker_point(gromozeka.BrokerPoint):
reply_to(gromozeka.BrokerPoint):
Returns:
Command:
"""
return Command(command=BROKER_TASK_SEND,
args={'task_uuid': task_uuid, 'request': request, 'broker_point': broker_point,
'reply_to': reply_to}).as_tuple() | 693 |
def process_axis_labels(datadesc, blobs, offset=0):
"""Convert the raw axis label descriptions.
Similar to LiveDataPanel._process_axis_labels, but is flexible in datadesc.
"""
CLASSIC = {'define': 'classic'}
labels = {}
titles = {}
for size, axis in zip(reversed(datadesc['shape']), AXES):
# if the 'labels' key does not exist or does not have the right
# axis key set default to 'classic'.
label = datadesc.get(
'labels', {'x': CLASSIC, 'y': CLASSIC}).get(axis, CLASSIC)
if label['define'] == 'range':
start = label.get('start', 0)
size = label.get('length', 1)
step = label.get('step', 1)
end = start + step * size
labels[axis] = numpy.arange(start, end, step)
elif label['define'] == 'array':
index = label.get('index', 0)
labels[axis] = numpy.frombuffer(blobs[index],
label.get('dtype', '<i4'))
else:
labels[axis] = numpy.array(range(size))
labels[axis] += offset if axis == 'x' else 0
titles[axis] = label.get('title')
return labels, titles | 694 |
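# Hedged usage sketch (toy values; AXES is the module's axis-name tuple, assumed
# here to start with 'x', 'y'): a 'range' definition for x and the classic
# default for y.
_demo_desc = {
    'shape': [4, 3],   # (ny, nx) -- reversed() pairs nx with 'x'
    'labels': {'x': {'define': 'range', 'start': 10, 'length': 3, 'step': 2}},
}
_labels, _titles = process_axis_labels(_demo_desc, blobs=[])
print(_labels['x'])   # [10 12 14]
print(_labels['y'])   # [0 1 2 3]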
def _to_ranks_by_group(dat, group, formula, exclude_cols=[]):
"""
    Convert predictors to ranks separately for each group, for use in a rank Lmer. Any columns not in the model formula or listed in exclude_cols will not be converted to ranks. Used by models.Lmer
Args:
dat (pd.DataFrame): dataframe of data
group (string): string name of column to group data on
formula (string): Lmer flavored model formula with random effects
exclude_cols (list): optional columns that are part of the formula to exclude from rank conversion.
Returns:
pandas.core.frame.DataFrame: ranked data
"""
if (not isinstance(group, str)) and (group not in dat.columns):
raise TypeError(
"group must be a valid column name in the dataframe. Currently only 1 grouping variable is supported."
)
if isinstance(exclude_cols, str):
exclude_cols = [exclude_cols]
original_col_order = list(dat.columns)
formula = formula.replace(" ", "")
to_rank = formula.split("~")[-1].split("(")[0].split("+")[:-1]
# add dv to be ranked
to_rank.append(formula.split("~")[0])
to_rank = [c for c in to_rank if c not in exclude_cols]
other_cols = [c for c in dat.columns if c not in to_rank]
dat = pd.concat(
[dat[other_cols], dat.groupby(group).apply(lambda g: g[to_rank].rank())], axis=1
)
return dat[original_col_order] | 695 |
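# Hypothetical usage sketch with toy data (not from the original package); it
# relies on groupby().apply() preserving the original row index for this
# transform-like result, which is what the implementation above assumes.
import pandas as pd

_toy = pd.DataFrame({
    "group": ["a", "a", "a", "b", "b", "b"],
    "IV": [3.0, 1.0, 2.0, 10.0, 30.0, 20.0],
    "DV": [0.5, 0.1, 0.9, 5.0, 4.0, 6.0],
})
print(_to_ranks_by_group(_toy, "group", "DV ~ IV + (1|group)"))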
def build_input_data(sentences, labels, vocabulary):
"""
Maps sentencs and labels to vectors based on a vocabulary.
"""
# With capped vocab, need to account for word not present in
# vocab. Using the padding word.
# TODO -- pass padding word in as an arg
padding_word = "<PAD/>"
pad_idx = vocabulary[padding_word]
x = np.array(
[[vocabulary.get(word, pad_idx) for word in sentence] for sentence in sentences])
y = np.array(labels)
return [x, y] | 696 |
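# Hedged usage sketch with a toy vocabulary: out-of-vocabulary words fall back
# to the padding index, as described in the comment above.
_vocab = {"<PAD/>": 0, "hello": 1, "world": 2}
_x, _y = build_input_data([["hello", "world", "unseen"]], [1], _vocab)
print(_x)   # [[1 2 0]] -- "unseen" mapped to the padding index
print(_y)   # [1]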
def list_scans():
"""
:return: A JSON containing a list of:
- Scan resource URL (eg. /scans/1)
- Scan target
- Scan status
"""
data = []
for scan_id, scan_info in SCANS.iteritems():
if scan_info is None:
continue
target_urls = scan_info.target_urls
status = scan_info.w3af_core.status.get_simplified_status()
errors = True if scan_info.exception is not None else False
data.append({'id': scan_id,
'href': '/scans/%s' % scan_id,
'target_urls': target_urls,
'status': status,
'errors': errors})
return jsonify({'items': data}) | 697 |
def factor_list(f, *gens, **args):
"""
Compute a list of irreducible factors of ``f``.
**Examples**
>>> from sympy import factor_list
>>> from sympy.abc import x, y
>>> factor_list(2*x**5 + 2*x**4*y + 4*x**3 + 4*x**2*y + 2*x + 2*y)
(2, [(x + y, 1), (1 + x**2, 2)])
"""
return _generic_factor_list(f, gens, args, method='factor') | 698 |
def all_gather(data):
"""
Run all_gather on arbitrary picklable data (not necessarily tensors)
Args:
data: any picklable object
Returns:
list[data]: list of data gathered from each rank
"""
world_size = dist.get_world_size()
if world_size == 1:
return [data]
# serialized to a Tensor
buffer = pickle.dumps(data)
storage = torch.ByteStorage.from_buffer(buffer)
tensor = torch.ByteTensor(storage).to("cuda")
# obtain Tensor size of each rank
local_size = torch.LongTensor([tensor.numel()]).to("cuda")
size_list = [torch.LongTensor([0]).to("cuda") for _ in range(world_size)]
dist.all_gather(size_list, local_size)
size_list = [int(size.item()) for size in size_list]
max_size = max(size_list)
# receiving Tensor from all ranks
# we pad the tensor because torch all_gather does not support
# gathering tensors of different shapes
tensor_list = []
for _ in size_list:
tensor_list.append(torch.ByteTensor(size=(max_size,)).to("cuda"))
if local_size != max_size:
padding = torch.ByteTensor(size=(max_size - local_size,)).to("cuda")
tensor = torch.cat((tensor, padding), dim=0)
dist.all_gather(tensor_list, tensor)
data_list = []
for size, tensor in zip(size_list, tensor_list):
buffer = tensor.cpu().numpy().tobytes()[:size]
data_list.append(pickle.loads(buffer))
return data_list | 699 |
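# Hedged usage sketch: all_gather above needs an initialised default process
# group and a CUDA device per rank, so the call is guarded here. With a single
# initialised rank it just returns [data] without any communication.
import torch.distributed as dist

if dist.is_available() and dist.is_initialized():
    gathered = all_gather({"rank": dist.get_rank(), "n_samples": 123})
    print(len(gathered))   # == dist.get_world_size()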