content (string, lengths 22–815k) | id (int64, 0–4.91M)
---|---|
def _build_timecode(time, fps, drop_frame=False, additional_metadata=None):
"""
Makes a timecode xml element tree.
.. warning:: The drop_frame parameter is currently ignored and
auto-determined by rate. This is because the underlying otio timecode
conversion assumes DFTC based on rate.
    :param time: The :class:`opentime.RationalTime` for the timecode.
:param fps: The framerate for the timecode.
:param drop_frame: If True, generates drop-frame timecode.
:param additional_metadata: A dictionary with other metadata items like
``field``, ``reel``, ``source``, and ``format``. It is assumed this
dictionary is of the form generated by :func:`_xml_tree_to_dict` when
the file was read originally.
:return: The ``timecode`` element.
"""
if additional_metadata:
# Only allow legal child items for the timecode element
filtered = {
k: v for k, v in additional_metadata.items()
if k in ("field", "reel", "source", "format")
}
tc_element = _dict_to_xml_tree(filtered, "timecode")
else:
tc_element = cElementTree.Element("timecode")
tc_element.append(_build_rate(fps))
rate_is_not_ntsc = (tc_element.find('./rate/ntsc').text == "FALSE")
if drop_frame and rate_is_not_ntsc:
tc_fps = fps * (1000 / 1001.0)
else:
tc_fps = fps
# Get the time values
tc_time = opentime.RationalTime(time.value_rescaled_to(fps), tc_fps)
tc_string = opentime.to_timecode(tc_time, tc_fps, drop_frame)
_append_new_sub_element(tc_element, "string", text=tc_string)
frame_number = int(round(time.value))
_append_new_sub_element(
tc_element, "frame", text="{:.0f}".format(frame_number)
)
drop_frame = (";" in tc_string)
display_format = "DF" if drop_frame else "NDF"
_append_new_sub_element(tc_element, "displayformat", text=display_format)
return tc_element | 1,700 |
def transform(nodes, fxn, *args, **kwargs):
"""
Apply an arbitrary function to an array of node coordinates.
Parameters
----------
nodes : numpy.ndarray
An N x M array of individual node coordinates (i.e., the
x-coords or the y-coords only)
fxn : callable
The transformation to be applied to the whole ``nodes`` array
args, kwargs
Additional positional and keyword arguments that are passed to
``fxn``. The final call will be ``fxn(nodes, *args, **kwargs)``.
Returns
-------
transformed : numpy.ndarray
The transformed array.
"""
return fxn(nodes, *args, **kwargs) | 1,701 |
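# A tiny usage sketch for transform (illustrative only; the array and the
# offset below are made-up examples, not part of the original code).
import numpy as np
x_nodes = np.arange(6.0).reshape(2, 3)
shifted = transform(x_nodes, np.add, 10.0)  # final call is np.add(x_nodes, 10.0)
print(shifted)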
def create_input(
basedir, pertdir, latout=False, longwave=False, slc=slice(0, None, None)
):
"""Extract variables from a given directory and places into dictionaries.
It assumes that base and pert are different directories and only one
experiment output is present in each directory.
Slicing into time chunks is allowed and providing the filenames
follow CMIP6 convention they should be concatenated in the correct
order.
Variables required are rsdt, rsus, rsds, clt, rsdscs, rsuscs, rsut, rsutcs
An error will be raised if variables are not detected.
Parameters
----------
basedir : str
Directory containing control climate simulation variables
pertdir : str
Directory containing perturbed climate simulation variables
latout : bool, default=False
if True, include array of latitude points in the output.
longwave : bool, default=False
if True, do the longwave calculation using cloud radiative effect, in
addition to the shortwave calculation using APRP.
slc: `slice`, optional
Slice of indices to use from each dataset if not all of them.
Returns
-------
    base : dict
        array_like variables needed for APRP from the control simulation
    pert : dict
        array_like variables needed for APRP from the perturbed simulation
    lat : array_like, optional
        latitude points relating to axis 1 of the arrays, returned only if
        ``latout=True``
    """
base = {}
pert = {}
if longwave:
varlist = [
"rsdt",
"rsus",
"rsds",
"clt",
"rsdscs",
"rsuscs",
"rsut",
"rsutcs",
"rlut",
"rlutcs",
]
else:
varlist = ["rsdt", "rsus", "rsds", "clt", "rsdscs", "rsuscs", "rsut", "rsutcs"]
def _extract_files(filenames, var, directory):
if len(filenames) == 0:
raise RuntimeError(
f"No variables of name {var} found in directory {directory}"
)
for i, filename in enumerate(filenames):
ncfile = Dataset(filename)
invar = ncfile.variables[var][slc, ...]
lat = ncfile.variables["lat"][:]
ncfile.close()
if i == 0:
outvar = invar
else:
# This works for me with CMIP6 netcdfs, but we don't have a small
# example to test with
outvar = np.append(outvar, invar, axis=0) # pragma: nocover
return outvar, lat
for var in varlist:
filenames = sorted(glob.glob(f"{basedir}/{var}_*.nc"))
base[var], lat = _extract_files(filenames, var, basedir)
filenames = sorted(glob.glob(f"{pertdir}/{var}_*.nc"))
pert[var], lat = _extract_files(filenames, var, pertdir)
if latout:
return base, pert, lat
return base, pert | 1,702 |
def get_tax_proteins(tax_id, tax_prot_dict, prot_id_dict, gbk_dict, cache_dir, args):
"""Get the proteins linked to a tax id in NCBI, and link the tax id with the local db protein ids
:param tax_id: str, NCBI tax db id
:param tax_prot_dict: {ncbi tax id: {local db protein ids}}
:param prot_id_dict: dict {protein ncbi id: prot acc}
:param gbk_dict: dict, {prot acc: local db id}
:param cache_dir: Path, path to cache dir
:param args: cmd-line args parser
Return dict {tax_id: {local db protein ids}} and bool (True=success, False=failed)
"""
logger = logging.getLogger(__name__)
try:
with entrez_retry(
args.retries,
Entrez.elink,
id=tax_id,
db="Protein",
dbfrom="Taxonomy",
linkname="taxonomy_protein",
) as handle:
tax_links = Entrez.read(handle, validate=False)
except (AttributeError, TypeError, RuntimeError) as err:
logger.warning(f"Failed to link NCBI tax id to NCBI Protein db for tax id {tax_id}\n{err}")
return tax_prot_dict, False
try:
tax_prot_dict[tax_id]
except KeyError:
tax_prot_dict[tax_id] = set()
for result in tax_links:
for item in result['LinkSetDb']:
links = item['Link']
for link in links:
linked_prot_id = link['Id']
# check if from the local database
try:
prot_ver_acc = prot_id_dict[linked_prot_id]
except KeyError:
continue
try:
prot_local_db_id = gbk_dict[prot_ver_acc]
except KeyError:
                    logger.error(
                        "Did not previously retrieve data from the local "
                        f"db for {prot_ver_acc}\n"
                        "Caching and skipping protein"
                    )
                    with open((cache_dir/"failed_local_db_retrieval.out"), "a") as fh:
                        fh.write(f"{prot_ver_acc}\n")
continue
tax_prot_dict[tax_id].add(prot_local_db_id)
return tax_prot_dict, True | 1,703 |
def define_features_vectorizer(columns, training_data, testing_data = None, ngramrange=(1,1)):
"""
Define the features for classification using CountVectorizer.
Parameters
----------
    columns: String or list of strings if using multiple columns
        Names of columns of df that are used for training the classifier
training_data: Pandas dataframe
The dataframe containing the training data for the classifier
testing_data: Pandas dataframe
The dataframe containing the testing data for the classifier
ngramrange: tuple (min_n, max_n), with min_n, max_n integer values
range for ngrams used for vectorization
Returns
-------
vectorizer: sklearn CountVectorizer
CountVectorizer fit and transformed for training data
training_features: sparse matrix
Document-term matrix for training data
testing_features: sparse matrix
Document-term matrix for testing data
"""
    # initialise CountVectorizer and fit it to the training data
    vectorizer = CountVectorizer(ngram_range=ngramrange)
    vectorizer.fit(training_data[columns].values)
    # build document-term matrices for training_features and testing_features
    training_features = vectorizer.transform(training_data[columns].values)
if testing_data is not None:
testing_features=vectorizer.transform(testing_data[columns].values)
else:
testing_features = None
return vectorizer, training_features, testing_features | 1,704 |
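# A minimal usage sketch for define_features_vectorizer (illustrative only;
# the "text" column name and the toy DataFrames are assumptions, not part of
# the original code).
import pandas as pd
train_df = pd.DataFrame({"text": ["good movie", "bad plot", "great acting"]})
test_df = pd.DataFrame({"text": ["bad acting", "good plot"]})
vec, X_train, X_test = define_features_vectorizer("text", train_df, test_df,
                                                  ngramrange=(1, 2))
print(X_train.shape, X_test.shape)  # sparse document-term matrices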
def resolve_4d_input_blob(
hparams: tf.contrib.training.HParams,
runtime_bs: tf.Tensor,
features: dict,
feature_columns: list,
info_log: bool = False,
) -> tf.Tensor:
"""Convert a dict feature input to a 4D input cube with dimension (NHWC).
    This function is experimental.
    Arguments:
        hparams {tf.contrib.training.HParams} -- hyperparameters
        runtime_bs {tf.Tensor} -- the batch size at runtime
        features {dict} -- the dict of features
        feature_columns {list} -- the list of feature columns
Keyword Arguments:
info_log {bool} -- True to enable debugging info logging (default: {False})
Returns:
tf.Tensor -- The created 4D input Tensor
"""
# initialize vars
frame_shape = resolve_simple_number_array(hparams, "frameShape")
batch_input_shape_tensor = tf.convert_to_tensor([runtime_bs] + frame_shape)
padding = resolve_simple_number_array(hparams, "padding")
# Process time-series and non-time-series features one by one
feature_list = []
for key in sorted(features):
cur_feature = features[key]
is_array, _ = check_array_feature(cur_feature)
# build ts feature planes
if is_array:
# padding
if sum(padding) > 0:
padding_tensor = tf.constant([[0, 0], padding])
cur_feature = tf.pad(
cur_feature, padding_tensor, mode="CONSTANT", constant_values=0
)
# reshape
cur_feature = tf.reshape(cur_feature, batch_input_shape_tensor)
# cast to float
if cur_feature.dtype != tf.float32:
cur_feature = tf.cast(cur_feature, dtype=tf.float32)
# add to list with added channel dim (NHWC)
feature_list.append(cur_feature[:, :, :, tf.newaxis])
# log ts feature
if info_log:
tf.logging.info("{}: {}".format(key, cur_feature))
# build non-ts feature planes (Numerical Features)
# note that we treat SparseTensor and Tensor with dtype=string as categorical features
elif type(cur_feature) is tf.Tensor and cur_feature.dtype.name != "string":
# tiling
cur_feature = tf.tile(
cur_feature[:, tf.newaxis], [1, frame_shape[0] * frame_shape[1]]
)
# reshape
cur_feature = tf.reshape(cur_feature, batch_input_shape_tensor)
# cast to float
if cur_feature.dtype != tf.float32:
cur_feature = tf.cast(cur_feature, dtype=tf.float32)
# add to list with added channel dim (NHWC)
feature_list.append(cur_feature[:, :, :, tf.newaxis])
# log numerical feature
if info_log:
tf.logging.info("{}: {}".format(key, cur_feature))
# build non-ts feature planes (Categorical Features)
else:
cur_feature = tfc.input_layer(
{key: cur_feature}, find_feature_column(key, feature_columns)
)
# padding
cur_feature = tf.tile(
cur_feature[:, :, tf.newaxis], [1, 1, frame_shape[0] * frame_shape[1]]
)
# split
cur_features = tf.split(
cur_feature, axis=1, num_or_size_splits=cur_feature.shape[1]
)
# process each feature plane
for entry in cur_features:
# reshape
entry = tf.reshape(entry, batch_input_shape_tensor)
# cast to float
if entry.dtype != tf.float32:
entry = tf.cast(entry, dtype=tf.float32)
# add to list with added channel dim (NHWC)
feature_list.append(entry[:, :, :, tf.newaxis])
# log categorical feature plane
if info_log:
tf.logging.info("{}: {}".format(key, entry))
# channel stacking
data = tf.concat(feature_list, -1)
# interpolation
interp = resolve_simple_number_array(hparams, "interp")
if interp is not None and interp != frame_shape:
data = tf.image.resize_images(data, tf.convert_to_tensor(interp))
return data | 1,705 |
def GetPostgreSQLLoginInfo():
"""
* Get database login information from pem file
"""
    passfile = '/mnt/data/other/pem/sinnud_pg.dat'
    if not os.path.isfile(passfile):
        return (False, None)
    with open(passfile, 'r') as f:
        passinfo = f.read().strip()
    (host, user, dbname, password, port) = passinfo.split()
    return (True, (host, user, dbname, password, port))
def trace_condition(variables: TemplateVarsType) -> Generator:
"""Trace condition evaluation."""
trace_element = condition_trace_append(variables, trace_path_get())
trace_stack_push(trace_stack_cv, trace_element)
try:
yield trace_element
except Exception as ex:
trace_element.set_error(ex)
raise ex
finally:
trace_stack_pop(trace_stack_cv) | 1,707 |
def download_pictures(recent_seach_tweets):
"""
Download pictures from tweets
    :param recent_seach_tweets: list of dictionaries
"""
# Downloading pictures
print('%s - Downloading %d tweets' % (datetime.datetime.now().strftime('%d/%m/%Y - %H:%M'), len(recent_seach_tweets)))
for tw in recent_seach_tweets:
img_url = tw['images'][0]
filename = tw['text'][:tw['text'].index("#")-1].lower().replace(' ','_')
filename = "./downloaded_pics/%s.jpg" % filename
urllib.request.urlretrieve(img_url, filename) | 1,708 |
def show_graph_unique_not_callback(n_clicks, input_box):
""" Function which is called by a wrapped function in another module. It takes
user input in a text box, returns a graph if the query produces a hit in Solr.
Returns an error message otherwise.
ARGUMENTS: n_clicks: a parameter of the HTML button which indicates it has
been clicked
input_box: the content of the text box in which the user has
entered a comma-separated search query.
RETURNS: 1 graph (unique occurrences) of all terms which have results
from Solr """
# Store the layout with the appropriate title and y axis labels for the graph
layout_unique = go.Layout(
title = 'Percentage of papers containing chosen entity mention(s) per Month',
xaxis = {'title': 'Publication date', 'tickformat': '%b %y', 'tick0': '2007-04-30',
'dtick': 'M2', 'range': ['2007-03-25', '2018-01-25'], 'titlefont': {'size': 20}, 'tickfont': {'size': 15}},
yaxis = {'title': 'Percentage of papers with entity mention', 'ticksuffix': '%', 'titlefont': {'size': 19}, 'tickfont': {'size': 18}},
plot_bgcolor = colours['background'],
paper_bgcolor = colours['background'],
barmode = 'stack',
hovermode = 'closest',
font= {
'color': colours['text'],
'size': 15
},
showlegend=True,
legend = {'font': {'size': 18}, 'x': 0, 'y': -0.5, 'orientation': 'h'}
)
if input_box != '':
# Get the input data: both freq_df dfs will have index= published_date,
# columns = percentage_occurrences unique.
input_list = input_box.lower().split(',')
data_list_unique = []
notfound_list = []
for input_val in input_list:
# Make sure to strip input_val, otherwise if the user enters a
# space after the comma in the query, this space will get sent
# to Solr.
input_val = input_val.strip()
# If the search phrase doesn't start with the wikipedia url, it is a
# noun phrase which has to be converted to a URL
if not input_val.startswith('http://en.wikipedia.org/wiki'):
input_val = convert_phrase_to_url(input_val)
freq_df_total, freq_df_unique = get_aggregated_data(input_val)
if freq_df_unique is not None:
# Plot the graphs, published_date (index) goes on the x-axis,
# and percentage_occurrences (unique) goes on the y-axis.
data_list_unique.append(go.Bar(
x = freq_df_unique.index,
y = freq_df_unique.percentage_occurrences,
text = input_val.strip(), # hover text
opacity = 0.7,
name = input_val.strip() # legend text
))
else:
# Term not found, append it to the not found list and go to the
# next term.
notfound_list.append(input_val)
if data_list_unique == []:
if notfound_list != []:
# Append the error message for the terms not found in the
# Solr index
# return html.Br()
return not_found_message(notfound_list)
# One or more of the Solr queries returned a result
else:
graph_unique_terms = {'data': data_list_unique, 'layout': layout_unique}
if notfound_list != []:
terms_not_found = not_found_message(notfound_list)
#return terms_not_found, html.Br(),
return terms_not_found, dcc.Graph(id='uniquefreq', figure= graph_unique_terms)
return html.Br(), dcc.Graph(id='uniquefreq', figure= graph_unique_terms) | 1,709 |
def _add_string_datatype(graph, length):
"""Add a custom string datatype to the graph refering.
Args:
graph (Graph): The graph to add the datatype to
length (int): The maximim length of the string
Returns:
URIRef: The iri of the new datatype
"""
iri = rdflib_cuba[f"_datatypes/STRING-{length}"]
triple = (iri, RDF.type, RDFS.Datatype)
if graph is None or triple in graph:
return iri
graph.add(triple)
# length_triple = (iri, rdflib_cuba._length, Literal(int(length)))
# graph.add(length_triple)
return iri | 1,710 |
def cercle(x, y, r, color='black'):
"""tracé d'un cercle de centre (x,y) et de rayon r"""
MonCanvas.create_oval(x - r, y - r, x + r, y + r, outline=color) | 1,711 |
def get_selected_shipping_country(request):
"""Returns the selected shipping country for the passed request.
This could either be an explicitely selected country of the current
user or the default country of the shop.
"""
customer = customer_utils.get_customer(request)
if customer:
if customer.selected_shipping_address:
return customer.selected_shipping_address.country
elif customer.selected_country:
return customer.selected_country
return lfs.core.utils.get_default_shop(request).get_default_country() | 1,712 |
def func_item_iterator_next(*args):
"""
func_item_iterator_next(fii, testf, ud) -> bool
"""
return _ida_funcs.func_item_iterator_next(*args) | 1,713 |
def find_score_maxclip(tp_support, tn_support, clip_factor=ut.PHI + 1):
"""
    returns score to clip true positives past.
    Args:
        tp_support (ndarray):
        tn_support (ndarray):
        clip_factor (float): multiple of the max true negative score used as the cap
Returns:
float: clip_score
Example:
>>> # ENABLE_DOCTEST
>>> from ibeis.algo.hots.score_normalization import * # NOQA
>>> tp_support = np.array([100, 200, 50000])
>>> tn_support = np.array([10, 30, 110])
>>> clip_score = find_score_maxclip(tp_support, tn_support)
>>> result = str(clip_score)
>>> print(result)
287.983738762
"""
max_true_positive_score = tp_support.max()
max_true_negative_score = tn_support.max()
if clip_factor is None:
clip_score = max_true_positive_score
else:
overshoot_factor = max_true_positive_score / max_true_negative_score
if overshoot_factor > clip_factor:
clip_score = max_true_negative_score * clip_factor
else:
clip_score = max_true_positive_score
return clip_score | 1,714 |
def rho_prof_out(cluster, fileout, **kwargs):
"""Write density profile to file
Parameters
----------
cluster : class
StarCluster
fileout : file
opened file to write data to
Returns
-------
None
Other Parameters
----------------
kwargs : str
        keyword arguments for rho_prof
History
-------
2018 - Written - Webb (UofT)
"""
fileout.write("%f " % (cluster.tphys))
rprof, pprof, nprof=rho_prof(cluster,**kwargs)
for r in rprof:
fileout.write("%f " % (r))
for p in pprof:
fileout.write("%f " % (p))
for n in nprof:
fileout.write("%f " % (n))
fileout.write("\n") | 1,715 |
def create_sponsor():
"""
Creates a new sponsor.
---
tags:
- sponsor
summary: Create sponsor
operationId: create_sponsor
requestBody:
content:
application/json:
schema:
allOf:
- $ref: '#/components/schemas/Sponsor'
- type: object
multipart/form-data:
schema:
type: object
properties:
sponsor:
deprecated: true
allOf:
- $ref: '#/components/schemas/Sponsor'
- type: object
description: >
Deprecated,
do not use `multipart/form-data`,
use `application/json`.
properties:
encoding:
sponsor:
contentType: application/json
description: Created sponsor Object
required: true
responses:
201:
description: OK
400:
description: Bad request.
409:
description: Sorry, that sponsor already exists.
5XX:
description: Unexpected error.
"""
if "multipart/form-data" in request.content_type:
try:
data = json.loads(request.form.get("sponsor"))
except JSONDecodeError:
raise BadRequest("Invalid JSON sent in sponsor form part.")
elif request.content_type == "application/json":
data = request.get_json()
else:
raise UnsupportedMediaType()
if not data:
raise BadRequest()
try:
sponsor = Sponsor.createOne(**data)
sponsor.save()
except NotUniqueError:
raise Conflict("Sorry, that sponsor already exists.")
except ValidationError:
raise BadRequest()
res = {
"status": "success",
"message": "sponsor was created!"
}
res = make_response(res)
if "multipart/form-data" in request.content_type:
res.headers["Deprecation"] = (
"The use of multipart/form-data is deprecated. ")
if "socials" in data:
res.headers["Deprecation"] = (
"The socials field is deprecated use sponsor_website instead")
return res, 201 | 1,716 |
def download_or_copy(uri, target_dir, fs=None) -> str:
"""Downloads or copies a file to a directory.
Downloads or copies URI into target_dir.
Args:
uri: URI of file
target_dir: local directory to download or copy file to
fs: if supplied, use fs instead of automatically chosen FileSystem for
uri
Returns:
the local path of file
"""
local_path = download_if_needed(uri, target_dir, fs=fs)
shutil.copy(local_path, target_dir)
return local_path | 1,717 |
def main():
""" Handle command-line invocation of this script """
# TODO: read directory, num_annotators and coverage_level as
# command-line parameters
gen_spreadsheets(3, 1, 'data-20150519/') | 1,718 |
async def test_sqlite_import(hass, client, caplog, monkeypatch):
"""Test import from sqlite keys file."""
m_open = mock_open(read_data="will raise JSONDecodeError")
monkeypatch.setattr(client, "is_connected", Mock(return_value=True))
monkeypatch.setattr(client, "connect", Mock(return_value=True))
with patch(
"homeassistant.components.webostv.os.path.isfile", Mock(return_value=True)
), patch("homeassistant.components.webostv.open", m_open, create=True), patch(
"homeassistant.components.webostv.db.create_engine",
side_effect=create_memory_sqlite_engine,
):
await setup_legacy_component(hass)
assert "imported from YAML config" in caplog.text
assert is_entity_unique_id_updated(hass) | 1,719 |
def test_simplest_numbers_generator(test):
"""To test the simplest_numbers_generator, use:
test_simplest_numbers_generator("for")
-- to see the generator behaviour on for loop
test_simplest_numbers_generator("next")
-- to see the generator behaviour on next() call
"""
print_header("Testing simplest_numbers_generator on {}".format(test), "#", 50)
# create num_gen generator
num_gen = simplest_numbers_generator()
# print( num_gen.__dir__() )
if test == "for":
for i in num_gen:
print(i)
elif test == "next":
# ask the num_gen for 2 values:
print( next(num_gen) )
print( next(num_gen) )
        # ask num_gen for more values than it can yield
print( next(num_gen) ) | 1,720 |
def main(session):
""" Parse command line arguments, run recordData
and write the results into a csv file.
"""
# Get the services ALMemory and ALMotion.
memory_service = session.service("ALMemory")
motion_service = session.service("ALMotion")
# Set stiffness on for Head motors
motion_service.setStiffnesses("Head", 1.0)
# Will go to 1.0 then 0 radian in two seconds
motion_service.angleInterpolation(
["HeadYaw"],
[1.0, 0.0],
[1 , 2],
False,
_async=True
)
data = recordData(memory_service)
# Gently set stiff off for Head motors
motion_service.setStiffnesses("Head", 0.0)
output = os.path.abspath("record.csv")
with open(output, "w") as fp:
for line in data:
fp.write("; ".join(str(x) for x in line))
fp.write("\n")
print "Results written to", output | 1,721 |
def startup():
""" Starts everything up """
settings = {
'telegram_token': os.environ.get('TELEGRAM_TOKEN'),
'telegram_chat_id': os.environ.get('TELEGRAM_CHAT_ID'),
'gotify_url': os.environ.get('GOTIFY_URL'),
'gotify_token': os.environ.get('GOTIFY_TOKEN'),
'port': int(os.environ.get('PORT', '8899')),
'host': os.environ.get('ADDRESS', '*'),
'telegram_template': os.environ.get('TELEGRAM_TEMPLATE', 'html.j2'),
'gotify_template': os.environ.get('GOTIFY_TEMPLATE', 'markdown.md.j2'),
'null_template': os.environ.get('NULL_TEMPLATE', 'text.j2'),
'exclude_labels': os.environ.get('EXCLUDE_LABELS'),
'notifiers': [],
}
if settings['telegram_token'] and settings['telegram_chat_id']:
settings['notifiers'].append('telegram')
if settings['gotify_url'] and settings['gotify_token']:
settings['notifiers'].append('gotify')
log.info(f"Starting {__package__} {version}, listening on {settings['host']}:{settings['port']}")
return settings | 1,722 |
def as_yaml(config: Dict[str, Any], **yaml_args: Any) -> str:
"""Use PyYAML library to write YAML file"""
return yaml.dump(config, **yaml_args) | 1,723 |
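# Hedged usage sketch for as_yaml: any PyYAML keyword arguments are passed
# straight through to yaml.dump (the config values here are made up).
cfg = {"name": "demo", "threshold": 0.5, "tags": ["a", "b"]}
print(as_yaml(cfg, default_flow_style=False, sort_keys=False))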
def retrieve(filen,start,end):
"""Retrieve a block of text from a file.
Given the name of a file 'filen' and a pair of start and
end line numbers, extract and return the text from the
file.
This uses the linecache module - beware of problems with
consuming too much memory if the cache isn't cleared."""
text = ""
# Check for consistency and validity of lines
if start < 0 and end < 0 or end < start:
return ""
# Fetch from a file if possible
if os.path.isfile(filen):
try:
for i in range(start,end+1):
text = text+str(linecache.getline(filen,i))
return text
except Exception:
print "Exception raised in retrieve method:"
print "\tSource file = \""+str(filen)+"\""
print "\tStart line = "+str(start)
print "\tEnd line = "+str(end)
print "\tCurrent line = "+str(i)
raise
# Otherwise return nothing
return "" | 1,724 |
def test_file_groups_multiple_levels_nested_work_on_and_protect_dirs_with_pattern_and_debug(duplicates_dir, capsys):
"""Multiple protect dirs
Protect:
ki1 -> 6
ki1/df -> 3
ki1/df/ki12 -> 4
ki1/df/ki13 -> 4
ki1/df/ki13/ki14 -> 4
df2 -> 3
total: 24
Work_On:
ki1/df -> 4
ki1/df/ki13/df12 -> 4
df2 -> 4
total: 12
"""
kargs = ["ki1", "ki1/df/ki12", "ki1/df/ki13", "ki1/df/ki13/ki14"]
dargs = ["df2", "ki1/df", "ki1/df/ki13/df12"]
# Exclude from protext filenames where the 'stem' ends with 4 or 1
# Include in work_on filenames where the 'stem' ends with 1
with FGC(FileGroups(kargs, dargs, protect_exclude=re.compile(r'.*[41](\..*)?$'), work_include=re.compile(r'.*1(\..*)?$'), debug=True), duplicates_dir) as ck:
assert ck.ckfl(
'must_protect.files',
'ki1/df/ki12/f12', 'ki1/df/ki12/f22',
'ki1/df/ki12/f512', 'ki1/df/ki13/f12', 'ki1/df/ki13/f22',
'ki1/df/ki13/ki14/f12', 'ki1/df/ki13/ki14/f22', 'ki1/df/ki13/ki14/fffff52.txt',
'ki1/f12', 'ki1/f22', 'ki1/f512')
assert ck.ckfl(
'may_work_on.files',
'df2/f11', 'df2/f21', 'df2/f41',
'ki1/df/f11', 'ki1/df/f21', 'ki1/df/f31', 'ki1/df/f41',
'ki1/df/ki13/df12/f11', 'ki1/df/ki13/df12/f21')
ck.fg.stats()
out, _ = capsys.readouterr()
assert 'collected protect_directories:' in out
assert 'collected protect_directory_symlinks:' in out
assert 'collected work_on_directories:' in out
assert 'collected work_on_directory_symlinks:' in out
assert 'collected must_protect_files:' in out
assert 'collected must_protect_symlinks:' in out
assert 'collected may_work_on_files:' in out
assert 'collected may_work_on_symlinks:' in out
pytest.xfail('TODO: directory counts') | 1,725 |
def assert_equal(actual: numpy.bool_, desired: numpy.ndarray):
"""
usage.dask: 1
"""
... | 1,726 |
def L1_Charbonnier_loss(predict, real):
"""
    Charbonnier (L1) loss function.
    Args:
        predict: predicted result
        real: ground-truth result
    Returns:
        loss value
"""
eps = 1e-6
diff = tf.add(predict, -real)
error = tf.sqrt(diff * diff + eps)
loss = tf.reduce_mean(error)
return loss | 1,727 |
def correction_byte_table_h() -> dict[int, int]:
"""Table of the number of correction bytes per block for the correction
level H.
Returns:
dict[int, int]: Dictionary of the form {version: number of correction
bytes}
"""
table = {
1: 17, 2: 28, 3: 22, 4: 16, 5: 22, 6: 28, 7: 26, 8: 26, 9: 24, 10: 28,
11: 24, 12: 28, 13: 22, 14: 24, 15: 24, 16: 30, 17: 28, 18: 28, 19: 26,
20: 28, 21: 30, 22: 24, 23: 30, 24: 30, 25: 30, 26: 30, 27: 30, 28: 30,
29: 30, 30: 30, 31: 30, 32: 30, 33: 30, 34: 30, 35: 30, 36: 30, 37: 30,
38: 30, 39: 30, 40: 30
}
return table | 1,728 |
def stash_rename(node_id, new_name):
"""Renames a node."""
return stash_invoke('rename', node_id, new_name) | 1,729 |
def _xor(cpu_context: ProcessorContext, instruction: Instruction):
""" XOR """
operands = instruction.operands
opvalue1 = operands[0].value
opvalue2 = operands[1].value
width = get_max_operand_size(operands)
result = opvalue1 ^ opvalue2
cpu_context.registers.cf = 0
cpu_context.registers.zf = int(result == 0)
cpu_context.registers.sf = utils.sign_bit(result, width)
cpu_context.registers.of = 0
cpu_context.registers.pf = get_parity(result)
if cpu_context.emulator.branch_tracking:
cpu_context.jcccontext.update_flag_opnds(["cf", "zf", "sf", "of", "pf"], operands)
logger.debug("0x%X ^ 0x%X = 0x%X", opvalue1, opvalue2, result)
operands[0].value = result | 1,730 |
def get_fake_value(attr): # attr = (name, type, [dim, [dtype]])
""" returns default value for a given attribute based on description.py """
if attr[1] == pq.Quantity or attr[1] == np.ndarray:
size = []
for i in range(int(attr[2])):
size.append(np.random.randint(100) + 1)
to_set = np.random.random(size) * pq.millisecond # let it be ms
if attr[0] == 't_start': to_set = 0.0 * pq.millisecond
if attr[0] == 't_stop': to_set = 1.0 * pq.millisecond
if attr[0] == 'sampling_rate': to_set = 10000.0 * pq.Hz
if attr[1] == np.ndarray:
to_set = np.array(to_set, dtype=attr[3])
if attr[1] == str:
to_set = str(np.random.randint(100000))
if attr[1] == int:
to_set = np.random.randint(100)
if attr[1] == datetime:
to_set = datetime.now()
return to_set | 1,731 |
def find_x(old_time,omega,new_time):
"""
Compute x at the beginning of new time array.
"""
interp_omega=spline(old_time,omega)
x=interp_omega(new_time[0])**(2./3)
return x | 1,732 |
def _platformio_library_impl(ctx):
"""Collects all transitive dependencies and emits the zip output.
Outputs a zip file containing the library in the directory structure expected
by PlatformIO.
Args:
ctx: The Skylark context.
"""
name = ctx.label.name
# Copy the header file to the desired destination.
header_file = ctx.actions.declare_file(
_HEADER_FILENAME.format(dirname=name, filename=name))
inputs = [ctx.file.hdr]
outputs = [header_file]
commands = [_COPY_COMMAND.format(
source=ctx.file.hdr.path, destination=header_file.path)]
# Copy all the additional header and source files.
for additional_files in [ctx.attr.add_hdrs, ctx.attr.add_srcs]:
for target in additional_files:
if len(target.files.to_list()) != 1:
fail("each target listed under add_hdrs or add_srcs must expand to " +
"exactly one file, this expands to %d: %s" %
(len(target.files), target.files))
# The name of the label is the relative path to the file, this enables us
# to prepend "lib/" to the path. For PlatformIO, all the library files
# must be under lib/...
additional_file_name = target.label.name
additional_file_source = [f for f in target.files.to_list()][0]
additional_file_destination = ctx.actions.declare_file(
_ADDITIONAL_FILENAME.format(dirname=name, filename=additional_file_name))
inputs.append(additional_file_source)
outputs.append(additional_file_destination)
commands.append(_COPY_COMMAND.format(
source=additional_file_source.path,
destination=additional_file_destination.path))
# The src argument is optional, some C++ libraries might only have the header.
if ctx.attr.src != None:
source_file = ctx.actions.declare_file(
_SOURCE_FILENAME.format(dirname=name, filename=name))
inputs.append(ctx.file.src)
outputs.append(source_file)
commands.append(_COPY_COMMAND.format(
source=ctx.file.src.path, destination=source_file.path))
# Zip the entire content of the library folder.
outputs.append(ctx.outputs.zip)
commands.append(_ZIP_COMMAND.format(
output_dir=ctx.outputs.zip.dirname, zip_filename=ctx.outputs.zip.basename))
ctx.actions.run_shell(
inputs=inputs,
outputs=outputs,
command="\n".join(commands),
)
  # Collect the zip files produced by all transitive dependencies.
transitive_zip_files=depset([ctx.outputs.zip])
for dep in ctx.attr.deps:
transitive_zip_files = depset(transitive=[
transitive_zip_files, dep.transitive_zip_files
])
return struct(
transitive_zip_files=transitive_zip_files,
) | 1,733 |
def repeat_interleave(x, arg):
"""Use numpy to implement repeat operations"""
return paddle.to_tensor(x.numpy().repeat(arg)) | 1,734 |
def quantize_arr(arr, min_val=None, max_val=None, dtype=np.uint8):
"""Quantization based on real_value = scale * (quantized_value - zero_point).
"""
    if (min_val is None) or (max_val is None):
min_val, max_val = np.min(arr), np.max(arr)
scale, zero_point = choose_quant_params(min_val, max_val, dtype=dtype)
transformed_arr = zero_point + arr / scale
# print(transformed_arr)
if dtype == np.uint8:
clamped_arr = np.clip(transformed_arr, 0, 255)
quantized = clamped_arr.astype(np.uint8)
elif dtype == np.uint32:
clamped_arr = np.clip(transformed_arr, 0, 2 ** 31)
quantized = clamped_arr.astype(np.uint32)
else:
raise ValueError('dtype={} is not supported'.format(dtype))
# print(clamped_arr)
min_val = min_val.astype(np.float32)
max_val = max_val.astype(np.float32)
return quantized, min_val, max_val | 1,735 |
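# Illustrative round trip for quantize_arr (assumes choose_quant_params is
# available in scope, as it is for the function above; the sample array is
# made up).
import numpy as np
arr = np.linspace(-1.0, 1.0, 5).astype(np.float32)
quantized, mn, mx = quantize_arr(arr, dtype=np.uint8)
scale, zero_point = choose_quant_params(mn, mx, dtype=np.uint8)
recovered = scale * (quantized.astype(np.float32) - zero_point)  # real = scale * (q - zp)
print(quantized, recovered)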
def EST_NOISE(images):
"""Implementation of EST_NOISE in Chapter 2 of Trucco and Verri."""
num = images.shape[0]
m_e_bar = sum(images)/num
m_sigma = np.sqrt(sum((images - m_e_bar)**2) / (num - 1))
return m_sigma | 1,736 |
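# Quick illustrative check of EST_NOISE: a stack of identical frames plus
# Gaussian noise of sigma 2.0 should give a per-pixel noise estimate close
# to 2.0 (the shapes and noise level here are arbitrary).
import numpy as np
rng = np.random.default_rng(0)
frames = 50.0 + rng.normal(0.0, 2.0, size=(100, 16, 16))  # 100 frames of 16x16
sigma_map = EST_NOISE(frames)
print(sigma_map.mean())  # roughly 2.0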
def continue_cad_funcionario(request):
""" Continuação do Cadastro do Funcionário.
"""
usuario = request.user
try:
funcionario = Funcionario.objects.get(usuario=usuario)
except Exception:
raise Http404()
if funcionario and request.method == "POST":
form = FuncionarioForm(request.POST)
if form.is_valid():
form.save()
return redirect("funcionario")
else:
form = FuncionarioForm()
return render(request, "continue_cad_funcionario.html", {"form": form})
# if request.method == "POST":
# form = FuncionarioForm(request.POST)
# if form.is_valid():
# #'nome', 'rua', 'cpf', 'rg', 'fone', 'bloqueado', 'usuario_fun'
# nome = form.cleaned_data['nome']
# rua = form.cleaned_data['rua']
# cpf = form.cleaned_data['cpf']
# rg = form.cleaned_data['rg']
# fone = form.cleaned_data['fone']
# bloqueado = form.cleaned_data['bloqueado']
# usuario_fun = form.cleaned_data['usuario_fun']
# novo = Funcionario(
# nome=nome, rua=rua, cpf=cpf,
# rg=rg, fone=fone, bloqueado=bloqueado,
# suario_fun=usuario_fun
# )
# novo.save()
# return redirect("funcionario")
# else:
# form = FuncionarioForm()
# return render(request, "continue_cad_funcionario.html", {"form": form}) | 1,737 |
def remove_words(i_list, string):
"""
remove the input list of word from string
i_list: list of words to be removed
string: string on the operation to be performed
"""
regexStr = re.compile(r'\b%s\b' %
r'\b|\b'.join(map(re.escape, i_list)))
o_string = regexStr.sub("", string)
return o_string | 1,738 |
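# Hedged usage example for remove_words: only whole-word matches are removed,
# and the surrounding whitespace is left in place (the sentence and stop list
# below are made up).
sentence = "the quick brown fox jumps over the lazy dog"
print(remove_words(["the", "over"], sentence))
# -> " quick brown fox jumps   lazy dog"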
def save_user_time():
"""
Creates a DateTime object with correct save time
Checks if that save time is now
"""
save_time = datetime.utcnow().replace(hour=18, minute=0, second=0, microsecond=0)
return (save_time == (datetime.utcnow() - timedelta(hours=4))) | 1,739 |
def manage_rating_mails(request, orders_sent=[], template_name="manage/marketing/rating_mails.html"):
"""Displays the manage view for rating mails
"""
return render(request, template_name, {}) | 1,740 |
def cancel(request_url: str,
wait: Optional[bool] = False,
poll_interval: Optional[float] = STANDARD_POLLING_SLEEP_TIME,
verbose: Optional[bool] = False) -> int:
"""
Cancel the request at the given URL.
This method returns immediately by default since the API processes
this request asynchronously. If you would prefer to wait for it
to be completed, set the 'wait' parameter to True. You can adjust
the polling time using the 'poll_interval' parameter.
Args:
request_url: the URL string of the request to be canceled
wait: set to True to block until the cancellation request
has been completed (may wait for several minutes)
poll_interval: seconds to wait between polling
calls, defaults to STANDARD_POLLING_SLEEP_TIME.
verbose: if True then output poll times and other
progress, defaults to False
Returns:
1 on success
Raises:
pyaurorax.exceptions.AuroraXUnexpectedContentTypeException: unexpected error
pyaurorax.exceptions.AuroraXUnauthorizedException: invalid API key for this operation
"""
# do request
req = AuroraXRequest(method="delete",
url=request_url,
null_response=True)
req.execute()
# return immediately if we don't want to wait
if (wait is False):
return 1
# get status
status = get_status(request_url)
# wait for request to be cancelled
while (status["search_result"]["data_uri"] is None and status["search_result"]["error_condition"] is False):
time.sleep(poll_interval)
if (verbose is True):
print("[%s] Checking for cancellation status ..." % (datetime.datetime.now()))
status = get_status(request_url)
# return
if (verbose is True):
print("[%s] The request has been cancelled" % (datetime.datetime.now()))
return 1 | 1,741 |
def main():
"""
TODO: To find the best match of DNA similarity.
"""
long_sequence = input('Please give me a DNA sequence to search: ')
short_sequence = input('What DNA sequence would you like to match? ')
best_match = find_match(short_sequence, long_sequence)
print('The best match is '+str(best_match)) | 1,742 |
def set_jit_fusion_options():
"""Set PyTorch JIT layer fusion options."""
# set flags if we are using the 21.10 container
if torch.__version__ == "1.10.0a0+0aef44c":
# nvfuser
torch._C._jit_set_profiling_executor(True)
torch._C._jit_set_profiling_mode(True)
torch._C._jit_override_can_fuse_on_cpu(False)
torch._C._jit_override_can_fuse_on_gpu(False)
torch._C._jit_set_texpr_fuser_enabled(False)
torch._C._jit_set_nvfuser_enabled(True)
torch._C._debug_set_autodiff_subgraph_inlining(False) | 1,743 |
def plot_train_progress(scores, img_title, save_path, show, names=None):
"""
    A plotting function using the arrays of score values saved while training.
    :param scores: list of score/loss arrays saved during training
    :param img_title: title of the figure
    :param save_path: path to save the figure to, or None to skip saving
    :param show: if True, display the figure
    :param names: labels for the subplots; a list/tuple entry plots multiple curves
    :return:
"""
nrows, ncols = 2, 3
dx, dy = 2, 1
num_iter = len(scores[0])
xs = np.arange(start=1, stop=num_iter + 1, step=1)
figsize = plt.figaspect(float(dy * nrows) / float(dx * ncols))
fig, axes = plt.subplots(nrows, ncols, figsize=figsize)
fig.suptitle(img_title)
for sc, ax, name in zip(scores, axes.flat, names):
# Set label for the X axis
ax.set_xlabel('EpochN', fontsize=12)
if type(name) in [list, tuple]: # this should happen with loss plotting only
# It means that scores are represented as an MxN Numpy array
num_curves = sc.shape[1]
for idx in range(num_curves):
ax.plot(xs, sc[:, idx])
ax.xaxis.set_major_locator(ticker.MultipleLocator(1))
ax.legend(name) # name is a list -> need to create a legend for this subplot
ax.set_ylabel('Loss', fontsize=12)
else:
ax.plot(xs, sc)
ax.xaxis.set_major_locator(ticker.MultipleLocator(1))
ax.set_ylabel(name, fontsize=12)
plt.legend(loc='best', fancybox=True, framealpha=0.5)
pad = 0.05 # Padding around the edge of the figure
xpad, ypad = dx * pad, dy * pad
fig.tight_layout(pad=2, h_pad=xpad, w_pad=xpad)
if save_path is not None:
logger.debug("Saving the learning curve plot --> %s" % save_path)
fig.savefig(save_path)
if show:
plt.show() | 1,744 |
def selected_cases(self):
"""Get a list of all grid cases selected in the project tree
Returns:
A list of :class:`rips.generated.generated_classes.Case`
"""
case_infos = self._project_stub.GetSelectedCases(Empty())
cases = []
for case_info in case_infos.data:
cases.append(self.case(case_info.id))
return cases | 1,745 |
def create_whatsapp_group(org, subject):
"""
Creates a Whatsapp group using the subject
"""
result = requests.post(
urljoin(org.engage_url, "v1/groups"),
headers=build_turn_headers(org.engage_token),
data=json.dumps({"subject": subject}),
)
result.raise_for_status()
return json.loads(result.content)["groups"][0]["id"] | 1,746 |
def compute_MVBS_index_binning(ds_Sv, range_sample_num=100, ping_num=100):
"""Compute Mean Volume Backscattering Strength (MVBS)
based on intervals of ``range_sample`` and ping number (``ping_num``) specified in index number.
Output of this function differs from that of ``compute_MVBS``, which computes
bin-averaged Sv according to intervals of range (``echo_range``) and ``ping_time`` specified
in physical units.
Parameters
----------
ds_Sv : xr.Dataset
dataset containing ``Sv`` and ``echo_range`` [m]
range_sample_num : int
number of samples to average along the ``range_sample`` dimension, default to 100
ping_num : int
number of pings to average, default to 100
Returns
-------
A dataset containing bin-averaged Sv
"""
da_sv = 10 ** (ds_Sv["Sv"] / 10) # average should be done in linear domain
da = 10 * np.log10(
da_sv.coarsen(ping_time=ping_num, range_sample=range_sample_num, boundary="pad").mean(
skipna=True
)
)
# Attach attributes and coarsened echo_range
da.name = "Sv"
ds_MVBS = da.to_dataset()
ds_MVBS.coords["range_sample"] = (
"range_sample",
np.arange(ds_MVBS["range_sample"].size),
{"long_name": "Along-range sample number, base 0"},
) # reset range_sample to start from 0
ds_MVBS["echo_range"] = (
ds_Sv["echo_range"]
.coarsen( # binned echo_range (use first value in each average bin)
ping_time=ping_num, range_sample=range_sample_num, boundary="pad"
)
.min(skipna=True)
)
_set_MVBS_attrs(ds_MVBS)
ds_MVBS["Sv"] = ds_MVBS["Sv"].assign_attrs(
{
"cell_methods": (
f"ping_time: mean (interval: {ping_num} pings "
"comment: ping_time is the interval start) "
f"range_sample: mean (interval: {range_sample_num} samples along range "
"comment: range_sample is the interval start)"
),
"comment": "MVBS binned on the basis of range_sample and ping number specified as index numbers", # noqa
"binning_mode": "sample number",
"range_sample_interval": f"{range_sample_num} samples along range",
"ping_interval": f"{ping_num} pings",
"actual_range": [
round(float(ds_MVBS["Sv"].min().values), 2),
round(float(ds_MVBS["Sv"].max().values), 2),
],
}
)
prov_dict = echopype_prov_attrs(process_type="processing")
prov_dict["processing_function"] = "preprocess.compute_MVBS_index_binning"
ds_MVBS = ds_MVBS.assign_attrs(prov_dict)
ds_MVBS["frequency_nominal"] = ds_Sv["frequency_nominal"] # re-attach frequency_nominal
return ds_MVBS | 1,747 |
def set_image_exposure_time(exp_time):
"""
Send the command to set the exposure time per frame to SAMI.
Parameters
----------
exp_time (float) : the exposure time in seconds.
Returns
-------
message (string) : DONE if successful.
"""
message = send_command("dhe set obs.exptime {:f}".format(exp_time))
return message | 1,748 |
def ping(host):
""" Returns True if host (str) responds to a ping request.
Remember that a host may not respond to a ping (ICMP) request even if the
host name is valid.
    Based on https://bit.ly/2TmgeX2, but using subprocess
:param str host: A host name or ip
:return boolean: True if ping is replied correctly
"""
    # Option for the number of packets as a function of the operating system
param = '-n' if platform.system().lower() == 'windows' else '-c'
# Building the command. Ex: "ping -c 1 example.com"
command = ['ping', param, '1', host]
return subprocess.call(command) == 0 | 1,749 |
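# Illustrative call of ping (the address below is just an example; note a
# host may drop ICMP and still be reachable, as the docstring warns).
if ping("8.8.8.8"):
    print("host replied to the ping request")
else:
    print("no reply received")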
def stack_exists(client, stack_name):
""" Checks that stack was specified is existing """
cfn_stacks = client.list_stacks()
for cfn_stack in cfn_stacks["StackSummaries"]:
if cfn_stack['StackName'] == stack_name and "COMPLETE" in cfn_stack['StackStatus'] and "DELETE" not in cfn_stack['StackStatus']:
return True
return False | 1,750 |
def endorsement_services():
"""Return endorsement service list
Loads all defined service modules unless settings specifies otherwise
"""
global ENDORSEMENT_SERVICES
if ENDORSEMENT_SERVICES is None:
ENDORSEMENT_SERVICES = _load_endorsement_services()
return ENDORSEMENT_SERVICES | 1,751 |
def test_set_initial_open_orders():
"""
Check _set_initial_open_orders method for return
of an empty dictionary.
"""
start_dt = pd.Timestamp('2017-10-05 08:00:00', tz=pytz.UTC)
exchange = ExchangeMock()
data_handler = DataHandlerMock()
sb = SimulatedBroker(start_dt, exchange, data_handler)
assert sb._set_initial_open_orders() == {} | 1,752 |
def select(arrays, index):
"""
Index each array in a tuple of arrays.
If the arrays tuple contains a ``None``, the entire tuple will be returned
as is.
Parameters
----------
arrays : tuple of arrays
index : array
An array of indices to select from arrays.
Returns
-------
indexed_arrays : tuple of arrays
Examples
--------
>>> import numpy as np
>>> select((np.arange(5), np.arange(-3, 2, 1)), [1, 3])
(array([1, 3]), array([-2, 0]))
>>> select((None, None, None, None), [1, 2])
(None, None, None, None)
"""
if arrays is None or any(i is None for i in arrays):
return arrays
return tuple(i.ravel()[index] for i in arrays) | 1,753 |
def waypoint(waypoint_id):
"""view a book page"""
wp = Waypoint.query.filter_by(id=waypoint_id).first()
options = Option.query.filter_by(sourceWaypoint_id=waypoint_id)
if wp is None:
abort(404)
return render_template('books/waypoint.html', book=wp.book_of, waypoint=wp, options=options) | 1,754 |
def test_insert_requirement(client):
"""
Testing the method for posting information
to insert a requirement on the csv file (synthetic data)
"""
specification = dict(specification_id='X1C2V3B7',
product='OSLC SDK 7',
project='OSLC-Project 7',
title='OSLC RM Spec 7',
description='The OSLC RM Specification needs to be awesome 7',
source='Ian Altman',
author='Frank',
category='Customer Requirement',
discipline='Software Development',
revision='0',
target_value='1',
degree_of_fulfillment='0',
status='Draft')
response = client.post('oslc/rm/requirement',
data=json.dumps(specification),
content_type='application/json')
assert response.status_code == 201
assert response.data == b''
response = client.delete('oslc/rm/requirement/X1C2V3B7',
content_type='application/json')
assert response.status_code == 200 | 1,755 |
def encode_big_endian_16(i):
"""Take an int and return big-endian bytes"""
return encode_big_endian_32(i)[-2:] | 1,756 |
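# encode_big_endian_32 is not shown in this snippet; a minimal sketch of a
# compatible implementation (an assumption, not the original code) could be:
import struct
def encode_big_endian_32(i):
    """Take an int and return 4 big-endian bytes."""
    return struct.pack(">I", i)
# encode_big_endian_16(0x1234) then keeps the last two bytes -> b'\x12\x34'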
def get_comments_from_fawm_page(
url: str,
username: str,
password: str,
) -> List[Response]:
"""Extract comments from a given FAWM page."""
response = requests.get(url, auth=(username, password))
response.encoding = "UTF-8"
html = response.text
soup = BeautifulSoup(html, "html.parser")
responses = []
# there are non-comments with the class "comment-item", so we need to narrow down
for el in soup.find_all("li", {"class": "comment-item", "id": re.compile(r"c\d+")}):
responses.append(get_response_from_li(url, el))
return responses | 1,757 |
def send_message(service, user_id, message):
"""Send an email message.
Args:
service: Authorized Gmail API service instance.
user_id: User's email address. The special value "me"
can be used to indicate the authenticated user.
message: Message to be sent.
Returns:
Sent Message.
"""
try:
message = (service.users().messages().send(userId=user_id, body=message)
.execute())
print ('Message Id: %s' % message['id'])
return message
    except errors.HttpError as error:
print ('An error occurred: %s' % error) | 1,758 |
def get_supported_solvers():
"""
Returns a list of solvers supported on this machine.
:return: a list of SolverInterface sub-classes :list[SolverInterface]:
"""
return [sv for sv in builtin_solvers if sv.supported()] | 1,759 |
def eff_satellite_testing_auto_end_dating(context):
"""
Define the structures and metadata to load effectivity satellites
"""
context.hashed_columns = {
"STG_CUSTOMER_ORDER": {
"CUSTOMER_ORDER_PK": ["CUSTOMER_ID", "ORDER_ID"],
"CUSTOMER_PK": "CUSTOMER_ID",
"ORDER_PK": "ORDER_ID"
},
"STG_ORDER_CUSTOMER": {
"ORDER_CUSTOMER_PK": ["CUSTOMER_ID", "ORDER_ID"],
"CUSTOMER_PK": "CUSTOMER_ID",
"ORDER_PK": "ORDER_ID"
}
}
context.vault_structure_columns = {
"LINK_CUSTOMER_ORDER": {
"source_model": "STG_CUSTOMER_ORDER",
"src_pk": "CUSTOMER_ORDER_PK",
"src_fk": ["CUSTOMER_PK", "ORDER_PK"],
"src_ldts": "LOAD_DATETIME",
"src_source": "SOURCE"
},
"LINK_ORDER_CUSTOMER": {
"source_model": "STG_ORDER_CUSTOMER",
"src_pk": "ORDER_CUSTOMER_PK",
"src_fk": ["CUSTOMER_PK", "ORDER_PK"],
"src_ldts": "LOAD_DATETIME",
"src_source": "SOURCE"
},
"EFF_SAT_CUSTOMER_ORDER": {
"source_model": "STG_CUSTOMER_ORDER",
"src_pk": "CUSTOMER_ORDER_PK",
"src_dfk": ["CUSTOMER_PK"],
"src_sfk": "ORDER_PK",
"src_start_date": "START_DATE",
"src_end_date": "END_DATE",
"src_eff": "EFFECTIVE_FROM",
"src_ldts": "LOAD_DATETIME",
"src_source": "SOURCE"
},
"EFF_SAT_ORDER_CUSTOMER": {
"source_model": "STG_ORDER_CUSTOMER",
"src_pk": "ORDER_CUSTOMER_PK",
"src_dfk": ["ORDER_PK"],
"src_sfk": "CUSTOMER_PK",
"src_start_date": "START_DATE",
"src_end_date": "END_DATE",
"src_eff": "EFFECTIVE_FROM",
"src_ldts": "LOAD_DATETIME",
"src_source": "SOURCE"
}
}
context.seed_config = {
"RAW_STAGE_CUSTOMER_ORDER": {
"+column_types": {
"CUSTOMER_ID": "VARCHAR",
"ORDER_ID": "VARCHAR",
"START_DATE": "DATETIME",
"END_DATE": "DATETIME",
"EFFECTIVE_FROM": "DATETIME",
"LOAD_DATETIME": "DATETIME",
"SOURCE": "VARCHAR"
}
},
"RAW_STAGE_ORDER_CUSTOMER": {
"+column_types": {
"CUSTOMER_ID": "VARCHAR",
"ORDER_ID": "VARCHAR",
"START_DATE": "DATETIME",
"END_DATE": "DATETIME",
"EFFECTIVE_FROM": "DATETIME",
"LOAD_DATETIME": "DATETIME",
"SOURCE": "VARCHAR"
}
},
"LINK_CUSTOMER_ORDER": {
"+column_types": {
"CUSTOMER_ORDER_PK": "BINARY(16)",
"CUSTOMER_PK": "BINARY(16)",
"ORDER_PK": "BINARY(16)",
"LOAD_DATETIME": "DATETIME",
"SOURCE": "VARCHAR"
}
},
"LINK_ORDER_CUSTOMER": {
"+column_types": {
"ORDER_CUSTOMER_PK": "BINARY(16)",
"CUSTOMER_PK": "BINARY(16)",
"ORDER_PK": "BINARY(16)",
"LOAD_DATETIME": "DATETIME",
"SOURCE": "VARCHAR"
}
},
"EFF_SAT_CUSTOMER_ORDER": {
"+column_types": {
"CUSTOMER_ORDER_PK": "BINARY(16)",
"CUSTOMER_PK": "BINARY(16)",
"ORDER_PK": "BINARY(16)",
"START_DATE": "DATETIME",
"END_DATE": "DATETIME",
"EFFECTIVE_FROM": "DATETIME",
"LOAD_DATETIME": "DATETIME",
"SOURCE": "VARCHAR"
}
},
"EFF_SAT_ORDER_CUSTOMER": {
"+column_types": {
"ORDER_CUSTOMER_PK": "BINARY(16)",
"CUSTOMER_PK": "BINARY(16)",
"ORDER_PK": "BINARY(16)",
"START_DATE": "DATETIME",
"END_DATE": "DATETIME",
"EFFECTIVE_FROM": "DATETIME",
"LOAD_DATETIME": "DATETIME",
"SOURCE": "VARCHAR"
}
}
} | 1,760 |
def _length_hint(obj):
"""Returns the length hint of an object."""
try:
return len(obj)
except (AttributeError, TypeError):
try:
get_hint = type(obj).__length_hint__
except AttributeError:
return None
try:
hint = get_hint(obj)
except TypeError:
return None
    if hint is NotImplemented or \
       not isinstance(hint, int) or \
       hint < 0:
return None
return hint | 1,761 |
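# Small illustration of _length_hint on a sized object, an iterator, an
# object exposing only __length_hint__, and an object with no length
# information at all (the Hinted class is made up for this example).
class Hinted:
    def __length_hint__(self):
        return 7
print(_length_hint([1, 2, 3]))        # 3, from len()
print(_length_hint(iter([1, 2, 3])))  # 3, list iterators expose __length_hint__
print(_length_hint(Hinted()))         # 7, from __length_hint__
print(_length_hint(object()))         # None, no length information available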
def _folder_to_json(jdir, key_path=None, in_memory=True,
                    ignore_prefix=('.', '_'), dic=None, parse_decimal=False):
""" read in folder structure as json
e.g.
jdir
sub_dir1
data.json
sub_dir2
data.json
_folder_to_json(jdir)
=> {'sub_dir1':{'data':{...}},
'sub_dir2':{'data':{...}}}
NB: json files are identified with .json extension
"""
    key_path = [] if key_path is None else key_path
    dic = {} if dic is None else dic
if not hasattr(jdir, 'iterdir'):
raise ValueError('jdir is not a path object; {}'.format(jdir))
key_found = False if key_path else True
search_key = key_path[0] if len(key_path) > 0 else None
for jsub in jdir.iterdir():
if jsub.is_file() and jsub.name.endswith('.json'):
name, ext = os.path.splitext(jsub.name)
if name == search_key or not key_path:
key_found = True
if key_path:
data = to_dict(jsub, key_path[1:], in_memory,
ignore_prefix, parse_decimal)
if isinstance(data, dict):
dic.update(data)
else:
dic.update({_Terminus(): data})
else:
dic[name] = to_dict(jsub, key_path[1:], in_memory,
ignore_prefix, parse_decimal)
elif (jsub.is_dir()
and not jsub.name.startswith(ignore_prefix)
and (jsub.name == search_key or not key_path)):
key_found = True
if jsub.name in dic.keys():
raise IOError(
'directory has a sub-dir and file with same name: '
'{1} and {1}.json in {0}'.format(jdir, jsub.name))
if key_path:
sub_d = dic
else:
dic[jsub.name] = {}
sub_d = dic[jsub.name]
_folder_to_json(jsub, key_path[1:], in_memory, ignore_prefix,
sub_d, parse_decimal)
if not key_found:
raise KeyError('key not found: {0}'.format(search_key)) | 1,762 |
async def generate_images(queue, filenames):
"""Producer coroutine"""
for filename in filenames:
await queue.put(filename)
print('All task requests sent') | 1,763 |
def chimeric_data():
"""Example containing spanning + junction reads from single fusion."""
return _build_chimeric_data(
[('1', 300, 1, 'T2onc', 420, 1, 2, '100M2208p38M62S', '62M38S', 'R1'),
('1', 300, 1, 'T2onc', 420, 1, 1, '100M2208p52M48S', '48M52S', 'R2'),
('1', 301, 1, 'T2onc', 420, 1, 1, '100M2208p52M48S', '48M52S', 'R3'),
('1', 300, 1, 'T2onc', 421, 1, 1, '100M2208p52M48S', '48M52S', 'R4'),
('1', 280, 1, 'T2onc', 435, 1, -1, '100M', '97M3S', 'S1'),
('1', 270, 1, 'T2onc', 445, 1, -1, '100M', '98M2S', 'S2'),
('1', 275, 1, 'T2onc', 435, 1, -1, '100M', '98M2S', 'S3')]) | 1,764 |
def _get_merge_for_alias_key(database, key):
"""Return the Alias record of the merged player.
    Allow for value.merge on the record with the given key being any value.
    Return the record if value.merge is None, True, or False.
    Otherwise assume value.merge is an integer and use it to retrieve and
    return a record.
    Return None if get_alias() returns None.
"""
r = resultsrecord.get_alias(database, key)
if r is None:
return
elif r.value.merge is None:
return r
elif r.value.merge is True:
return r
elif r.value.merge is False:
return r
r = resultsrecord.get_alias(database, r.value.merge)
if r is None:
return
return r | 1,765 |
def saveusers(argv):
"""Save stdin to users.plist."""
try:
plist = plistlib.readPlist(sys.stdin)
except:
        print("Malformed users.plist", file=sys.stderr)
return 2
os.unlink(users_path)
plistlib.writePlist(plist, users_path)
return 0 | 1,766 |
def get_bucket(self):
"""
Documentation:
---
Description:
Use bucket name to return a single S3 bucket object.
---
Returns:
bucket : S3 bucket
S3 bucket object
"""
        # retrieve dictionary mapping bucket name to S3 bucket object
buckets = self.get_buckets()
# check that there is an instance with that name
assert self.bucket_name in self.get_bucket_names(), "\nNo S3 bucket with that name.\n"
# filter instances by instance_name
bucket = buckets[self.bucket_name]
return bucket | 1,767 |
def set_emoji_author(apps: StateApps, schema_editor: DatabaseSchemaEditor) -> None:
"""
This migration establishes the invariant that all RealmEmoji objects have .author set
and queues events for reuploading all RealmEmoji.
"""
RealmEmoji = apps.get_model("zerver", "RealmEmoji")
Realm = apps.get_model("zerver", "Realm")
UserProfile = apps.get_model("zerver", "UserProfile")
ROLE_REALM_OWNER = 100
realm_emoji_to_update = []
for realm_emoji in RealmEmoji.objects.all():
if realm_emoji.author_id is None:
user_profile = (
UserProfile.objects.filter(
realm_id=realm_emoji.realm_id, is_active=True, role=ROLE_REALM_OWNER
)
.order_by("id")
.first()
)
realm_emoji.author_id = user_profile.id
realm_emoji_to_update.append(realm_emoji)
RealmEmoji.objects.bulk_update(realm_emoji_to_update, ["author_id"])
if settings.TEST_SUITE:
# There are no custom emoji in the test suite data set, and
# the below code won't work because RabbitMQ isn't enabled for
# the test suite.
return
for realm_id in Realm.objects.order_by("id").values_list("id", flat=True):
event = {
"type": "reupload_realm_emoji",
"realm_id": realm_id,
}
queue_json_publish("deferred_work", event) | 1,768 |
def requestShutdown(programPath, reason):
"""
Log the shutdown reason and call the shutdown-requesting program.
In the case the service is spawned by launchd (or equivalent), if our
service decides it needs to shut itself down, because of a misconfiguration,
for example, we can't just exit. We may need to go through the system
machinery to unload our job, manage reverse proxies, update admin UI, etc.
Therefore you can configure the ServiceDisablingProgram plist key to point
to a program to run which will stop our service.
@param programPath: the full path to a program to call (with no args)
@type programPath: C{str}
@param reason: a shutdown reason to log
@type reason: C{str}
"""
log.error("Shutting down Calendar and Contacts server")
log.error(reason)
Popen(
args=[config.ServiceDisablingProgram],
stdout=PIPE,
stderr=PIPE,
).communicate() | 1,769 |
def euclidean_distance(x, y, weight=None):
"""Computes the Euclidean distance between two time series.
If the time series do not have the same length, an interpolation is performed.
Parameters
----------
x : nd-array
Time series x.
y : nd-array
Time series y.
weight: nd-array (Default: None)
query weight values.
Returns
-------
float
Euclidean distance value.
"""
p = 2
if len(x) != len(y):
x, y = interpolation(x, y)
if weight is None:
ed = np.linalg.norm(x - y, p)
else:
if len(np.shape(x)) > 1:
distance = _lnorm_multidimensional(x, y, weight, p=p)
else:
distance = _lnorm_unidimensional(x, y, weight, p=p)
ed = np.sum(distance)
return ed | 1,770 |
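# Minimal usage sketch for euclidean_distance on two equal-length series
# (the arrays below are arbitrary examples).
import numpy as np
x = np.array([0.0, 1.0, 2.0, 3.0])
y = np.array([0.0, 1.0, 2.0, 5.0])
print(euclidean_distance(x, y))  # 2.0, the L2 norm of (x - y)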
def path(artifactory_server, artifactory_auth):
"""ArtifactoryPath with defined server URL and authentication"""
def f(uri):
return artifactory.ArtifactoryPath(
artifactory_server + uri, auth=artifactory_auth
)
return f | 1,771 |
def compile_gif():
"""Сделать кино"""
utils.compile_gif('./current/map_thumbnails/') | 1,772 |
def get_figure_of_dataframe_contrasting_prof_with_other_profs(dict_cursor, ax, instructorFullName, cID, campus):
"""
Plot the prof vs other profs DataFrame in python.
"""
df = __get_dataframe_by_contrasting_prof_with_other_profs(dict_cursor, instructorFullName, cID, campus)
__get_figure_by_dataframe(df, ax, title="Prof {} vs other profs who taught {}".format(instructorFullName, cID)) | 1,773 |
def get_outmost_polygon_boundary(img):
"""
Given a mask image with the mask describes the overlapping region of
two images, get the outmost contour of this region.
"""
mask = get_mask(img)
mask = cv2.dilate(mask, np.ones((2, 2), np.uint8), iterations=2)
cnts, hierarchy = cv2.findContours(
mask,
cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)[-2:]
    # get the contour with largest area
C = sorted(cnts, key=lambda x: cv2.contourArea(x), reverse=True)[0]
# polygon approximation
polygon = cv2.approxPolyDP(C, 0.009 * cv2.arcLength(C, True), True)
return polygon | 1,774 |
def update_uploaded_file_with_log(project, path_to_log_file):
"""Update file details that weren't properly uploaded to db from cli log"""
import botocore
from dds_web.database import models
from dds_web import db
from dds_web.api.api_s3_connector import ApiS3Connector
import json
proj_in_db = models.Project.query.filter_by(public_id=project).one_or_none()
assert proj_in_db
with open(path_to_log_file, "r") as f:
log = json.load(f)
errors = {}
files_added = []
for file, vals in log.items():
status = vals.get("status")
if not status or not status.get("failed_op") == "add_file_db":
continue
with ApiS3Connector(project=proj_in_db) as s3conn:
try:
_ = s3conn.resource.meta.client.head_object(
Bucket=s3conn.project.bucket, Key=vals["path_remote"]
)
except botocore.client.ClientError as err:
if err.response["Error"]["Code"] == "404":
errors[file] = {"error": "File not found in S3", "traceback": err.__traceback__}
else:
file_object = models.File.query.filter(
sqlalchemy.and_(
models.File.name == sqlalchemy.func.binary(file),
models.File.project_id == proj_in_db.id,
)
).first()
if file_object:
errors[file] = {"error": "File already in database."}
else:
new_file = models.File(
name=file,
name_in_bucket=vals["path_remote"],
subpath=vals["subpath"],
project_id=proj_in_db.id,
size_original=vals["size_raw"],
size_stored=vals["size_processed"],
compressed=not vals["compressed"],
public_key=vals["public_key"],
salt=vals["salt"],
checksum=vals["checksum"],
)
new_version = models.Version(
size_stored=new_file.size_stored, time_uploaded=datetime.datetime.utcnow()
)
proj_in_db.file_versions.append(new_version)
proj_in_db.files.append(new_file)
new_file.versions.append(new_version)
db.session.add(new_file)
files_added.append(new_file)
db.session.commit()
flask.current_app.logger.info(f"Files added: {files_added}")
flask.current_app.logger.info(f"Errors while adding files: {errors}") | 1,775 |
def on_save_handler(model_class, instance, created):
"""Hash password on creation/save."""
# If password changed then it won't start with hash's method prefix
is_password_changed = not instance.password.startswith('pbkdf2:sha256')
if created or is_password_changed:
instance.password = generate_password_hash(instance.password)
is_api_key_changed = not instance.api_key.startswith('pbkdf2:sha256')
if created or is_api_key_changed:
if not instance.api_key:
instance.api_key = model_class.random_password()
instance.api_key = generate_password_hash(instance.api_key) | 1,776 |
def check_ddp_wrapped(model: nn.Module) -> bool:
"""
Checks whether model is wrapped with DataParallel/DistributedDataParallel.
"""
parallel_wrappers = nn.DataParallel, nn.parallel.DistributedDataParallel
# Check whether Apex is installed and if it is,
# add Apex's DistributedDataParallel to list of checked types
try:
from apex.parallel import DistributedDataParallel as apex_DDP
parallel_wrappers = parallel_wrappers + (apex_DDP,)
except ImportError:
pass
return isinstance(model, parallel_wrappers) | 1,777 |
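A small, hypothetical check of the helper above (assumes PyTorch is installed; Apex is optional and only extends the checked types when present):

import torch.nn as nn

model = nn.Linear(4, 2)
print(check_ddp_wrapped(model))                   # False -- plain module
print(check_ddp_wrapped(nn.DataParallel(model)))  # True  -- wrapped in DataParallel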
def adminRecords(request):
"""
    Manage rental records
:param request:
:return: html page
"""
token = request.COOKIES.get('admintoken')
if token is None:
return redirect('/adminLogin/')
result = MysqlConnector.get_one('YachtClub', 'select adminname from admincookies where token=%s', token)
if result is None:
return redirect('/adminLogin/')
return render(request, 'adminRecords.html') | 1,778 |
def _make_fold(draw):
"""
Helper strategy for `test_line_fold` case.
The shape of the content will be the same every time:
a
b
c
But the chars and size of indent, plus trailing whitespace on each line
and number of line breaks will all be fuzzed.
"""
return (
draw(make_interspace(symbol_a, 0)),
draw(make_interspace(symbol_b, 1)),
draw(make_interspace(symbol_c, 1)),
) | 1,779 |
def temporary_namespace(locals_ref, keep: T.List[str] = []):
"""Temporary Namespace within ``with`` statement.
1. copies current namespace from `locals_ref`
2. Enters ``with`` statement
3. restores original namespace except those specified in `keep`
Parameters
----------
    locals_ref : dict
        namespace to copy and restore, e.g. ``locals()`` of the module calling from.
.. todo::
not need to pass any module information. infer.
keep : list, optional
list of (str) variable names to keep.
Yields
------
locals_ref : locals
for accessing namespace
Warnings
--------
Does NOT work within functions.
"""
original_namespace = locals_ref.copy()
try:
yield locals_ref
finally:
# difference of current and old namespace
# without the keep keys
drop_keys = (
set(locals_ref.keys())
.difference(original_namespace.keys())
.difference(keep)
)
# kept values
keep_dict = {k: locals_ref[k] for k in keep}
# print("drop_keys", drop_keys, "keep_dict", keep_dict)
# Restoring Namespace
original_namespace.update(keep_dict) # add keep values to original
locals_ref.update(original_namespace) # restore original (+kept)
for key in drop_keys: # drop unkept context-specific values
locals_ref.pop(key)
# /try | 1,780 |
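A hypothetical module-level usage sketch. It assumes the generator above is wrapped with contextlib.contextmanager (the decorator is not shown in the snippet) and, per the docstring's warning, runs at module scope rather than inside a function:

import contextlib

temporary_namespace = contextlib.contextmanager(temporary_namespace)

x = 1
with temporary_namespace(locals(), keep=["result"]):
    scratch = x + 1        # temporary: dropped when the block exits
    result = scratch * 10  # kept: listed in `keep`

print(result)                 # 20
print("scratch" in locals())  # False -- popped when the namespace was restored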
def get_user_jwt() -> str:
"""
Returns:
str: The JWT token of the user
"""
login_data = check_login()
if not login_data:
token = requests.get(
'https://formee-auth.hackersreboot.tech/visitor').json()['token']
return token
if login_data:
token = requests.get('https://formee-auth.hackersreboot.tech/', json={
'username': login_data['username'], 'password': login_data['password']}).json()['token']
return token | 1,781 |
def auth(driver, cred):
"""
Method for automating login procedure
"""
try:
ele_un = driver.find_element_by_xpath("//input[@id='ft_un']")
ele_un.send_keys(cred['username'])
ele_pd = driver.find_element_by_xpath("//input[@id='ft_pd']")
ele_pd.send_keys(cred['password'])
driver.find_element_by_xpath("//input[@type='submit']").click()
except NoSuchElementException:
print('Already active or No internet connection') | 1,782 |
def nostdout():
"""Kill standart output.
Example
-------
>> with nostdout():
raw = mne.io.Raw(fname)
"""
# -- Works both in python2 and python3 -- #
    try:
        from StringIO import StringIO  # Python 2
    except ImportError:
        from io import StringIO  # Python 3
# --------------------------------------- #
save_stdout = sys.stdout
sys.stdout = StringIO()
yield
sys.stdout = save_stdout | 1,783 |
def site_pressure(dset):
"""Get atmospheric pressure from local site measurements
    If local atmospheric pressure measurements for a site are not available, an alternative model given in the
    configuration file is used to determine the atmospheric pressure.
    TODO:
        So far only the gridded VMF1 model is used if local pressure data are not available. Which alternative is
        used should be decided via the configuration file. How should an alternative model be looked up in the
        configuration file?
model_list = config.tech[MODEL].meteorological_data.list???
Args:
dset (Dataset): A Dataset containing model data.
Returns:
numpy.ndarray: Atmospheric pressure for each observation in [hPa]
"""
pressure = np.zeros(dset.num_obs)
i_given = np.zeros(dset.num_obs, dtype=bool)
if "pressure" + (dset.default_field_suffix or "") in dset.fields:
i_given[np.logical_not(np.isnan(dset.pressure))] = True
pressure[i_given] = dset.pressure[i_given]
i_missing = np.logical_not(i_given)
if i_missing.any():
pressure[i_missing] = vmf1_gridded_pressure(dset)[i_missing]
return pressure | 1,784 |
def write_clean_po(filename, catalog):
"""Writes out a .po file in a canonical way, to minimize spurious diffs."""
catalog.creation_date = datetime.datetime(2000, 1, 1, 0, 0, 0)
file = open(filename, 'w')
pofile.write_po(file, catalog,
no_location=True, sort_output=True, ignore_obsolete=True)
file.close() | 1,785 |
def add_gradient_penalty(critic, C_input_gp, C_input_fake):
"""Helper Function: Add gradient penalty to enforce Lipschitz continuity
    Interpolates = Real + alpha * ( Fake - Real )
Parameters
----------
critic : tf.Sequential
Critic neural network
C_input_gp : np.matrix
Critic input for gradient penalty. Mean values of all similar samples
provided by the Sampler.
C_input_fake : tf.Tensor
Critic input Generator(X)
Returns
-------
tf.tensor(dtype=tf.Float64)
Gradient Penalty
"""
alpha = tf.random.uniform(
shape=[1, int(C_input_fake.shape[1])], minval=0.0, maxval=1.0, dtype=tf.float64
)
interpolates = C_input_gp + alpha * (C_input_fake - C_input_gp)
disc_interpolates = critic(interpolates)
gradients = tf.gradients(disc_interpolates, [interpolates])[0]
slopes = tf.sqrt(tf.reduce_sum(tf.square(gradients)))
return tf.reduce_mean((slopes - 1) ** 2) | 1,786 |
def parse_airomon_datetime(airomon_dt: str) -> datetime:
"""Parse string used by airomon and also make timezone aware."""
aileen_tz = pytz.timezone(settings.TIME_ZONE)
try:
dt: datetime = datetime.strptime(airomon_dt, "%Y-%m-%d %H:%M:%S")
dt = dt.astimezone(aileen_tz)
except ValueError:
print(
"%s Warning: could not parse datetime %s, using 1-1-1970 for this one!"
% (settings.TERM_LBL, airomon_dt)
)
dt = datetime(1970, 1, 1, 1, 1, 1, tzinfo=aileen_tz)
return dt | 1,787 |
def i2nm(i):
"""
Return the n and m orders of the i'th zernike polynomial
========= == == == == == == == == == == == == == == == ===
i 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 ...
n-order 0 1 1 2 2 2 3 3 3 3 4 4 4 4 4 ...
m-order 0 -1 1 -2 0 2 -3 -1 1 3 -4 -2 0 2 4 ...
========= == == == == == == == == == == == == == == == ===
"""
ia = array(i)
n = (1 + (sqrt(8 * (ia) + 1) - 3) / 2).astype(int)
ni = n * (n + 1) / 2
m = -n + 2 * (i - ni)
return n, m | 1,788 |
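A quick, hypothetical check of the mapping against the table in the docstring (assumes numpy is installed and that array/sqrt in the snippet above refer to the numpy functions):

import numpy as np

n, m = i2nm(np.arange(15))
print(n)              # [0 1 1 2 2 2 3 3 3 3 4 4 4 4 4]
print(m.astype(int))  # [ 0 -1  1 -2  0  2 -3 -1  1  3 -4 -2  0  2  4]  (m is float due to the / division)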
def update_ftov_msgs(
ftov_msgs: jnp.ndarray, updates: Dict[Any, jnp.ndarray], fg_state: FactorGraphState
) -> jnp.ndarray:
"""Function to update ftov_msgs.
Args:
ftov_msgs: A flat jnp array containing ftov_msgs.
updates: A dictionary containing updates for ftov_msgs
fg_state: Factor graph state
Returns:
A flat jnp array containing updated ftov_msgs.
Raises: ValueError if:
(1) provided ftov_msgs shape does not match the expected ftov_msgs shape.
(2) provided name is not valid for ftov_msgs updates.
"""
for names in updates:
data = updates[names]
if names in fg_state.variable_group.names:
variable = fg_state.variable_group[names]
if data.shape != (variable.num_states,):
raise ValueError(
f"Given belief shape {data.shape} does not match expected "
f"shape {(variable.num_states,)} for variable {names}."
)
var_states_for_edges = np.concatenate(
[
wiring_by_type.var_states_for_edges
for wiring_by_type in fg_state.wiring.values()
]
)
starts = np.nonzero(
var_states_for_edges == fg_state.vars_to_starts[variable]
)[0]
for start in starts:
ftov_msgs = ftov_msgs.at[start : start + variable.num_states].set(
data / starts.shape[0]
)
else:
raise ValueError(
"Invalid names for setting messages. "
"Supported names include a tuple of length 2 with factor "
"and variable names for directly setting factor to variable "
"messages, or a valid variable name for spreading expected "
"beliefs at a variable"
)
return ftov_msgs | 1,789 |
def dict_list_add(d, k, v):
"""
    Convenience function to append v to the list stored in d under key k,
    creating an empty list first if the key is not already present
"""
if k in d:
d[k].append(v)
else:
d[k] = [v] | 1,790 |
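For comparison, the standard library offers the same append-or-create behaviour via dict.setdefault or collections.defaultdict; a small sketch with made-up keys:

from collections import defaultdict

d = {}
dict_list_add(d, "k", 1)         # d == {'k': [1]}
d.setdefault("k", []).append(2)  # equivalent one-liner; d == {'k': [1, 2]}

auto = defaultdict(list)         # or let the dict create the list lazily
auto["k"].append(3)              # auto == defaultdict(list, {'k': [3]})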
def normalize_features(
current: np.ndarray,
previous: Optional[np.ndarray],
normalize_samples: int,
method: str = NORM_METHODS.MEAN.value,
clip: bool = False,
) -> tuple[np.ndarray, np.ndarray]:
"""Normalize features with respect to the past number of normalize_samples.
Parameters
----------
current : numpy array
current features to normalize.
previous : numpy array or None
previous features, not normalized. Used for normalization of current features.
normalize_samples : int
number of past samples considered for normalization
method : str | default is 'mean'
data is normalized via subtraction of the 'mean' or 'median' and
subsequent division by the 'mean' or 'median'. For z-scoring enter
'zscore'.
clip : int | float, optional
value at which to clip on the lower and upper end after normalization.
Useful for artifact rejection and handling of outliers.
Returns
-------
current : numpy array
normalized current features
previous : numpy array
previous features, not normalized.
Raises
------
ValueError
returned if method is not 'mean', 'median' or 'zscore'
"""
if previous is None:
return np.zeros_like(current), current
previous = np.vstack((previous, current))
previous = _transform_previous(
previous=previous, normalize_samples=normalize_samples
)
current, previous = _normalize_and_clip(
current=current,
previous=previous,
method=method,
clip=clip,
description="feature",
)
return current, previous | 1,791 |
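A hypothetical streaming loop showing the intended calling pattern (random data; assumes the private helpers _transform_previous and _normalize_and_clip referenced above are available in the same module):

import numpy as np

previous = None
for _ in range(100):
    current = np.random.randn(8)  # one new feature vector per sample
    normalized, previous = normalize_features(
        current=current,
        previous=previous,
        normalize_samples=30,     # normalize against up to the last 30 past samples
        method="zscore",
        clip=False,
    )
    # `normalized` feeds downstream processing; `previous` carries the raw history forward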
def anim(filename, rows: int, cols: int ,
frame_duration: float = 0.1, loop=True) -> Animation:
"""Create Animation object from image of regularly arranged subimages.
+filename+ Name of file in resource directory of image of subimages
regularly arranged over +rows+ rows and +cols+ columns.
    +frame_duration+ Seconds each frame of animation should be displayed.
    +loop+ If True, the animation repeats indefinitely.
    """
img = pyglet.resource.image(filename)
image_grid = pyglet.image.ImageGrid(img, rows, cols)
    animation = image_grid.get_animation(frame_duration, loop)
centre_animation(animation)
return animation | 1,792 |
def show_umbrella_plot(profileFilename, histogramFilename):
"""Muestra el gráfico del perfil y los histogramas en el mismo gráfico. Útil para determinar
si al cálculo le faltan ventanas."""
figure = plt.figure()
histogramsData = parseXVG(histogramFilename)
histoPlot = figure.add_subplot(111)
for histogramNum in range(1, len(histogramsData)):
histoPlot.fill_between(
histogramsData[0], 0, histogramsData[histogramNum], color="grey", alpha=0.35
)
histoPlot.set_xlabel("Distance from bilayer center [nm]")
histoPlot.set_ylabel("Population")
profileData = parseXVG(profileFilename)
profilePlot = figure.add_subplot(111, sharex=histoPlot, frameon=False)
profilePlot.plot(profileData[0], profileData[1])
profilePlot.yaxis.tick_right()
profilePlot.yaxis.set_label_position("right")
profilePlot.set_ylabel("Mean force potential [kj/mol]")
profilePlot.grid()
plt.show() | 1,793 |
def process_summary(article):
"""Ensures summaries are not cut off. Also inserts
mathjax script so that math will be rendered"""
summary = article._get_summary()
summary_parsed = BeautifulSoup(summary, 'html.parser')
math = summary_parsed.find_all(class_='math')
if len(math) > 0:
last_math_text = math[-1].get_text()
if len(last_math_text) > 3 and last_math_text[-3:] == '...':
content_parsed = BeautifulSoup(article._content, 'html.parser')
full_text = content_parsed.find_all(class_='math')[len(math)-1].get_text()
math[-1].string = "%s ..." % full_text
summary = summary_parsed.decode()
article._summary = "%s<script type='text/javascript'>%s</script>" % (summary, process_summary.mathjax_script) | 1,794 |
def filter_factory(global_conf, **local_conf):
"""Standard filter factory to use the middleware with paste.deploy"""
register_swift_info('vertigo')
conf = global_conf.copy()
conf.update(local_conf)
vertigo_conf = dict()
vertigo_conf['devices'] = conf.get('devices', '/srv/node')
vertigo_conf['execution_server'] = conf.get('execution_server')
vertigo_conf['mc_timeout'] = conf.get('mc_timeout', 5)
vertigo_conf['mc_pipe'] = conf.get('mc_pipe', 'vertigo_pipe')
# vertigo_conf['api_pipe'] = conf.get('mc_pipe', 'api_pipe')
vertigo_conf['metadata_visibility'] = conf.get('metadata_visibility', True)
vertigo_conf['mc_dir'] = conf.get('mc_dir', '/home/docker_device/vertigo/scopes')
vertigo_conf['cache_dir'] = conf.get('cache_dir', '/home/docker_device/cache/scopes')
vertigo_conf['mc_container'] = conf.get('mc_container', 'microcontroller')
vertigo_conf['mc_dependency'] = conf.get('mc_dependency', 'dependency')
''' Load storlet parameters '''
configParser = RawConfigParser()
configParser.read(conf.get('__file__'))
storlet_parameters = configParser.items('filter:storlet_handler')
for key, val in storlet_parameters:
vertigo_conf[key] = val
""" Load Storlets Gateway configuration """
configParser = RawConfigParser()
configParser.read(vertigo_conf['storlet_gateway_conf'])
additional_items = configParser.items("DEFAULT")
for key, val in additional_items:
vertigo_conf[key] = val
""" Load Storlets Gateway class """
module_name = vertigo_conf.get('storlet_gateway_module', 'stub')
gateway_class = load_gateway(module_name)
vertigo_conf['storlets_gateway_module'] = gateway_class
"""
Register Lua script to retrieve policies in a single redis call
"""
vertigo_conf['redis_host'] = conf.get('redis_host', 'controller')
vertigo_conf['redis_port'] = int(conf.get('redis_port', 6379))
vertigo_conf['redis_db'] = int(conf.get('redis_db', 0))
if vertigo_conf['execution_server'] == 'proxy':
r = redis.StrictRedis(vertigo_conf['redis_host'],
vertigo_conf['redis_port'],
vertigo_conf['redis_db'])
lua = """
local t = {}
if redis.call('EXISTS', 'mc_pipeline:'..ARGV[1]..':'..ARGV[2]..':'..ARGV[3])==1 then
t = redis.call('HGETALL', 'mc_pipeline:'..ARGV[1]..':'..ARGV[2]..':'..ARGV[3])
elseif redis.call('EXISTS', 'mc_pipeline:'..ARGV[1]..':'..ARGV[2])==1 then
t = redis.call('HGETALL', 'mc_pipeline:'..ARGV[1]..':'..ARGV[2])
end
return t"""
lua_sha = r.script_load(lua)
vertigo_conf['LUA_get_mc_sha'] = lua_sha
def swift_vertigo(app):
return VertigoHandlerMiddleware(app, global_conf, vertigo_conf)
return swift_vertigo | 1,795 |
def get_review_score_fields(call, proposals):
"""Return a dictionary of the score banner fields in the reviews.
Compute the score means and stdevs. If there are more than two score
fields, then also compute the mean of the means and the stdev of the means.
This is done over all finalized reviews for each proposal.
Store the values in the proposal document.
"""
fields = dict([(f['identifier'], f)
for f in call['review']
if f.get('banner') and f['type'] == constants.SCORE])
for proposal in proposals:
reviews = utils.get_docs_view('reviews', 'proposal',
proposal['identifier'])
# Only include finalized reviews in the calculation.
reviews = [r for r in reviews if r.get('finalized')]
scores = dict([(id, list()) for id in fields])
for review in reviews:
for id in fields:
value = review['values'].get(id)
if value is not None: scores[id].append(float(value))
proposal['scores'] = dict()
for id in fields:
proposal['scores'][id] = d = dict()
d['n'] = len(scores[id])
try:
d['mean'] = round(statistics.mean(scores[id]), 1)
except statistics.StatisticsError:
d['mean'] = None
try:
d['stdev'] = round(statistics.stdev(scores[id]), 1)
except statistics.StatisticsError:
d['stdev'] = None
if len(fields) >= 2:
mean_scores = [d['mean'] for d in proposal['scores'].values()
if d['mean'] is not None]
try:
mean_means = round(statistics.mean(mean_scores), 1)
except statistics.StatisticsError:
mean_means = None
proposal['scores']['__mean__'] = mean_means
try:
stdev_means = round(statistics.stdev(mean_scores), 1)
except statistics.StatisticsError:
stdev_means = None
proposal['scores']['__stdev__'] = stdev_means
return fields | 1,796 |
def vThreadFunction():
"""Function to do CPU-bound work.
Args:
Returns:
"""
iResult = 0
for iCnt in range(50000000):
iResult += iCnt | 1,797 |
def test_simple_profiler_iterable_durations(simple_profiler, action, expected):
"""Ensure the reported durations are reasonably accurate."""
iterable = _sleep_generator(expected)
for _ in simple_profiler.profile_iterable(iterable, action):
pass
# we exclude the last item in the recorded durations since that's when StopIteration is raised
np.testing.assert_allclose(
simple_profiler.recorded_durations[action][:-1], expected, rtol=0.2
) | 1,798 |
def tokenize(text):
"""
Tokenize and normalize
"""
tokens = nltk.word_tokenize(text)
lemmatizer = nltk.WordNetLemmatizer()
clean_tokens = [lemmatizer.lemmatize(w).lower().strip() for w in tokens]
return clean_tokens | 1,799 |