content (string, lengths 22 to 815k) | id (int64, 0 to 4.91M)
---|---|
def nodal_distribution_factors_v2(topo: ndarray, volumes: ndarray):
"""The j-th factor of the i-th row is the contribution of
element i to the j-th node. Assumes a regular topology."""
ndf = nodal_distribution_factors(topo, volumes)
return ndf | 1,500 |
def start(update: Update, context: CallbackContext):
"""Send a message when the command /start is issued."""
update.message.reply_text("Hi! Please create a task list or choose one from the existing lists.") | 1,501 |
def game_core_binary(number_to_guess):
"""Binary search approach.
Set the first predict value as the middle of interval, i.e. 50.
Then decrease or increase the predict number by step.
The step is calculated using the check interval divided by 2,
i.e. 25, 13 ... 1
The minimum step is always 1.
The function returns the count of guesses."""
count_guesses = 1
predict = step = round(MAX_NUMBER / 2)
while number_to_guess != predict:
count_guesses += 1
step = round(step / 2) if step > 1 else 1
if number_to_guess > predict:
predict += step
elif number_to_guess < predict:
predict -= step
return count_guesses | 1,502 |
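A minimal usage sketch, assuming MAX_NUMBER is the module-level upper bound of the guessing range (taken as 100 here, matching the docstring's starting guess of 50) and that numpy is available:

import numpy as np

MAX_NUMBER = 100  # assumed module-level bound of the guessing range
scores = [game_core_binary(np.random.randint(1, MAX_NUMBER + 1)) for _ in range(1000)]
print("mean guesses:", np.mean(scores))  # typically around log2(MAX_NUMBER), i.e. about 7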
def linear_trend(series, return_line=True):
"""
USAGE
-----
line = linear_trend(series, return_line=True)
OR
b, a, x = linear_trend(series, return_line=False)
Returns the linear fit (line = b*x + a) associated
with the 'series' array.
Adapted from pylab.detrend_linear.
"""
series = np.asanyarray(series)
x = np.arange(series.size, dtype=np.float_)
C = np.cov(x, series, bias=1) # Covariance matrix.
b = C[0, 1]/C[0, 0] # Angular coefficient.
a = series.mean() - b*x.mean() # Linear coefficient.
line = b*x + a
if return_line:
return line
else:
return b, a, x | 1,503 |
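A small detrending sketch with synthetic (hypothetical) data: subtracting the fitted line leaves only the noise component.

import numpy as np

series = 0.3 * np.arange(100) + np.random.randn(100)   # noisy ramp
trend = linear_trend(series)                            # fitted line, same length as series
detrended = series - trend
b, a, x = linear_trend(series, return_line=False)       # slope, intercept, sample index
print("estimated slope:", b)                            # close to 0.3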
def gradcheck_wrapper_masked_operation(op, input, *args, **kwargs):
"""Gradcheck wrapper for masked operations.
When mask is specified, replaces masked-out elements with zeros.
Use for operations that produce non-finite masked-out elements,
for instance, for minimum and maximum reductions.
"""
output = op(input, *args, **kwargs)
mask = kwargs.get('mask')
if mask is not None:
output_mask = torch._masked._output_mask(op, input, *args, **kwargs)
output = torch.where(output_mask, output, output.new_zeros([]))
return output | 1,504 |
def classify(neural_net, image_file):
"""
Using the given neural_net model and image file, returns the model's
prediction for the image.
"""
img = Image.open(image_file)
img.load()
img_array = np.asarray(img)
img_array.shape = (1, 100, 100, 3)
prediction = neural_net.predict(img_array)[0][0]
return prediction | 1,505 |
def delete(job):
"""Delete a job."""
# Initialise variables.
jobid = job["jobid"]
try:
shellout = shellwrappers.sendtossh(job, ["qdel " + jobid])
except exceptions.SSHError:
raise exceptions.JobdeleteError("Unable to delete job.")
return shellout[0] | 1,506 |
def regexp_span_tokenize(s, regexp):
"""
Identify the tokens in the string, as defined by the token
delimiter regexp, and generate (start, end) offsets.
@param s: the string to be tokenized
@type s: C{str}
@param regexp: the token separator regexp
@type regexp: C{str}
@rtype: C{iter} of C{tuple} of C{int}
"""
left = 0
for m in finditer(regexp, s):
right, next = m.span()
if right != 0:
yield left, right
left = next
yield left, len(s) | 1,507 |
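For illustration, splitting on whitespace yields the character offsets of each token, which can then be used to slice the original string:

from re import finditer  # needed by regexp_span_tokenize

s = "Good muffins cost $3.88"
spans = list(regexp_span_tokenize(s, r"\s+"))
# [(0, 4), (5, 12), (13, 17), (18, 23)]
tokens = [s[start:end] for start, end in spans]
# ['Good', 'muffins', 'cost', '$3.88']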
def optimum_simrank(x_p, x_n, alpha):
"""Intermediary function to the one below."""
pos_pair_1 = itertools.combinations(x_p, 2)
pos_pair_2 = itertools.combinations(x_n, 2)
neg_pair = itertools.product(x_p, x_n)
def get_val_from_pair(x):
# Transforms each pair into one minus the minimum of its Chebyshev (l-infinity) distance to (0,0) or (1,1).
distance_to_lower_corner = max(abs(x[0]), abs(x[1]))
distance_to_upper_corner = max(abs(1. - x[0]), abs(1. - x[1]))
return 1 - min(distance_to_lower_corner, distance_to_upper_corner)
x_p = (np.array(list(map(get_val_from_pair, pos_pair_1))
+ list(map(get_val_from_pair, pos_pair_2))))
x_n = np.array(list(map(get_val_from_pair, neg_pair)))
def opt_fun(i_p, i_n):
if float(i_n) / x_n.shape[0] <= alpha:
return i_p / x_p.shape[0]
return - float("inf")
X = np.hstack([x_p, x_n])
Y = np.array([+1]*len(x_p) + [-1]*len(x_n))
f_opt, crit_opt, _ = ut.bipart_partition(X, Y, opt_fun)
return 1-f_opt, crit_opt | 1,508 |
def imports(operators=None, union=True):
""" Lazily imports and returns an enaml imports context.
Parameters
----------
operators : dict, optional
An optional dictionary of operators to push onto the operator
stack for the duration of the import context. If this is not
provided, the default Enaml operators will be used. Unless a
custom model framework is being used (i.e. not Atom), custom
operators will typically not be needed.
union : bool, optional
Whether to union the operators with the operators on the top
of the operator stack. The default is True and is typically
the correct choice to allow overriding a subset of the default
Enaml operators.
Returns
-------
result : context manager
A context manager which will install the Enaml import hook
(and optional operators) for the duration of the context.
"""
from enaml.core.import_hooks import imports
if operators is None:
return imports()
from contextlib import contextmanager
from enaml.core.operators import operator_context
@contextmanager
def imports_context():
with imports():
with operator_context(operators, union):
yield
return imports_context() | 1,509 |
def create_capture_database(capture, configfile=None):
"""Create 1 database with 2 tables:
- DoC table for coverage data
- Annotation table with gene-target info.
"""
if configfile is None:
configfile = os.path.join(SCRIPTDIR, 'config.py')
config = get_config_dict(configfile)
annotbed = get_annot_bedlocation(capture, pipelinedir=config['pipelinedir'])
dfannot = annotbed_to_df(annotbed)
DB = Databases(capture)
DB.create_annot_table(dfannot)
DB.create_doc_table()
return | 1,510 |
def apply_job_security(code):
"""Treat input `code` like Python 2 (implicit strings are byte literals).
The implementation is horribly inefficient but the goal is to be compatible
with what Mercurial does at runtime.
"""
buf = io.BytesIO(code.encode("utf8"))
tokens = tokenize.tokenize(buf.readline)
# NOTE: by setting the fullname to `mercurial.pycompat` below, we're
# ensuring that hg-specific pycompat imports aren't inserted to the code.
data = tokenize.untokenize(replacetokens(list(tokens), "mercurial.pycompat"))
return cast(str, data.decode("utf8")) | 1,511 |
def fetch(gpname: str):
""""
Gives gunpowder
Parameters
----------
gpname: str
Gunpowder name
Returns
-------
gpowder: dict
Gunpowder in dictionary form
"""
gpowders = _load_many()
return gpowders[gpname] | 1,512 |
def _kc_frequency_features(time_data, times, sfreq):
""" Calculate absolute power of delta and alpha band before (on a 3 seconds
windows) and after K-complexes"""
exp = [('before', -2.5, -0.5), ('after', 1, 3)]
res = {}
for m in exp:
kc_matrix_temp = time_data[:, np.bitwise_and(times > m[1], times < m[2])]
absol_power = compute_absol_pow_freq_bands(sfreq, kc_matrix_temp, psd_method='multitaper',
psd_params={'mt_adaptive': True, 'mt_bandwidth': 3,
'mt_low_bias': True},
freq_bands=[0.5, 4, 8, 12])
delta = absol_power[:, 0]
alpha = absol_power[:, 2]
res[m[0]] = (delta, alpha)
delta_before, alpha_before, delta_after, alpha_after = res['before'][0], res['before'][1],\
res['after'][0], res['after'][1]
return delta_before, alpha_before, delta_after, alpha_after | 1,513 |
def test_atomic_integer_total_digits_4_nistxml_sv_iv_atomic_integer_total_digits_5_5(mode, save_output, output_format):
"""
Type atomic/integer is restricted by facet totalDigits with value 18.
"""
assert_bindings(
schema="nistData/atomic/integer/Schema+Instance/NISTSchema-SV-IV-atomic-integer-totalDigits-5.xsd",
instance="nistData/atomic/integer/Schema+Instance/NISTXML-SV-IV-atomic-integer-totalDigits-5-5.xml",
class_name="NistschemaSvIvAtomicIntegerTotalDigits5",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
) | 1,514 |
async def CommandProfile(
ctx: SlashContext, user: Union[InteractionMember, UserImpl]
) -> None:
"""Handler for the /profile slash command."""
if hasattr(user, "user"):
try:
user.user = await ctx.rest.fetch_user(user.id)
except Exception as e:
logger.warning(
f"Failed to fetch user {Responses.ExpandUser(user.id, False)}, {e}"
)
fields: List[Dict[str, Any]] = []
altAvatar: Optional[str] = None
accent: Optional[str] = None
if hasattr(user, "nickname"):
if (nickname := user.nickname) is not None:
fields.append({"name": "Nickname", "value": nickname})
if hasattr(user, "created_at"):
if (created := user.created_at) is not None:
fields.append({"name": "Created", "value": Timestamps.Relative(created)})
if hasattr(user, "joined_at"):
if (joined := user.joined_at) is not None:
fields.append({"name": "Joined", "value": Timestamps.Relative(joined)})
if hasattr(user, "premium_since"):
if (booster := user.premium_since) is not None:
fields.append(
{
"name": "Nitro Booster",
"value": f"Since {Timestamps.Relative(booster)}",
}
)
if hasattr(user, "communication_disabled_until"):
if (timeout := user.communication_disabled_until()) is not None:
fields.append(
{"name": "Timed Out", "value": f"Until {Timestamps.Relative(timeout)}"}
)
if hasattr(user, "is_pending"):
if user.is_pending:
fields.append({"name": "Passed Screening", "value": "No"})
if hasattr(user, "is_mute"):
if user.is_mute:
fields.append({"name": "Muted", "value": "Yes"})
if hasattr(user, "is_deaf"):
if user.is_deaf:
fields.append({"name": "Deafened", "value": "Yes"})
if hasattr(user, "guild_avatar_url"):
if (url := user.guild_avatar_url) is not None:
altAvatar = url
if hasattr(user, "accent_color"):
if (color := user.accent_color) is not None:
accent = str(color).replace("#", "")
result: Embed = Responses.Success(
color=accent,
fields=fields,
author=f"{user.username}#{user.discriminator}",
authorIcon=altAvatar,
thumbnail=user.default_avatar_url
if (avatar := user.avatar_url) is None
else avatar,
image=None if not hasattr(user, "user") else user.user.banner_url,
footer=user.id,
timestamp=None if created is None else created.astimezone(),
)
await ctx.respond(embed=result) | 1,515 |
def gentrends(x, window=1/3.0, charts=True):
"""
Returns a Pandas dataframe with support and resistance lines, together with the slopes of both trendlines.
:param x: One-dimensional data set
:param window: How long the trendlines should be. If window < 1, then it
will be taken as a percentage of the size of the data
:param charts: Boolean value saying whether to print chart to screen
"""
import numpy as np
import pandas as pd
x = np.array(x)
if window < 1:
window = int(window * len(x))
max1 = np.where(x == max(x))[0][0] # find the index of the abs max
min1 = np.where(x == min(x))[0][0] # find the index of the abs min
# First the max
if max1 + window > len(x):
max2 = max(x[0:(max1 - window)])
else:
max2 = max(x[(max1 + window):])
# Now the min
if min1 - window < 0:
min2 = min(x[(min1 + window):])
else:
min2 = min(x[0:(min1 - window)])
# Now find the indices of the secondary extrema
max2 = np.where(x == max2)[0][0] # find the index of the 2nd max
min2 = np.where(x == min2)[0][0] # find the index of the 2nd min
# Create & extend the lines
maxslope = (x[max1] - x[max2]) / (max1 - max2) # slope between max points
minslope = (x[min1] - x[min2]) / (min1 - min2) # slope between min points
a_max = x[max1] - (maxslope * max1) # y-intercept for max trendline
a_min = x[min1] - (minslope * min1) # y-intercept for min trendline
b_max = x[max1] + (maxslope * (len(x) - max1)) # extend to last data pt
b_min = x[min1] + (minslope * (len(x) - min1)) # extend to last data point
maxline = np.linspace(a_max, b_max, len(x)) # Y values between max's
minline = np.linspace(a_min, b_min, len(x)) # Y values between min's
# OUTPUT
trends = np.transpose(np.array((x, maxline, minline)))
trends = pd.DataFrame(trends, index=np.arange(0, len(x)),
columns=['Data', 'Max Line', 'Min Line'])
if charts is True:
from matplotlib.pyplot import plot, grid, show, figure
figure()
plot(trends)
grid()
show()
return trends, maxslope, minslope | 1,516 |
def test_clean_slug_exists(mock_super_clean):
"""
Cleaning an instance that already has a slug should not replace the
slug.
"""
name = "foo"
slug = "bar-baz"
inst = models.SlugModel(slug=slug)
setattr(inst, models.SlugModel.SLUG_SOURCE, name)
new_slug = "foo-bar"
with mock.patch.object(
inst, "generate_slug", return_value=new_slug
) as mock_gen_slug:
inst.clean()
assert mock_super_clean.call_count == 1
assert inst.slug == slug
assert mock_gen_slug.call_count == 0 | 1,517 |
def gen_cand_keyword_scores(phrase_words, word_score):
"""
Computes the score for the input phrases.
:param phrase_words: phrases to score
:type phrase_words: list
:param word_score: calculated word scores
:type word_score: dict
:return: dict *{phrase: score, ...}*
"""
keyword_candidates = defaultdict(int)
for phrase, word_list in phrase_words:
if not word_list:
continue
candidate_score = functools.reduce(
add, [word_score[word] for word in word_list]
)
keyword_candidates[phrase] = candidate_score
return keyword_candidates | 1,518 |
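A small worked example with made-up word scores: each candidate phrase simply receives the sum of its word scores.

import functools
from collections import defaultdict
from operator import add  # names used by gen_cand_keyword_scores

word_score = {"machine": 2.0, "learning": 1.5, "model": 1.0}   # hypothetical scores
phrase_words = [("machine learning", ["machine", "learning"]),
                ("learning model", ["learning", "model"])]
print(dict(gen_cand_keyword_scores(phrase_words, word_score)))
# {'machine learning': 3.5, 'learning model': 2.5}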
def analyze_network(directed=False, base_url=DEFAULT_BASE_URL):
"""Calculate various network statistics.
The results are added to the Node and Edge tables and the Results Panel.
The summary statistics in the Results Panel are also returned by the function
as a list of named values.
Args:
directed (bool): If True, the network is considered a directed graph. Default is False.
base_url (str): Ignore unless you need to specify a custom domain,
port or version to connect to the CyREST API. Default is http://127.0.0.1:1234
and the latest version of the CyREST API supported by this version of py4cytoscape.
Returns:
dict: Named list of summary statistics
Raises:
requests.exceptions.RequestException: if can't connect to Cytoscape or Cytoscape returns an error
Examples:
>>> analyze_network()
{'networkTitle': 'galFiltered.sif (undirected)', 'nodeCount': '330', 'edgeCount': '359', 'avNeighbors': '2.379032258064516', 'diameter': '27', 'radius': '14', 'avSpl': '9.127660963823953', 'cc': '0.06959203036053131', 'density': '0.009631709546819902', 'heterogeneity': '0.8534500004035027', 'centralization': '0.06375695335900727', 'ncc': '26'}
>>> analyze_network(True)
{'networkTitle': 'galFiltered.sif (directed)', 'nodeCount': '330', 'edgeCount': '359', 'avNeighbors': '2.16969696969697', 'diameter': '10', 'radius': '1', 'avSpl': '3.4919830756382395', 'cc': '0.03544266191325015', 'density': '0.003297411808050106', 'ncc': '26', 'mnp': '1', 'nsl': '0'}
"""
res = commands.commands_post(f'analyzer analyze directed={directed}', base_url=base_url)
return res | 1,519 |
def create_user():
"""
Create new user
"""
# request.get_json(): extract the JSON from the request and return it as
# a Python structure.
data = request.get_json() or {}
# Validate mandatory fields
if 'username' not in data or 'email' not in data or \
'password' not in data:
return bad_request('must include username, email and password fields')
if User.query.filter_by(username=data['username']).first():
return bad_request('please use a different username')
if User.query.filter_by(email=data['email']).first():
return bad_request('please use a different email address')
# Create user
user = User()
user.from_dict(data, new_user=True)
db.session.add(user)
db.session.commit()
# Make response
response = jsonify(user.to_dict())
# The status code for a POST request that creates a resource should be 201
response.status_code = 201
response.headers['Location'] = url_for('api.get_user', id=user.id)
return response | 1,520 |
def hash_long_to_short(long_url):
"""
turn a long input url into a short url's url-safe 5 character hash
this is deterministic and the same long_url will always have the same hash
"""
encoded = long_url.encode("utf-8")
md5_hash = hashlib.md5(encoded).digest()
return base64.urlsafe_b64encode(md5_hash)[:SHORT_URL_HASH_LENGTH] | 1,521 |
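A short usage sketch, assuming SHORT_URL_HASH_LENGTH is the module-level slug length (5, as stated in the docstring). urlsafe_b64encode returns bytes, so the slug is decoded for display; the same URL always maps to the same slug.

import base64
import hashlib

SHORT_URL_HASH_LENGTH = 5  # assumed module-level constant

url = "https://example.com/some/very/long/path?with=query"
slug = hash_long_to_short(url).decode("ascii")
assert hash_long_to_short(url).decode("ascii") == slug  # deterministic
print(len(slug))  # 5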
def transform_asset_time_fields_recursive(asset):
"""
Transforms the asset time fields recursively
"""
if isinstance(asset, list):
for sub_asset_object in asset:
transform_asset_time_fields_recursive(sub_asset_object)
if isinstance(asset, dict):
for k, v in asset.items():
if isinstance(v, (list, dict)):
transform_asset_time_fields_recursive(v)
elif k in ASSET_TIME_FIELDS and v:
asset[k] = epoch_to_iso(v) | 1,522 |
def cutout_vstAtlas(ra, dec, bands=["u","g","r","i","z"], database="ATLASDR3",\
psfmags=None, imDir="/data/vst-atlas/", input_filename=[], saveFITS=False,\
width_as=20., smooth=False, cmap="binary", minmax="MAD", origin="lower", figTitle=True, \
return_val=False, saveDir=None):
"""
Plot cutouts in all the requested bands on one figure for an input source position.
## Cutout parameters
width_as: size of the cutout box; default is 20 arcsec
smooth: Gaussian smoothing with sigma=1.0; default is False
cmap: image colour map
minmax: defines the min-max scale of the image; default is from sigma_MAD(image) (see def cutout_scale)
origin: where to place the [0,0] index of the image; default is "lower"
figTitle: add a title to the final figure (e.g. VST-ATLAS cutout 20"x20" ra=, dec= (Jradec)); default is True
## VST-ATLAS parameters
ra, dec: position of the source in deg (single object, not an array)
bands: filters for which to do the cutouts
psfmags: magnitudes of the source. Should be an array of the same size as bands, or None (default).
Will be added to the band cutout title if not None
imDir: directory of the fits file if already saved on disk
input_filename: name of the input file if saved on disk
database: ATLAS database used = ATLAS + Data Release
saveFITS: save the fits tile file on disk (to imDir)
## Output parameters
return_val: return the image data and min-max(image); default is False
saveDir: output directory to save the final figure. If None, do not save; default is None
"""
print("VST-ATLAS cutout(s), band(s):", "".join(bands))
### radec: HHMMSSsDDMMSS
radec_str = radecStr(ra, dec, precision=1)
### Figure: defined fig and gs
figWidth = len(bands) * 8./3.
fig = plt.figure(figsize=(figWidth, 4))
fig.subplots_adjust(left = 0.05, right = 0.95, top = 0.90, bottom = 0, wspace = 0)
gs = gridspec.GridSpec(1, len(bands))
datas = []
for i, band in enumerate(bands):
print("{}-band".format(band))
### Filename of the fits image if already saved on disk
### (use a per-band local name so the input_filename list is not overwritten inside the loop)
fname_in = "" if len(input_filename) == 0 else input_filename[i]
filename = imDir + fname_in
### If the file does not exist -> get the file from a URL
if not os.path.exists(filename) or fname_in == "":
filename = cdl.vstAtlas_dl(ra, dec, band, database=database, width_as=width_as,\
FitsOutputPath=imDir, saveFITS=saveFITS)
print(" ", filename)
### Read fits file: cutout size = width_as
### filename could be a system path or an url or ""
print(" Try to read the fits file ...")
image,wcs = rd_fits(filename, ra, dec, hdrNum=1, width_as=width_as, pixelscale=0.21, smooth=smooth)
### Plot image: cutout size = width_as
print(" Plot the cutout ...")
ax = fig.add_subplot(gs[0,i])
### Use a per-band local magnitude so the psfmags array is not overwritten inside the loop
psfmag = psfmags[i] if psfmags is not None else None
vmin, vmax = plt_image(band, image, fig, ax, psfmags=psfmag, cmap=cmap, minmax=minmax, origin=origin)
datas.append((image, vmin, vmax, wcs))
## Add a title to the figure
if figTitle:
fig.suptitle('VST-ATLAS cutouts ({:.0f}"x{:.0f}") \n ra: {:.4f}, dec: {:.4f} (J{})'.format(width_as, width_as,\
ra, dec, radec_str), fontsize=15)
### Output
if return_val:
print(" Return image data")
plt.close(fig)
return datas
if saveDir is not None:
print(" Save the figure to", saveDir)
allBands = "".join(bands)
plt.savefig(saveDir + "Cutouts_VST-ATLAS-{}_{}_{}_{:.0f}arcsec.png".format(database, radec_str, allBands, width_as),\
bbox_inches="tight")
plt.close()
else:
print(" Return the figure")
return fig | 1,523 |
def has_prefix(sub_s):
"""
Test possibility of sub_s before doing recursion.
:param sub_s: sub_string of input word from its head.
:return: (boolean) whether any word in DATABASE starts with sub_s.
"""
for word in DATABASE:
if word.startswith(sub_s):
return True
return False | 1,524 |
def transform_results(search_result, user, department_filters):
"""
Merge podcast episodes into podcasts, and learning paths into user lists, in the aggregations.
Add 'is_favorite' and 'lists' fields to the '_source' attributes for learning resources.
Args:
search_result (dict): The results from ElasticSearch
user (User): the user who performed the search
department_filters (list): department filters applied to the search
Returns:
dict: The Elasticsearch response dict with transformed aggregates and source values
"""
for aggregation_key in [
"type",
"topics",
"offered_by",
"audience",
"certification",
"department_name",
"level",
"course_feature_tags",
"resource_type",
]:
if f"agg_filter_{aggregation_key}" in search_result.get("aggregations", {}):
if aggregation_key == "level":
levels = (
search_result.get("aggregations", {})
.get(f"agg_filter_{aggregation_key}", {})
.get("level", {})
.get("level", {})
)
if levels:
search_result["aggregations"]["level"] = {
"buckets": [
{
"key": bucket["key"],
"doc_count": bucket["courses"]["doc_count"],
}
for bucket in levels.get("buckets", [])
if bucket["courses"]["doc_count"] > 0
]
}
else:
search_result["aggregations"][aggregation_key] = search_result[
"aggregations"
][f"agg_filter_{aggregation_key}"][aggregation_key]
search_result["aggregations"].pop(f"agg_filter_{aggregation_key}")
types = search_result.get("aggregations", {}).get("type", {})
if types:
type_merges = dict(
zip(
(PODCAST_EPISODE_TYPE, LEARNING_PATH_TYPE),
(PODCAST_TYPE, USER_LIST_TYPE),
)
)
for child_type, parent_type in type_merges.items():
child_type_bucket = None
parent_type_bucket = None
for type_bucket in search_result["aggregations"]["type"]["buckets"]:
if type_bucket["key"] == child_type:
child_type_bucket = type_bucket
elif type_bucket["key"] == parent_type:
parent_type_bucket = type_bucket
if child_type_bucket and parent_type_bucket:
parent_type_bucket["doc_count"] = (
child_type_bucket["doc_count"] + parent_type_bucket["doc_count"]
)
search_result["aggregations"]["type"]["buckets"].remove(
child_type_bucket
)
elif child_type_bucket:
child_type_bucket["key"] = parent_type
search_result["aggregations"]["type"]["buckets"].sort(
key=lambda bucket: bucket["doc_count"], reverse=True
)
if not user.is_anonymous:
favorites = (
FavoriteItem.objects.select_related("content_type")
.filter(user=user)
.values_list("content_type__model", "object_id")
)
for hit in search_result.get("hits", {}).get("hits", []):
object_type = hit["_source"]["object_type"]
if object_type in LEARNING_RESOURCE_TYPES:
if object_type == LEARNING_PATH_TYPE:
object_type = USER_LIST_TYPE
object_id = hit["_source"]["id"]
hit["_source"]["is_favorite"] = (object_type, object_id) in favorites
hit["_source"]["lists"] = get_list_items_by_resource(
user, object_type, object_id
)
search_result = _transform_search_results_suggest(search_result)
if len(department_filters) > 0:
_transform_search_results_coursenum(search_result, department_filters)
return search_result | 1,525 |
def extract_rest_proxy_info(event):
"""Extract REST task proxy info"""
data = event.data
use_proxy = data.get('use_proxy')
if use_proxy and not data.get('proxy_server'):
event.form.widgets.errors += (Invalid(_("Proxy access defined without proxy server!")), ) | 1,526 |
def kl_div_loss(inputs: Tensor, targets: Tensor) -> Tensor:
"""Computes the Kullback–Leibler divergence loss between two probability distributions."""
return F.kl_div(F.log_softmax(inputs, dim=-1), F.softmax(targets, dim=-1), reduction="none") | 1,527 |
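With reduction="none", the element-wise KL terms come back with the same shape as the inputs; a common pattern is to sum over the class dimension and average over the batch. The shapes below are illustrative; kl_div_loss itself additionally needs "from torch import Tensor" and "import torch.nn.functional as F" at module level.

import torch

logits_a = torch.randn(4, 3)   # e.g. predicted logits
logits_b = torch.randn(4, 3)   # e.g. target logits
terms = kl_div_loss(logits_a, logits_b)   # shape (4, 3)
loss = terms.sum(dim=-1).mean()           # scalar batch loss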
def get_schema_names(connection: psycopg2.extensions.connection) -> List[psycopg2.extras.RealDictRow]:
"""Function for getting the schema information from the given connection
:param psycopg2.extensions.connection connection: The connection
:return: List of rows using key-value pairs for the data
:rtype: List[psycopg2.extras.RealDictRow]
"""
with connection.cursor(cursor_factory=psycopg2.extras.RealDictCursor) as cursor:
query = """SELECT *
FROM information_schema.schemata"""
cursor.execute(query)
results = cursor.fetchall()
return results | 1,528 |
def parse(string):
"""Returns a list of specs from an input string.
For creating one spec, see Spec() constructor.
"""
return SpecParser().parse(string) | 1,529 |
def get_provider_idx(provider_type):
"""Return the index associated to the type.
"""
try:
return PROVIDERS_TYPE[provider_type]['idx']
except KeyError as error:
raise ProviderError(
"Provider type (%s) is not supported yet." % (provider_type, )
) from error | 1,530 |
def file_based_convert_examples_to_features(
examples, slot_label_list, intent_label_list, max_seq_length, tokenizer, output_file):
"""
Convert a set of InputExamples into TFRecord format and write them to a file.
:param examples: [(text, CRF_label, class_label), ...]
:param slot_label_list: list of CRF (slot) labels (String)
:param intent_label_list: list of trigger-word (intent) categories (String)
:param max_seq_length:
:param tokenizer:
:param output_file: TFRecord file
:return:
"""
writer = tf.io.TFRecordWriter(output_file)
for ex_index, example in enumerate(examples):
def create_int_feature(values):
return tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))
if ex_index % 10000 == 0:
logger.info("Writing example %d of %d" % (ex_index, len(examples)))
feature = convert_single_example(ex_index, example, slot_label_list, intent_label_list,
max_seq_length, tokenizer)
# convert to tensorflow format
features = collections.OrderedDict()
features["input_ids"] = create_int_feature(feature.input_ids)
features["slot_ids"] = create_int_feature(feature.slot_ids)
features["input_mask"] = create_int_feature(feature.input_mask)
features["segment_ids"] = create_int_feature(feature.segment_ids)
features["label_ids"] = create_int_feature([feature.label_id])
features['is_value_ids'] = create_int_feature(feature.is_value_ids)
features["is_real_example"] = create_int_feature([int(feature.is_real_example)])
tf_example = tf.train.Example(features=tf.train.Features(feature=features))
writer.write(tf_example.SerializeToString())  # write one example to the TFRecord file
writer.close() | 1,531 |
def create_readme(df):
"""Retrieve text from README.md and update it."""
readme = ""  # will hold the rebuilt README text
categories = pd.unique(df["category"])
categories.sort()
with open('README.md', 'r', encoding='utf-8') as read_me_file:
read_me = read_me_file.read()
splits = read_me.split('<!---->')
# Initial project description
text_intro = splits[0]
# Contribution and contacts
text_contributing = splits[3]
text_contacts = splits[4]
# TOC
toc = "\n\n- [Awesome Citizen Science Projects](#awesome-citizen-science-projects)\n"
# Add categories
for cat in range(len(categories)):
toc += f" - [{categories[cat]}](#{categories[cat]})" + "\n"
# Add contributing and contact to TOC
toc += "- [Contributing guidelines](#contributing-guidelines)\n"
toc += "- [Contacts](#contacts)\n"
# Add first part and toc to README
readme = text_intro + "<!---->" + toc + "\n<!---->\n"
# Add projects subtitle
readme += "\n## Projects\n"
# Add individual categories to README
list_blocks = ""
for cat in range(len(categories)):
block = f"\n### {categories[cat]}\n\n"
filtered = df[df["category"] == categories[cat]]
list_items = ""
for i, r in filtered.iterrows():
try:
start_date = int(r['start_date'])
except (ValueError, TypeError):
start_date = "NA"
if not pd.isna(r['icon']):
project = f"- {r['icon']} [{r['name']}]({r['main_source']}) - {r['description']} (`{start_date}` - `{str(r['end_date'])}`)\n"
list_items = list_items + project
else:
project = f"- [{r['name']}]({r['main_source']}) - {r['description']} (`{start_date}` - `{str(r['end_date'])}`)\n"
list_items = list_items + project
list_blocks = list_blocks + block + list_items
# Add to categories to README.md
readme += list_blocks + "\n"
# Add contribution and contacts
readme += '<!---->' + text_contributing
readme += '<!---->' + text_contacts
return readme | 1,532 |
async def get_leaderboard_info_by_id(
# ScoreSaber leaderboardId
leaderboardId: float
):
"""
GET /api/leaderboard/by-id/{leaderboardId}/info
"""
# request
request_url = f'{SERVER}/api/leaderboard/by-id/{leaderboardId}/info'
response_dict = await request.get(request_url)
return LeaderboardInfo.gen(response_dict) | 1,533 |
async def check(app_name: str = "all"):
"""
Lists all migrations which have and haven't ran.
:param app_name:
The name of the app to check. Specify a value of 'all' to check
the migrations for all apps.
"""
await CheckMigrationManager(app_name=app_name).run() | 1,534 |
def distance(lat1,lon1,lat2,lon2):
"""Input 2 points in Lat/Lon degrees.
Calculates the great circle distance between them in radians
"""
rlat1= radians(lat1)
rlon1= radians(lon1)
rlat2= radians(lat2)
rlon2= radians(lon2)
dlat = rlat1 - rlat2
dlon = rlon1 - rlon2
a = pow(sin(dlat/2.0),2) + cos(rlat1)*cos(rlat2)*pow(sin(dlon/2.0),2)
c = 2* atan2(sqrt(a), sqrt(1-a))
return c | 1,535 |
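Since the function returns the central angle in radians, multiplying by the Earth's mean radius gives a distance; the coordinates below (Paris and London) are just an illustration.

from math import radians, sin, cos, atan2, sqrt  # names used by distance(); pow is the builtin

angle = distance(48.8566, 2.3522, 51.5074, -0.1278)   # Paris -> London
print(round(angle * 6371.0))   # roughly 344 km with a mean Earth radius of 6371 km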
def minimumSwaps(arr):
"""
O(nlogn)
"""
len_arr = len(arr)
arr_dict = {key+1:value for key, value in enumerate(arr)}
arr_checked = [False]*len_arr
total_count = 0
for key, value in arr_dict.items():
count = 0
while key != value and arr_checked[key-1] is False:
arr_checked[value-1] = True
count += 1
value = arr_dict.get(value)
arr_checked[key-1] = True
total_count += count
return total_count | 1,536 |
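The input is expected to be a permutation of 1..n; the count follows the cycle decomposition (each cycle of length k needs k-1 swaps). A couple of small checks:

print(minimumSwaps([4, 3, 1, 2]))      # 3  (one 4-cycle)
print(minimumSwaps([2, 3, 4, 1, 5]))   # 3  (a 4-cycle plus a fixed point)
print(minimumSwaps([1, 2, 3]))         # 0  (already sorted)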
def store_user_bot(user_id, intended_user, bot_id):
"""Store an uploaded bot in object storage."""
if user_id != intended_user:
raise api_util.user_mismatch_error(
message="Cannot upload bot for another user.")
if bot_id != 0:
raise util.APIError(
400, message="Sorry, only one bot allowed per user.")
uploaded_file = validate_bot_submission()
with model.engine.connect() as conn:
team = conn.execute(model.team_leader_query(user_id)).first()
if team:
user_id = intended_user = team["leader_id"]
bot_where_clause = (model.bots.c.user_id == user_id) & \
(model.bots.c.id == bot_id)
bot = conn.execute(model.bots.select(bot_where_clause)).first()
if not bot:
raise util.APIError(404, message="Bot not found.")
# Check if the user already has a bot compiling
if bot["compile_status"] == model.CompileStatus.IN_PROGRESS.value:
raise util.APIError(400, message="Cannot upload new bot until "
"previous one is compiled.")
blob = gcloud_storage.Blob("{}_{}".format(user_id, bot_id),
model.get_compilation_bucket(),
chunk_size=262144)
blob.upload_from_file(uploaded_file)
# Flag the user as compiling
update = model.bots.update() \
.where(bot_where_clause) \
.values(
compile_status=model.CompileStatus.UPLOADED.value,
update_time=sqlalchemy.sql.func.now(),
timeout_sent=False,
)
conn.execute(update)
return util.response_success({
"user_id": user_id,
"bot_id": bot["id"],
}) | 1,537 |
def dispatch(methods, request, notification_errors=False):
"""Dispatch JSON-RPC requests to a list of methods::
r = dispatch([cat], {'jsonrpc': '2.0', 'method': 'cat', 'id': 1})
The first parameter can be either:
- A *list* of functions, each identifiable by its ``__name__`` attribute.
- Or a *dictionary* of name:method pairs.
When using a **list**, the methods must be identifiable by a ``__name__``
attribute.
Functions already have a ``__name__`` attribute::
>>> def cat():
... return 'meow'
...
>>> cat.__name__
'cat'
>>> dispatch([cat], ...)
Lambdas require setting it::
>>> cat = lambda: 'meow'
>>> cat.__name__ = 'cat'
>>> dispatch([cat], ...)
As do partials::
>>> max_ten = partial(min, 10)
>>> max_ten.__name__ = 'max_ten'
>>> dispatch([max_ten], ...)
Alternatively, consider using a **dictionary** instead::
>>> dispatch({'cat': cat, 'max_ten': max_ten}, ...)
See the `Methods`_ module for another easy way to build the list of methods.
:param methods: List or dict of methods to dispatch to.
:param request:
JSON-RPC request. This can be in dict or string form. Byte arrays
should be `decoded
<https://docs.python.org/3/library/codecs.html#codecs.decode>`_ first.
:param notification_errors:
Should `notifications
<http://www.jsonrpc.org/specification#notification>`_ get error
responses? Typically notifications don't receive any response, except
for "Parse error" and "Invalid request" errors. Enabling this will
include all other errors such as "Method not found". A notification is
then similar to many unix commands - *"There was no response, so I can
assume the request was successful."*
:returns: A `Response`_ object - either `RequestResponse`_,
`NotificationResponse`_, or `ErrorResponse`_ if there was a
problem processing the request. In any case, the return value
gives you ``body``, ``body_debug``, ``json``, ``json_debug``, and
``http_status`` values.
"""
# Process the request
r = None
error = None
try:
# Log the request
request_log.info(str(request))
# Create request object (also validates the request)
r = Request(request)
# Call the requested method
result = _call(methods, r.method_name, r.args, r.kwargs)
# Catch any JsonRpcServerError raised (Invalid Request, etc)
except JsonRpcServerError as e:
error = e
# Catch uncaught exceptions, respond with ServerError
except Exception as e: # pylint: disable=broad-except
# Log the uncaught exception
logger.exception(e)
# Create an exception object, used to build the response
error = ServerError(str(e))
# Now build a response.
# Error
if error:
# Notifications get a non-response - see spec
if r and r.is_notification and not notification_errors:
response = NotificationResponse()
else:
# Get the 'id' part of the request, to include in error response
request_id = r.request_id if r else None
response = ErrorResponse(
error.http_status, request_id, error.code, error.message,
error.data)
# Success
else:
# Notifications get a non-response
if r and r.is_notification:
response = NotificationResponse()
else:
response = RequestResponse(r.request_id, result)
# Log the response and return it
response_log.info(response.body, extra={
'http_code': response.http_status,
'http_reason': HTTP_STATUS_CODES[response.http_status]})
return response | 1,538 |
async def test_purehotcoollink_component_setup_only_once(devices, login, hass):
"""Test if entities are created only once."""
config = _get_config()
await async_setup_component(hass, DYSON_DOMAIN, config)
await hass.async_block_till_done()
entity_ids = hass.states.async_entity_ids(DOMAIN)
assert len(entity_ids) == 1
state = hass.states.get(entity_ids[0])
assert state.name == "Temp Name" | 1,539 |
def parse_conv(weights_file, cfg_parser, section, layer_dict):
""" parse conv layer
Args:
weights_file (file object): file object of .weights file
cfg_parser (ConfigParser object): ConfigParser object of .cfg file for net
section (str): name of conv layer
layer_dict (dictionary): dict storing layer info
Returns:
dict storing layer info and weights values
"""
prev_layer_channel = layer_dict['prev_layer_channel']
count = layer_dict['count']
filters = int(cfg_parser[section]['filters'])
size = int(cfg_parser[section]['size'])
stride = int(cfg_parser[section]['stride'])
pad = int(cfg_parser[section]['pad'])
activation = cfg_parser[section]['activation']
batch_normalize = 'batch_normalize' in cfg_parser[section]
weights_shape = (size, size, prev_layer_channel, filters)
darknet_w_shape = (filters, weights_shape[2], size, size)
weights_size = np.prod(weights_shape)
prev_layer_channel = filters
print('conv2d', 'bn' if batch_normalize else ' ', activation, weights_shape)
bn_weight_list = []
conv_bias = []
if batch_normalize:
bn_weights = np.ndarray(
shape=(4, filters),
dtype='float32',
buffer=weights_file.read(filters * 16))
count += 4 * filters
bn_weight_list = [
bn_weights[1], # scale gamma
bn_weights[0], # shift beta
bn_weights[2], # running mean
bn_weights[3] # running var
]
else:
conv_bias = np.ndarray(
shape=(filters, ),
dtype='float32',
buffer=weights_file.read(filters * 4))
count += filters
conv_weights = np.ndarray(
shape=darknet_w_shape,
dtype='float32',
buffer=weights_file.read(weights_size * 4))
count += weights_size
# DarkNet conv_weights are serialized Caffe-style:
# (out_dim, in_dim, height, width)
# We would like to set these to Tensorflow order:
# (height, width, in_dim, out_dim)
conv_weights = np.transpose(conv_weights, [2, 3, 1, 0])
layer_dict['prev_layer_channel'] = prev_layer_channel
layer_dict['count'] = count
layer_dict['conv_weights'] = conv_weights
layer_dict['conv_bias'] = conv_bias
layer_dict['bn_weight_list'] = bn_weight_list
return layer_dict | 1,540 |
def generate_format_spec(num_vals, sep, dtypes, decimals=None):
"""
Generate a format specifier for generic input.
--------------------------------------------------------------
Input
num_vals : number of wild-cards
sep : separator string (could be '_', '-', '--' ...)
used to separate wild-cards
dtypes : data types of the wildcards ('str', 'float', 'int')
decimals : number of decimals (only relevant for floats)
--------------------------------------------------------------
Output
String of the form: "{0:<dtype>}<sep>{1:<dtype>}<sep>...",
where each occurrence of <dtype> is replaced by the dtype value of
the current wild-card and <sep> is replaced by the separator string.
"""
assert type(num_vals) is int
# dictionary of identifiers for supported data types
dident = {str: 's',
int: 'd',
float: '',        # becomes '.<decimals>f' when decimals is given
np.float64: ''}   # becomes '.<decimals>f' when decimals is given
if decimals is not None:
assert type(decimals) is int
dident[float] = '.{}f'.format(decimals)
dident[np.float64] = '.{}f'.format(decimals)
if not hasattr(dtypes, '__iter__'):
dtypes = [dtypes,] * num_vals
elif type(dtypes) is str:
dtypes = [dtypes,] * num_vals
elif len(dtypes) < num_vals:
dtypes = [dtypes[0],] * num_vals
for dt in dtypes:
assert dt in dident.keys(), dt
# construct actual output
out = ""
for i in range(num_vals):
out += "{" + str(i) + ":" + dident[dtypes[i]] + "}"
out += sep
# remove additional separator from output
return out[:-len(sep)] | 1,541 |
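For example, three wild-cards separated by underscores, with two decimals for the float (the values below are arbitrary):

import numpy as np  # generate_format_spec references np.float64

spec = generate_format_spec(3, sep="_", dtypes=[str, float, int], decimals=2)
print(spec)                        # "{0:s}_{1:.2f}_{2:d}"
print(spec.format("run", 0.5, 7))  # "run_0.50_7"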
def products_with_low_stock(threshold: Optional[int] = None):
"""Return queryset with stock lower than given threshold."""
if threshold is None:
threshold = settings.LOW_STOCK_THRESHOLD
stocks = (
Stock.objects.select_related("product_variant")
.values("product_variant__product_id", "warehouse_id")
.annotate(total_stock=Sum("quantity"))
)
return stocks.filter(total_stock__lte=threshold).distinct() | 1,542 |
def robust_topological_sort(deps):
"""
A topological sorting algorithm which is robust enough to handle cyclic graphs.
First, we bucket nodes into strongly connected components (we use Tarjan's linear algorithm for that).
Then, we topologically sort these buckets grouping sibling buckets into sets.
:param deps: a dictionary representing the dependencies between nodes
:return: groups of buckets (a bucket is a strongly connected component) sorted bottom-up
>>> deps1 = {'S':{'S','X', 'A'}, 'X':{'Y', 'B'}, 'Y':{'Z'}, 'Z':{'X'}, 'A':{'B'}, 'B':{}}
>>> expected = [frozenset({frozenset({'B'})}), frozenset({frozenset({'A'}), frozenset({'Y', 'X', 'Z'})}), frozenset({frozenset({'S'})})]
>>> order = robust_topological_sort(deps1)
>>> order == expected
True
"""
# correspondences between nodes and buckets (strongly connected components)
n2c = defaultdict(None)
components = tarjan(deps)
for i, component in enumerate(components):
for v in component:
n2c[v] = i
# find the dependencies between strongly connected components
cdeps = defaultdict(set)
for head, tail in deps.items():
hc = n2c[head]
for t in tail:
tc = n2c[t]
if hc != tc:
cdeps[hc].add(tc)
# topsort buckets and translate bucket ids back into nodes
return deque(frozenset(components[c] for c in group) for group in topological_sort(cdeps)) | 1,543 |
def deprecated(reason):
"""
This is a decorator which can be used to mark functions and classes
as deprecated. It will result in a warning being emitted
when the function is used.
From https://stackoverflow.com/a/40301488
"""
string_types = (type(b""), type(u""))
if isinstance(reason, string_types):
# The @deprecated is used with a 'reason'.
#
# .. code-block:: python
#
# @deprecated("please, use another function")
# def old_function(x, y):
# pass
def decorator(func1):
if inspect.isclass(func1):
fmt1 = "Call to deprecated class {name} ({reason})."
else:
fmt1 = "Call to deprecated function {name} ({reason})."
@functools.wraps(func1)
def new_func1(*args, **kwargs):
warnings.simplefilter("always", DeprecationWarning)
warnings.warn(
fmt1.format(name=func1.__name__, reason=reason),
category=DeprecationWarning,
stacklevel=2,
)
warnings.simplefilter("default", DeprecationWarning)
return func1(*args, **kwargs)
return new_func1
return decorator
elif inspect.isclass(reason) or inspect.isfunction(reason):
# The @deprecated is used without any 'reason'.
#
# .. code-block:: python
#
# @deprecated
# def old_function(x, y):
# pass
func2 = reason
if inspect.isclass(func2):
fmt2 = "Call to deprecated class {name}."
else:
fmt2 = "Call to deprecated function {name}."
@functools.wraps(func2)
def new_func2(*args, **kwargs):
warnings.simplefilter("always", DeprecationWarning)
warnings.warn(
fmt2.format(name=func2.__name__),
category=DeprecationWarning,
stacklevel=2,
)
warnings.simplefilter("default", DeprecationWarning)
return func2(*args, **kwargs)
return new_func2
else:
raise TypeError(repr(type(reason))) | 1,544 |
def rotate_tensor(l: torch.Tensor, n: int = 1) -> torch.Tensor:
"""Roate tensor by n positions to the right
Args:
l (torch.Tensor): input tensor
n (int, optional): positions to rotate. Defaults to 1.
Returns:
torch.Tensor: rotated tensor
"""
return torch.cat((l[n:], l[:n])) | 1,545 |
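A quick check of the rotation direction (the element at index n moves to the front):

import torch

t = torch.tensor([1, 2, 3, 4, 5])
print(rotate_tensor(t))        # tensor([2, 3, 4, 5, 1])
print(rotate_tensor(t, n=2))   # tensor([3, 4, 5, 1, 2])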
def test_apply_spatial1_onecolumn(c_or_python):
"""Test the code path for Python _apply_array_spatial1 with one column
"""
fqe.settings.use_accelerated_code = c_or_python
work = fqe_data.FqeData(2, 1, 3)
work.set_wfn(strategy='ones')
# dummy Hamiltonian with one nonzero column
h1 = numpy.asarray(
[[1.0 + 0.j, 0. + 0.j, 0.0 + 0.j], [0.0 + 0.j, 0.0 + 0.j, 0.0 + 0.j],
[1.0 + 0.j, 0.0 + 0.j, 0.0 + 0.j]],
dtype=numpy.complex128)
work.apply_inplace((h1,))
assert numpy.allclose(
work.coeff,
numpy.asarray(
[[2. + 0.j, 1. + 0.j, 2. + 0.j], [2. + 0.j, 1. + 0.j, 2. + 0.j],
[0. + 0.j, -1. + 0.j, 0. + 0.j]],
dtype=numpy.complex128)) | 1,546 |
def test_multiple_header_rows():
"""column names that indicate coordinates
are found even if buried in the middle of multiple header rows."""
tables = etree.XML(
"""<extracted-tables-set>
<pmcid>123</pmcid>
<extracted-table>
<table-id />
<table-label />
<transformed-table>
<table>
<thead>
<tr><td>Task 1</td></tr>
<tr><td>x, y, z</td></tr>
<tr><td>Something</td></tr>
</thead>
<tbody>
<tr><td>-10,-15,+68 </td></tr>
</tbody>
</table>
</transformed-table>
</extracted-table>
</extracted-tables-set>
"""
)
coords = _coordinates._extract_coordinates_from_article_tables(tables)
assert (
coords.loc[:, ["x", "y", "z"]].values.ravel() == [-10, -15, 68]
).all() | 1,547 |
def generate_all_fish(
n_fish,
n_replica_fish,
channel,
interaction,
k_coh,
k_ar,
alpha,
lim_neighbors,
weights = [1],
neighbor_weights=None,
fish_max_speeds=None,
clock_freqs=None,
verbose=False,
names=None
):
"""Generate both replica and regular fish
Arguments:
n_fish {int} -- Number of ideal fish to generate
n_replica_fish {int} -- Number of replica fish to generate
channel {Channel} -- Channel instance
interaction {Interaction} -- Interaction instance
k_coh {float} -- Parameter to Delight Fish
k_ar {float} -- Weighting of neighbors in Delight Fish
alpha {int} -- Goal distance from neighbor for Delight Fish
lim_neighbors {list} -- Tuple of min and max neighbors
weights {float|list} -- List of weights for replica fish learned function
neighbor_weights {float|list} -- List of neighbor weights
fish_max_speeds {float|list} -- List of max speeds
clock_freqs {int|list} -- List of clock speeds
names {list} -- List of names for your replica fish
"""
n = n_fish + n_replica_fish
if neighbor_weights is None:
neighbor_weights = [1.0] * n
elif not isinstance(neighbor_weights, list):
neighbor_weights = [neighbor_weights] * n
if fish_max_speeds is None:
fish_max_speeds = [1.0] * n
elif not isinstance(fish_max_speeds, list):
fish_max_speeds = [fish_max_speeds] * n
if clock_freqs is None:
clock_freqs = [1] * n
elif not isinstance(clock_freqs, list):
clock_freqs = [clock_freqs] * n
if names is None:
names = ['Unnamed'] * n
all_fish = []
for i in range(n_fish):
all_fish.append(Fish(
id=i,
channel=channel,
interaction=interaction,
k_coh = k_coh,
k_ar = k_ar,
alpha = alpha,
lim_neighbors=lim_neighbors,
neighbor_weight=neighbor_weights[i],
fish_max_speed=fish_max_speeds[i],
clock_freq=clock_freqs[i],
verbose=verbose,
name=names[i]
))
for i in range(n_fish, n_fish + n_replica_fish):
all_fish.append(ReplicaFish(
id=i,
channel=channel,
interaction=interaction,
weights = weights,
fish_max_speed=fish_max_speeds[i],
clock_freq=clock_freqs[i],
name=names[i],
verbose=verbose
))
return all_fish | 1,548 |
def allclose(a, b):
""" close to machine precision """
return np.allclose(a, b, rtol=1e-14, atol=1e-14) | 1,549 |
def check_pwhash(pwhash, password):
"""Check a password against a given hash value. Since
many forums save md5 passwords with no salt and it's
technically impossible to convert this to an sha hash
with a salt we use this to be able to check for
plain passwords::
plain$$default
md5 passwords without salt::
md5$$c21f969b5f03d33d43e04f8f136e7682
md5 passwords with salt::
md5$123456$7faa731e3365037d264ae6c2e3c7697e
sha passwords::
sha$123456$118083bd04c79ab51944a9ef863efcd9c048dd9a
Note that the integral passwd column in the table is
only 60 chars long. If you have a very large salt
or the plaintext password is too long it will be
truncated.
>>> check_pwhash('plain$$default', 'default')
True
>>> check_pwhash('sha$$5baa61e4c9b93f3f0682250b6cf8331b7ee68fd8', 'password')
True
>>> check_pwhash('sha$$5baa61e4c9b93f3f0682250b6cf8331b7ee68fd8', 'wrong')
False
>>> check_pwhash('md5$xyz$bcc27016b4fdceb2bd1b369d5dc46c3f', u'example')
True
>>> check_pwhash('sha$5baa61e4c9b93f3f0682250b6cf8331b7ee68fd8', 'password')
False
>>> check_pwhash('md42$xyz$bcc27016b4fdceb2bd1b369d5dc46c3f', 'example')
False
"""
if isinstance(password, unicode):
password = password.encode('utf-8')
if pwhash.count('$') < 2:
return False
method, salt, hashval = pwhash.split('$', 2)
if method == 'plain':
return hashval == password
elif method == 'md5':
h = md5()
elif method == 'sha':
h = sha1()
else:
return False
h.update(salt)
h.update(password)
return h.hexdigest() == hashval | 1,550 |
def make_markov_model(tweets):
"""Wrapper around making Markov Chain"""
return markovify.Text(" ".join(tweets)) | 1,551 |
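A minimal sketch with a toy corpus; markovify's make_sentence may return None when the corpus is this small.

import markovify

tweets = [
    "the cat sat on the mat.",
    "the dog sat on the rug.",
    "a cat chased the dog across the yard.",
]
model = make_markov_model(tweets)
print(model.make_sentence(tries=100))  # a short generated sentence, or None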
def slice_image(sitk_image, start=(0, 0, 0), end=(-1, -1, -1)):
""""Returns the `sitk_image` sliced from the `start` index (x,y,z) to the `end` index.
"""
size = sitk_image.GetSize()
assert len(start) == len(end) == len(size)
# replace -1 dim index placeholders with the size of that dimension
end = [size[i] if end[i] == -1 else end[i] for i in range(len(end))]
slice_filter = sitk.SliceImageFilter()
slice_filter.SetStart(start)
slice_filter.SetStop(end)
return slice_filter.Execute(sitk_image) | 1,552 |
def MakeGlyphs(src, reverseNormals):
"""
Glyph the normals on the surface.
You may need to adjust the parameters for maskPts, arrow and glyph for a
nice appearance.
:param: src - the surface to glyph.
:param: reverseNormals - if True the normals on the surface are reversed.
:return: The glyph object.
"""
# Sometimes the contouring algorithm can create a volume whose gradient
# vector and ordering of polygon (using the right hand rule) are
# inconsistent. vtkReverseSense cures this problem.
reverse = vtk.vtkReverseSense()
# Choose a random subset of points.
maskPts = vtk.vtkMaskPoints()
maskPts.SetOnRatio(5)
maskPts.RandomModeOn()
if reverseNormals:
reverse.SetInputData(src)
reverse.ReverseCellsOn()
reverse.ReverseNormalsOn()
maskPts.SetInputConnection(reverse.GetOutputPort())
else:
maskPts.SetInputData(src)
# Source for the glyph filter
arrow = vtk.vtkArrowSource()
arrow.SetTipResolution(16)
arrow.SetTipLength(0.3)
arrow.SetTipRadius(0.1)
glyph = vtk.vtkGlyph3D()
glyph.SetSourceConnection(arrow.GetOutputPort())
glyph.SetInputConnection(maskPts.GetOutputPort())
glyph.SetVectorModeToUseNormal()
glyph.SetScaleFactor(1)
glyph.SetColorModeToColorByVector()
glyph.SetScaleModeToScaleByVector()
glyph.OrientOn()
glyph.Update()
return glyph | 1,553 |
def get_tags_categorys(self):
"""02返回添加文档的变量"""
tags = Tag.all()
categorys = Category.all()
return tags, categorys | 1,554 |
def main():
"""Operations executed when calling this script from the command line"""
args = ArgparseUserOptions(
description=parser_description,
args_dict_list=[required_args_dict, optional_args_dict],
epilog=__doc__,
).parse_args(sys.argv[1:])
return args | 1,555 |
def single_mode_constant_rotation(**kwargs):
"""Return WaveformModes object a single nonzero mode, with phase proportional to time
The waveform output by this function will have just one nonzero mode. The behavior of that mode will be fairly
simple; it will be given by exp(i*omega*t). Note that omega can be complex, which gives damping.
Parameters
----------
s : int, optional
Spin weight of the waveform field. Default is -2.
ell, m : int, optional
The (ell, m) values of the nonzero mode in the returned waveform. Default value is (abs(s), -abs(s)).
ell_min, ell_max : int, optional
Smallest and largest ell values present in the output. Default values are abs(s) and 8.
data_type : int, optional
Default value is whichever psi_n corresponds to the input spin. It is important to choose these, rather than
`h` or `sigma` for the analytical solution to translations, which doesn't account for the direct contribution
of supertranslations (as opposed to the indirect contribution, which involves moving points around).
t_0, t_1 : float, optional
Beginning and end of time. Default values are -20. and 20.
dt : float, optional
Time step. Default value is 0.1.
omega : complex, optional
Constant of proportionality such that nonzero mode is exp(i*omega*t). Note that this can be complex, which
implies damping. Default is 0.5.
"""
s = kwargs.pop("s", -2)
ell = kwargs.pop("ell", abs(s))
m = kwargs.pop("m", -ell)
ell_min = kwargs.pop("ell_min", abs(s))
ell_max = kwargs.pop("ell_max", 8)
data_type = kwargs.pop("data_type", scri.DataType[scri.SpinWeights.index(s)])
t_0 = kwargs.pop("t_0", -20.0)
t_1 = kwargs.pop("t_1", 20.0)
dt = kwargs.pop("dt", 1.0 / 10.0)
t = np.arange(t_0, t_1 + dt, dt)
n_times = t.size
omega = complex(kwargs.pop("omega", 0.5))
data = np.zeros((n_times, sf.LM_total_size(ell_min, ell_max)), dtype=complex)
data[:, sf.LM_index(ell, m, ell_min)] = np.exp(1j * omega * t)
if kwargs:
import pprint
warnings.warn(f"\nUnused kwargs passed to this function:\n{pprint.pformat(kwargs, width=1)}")
return scri.WaveformModes(
t=t,
data=data,
ell_min=ell_min,
ell_max=ell_max,
frameType=scri.Inertial,
dataType=data_type,
r_is_scaled_out=True,
m_is_scaled_out=True,
) | 1,556 |
def get_file(file_pattern: list, sub_type: str = None) -> list:
"""Get a subset from file patterns that belong to a sub-type.
If no sub-type is specified, return all file patterns.
Args:
file_pattern (list): The input file patterns
sub_type (str, optional): A string to search in file patterns. Defaults to None.
Raises:
ValueError: No file pattern matches the sub-type provided.
Returns:
list: A filtered sub list of file patterns.
"""
if sub_type is None:
return file_pattern
result = []
for entry in file_pattern:
if sub_type in entry:
result.append(entry)
if len(result) < 1:
raise ValueError(
"No file found for sub-type {}: {}".format(sub_type, file_pattern)
)
else:
return result | 1,557 |
def etopo_subset(llcrnrlon=None, urcrnrlon=None, llcrnrlat=None,
urcrnrlat=None, tfile='dap', smoo=False, subsample=False):
"""Get a etopo subset.
Should work on any netCDF with x, y, data
http://www.trondkristiansen.com/wp-content/uploads/downloads/
2011/07/contourICEMaps.py
Example
-------
>>> import matplotlib.pyplot as plt
>>> offset = 5
>>> #tfile = './ETOPO1_Bed_g_gmt4.grd'
>>> tfile = 'dap'
>>> llcrnrlon, urcrnrlon, llcrnrlat, urcrnrlat = -43, -30, -22, -17
>>> lons, lats, bathy = etopo_subset(llcrnrlon - offset,
... urcrnrlon + offset,
... llcrnrlat - offset,
... urcrnrlat + offset,
... smoo=True, tfile=tfile)
>>> fig, ax = plt.subplots()
>>> cs = ax.pcolormesh(lons, lats, bathy)
>>> _ = ax.axis([-42, -28, -23, -15])
>>> _ = ax.set_title(tfile)
"""
if tfile == 'dap':
tfile = 'http://opendap.ccst.inpe.br/Misc/etopo2/ETOPO2v2c_f4.nc'
etopo = Dataset(tfile, 'r')
lons = etopo.variables["x"][:]
lats = etopo.variables["y"][:]
res = get_indices(llcrnrlat, urcrnrlat, llcrnrlon, urcrnrlon, lons, lats)
lon, lat = np.meshgrid(lons[res[0]:res[1]], lats[res[2]:res[3]])
bathy = etopo.variables["z"][int(res[2]):int(res[3]),
int(res[0]):int(res[1])]
if smoo:
bathy = laplace_filter(bathy, M=None)
if subsample:
bathy = bathy[::subsample]
lon, lat = lon[::subsample], lat[::subsample]
return lon, lat, bathy | 1,558 |
def get_all_report_data(db):
"""
Gets all report data for pre report page
"""
query = r'SELECT * FROM report WHERE relevent=1 ORDER BY id DESC'
return db_get(db, query) | 1,559 |
def find_center_projection(mat1, mat2, flip=True, chunk_height=None,
start_row=None, denoise=True, norm=False,
use_overlap=False):
"""
Find the center-of-rotation (COR) using projection images at 0-degree
and 180-degree based on a method in Ref. [1].
Parameters
----------
mat1 : array_like
2D array. Projection image at 0-degree.
mat2 : array_like
2D array. Projection image at 180-degree.
flip : bool, optional
Flip the 180-degree projection in the left-right direction if True.
chunk_height : int or float, optional
Height of the sub-area of projection images. If a float is given, it
must be in the range of [0.0, 1.0].
start_row : int, optional
Starting row used to extract the sub-area.
denoise : bool, optional
Apply the Gaussian filter if True.
norm : bool, optional
Apply the normalization if True.
use_overlap : bool, optional
Use the combination of images in the overlap area for calculating
correlation coefficients if True.
Returns
-------
cor : float
Center-of-rotation.
References
----------
.. [1] https://doi.org/10.1364/OE.418448
"""
(nrow, ncol) = mat1.shape
if flip is True:
mat2 = np.fliplr(mat2)
win_width = ncol // 2
if chunk_height is None:
chunk_height = int(0.1 * nrow)
if isinstance(chunk_height, float):
if 0.0 < chunk_height <= 1.0:
chunk_height = int(chunk_height * nrow)
else:
chunk_height = int(0.1 * nrow)
chunk_height = np.clip(chunk_height, 1, nrow - 1)
if start_row is None:
start = nrow // 2 - chunk_height // 2
elif start_row < 0:
start = nrow + start_row - chunk_height // 2
else:
start = start_row - chunk_height // 2
stop = start + chunk_height
start = np.clip(start, 0, nrow - chunk_height - 1)
stop = np.clip(stop, chunk_height, nrow - 1)
mat1_roi = mat1[start: stop]
mat2_roi = mat2[start: stop]
(overlap, side, _) = find_overlap(mat1_roi, mat2_roi, win_width, side=None,
denoise=denoise, norm=norm,
use_overlap=use_overlap)
if side == 0:
cor = overlap / 2.0 - 1.0
else:
cor = ncol - overlap / 2.0 - 1.0
return cor | 1,560 |
def parse_date(date_str):
"""
Parse the given 'YYYY-MM-DD' string into a datetime.date.
"""
ymd = date_str.split('-')
return date(int(ymd[0]), int(ymd[1]), int(ymd[2])) | 1,561 |
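Usage is straightforward for ISO-style dates:

from datetime import date  # parse_date relies on this being imported

print(parse_date("2021-03-15"))  # 2021-03-15 (a datetime.date)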
def mark_as_widget(view):
"""
Marks @view as a widget so we can later inspect that attribute, for
example, when hiding panels in _vi_enter_normal_mode.
Used prominently by '/', '?' and ':'.
XXX: This doesn't always work as we expect. For example, changing
settings to a panel created instants before does not make those
settings visible when the panel is activated. Investigate.
We still need this so that contexts will ignore widgets, though.
However, the fact that they are widgets should suffice to disable
Vim keys for them...
"""
view.settings().set('is_vintageous_widget', True)
return view | 1,562 |
def module_name(ctx, f):
"""Given Haskell source file path, turn it into a dot-separated module name.
module_name(
ctx,
"some-workspace/some-package/src/Foo/Bar/Baz.hs",
) => "Foo.Bar.Baz"
Args:
ctx: Rule context.
f: Haskell source file.
Returns:
string: Haskell module name.
"""
return _drop_extension(_rel_path_to_module(ctx, f).replace('/', '.')) | 1,563 |
def train(
train_dir,
batch_size_per_gpu,
num_train_epochs,
dataset_name,
dataset_parent_dir,
strategy=None):
"""TBD."""
# ----------------------------------------------------------------------------
# Print train configuration.
# ----------------------------------------------------------------------------
os.makedirs(train_dir, exist_ok=True)
configs = list_configs()
print(configs)
with open(os.path.join(train_dir, 'run_config.txt'), 'a') as f:
f.write(configs)
f.write('# =============================================================\n')
# ----------------------------------------------------------------------------
# Initialize tf.distribute.Strategy and other training variables.
# ----------------------------------------------------------------------------
if strategy is None:
strategy = tf.distribute.MirroredStrategy()
print ('Number of devices: {}'.format(strategy.num_replicas_in_sync))
global_batch_size = batch_size_per_gpu * strategy.num_replicas_in_sync
# ----------------------------------------------------------------------------
# Prepare input data
# ----------------------------------------------------------------------------
train_dataset = voxceleb.provide_data(
data_source_pattern=osp.join(dataset_parent_dir, opts.trainset_pattern),
use_segmaps = opts.use_segmaps,
batch_size=global_batch_size,
k_frames=opts.K,
num_concatenations=opts.num_frame_concatenations,
is_training=True,
shuffle=True)
dist_train_dataset = strategy.experimental_distribute_dataset(train_dataset)
train_summary_writer = tf.summary.create_file_writer(train_dir)
train_summary_writer.set_as_default()
# ----------------------------------------------------------------------------
# Construct graph and ops for training and evaluation.
# ----------------------------------------------------------------------------
lr_warmstart_steps = opts.lr_warmstart_steps
# decay_start_step = (opts.num_train_epochs - opts.num_lr_decay_epochs) * (
# opts.trainset_size // global_batch_size)
# decay_end_step = (
# opts.num_train_epochs * opts.trainset_size // global_batch_size)
prev_global_step = utils.load_variable_from_checkpoint(
train_dir, 'global_step')
prev_train_examples_count = utils.load_variable_from_checkpoint(
train_dir, 'train_examples_count')
if prev_global_step is None:
prev_global_step = 0
if prev_train_examples_count is None:
prev_train_examples_count = 0
total_train_examples = opts.total_k_examples * 1000
num_lr_decay_examples = opts.num_lr_decay_k_examples * 1000
decay_start_example = total_train_examples - num_lr_decay_examples
remaining_steps_to_decay = (
(decay_start_example - prev_train_examples_count) // global_batch_size)
decay_start_step = prev_global_step + remaining_steps_to_decay
remaining_steps_to_terminate = (
(total_train_examples - prev_train_examples_count) // global_batch_size)
decay_end_step = prev_global_step + remaining_steps_to_terminate
decay_num_intervals = opts.decay_num_intervals
if opts.scale_lr_with_num_gpus:
lr_mul_factor = strategy.num_replicas_in_sync
else:
lr_mul_factor = 1
with strategy.scope():
model = _create_model(opts.model_type, opts)
model.init_extra_train_and_eval_networks()
optimizers = model.create_optimizers(
lr_warmstart_steps,
decay_start_step,
decay_end_step,
decay_num_intervals,
lr_mul_factor)
if opts.model_type == 'pretrain_layout':
g_optimizer = optimizers['g_layout_optimizer']
else:
g_optimizer = optimizers['g_optimizer']
if 'd_optimizer' in optimizers:
d_optimizer = optimizers['d_optimizer']
else:
d_optimizer = None
tf.compat.v1.logging.info('Creating Timer ...')
global_step = tf.compat.v1.train.get_or_create_global_step()
timer = tf.estimator.SecondOrStepTimer(every_steps=100)
timer.update_last_triggered_step(global_step.numpy())
# ----------------------------------------------------------------------------
  # Create checkpoint manager.
# ----------------------------------------------------------------------------
tf.compat.v1.logging.info('Creating checkpoint ...')
epoch_var = tf.Variable(0, dtype=tf.int64, trainable=False)
train_examples_count_var = tf.Variable(0, dtype=tf.int64, trainable=False)
checkpoint = tf.train.Checkpoint(
**model.get_optimizers(),
global_step=global_step,
epoch=epoch_var,
train_examples_count=train_examples_count_var,
training_finished=tf.Variable(False, dtype=tf.bool, trainable=False),
**model.get_networks())
checkpoint_manager = tf.train.CheckpointManager(
checkpoint,
directory=train_dir,
max_to_keep=opts.max_checkpoints_to_keep,
keep_checkpoint_every_n_hours=opts.keep_checkpoint_every_n_hours)
# ----------------------------------------------------------------------------
# Restore pre-trained models or old checkpoints, if any.
# ----------------------------------------------------------------------------
if checkpoint_manager.latest_checkpoint:
print('Restoring model weights from {}'.format(
checkpoint_manager.latest_checkpoint))
# checkpoint.restore(checkpoint_manager.latest_checkpoint).expect_partial()
status = checkpoint.restore(checkpoint_manager.latest_checkpoint)
# status.assert_consumed()
elif opts.warmup_checkpoint:
warmup_ckpt = tf.train.Checkpoint(
# **model.get_optimizers(),
global_step=global_step,
epoch=epoch_var,
train_examples_count=train_examples_count_var,
# training_finished=tf.Variable(False, dtype=tf.bool, trainable=False),
**model.get_networks())
warmup_ckpt_manager = tf.train.CheckpointManager(
warmup_ckpt,
directory=opts.warmup_checkpoint,
max_to_keep=1)
assert warmup_ckpt_manager.latest_checkpoint, (
f'No latest ckpt for --warmup_checkpoint={opts.warmup_checkpoint}')
print('*** Warmstarting model weights from {}'.format(
warmup_ckpt_manager.latest_checkpoint))
status = warmup_ckpt.restore(warmup_ckpt_manager.latest_checkpoint)
status.expect_partial()
else:
print('Initializing networks from scratch!')
  # Load pre-trained weights for auxiliary networks (e.g. VGGFace).
model.load_loss_pretrained_weights()
if train_examples_count_var.numpy() >= opts.total_k_examples * 1000:
print('Model has already trained for --total_k_examples=%d.' % (
opts.total_k_examples))
return
# ----------------------------------------------------------------------------
# Main loop.
# ----------------------------------------------------------------------------
st_time = time.time()
steps_per_second = -1
tf.summary.experimental.set_step(step=global_step.numpy())
first_step_flag = True
# Alternatively, you can use (global_step.numpy() < opts.max_steps).
# while epoch_var.numpy() < num_train_epochs:
while train_examples_count_var.numpy() < opts.total_k_examples * 1000:
for batch_idx, train_input_dict in enumerate(dist_train_dataset):
log_info = (global_step.numpy() % opts.log_info_every_n_steps) == 0
log_str = ''
if log_info:
d_iter = 'N/A' if d_optimizer is None else str(
d_optimizer.iterations.numpy())
log_str += (
f'[EPOCH {epoch_var.numpy() + 1}; i_batch={batch_idx}; '
f'global_step={global_step.numpy()} '
f'num_k_examples={train_examples_count_var.numpy() // 1000} '
f'(g_iter={g_optimizer.iterations.numpy()}, d_iter={d_iter})]: ')
if first_step_flag:
train_g_step = True
else:
train_g_step = (batch_idx % opts.disc_steps_per_g) == 0
# First call to @tf.functions needs to construct the full graph, hence
# needs to run both G and D train steps.
if not opts.alternate_G_D_training or first_step_flag:
loss_dict, _, summaries = model.train_distributed(
strategy,
train_input_dict,
global_batch_size,
train_g_step=train_g_step,
train_d_step=True)
else:
if train_g_step:
loss_dict, _, summaries = model.train_distributed(
strategy,
train_input_dict,
global_batch_size,
train_g_step=True,
train_d_step=False)
loss_dict, _, summaries = model.train_distributed(
strategy,
train_input_dict,
global_batch_size,
train_g_step=False,
train_d_step=True)
if first_step_flag:
for network_name, network in model.get_networks().items():
model_utils.print_model_summary(network, network_name, list_vars=True)
first_step_flag = False
# Increment train examples count.
train_examples_count_var.assign_add(global_batch_size)
# Write summaries.
if global_step.numpy() % opts.save_summaries_every_n_steps == 0:
tf.summary.experimental.set_step(step=global_step.numpy())
# tf.summary.scalar('iterations', global_step.numpy())
for optimizer_name, optimizer in optimizers.items():
tf.summary.scalar(f'learning_rate/{optimizer_name}',
optimizer.learning_rate.numpy())
# Write loss summaries.
for key, value in loss_dict.items():
tf.summary.scalar('losses/' + key, value)
# Write other scalar, image and text summaries.
scalar_summaries, image_summaries, text_summaries = summaries
# Scalar summaries.
for key, value in scalar_summaries.items():
if strategy.num_replicas_in_sync > 1:
value = value.values[0]
tf.summary.scalar(key, value)
# Image summaries.
for key, value in image_summaries.items():
if strategy.num_replicas_in_sync > 1:
value = value.values[0]
tf.summary.image(key, (value + 1.) / 2.)
# Text summaries.
for key, value in text_summaries.items():
if strategy.num_replicas_in_sync > 1:
value = value.values[0]
tf.summary.text(key, value)
# Log losses.
if log_info:
for i, (key, value) in enumerate(loss_dict.items()):
log_str += (', ' if i > 0 else '') + f'{key}={value:.3f}'
print(log_str)
# Log steps/sec.
if timer.should_trigger_for_step(global_step.numpy()):
elapsed_time, elapsed_steps = timer.update_last_triggered_step(
global_step.numpy())
if elapsed_time is not None:
steps_per_second = elapsed_steps / elapsed_time
tf.summary.scalar(
'steps/sec', steps_per_second, step=global_step)
# Increment global_step.
tf.compat.v1.assign_add(global_step, 1)
# Save checkpoint.
if (global_step.numpy() - 1) % opts.save_checkpoint_every_n_steps == 0:
tf.compat.v1.logging.info('Saving checkpoint at step %d to %s.' % (
global_step.numpy(), train_dir))
checkpoint_manager.save(
checkpoint_number=global_step.numpy())
if (train_examples_count_var.numpy() // opts.trainset_size) > (
epoch_var.numpy()):
# Increment epoch.
epoch_var.assign_add(1)
break
# Assign training_finished variable to True after training is finished,
# save the last checkpoint and close summary writer.
checkpoint.training_finished.assign(True)
checkpoint_manager.save(checkpoint_number=global_step.numpy())
train_summary_writer.close()
# Print overall training time.
total_time = time.time() - st_time
print('Total runtime for %d K examples (%d epochs) = %s.' % (
opts.total_k_examples, epoch_var.numpy(), total_time)) | 1,564 |
def store(key):
"""Gets the configured default store. The default is PickleStore
:return store: Store object
"""
global __stores
if __stores is None:
__stores = {}
if key not in __stores:
__stores[key] = __configuration[STORE](key)
return __stores[key] | 1,565 |
def test_cross_val_score_weighted():
"""Test ``cross_val_score_weighted``."""
sample_weights = np.array([1.0, 1.0, 0.0, 1.0, 1.0, 0.0])
cv_score_kwargs = {
'estimator': LinearRegression(),
'x_data': np.arange(6).reshape(6, 1),
'y_data': np.array([0, 1, 1000, 0, -1, -1000]),
'groups': ['A', 'A', 'A', 'B', 'B', 'B'],
'scoring': 'neg_mean_absolute_error',
'cv': LeaveOneGroupOut(),
'fit_params': {'sample_weight': sample_weights},
'sample_weights': sample_weights,
}
scores = cross_val_score_weighted(**cv_score_kwargs)
np.testing.assert_allclose(scores, [-2.0, -4.0]) | 1,566 |
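# Worked check of the expected scores above (added explanation, not part of the test).
# LeaveOneGroupOut yields two folds, and the zero sample weights mask out the two
# outliers (y=1000 and y=-1000) in both fitting and scoring:
#  * Test on group 'A': fit on (3, 0) and (4, -1) -> y = -x + 3. Weighted MAE on
#    group 'A' uses (0, 0) and (1, 1): errors |3-0| = 3 and |2-1| = 1, mean 2,
#    so neg_mean_absolute_error = -2.0.
#  * Test on group 'B': fit on (0, 0) and (1, 1) -> y = x. Weighted MAE on
#    group 'B' uses (3, 0) and (4, -1): errors 3 and 5, mean 4, so the score is -4.0.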
def get_label_for_line(line, leg):
"""
Can't remember what I was using this for but seems useful to keep
"""
# leg = line.figure.legends[0]
# leg = line.axes.get_legend()
for h, t in zip(leg.legendHandles, leg.texts):
if h.get_label() == line.get_label():
return t.get_text() | 1,567 |
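# A hedged usage sketch for get_label_for_line above. It assumes a matplotlib
# version where Legend.legendHandles is still available (newer releases rename it
# to legend_handles) and uses the headless Agg backend.
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
line1, = ax.plot([0, 1], [0, 1], label="rising")
line2, = ax.plot([0, 1], [1, 0], label="falling")
leg = ax.legend()
print(get_label_for_line(line1, leg))  # expected: "rising"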
def test_save_unfitted():
"""Save an unfitted HMMClassifier object."""
try:
with pytest.raises(RuntimeError) as e:
HMMClassifier().save('test.pkl')
assert str(e.value) == 'The classifier needs to be fitted before it can be saved'
finally:
if os.path.exists('test.pkl'):
os.remove('test.pkl') | 1,568 |
def validate_maximum(value, maximum):
"""Validate that ``value`` is at most ``maximum``
Normally called in :meth:`~mopidy.config.types.ConfigValue.deserialize`.
"""
if maximum is not None and value > maximum:
raise ValueError(
'{!r} must be smaller than {!r}.'.format(value, maximum)) | 1,569 |
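# Usage sketch for validate_maximum above:
validate_maximum(5, 10)      # within the limit -> returns None
validate_maximum(5, None)    # no maximum configured -> always passes
try:
    validate_maximum(11, 10)
except ValueError as exc:
    print(exc)               # 11 must be smaller than 10.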
def _node_parent_listener(target, value, oldvalue, initiator):
"""Listen for Node.parent being modified and update path"""
if value != oldvalue:
if value is not None:
if target._root != (value._root or value):
target._update_root(value._root or value)
target._update_path(newparent=value)
else:
# This node just got orphaned. It's a new root
target._update_root(target)
target._update_path(newparent=target)
return value | 1,570 |
def flatten(sequence):
"""
    Yields the items of an arbitrarily nested list/tuple structure in depth-first (first-visit) order.
Args:
sequence: The iterable that is to be flattened
Returns: iterable
"""
for item in sequence:
if isinstance(item, (list, tuple)):
for subitem in flatten(item):
yield subitem
else:
yield item | 1,571 |
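# Example usage of flatten above on a mixed nested structure:
nested = [1, (2, [3, 4]), [[5], 6]]
print(list(flatten(nested)))  # [1, 2, 3, 4, 5, 6]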
def vxm_data_generator(x_data, batch_size=32):
"""
Generator that takes in data of size [N, H, W], and yields data for
our custom vxm model. Note that we need to provide numpy data for each
input, and each output.
inputs: moving [bs, H, W, 1], fixed image [bs, H, W, 1]
outputs: moved image [bs, H, W, 1], zero-gradient [bs, H, W, 2]
"""
# preliminary sizing
vol_shape = x_data.shape[1:] # extract data shape
ndims = len(vol_shape)
    # prepare a zero-valued array with the shape of the deformation field;
    # it serves as the dummy target for the network's flow output
zero_phi = np.zeros([batch_size, *vol_shape, ndims])
while True:
# prepare inputs:
# images need to be of the size [batch_size, H, W, 1]
idx1 = np.random.randint(0, x_data.shape[0], size=batch_size)
moving_images = x_data[idx1, ..., np.newaxis]
idx2 = np.random.randint(0, x_data.shape[0], size=batch_size)
fixed_images = x_data[idx2, ..., np.newaxis]
inputs = [moving_images, fixed_images]
# prepare outputs (the 'true' moved image):
# of course, we don't have this, but we know we want to compare
# the resulting moved image with the fixed image.
# we also wish to penalize the deformation field.
outputs = [fixed_images, zero_phi]
yield (inputs, outputs) | 1,572 |
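# A shape-only usage sketch for vxm_data_generator above, using random data in
# place of a real dataset; no voxelmorph model is required to run it.
import numpy as np

x_train = np.random.rand(10, 32, 32)           # [N, H, W]
gen = vxm_data_generator(x_train, batch_size=4)
inputs, outputs = next(gen)
print(inputs[0].shape, inputs[1].shape)        # (4, 32, 32, 1) (4, 32, 32, 1)
print(outputs[0].shape, outputs[1].shape)      # (4, 32, 32, 1) (4, 32, 32, 2)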
def test_jinja2_required_extensions() -> None:
"""Test ``jinja2.ext`` has attrs needed for language support."""
# noinspection PyUnresolvedReferences
assert hasattr(jinja2_ext, "autoescape")
assert hasattr(jinja2_ext, "with_") | 1,573 |
def _make_block_conf_header(block, append_fn):
"""Returns a list of config.h strings for a block (list) of items."""
# Collect the substrings in a list and later use join() instead of += to
# build the final .config contents. With older Python versions, this yields
# linear instead of quadratic complexity.
for item in block:
item._make_conf_header(append_fn) | 1,574 |
def save_model_architecture(model, project_name, keras_model_type, cat_vocab_dict,
model_options, chart_name="model_before"):
"""
This function saves the model architecture in a PNG file in the artifacts sub-folder of project_name folder
"""
if isinstance(project_name,str):
if project_name == '':
project_name = "deep_autoviml"
else:
        print('Project name must be a string; it is used to name the folder where the model is stored.')
project_name = "deep_autoviml"
save_model_path = model_options['save_model_path']
save_artifacts_path = os.path.join(save_model_path, "artifacts")
try:
plot_filename = os.path.join(save_artifacts_path,chart_name)+".png"
print('\nSaving model architecture...')
tf.keras.utils.plot_model(model = model, to_file=plot_filename, dpi=72,
show_layer_names=True, rankdir="LR", show_shapes=True)
print(' model architecture saved in file: %s' %plot_filename)
except:
print('Model architecture not saved due to error. Continuing...')
plot_filename = ""
return plot_filename | 1,575 |
def sequential_to_momentum_net(module: torch.nn.Sequential,
split_dim=1,
coupling_forward: typing.Optional[typing.List[typing.Optional[typing.Callable]]] = None,
coupling_inverse: typing.Optional[typing.List[typing.Optional[typing.Callable]]] = None,
memory_mode: MemoryModes = MemoryModes.autograd_function,
target_device: str = "",
fused_optimizer: FUSED_OPTIMIZER = None,
residual: bool = False,
beta: float = 0.9) -> ReversibleSequential:
"""
Creates a sequential MomentumNet by unrolling a nn.Sequential module and dispatching to `momentum_net()`
:param module: An existing nn.Sequential module that should be converted into a ReversibleSequential module.
:param split_dim: RevNets require two streams. This parameter specifies which dimension to split in half to
create the two streams. `None` would mean the input gets replicated for both streams. It's usually best to split
along the features, which is why the default (1) is compatible with convolutions.
:param coupling_forward: RevNet uses y0 = (x0 + f(x1)) as a coupling function, but this allows you to set a
custom one. For example, MomentumNet (https://arxiv.org/abs/2102.07870) uses
y0 = (beta * x0 + (1 - beta) * f(x1)). The inputs to the coupling function are the residual stream and the
    function output. For more information, look at the examples. default = revnet coupling
:param coupling_inverse: The inverse of the coupling function. default = revnet inverse
:param memory_mode: One of `MemoryModes`'s values. Some things are only supported in one mode while others
might only be supported in another. default = autograd function (highest coverage but spotty XLA support)
:param target_device: Specifies where the parameters should be moved to before computing the forward and
backward pass. This allows efficient CPU-offloading.
default = no offloading (keep parameters on the device they're on)
:param fused_optimizer: Allows an optimizer step to run while the model is computing its backward pass. This
means that the gradients don't have to be fully instantiated anymore and can improve speed when used with
cpu-offload due to asynchronous compute. It expects a function that generates an optimizer from a list of
parameters. (like Adam.__init__) default = no fused optimizer step
:param residual: Whether to "undo" a residual stream or not. Using y = f(x0) + x0 + x1 is generally not a good idea,
so this would subtract `x0` from y allowing you to patch existing residual modules without modifying their code.
:param beta: MomentumNet beta value that controls how much of the velocity stream is kept.
:return: Instantiated MomentumNet (instance of `ReversibleSequential`)
"""
return momentum_net(*maybe_residual_to_plain(module, residual), split_dim=split_dim,
coupling_forward=coupling_forward, coupling_inverse=coupling_inverse, memory_mode=memory_mode,
target_device=target_device, beta=beta, fused_optimizer=fused_optimizer) | 1,576 |
def version(): # static void version()
"""
    Print the application name and version, then flag the application to exit.
"""
print("%s %s\n" % (_appName_, _appVer_))
global exitApp
exitApp = True | 1,577 |
def user_view(request, name):
"""Render the view page for users"""
# argument is the login name, not the uuid in Cassandra
user = User.find(name)
if not user:
return redirect("users:home")
ctx = {
"req_user": request.user,
"user_obj": user,
"groups": [Group.find(gname) for gname in user.groups],
}
return render(request, "users/view.html", ctx) | 1,578 |
def grelha_nr_colunas(g):
"""
grelha_nr_colunas: grelha --> inteiro positivo
grelha_nr_colunas(g) devolve o numero de colunas da grelha g.
"""
return len(g[0]) | 1,579 |
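# Example: a grid with 2 rows and 3 columns.
print(grelha_nr_colunas([[0, 1, 2], [3, 4, 5]]))  # 3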
def rmsd(array_a, array_b):
"""
Calculate the RMSD between two 1d arrays
Parameters
----------
array_a, array_b : 1d numpy arrays
The arrays to be compared
Returns
-------
rmsd : float
The Root Mean Square Deviation of the elements of the array
"""
diff = array_a - array_b
diff2 = np.square(diff)
diff2_sum = np.sum(diff2)
norm_diff2_sum = diff2_sum/len(array_a)
rmsd = np.sqrt(norm_diff2_sum)
return rmsd | 1,580 |
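# Example usage of rmsd above:
import numpy as np
a = np.array([1.0, 2.0, 3.0])
b = np.array([1.0, 2.0, 5.0])
print(rmsd(a, b))  # sqrt(4/3) ~= 1.1547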
def readFile(sFile, sMode = 'rb'):
"""
Reads the entire file.
"""
    with open(sFile, sMode) as oFile:
        sRet = oFile.read()
    return sRet | 1,581
def VI_cgivens_d( a, b):
"""
returns cos, sin, r
"""
c = vsip_cmplx_d(0.0,0.0)
s = vsip_cmplx_d(0.0,0.0)
r = vsip_cmplx_d(0.0,0.0)
am = vsip_cmag_d(a)
bm = vsip_cmag_d(b)
if am == 0.0:
r.r = b.r; r.i=b.i;
s.r = 1.0;
else:
scale = am + bm;
alpha = vsip_cmplx_d(a.r/am, a.i/am)
scalesq = scale * scale
norm = scale * sqrt((am*am)/scalesq + (bm * bm)/scalesq)
c.r =am/norm
s.r = (alpha.r * b.r + alpha.i * b.i)/norm
s.i = (-alpha.r * b.i + alpha.i * b.r)/norm
r.r = alpha.r * norm; r.i = alpha.i * norm
return (c,s,r) | 1,582 |
def _no_op(data):
"""
An on_load_batch callback that does nothing.
""" | 1,583 |
def documint_request_factory(request):
"""
Create a function that issues a request to a Documint endpoint.
Status codes outside the 2xx range are treated as errors. If error
responses are JSON then `DocumintError` is raised, otherwise
`MalformedDocumintError` is raised.
If the status code indicates success, the `IResponse` is returned.
"""
def _raise_error(data, response):
if content_type(response.headers) == b'application/json':
try:
causes = json.loads(data).get(u'causes', [])
raise DocumintError(
causes=[DocumintErrorCause(cause.get(u'type'),
cause.get(u'reason'),
cause.get(u'description'))
for cause in causes])
except ValueError:
pass
raise MalformedDocumintError(data)
def _check_status(response):
if 200 <= response.code < 300:
return response
d = response.content()
d.addCallback(_raise_error, response)
return d
def _request(*a, **kw):
d = request(*a, **kw)
d.addCallback(_check_status)
return d
return _request | 1,584 |
def roipac_header(file_path, params):
"""
Function to obtain a header for roipac interferogram file or converted
geotiff.
"""
rsc_file = os.path.join(params[cf.DEM_HEADER_FILE])
if rsc_file is not None:
projection = parse_header(rsc_file)[ifc.PYRATE_DATUM]
else:
raise RoipacException('No DEM resource/header file is '
'provided')
if file_path.endswith('_dem.tif'):
header_file = os.path.join(params[cf.DEM_HEADER_FILE])
elif file_path.endswith('_unw.tif'):
base_file = file_path[:-8]
header_file = base_file + '.unw.' + ROI_PAC_HEADER_FILE_EXT
else:
header_file = "%s.%s" % (file_path, ROI_PAC_HEADER_FILE_EXT)
header = manage_header(header_file, projection)
return header | 1,585 |
def find_exe_in_path(exe, bypass_permissions_check=None, add_exe_to_path=None):
"""
Check that an executable exists in $PATH
"""
paths = os.environ["PATH"].split(os.pathsep)
for path in paths:
fullexe = os.path.join(path,exe)
if os.path.exists(fullexe):
if not bypass_permissions_check:
check_file_executable(fullexe)
if add_exe_to_path:
path=fullexe
return path
elif os.path.isdir(path):
# allow for filename filter matching
exematch = fnmatch.filter(os.listdir(path),exe)
if exematch and os.path.exists(os.path.join(path,exematch[0])):
if not bypass_permissions_check:
check_file_executable(os.path.join(path,exematch[0]))
if add_exe_to_path:
path=os.path.join(path,exematch[0])
return path
return None | 1,586 |
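# Usage sketch for find_exe_in_path above (requires the os and fnmatch imports the
# snippet itself relies on); the result depends on the local $PATH and is shown for
# a Unix-like system where 'sh' exists. bypass_permissions_check skips the external
# check_file_executable helper assumed elsewhere in the original code base.
import os
import fnmatch
print(find_exe_in_path("sh", bypass_permissions_check=True))                        # e.g. '/bin'
print(find_exe_in_path("sh", bypass_permissions_check=True, add_exe_to_path=True))  # e.g. '/bin/sh'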
def test_tb_pipeline():
"""
Test case to ensure that the Hi-C pipeline code works.
Running the pipeline with the test data from the command line:
.. code-block:: none
runcompss \\
--lang=python \\
--library_path=/home/compss/bin \\
--pythonpath=/<pyenv_virtenv_dir>//lib/python2.7/site-packages/ \\
--log_level=debug \\
process_hic.py \\
--taxon_id 9606 \\
--genome /<dataset_dir>/tb.Human.GCA_000001405.22_gem.fasta \\
--assembly GRCh38 \\
--file1 /<dataset_dir>/tb.Human.SRR1658573_1.fastq \\
--file2 /<dataset_dir>/tb.Human.SRR1658573_2.fastq \\
--genome_gem /<dataset_dir>/tb.Human.GCA_000001405.22_gem.fasta.gem \\
--taxon_id 9606 \\
--enzyme_name MboI \\
--resolutions 10000,100000 \\
--windows1 1,100 \\
--windows2 1,100 \\
--normalized 1 \\
--tag tb.Human.SRR1658573 \\
--window_type frag \\
"""
resource_path = os.path.join(os.path.dirname(__file__), "data/")
files = [
resource_path + 'tb.Human.GCA_000001405.22_gem.fasta',
resource_path + 'tb.Human.GCA_000001405.22_gem.fasta.gem',
resource_path + 'tb.Human.SRR1658573_1.fastq',
resource_path + 'tb.Human.SRR1658573_2.fastq'
]
metadata = {
'assembly': 'GRCh38',
'expt_name': 'tb.Human.SRR1658573',
'enzyme_name': 'MboI',
'windows1': ((1, '100')),
'windows2': ((1, '100')),
'window_type': 'frag',
'resolutions': [10000, 100000],
'normalized': False,
'hdf5': True,
}
hic_handle = process_hic({"execution": resource_path})
hic_files, hic_meta = hic_handle.run(files, metadata, []) # pylint: disable=unused-variable
print(hic_files)
# Add tests for all files created
assert os.path.isfile(hic_files[0]) is True
assert os.path.getsize(hic_files[0]) > 0 | 1,587 |
def skip_for_tf2(f):
"""Decorator that skips tests when using TensorFlow 2."""
def test_wrapper(*args, **kwargs):
"""Wraps the decorated function to determine whether to skip."""
# Extract test case instance from args.
self = args[0]
try:
# If tf.contrib doesn't exist, we are in TF 2.0.
_ = tf.contrib
_ = tf.contrib.estimator.regression_head(
loss_reduction=tf.compat.v1.losses.Reduction.SUM_OVER_BATCH_SIZE)
except (AttributeError, ImportError):
self.skipTest("Skipping test in TF 2.0.")
return f(*args, **kwargs)
return test_wrapper | 1,588 |
def clean_logfile(logfile_lines,to_remove):
"""Remove yaml fields from a list of lines.
Removes from a set of lines the yaml_fields contained in the to_remove list.
Arguments:
logfile_lines (list): list of the lines of the logfile. Generated from a file by e.g. :py:meth:`~io.IOBase.readlines`.
to_remove (list): list of keys to remove from logfile_lines
Returns:
list of lines where the removed keys have as values the `"<folded>"` string
"""
line_rev=logfile_lines #list of the lines of the logfile
#loop in the reversed from (such as to parse by blocks)
extra_lines=20 #internal variable to be customized
line_rev.reverse()
#clean the log
cleaned_logfile=[]
removed=[]
#for line in line_rev: #line_iter:
while len(line_rev) >0:
line=line_rev.pop()
to_print=line
#check if the line contains interesting information
for remove_it in to_remove :
stream_list=[]
#line without comments
valid_line=line.split('#')[0]
spaces='nospace'
            #control that the string between the key and the colon is only spaces
if remove_it in valid_line and ":" in valid_line:
#print "here",remove_it,remove_it in valid_line and ":" in valid_line,valid_line
starting_point=valid_line.find(remove_it)
tmp_buf=valid_line[:starting_point]
                #find the closest comma to the starting point, if exists
tmp_buf=tmp_buf[::-1]
starting_comma=tmp_buf.find(',')
                if starting_comma < 0: starting_comma = len(tmp_buf)
                tmp_buf = tmp_buf[:starting_comma]
tmp_buf=tmp_buf[::-1]
tmp_buf=tmp_buf.strip(' ')
#print "there",tmp_buf,'starting',starting_point,len(tmp_buf)
valid_line= valid_line[starting_point+len(remove_it):]
spaces= valid_line[1:valid_line.find(':')]
#if remove_it+':' in line.split('#')[0]:
if len(spaces.strip(' ')) == 0 and len(tmp_buf)==0: #this means that the key has been found
#creates a new Yaml document starting from the line
#treat the rest of the line following the key to be removed
header=''.join(line.split(':')[1:])
header=header.rstrip()+'\n'
#eliminate the anchor
header=header.lstrip(' ')
header=header.lstrip('*')
if len(header) > 0 :
stream_list.append(header)
#part to be printed, updated
to_print = line.split(':')[0] + ": <folded> \n"
#then check when the mapping will end:
while True:
#create a stream with extra_lines block
for i in range(0,min(extra_lines,len(line_rev))):
stream_list.append(line_rev.pop())
#create a stream to be parsed
stream=''.join(stream_list)
#then parse the stream until the last valid position has been found
try:
for i in yaml.parse(stream,Loader=yaml.CLoader):
endpos=i.end_mark.index
                    except Exception as e:
# print 'error',str(e),stream
#convert back the valid stream into a list
#if needed the stream can be loaded into a document
item_list=stream[:endpos].split('\n')
#if lengths are different there is no need to add lines
if len(item_list) != len(stream_list):
#last line might be shorter, therefore treat it separately
last_line=item_list.pop()
#purge the stream
for item in item_list:
stream_list.remove(item+'\n')
#extract the remaining line which should be compared with the last one
strip_size=len(last_line.rstrip())
if strip_size > 0:
first_line=stream_list.pop(0)[strip_size:]
if '*' in first_line or '&' in first_line:
first_line='' #eliminate anchors
else:
first_line=''
#then put the rest in the line to be treated
                        to_print = to_print.rstrip('\n')
to_print += first_line+'\n'
# the item has been found
break
stream_list.reverse()
#put back the unused part in the document
line_rev.extend(stream_list)
# mark that the key has been removed
if (remove_it not in removed):
removed.append(remove_it)
write('removed: ',remove_it)
# then print out the line
cleaned_logfile.append(to_print)
# check that everything has been removed, at least once
if (set(removed) != set(to_remove)):
write('WARNING, not all the requested items have been removed!')
write('To_remove : ',to_remove)
write('removed : ',removed)
write('Difference: ',list(set(to_remove) - set(removed) ))
return cleaned_logfile | 1,589 |
def callback_function(bus):
"""
Function we want to call from the background_thread function
This function will be called when an interrupt is triggered from
a state change on pin 1
"""
print("interrupt triggered")
if bus.read_pin(1) == 0:
print("pin 1 was set low")
else:
print("pin 1 was set high") | 1,590 |
def no_red_sum(tokens):
"""Using import json is cheating, let's parse it ourselves in a sinlge pass. Hope you like stacks."""
sums = [0]
stack = []
is_red = False
for token in tokens:
if token == 'red' and not is_red and stack[-1] == '{':
is_red = True
sums[-1] = 0
stack.append('red')
elif token == '{':
sums.append(0)
stack.append('{')
elif token == '}':
last_sum = sums.pop()
sums[-1] += last_sum
if stack[-1] == 'red':
stack.pop()
is_red = False
stack.pop()
elif token == '[':
stack.append('[')
sums.append(0)
elif token == ']':
stack.pop()
last_sum = sums.pop()
sums[-1] += last_sum
elif not is_red:
sums[-1] += neg_safe_cast(token)
assert len(sums) == 1
return sums.pop() | 1,591 |
def get_all_projects():
"""
Return a list with all the projects (open and closed).
"""
return gazu.project.all_projects() | 1,592 |
def test_view_permissions_has_all(authed_request, test_view):
"""Should be True if view lists multiple action permissions and all are on User"""
test = HasViewSetActionPermissions()
authed_request.user.perms.append('list_permission')
authed_request.user.perms.append('list_permission2')
test_view.action_user_permissions = {'list': ['list_permission', 'list_permission2']}
assert test.has_permission(authed_request, test_view) | 1,593 |
def install(opts):
"""
Install one or more resources.
"""
resources = _load(opts.resources, opts.output_dir)
if opts.all:
opts.resource_names = ALL
success = _install(resources, opts.resource_names, opts.mirror_url,
opts.destination, opts.skip_top_level)
if success:
if not opts.quiet:
print("All resources successfully installed")
return 0
else:
if not opts.quiet:
invalid = _invalid(resources, opts.resource_names)
print("Unable to install some resources: {}".format(', '.join(invalid)))
return 1 | 1,594 |
def seq2seq_att(mems, lengths, state, att_net=None):
"""
    :param mems: [B, T, D_mem] These are the memories.
        I call this variable "memory" because I think attention works just like reading something and then
        making alignments with your memories.
This memory here is usually the input hidden state of the encoder.
:param lengths: [B]
:param state: [B, D_state]
        I call this variable "state" because it is the state perceived at the current time step.
:param att_net: This is the attention network that will be used to calculate the alignment score between
state and memories.
input of the att_net is mems and state with shape:
mems: [exB, D_mem]
state: [exB, D_state]
return of the att_net is [exB, 1]
So any function that map a vector to a scalar could work.
:return: [B, D_result]
"""
d_state = state.size(1)
if not att_net:
return state
else:
batch_list_mems = []
batch_list_state = []
for i, l in enumerate(lengths):
b_mems = mems[i, :l] # [T, D_mem]
batch_list_mems.append(b_mems)
b_state = state[i].expand(b_mems.size(0), d_state) # [T, D_state]
batch_list_state.append(b_state)
packed_sequence_mems = torch.cat(batch_list_mems, 0) # [sum(l), D_mem]
packed_sequence_state = torch.cat(batch_list_state, 0) # [sum(l), D_state]
align_score = att_net(packed_sequence_mems, packed_sequence_state) # [sum(l), 1]
# The score grouped as [(a1, a2, a3), (a1, a2), (a1, a2, a3, a4)].
# aligned_seq = packed_sequence_mems * align_score
start = 0
result_list = []
for i, l in enumerate(lengths):
end = start + l
b_mems = packed_sequence_mems[start:end, :] # [l, D_mems]
b_score = align_score[start:end, :] # [l, 1]
softed_b_score = F.softmax(b_score.transpose(0, 1)).transpose(0, 1) # [l, 1]
weighted_sum = torch.sum(b_mems * softed_b_score, dim=0, keepdim=False) # [D_mems]
result_list.append(weighted_sum)
start = end
result = torch.stack(result_list, dim=0)
return result | 1,595 |
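# A shape-level usage sketch for seq2seq_att above (torch and torch.nn.functional
# as F are required by the function itself). DotScore is an illustrative attention
# network mapping (mems, state) pairs of shape [exB, D] to scores of shape [exB, 1].
import torch
import torch.nn as nn
import torch.nn.functional as F

class DotScore(nn.Module):
    def forward(self, mems, state):
        # dot-product alignment score per row
        return (mems * state).sum(dim=1, keepdim=True)

B, T, D = 3, 5, 8
mems = torch.randn(B, T, D)
state = torch.randn(B, D)
lengths = [5, 3, 4]
out = seq2seq_att(mems, lengths, state, att_net=DotScore())
print(out.shape)  # torch.Size([3, 8])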
def SaveImage(Im, fname, useCompression=True):
"""Save an image in any known format"""
# get file extension
ext = os.path.splitext(fname)[1].lower()
# dispatch based on file extension
if ext == '.npy':
SaveImageNPY(Im, fname)
elif ext == '.npz':
SaveImageNPZ(Im, fname, useCompression=useCompression)
elif ext == '.png':
common.SavePNGImage(Im, fname)
else:
try:
common.SaveITKImage(Im, fname, useCompression=useCompression)
except IOError:
raise Exception('File extension "'+ext+'" unknown.') | 1,596 |
def convert_for_webkit(new_path, filename, reference_support_info, host=Host()):
""" Converts a file's |contents| so it will function correctly in its |new_path| in Webkit.
    Returns the list of modified properties and the modified text if the file was modified, None otherwise."""
contents = host.filesystem.read_binary_file(filename)
converter = _W3CTestConverter(new_path, filename, reference_support_info, host)
if filename.endswith('.css'):
return converter.add_webkit_prefix_to_unprefixed_properties(contents.decode('utf-8'))
else:
converter.feed(contents.decode('utf-8'))
converter.close()
return converter.output() | 1,597 |
def test_atomic_positive_integer_min_exclusive_3_nistxml_sv_iv_atomic_positive_integer_min_exclusive_4_3(mode, save_output, output_format):
"""
Type atomic/positiveInteger is restricted by facet minExclusive with
value 506558727413711217.
"""
assert_bindings(
schema="nistData/atomic/positiveInteger/Schema+Instance/NISTSchema-SV-IV-atomic-positiveInteger-minExclusive-4.xsd",
instance="nistData/atomic/positiveInteger/Schema+Instance/NISTXML-SV-IV-atomic-positiveInteger-minExclusive-4-3.xml",
class_name="NistschemaSvIvAtomicPositiveIntegerMinExclusive4",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
) | 1,598 |
def create_partial_pickle(flnc_files, chunked_nfl_files, out_pickle):
"""
Parameters:
flnc_files -- full-length non-chimeric files in bins
chunked_nfl_files -- chunked non-chimeric files
out_pickle -- output pickle for saving PolishChunkTask objects
"""
n_bins = len(flnc_files)
n_nfl_chunks = max(1, len(chunked_nfl_files))
log.info("Writing %s ice_partial chunk tasks to %s.", str(n_bins * n_nfl_chunks), out_pickle)
p = ChunkTasksPickle()
for i, flnc_file in enumerate(flnc_files):
log.debug("Processing cluster bin index=%s.", i)
cluster_out_dir = _get_cluster_out_dir(flnc_file)
for j, nfl_file in enumerate(chunked_nfl_files):
# Create Partial chunk tasks.
task_ = PartialChunkTask(cluster_bin_index=i, flnc_file=flnc_file,
cluster_out_dir=cluster_out_dir,
nfl_file=nfl_file,
nfl_index=j, n_nfl_chunks=n_nfl_chunks)
p.append(task_)
p.write(out_pickle)
log.info("Saved %s partial chunk tasks to %s.", str(n_bins * n_nfl_chunks), out_pickle) | 1,599 |