content (string, lengths 22 to 815k) | id (int64, 0 to 4.91M)
---|---
def plot_2_3d(data2,best_array,n,rates,noises,save):
"""3d version of plots 2 based on Minh's code
Parameters
------
data2,best_array : array_like
`data2` and `best_array` defined above
n,rates,noises: list
`n` `rates` `list` lists of each parameter defined above
save : bool
`save` plot?
"""
#first make list of plots
colours = ["yellow","orangered","skyblue"]
"init and some labelling"
fig = plt.figure(figsize = (12,12))
ax = fig.add_subplot(111,projection='3d')
ax.set_xlabel('Observation Noise', labelpad = 20,fontsize = 22)
ax.set_ylabel("Assimilation Rate", labelpad = 20)
ax.set_zlabel('Grand L2 Error')
ax.view_init(30,225)
"take each rate plot l2 error over each noise for preds obs and ukf"
for i,rate in enumerate(rates):
sub_data = data2.loc[data2["rates"]==rate]
preds=list(sub_data["prediction"])
ukf=list(sub_data["ukf"])
obs=list(sub_data["obs"])
l1=ax.plot(xs=noises,ys=[i]*len(noises),zs=obs,color=colours[0],linewidth=4,
path_effects=[pe.Stroke(linewidth=6, foreground='k',alpha=1), pe.Normal()],alpha=0.8)
l2=ax.plot(xs=noises,ys=[i]*len(noises),zs=preds,color=colours[1],linewidth=4,linestyle = "-.",
path_effects=[pe.Stroke(linewidth=6, foreground='k',alpha=1), pe.Normal()],alpha=0.6)
l3=ax.plot(xs=noises,ys=[i]*len(noises),zs=ukf,color=colours[2],linewidth=4,linestyle = "--",
path_effects=[pe.Stroke(offset=(2,0),linewidth=6, foreground='k',alpha=1), pe.Normal()],alpha=1)
"placeholder dummies for legend"
s1=lines.Line2D([-1],[-1],color=colours[0],label="obs",linewidth=4,linestyle = "-",
path_effects=[pe.Stroke(linewidth=6, foreground='k',alpha=1), pe.Normal()])
s2 = lines.Line2D([-1],[-1],color=colours[1],label="preds",linewidth=4,linestyle = "-.",
path_effects=[pe.Stroke(linewidth=6, foreground='k',alpha=1), pe.Normal()])
s3 = lines.Line2D([-1],[-1],color=colours[2],label="ukf",linewidth=4,linestyle = "--",
path_effects=[pe.Stroke(offset=(2,0),linewidth=6, foreground='k',alpha=1), pe.Normal()])
"rest of labelling"
ax.set_xticks(np.arange(0,len(noises)))
ax.set_xticklabels(noises)
ax.set_yticks(np.arange(0,len(rates)))
ax.set_yticklabels(rates)
ax.legend([s1,s2,s3],["obs","preds","ukf"])
plt.tight_layout()
"save?"
if save:
plt.savefig(f"3d_{n}_error_trajectories.pdf") | 2,100 |
def Float(request):
"""
A simple form with a single float field
"""
schema = schemaish.Structure()
schema.add('myFloatField', schemaish.Float())
form = formish.Form(schema, 'form')
return form | 2,101 |
def AddMatcher(parser, required=True):
"""Adds the matcher arguments to the argparse."""
matcher = parser.add_group(
mutex=True, required=required, help='Security policy rule matcher.')
matcher.add_argument(
'--src-ip-ranges',
type=arg_parsers.ArgList(),
metavar='SRC_IP_RANGE',
help=('The source IPs/IP ranges to match for this rule. '
'To match all IPs specify *.'))
matcher.add_argument(
'--expression',
help='The Cloud Armor rules language expression to match for this rule.') | 2,102 |
def pca(X):
"""
Returns the eigenvectors U, the eigenvalues (on diagonal) in S.
Args:
X: array(# of training examples, n)
Returns:
U: array(n, n)
S: array(n, n)
"""
# Get some useful values
m, n = X.shape  # X is 2-D: (# of training examples, n)
# Init U and S.
U = np.zeros(n)
S = np.zeros(n)
# When computing the covariance matrix, we have
# to divide by m (the number of examples).
sigma = (1. / m) * np.dot(X.T, X)
# Compute the eigenvectors and eigenvalues
# of the covariance matrix.
U, S, V = linalg.svd(sigma)
S = linalg.diagsvd(S, len(S), len(S))
return U, S | 2,103 |
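A minimal usage sketch (hypothetical data, not part of the original row): projecting toy 2-D samples onto the first principal component returned by `pca`, assuming `numpy` and `scipy.linalg` are available as `np` and `linalg` as in the function body.
import numpy as np
from scipy import linalg

# Hypothetical zero-mean toy data: 5 examples, 2 features.
X = np.array([[2.0, 1.9], [-1.5, -1.4], [0.5, 0.6], [-0.9, -1.1], [-0.1, 0.0]])
U, S = pca(X)                  # eigenvectors in the columns of U, eigenvalues on the diagonal of S
Z = X.dot(U[:, :1])            # project onto the first principal component
print(U.shape, S.shape, Z.shape)   # (2, 2) (2, 2) (5, 1)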
def read_py_version(script_name, search_path):
"""Read the version of a script from a python file"""
file, pathname, desc = imp.find_module(script_name, [search_path])
try:
new_module = imp.load_module(script_name, file, pathname, desc)
if hasattr(new_module.SCRIPT, "version"):
return new_module.SCRIPT.version
except:
pass
return None | 2,104 |
def post_check_variable(team_id, source_id, check_id):
"""
.. :quickref: POST; Lorem ipsum."""
if not TeamPermission.is_manager_or_editor(team_id):
abort(403)
payload = get_payload()
payload.update({"team_id": team_id, "source_id": source_id, "check_id": check_id})
variable = VariableController.create(payload)
return jsonify(format_variable(variable)), 200 | 2,105 |
def diff_re(a, b, fromfile='', tofile='',
fromfiledate='', tofiledate='', n=3, lineterm='\n'):
"""
A simple "diff" of two sets of lines when the expected lines
are regular expressions. This is a really dumb thing that
just compares each line in turn, so it doesn't look for
chunks of matching lines and the like--but at least it lets
you know exactly which line first didn't compare correctly.
"""
result = []
diff = len(a) - len(b)
if diff < 0:
a = a + [''] * (-diff)
elif diff > 0:
b = b + [''] * diff
i = 0
for aline, bline in zip(a, b):
s = "^" + aline + "$"
try:
expr = re.compile(s)
except re.error as e:
msg = "Regular expression error in %s: %s"
raise re.error(msg % (repr(s), e.args[0]))
if not expr.search(bline):
result.append("%sc%s" % (i + 1, i + 1))
result.append('< ' + repr(a[i]))
result.append('---')
result.append('> ' + repr(b[i]))
i = i + 1
return result | 2,106 |
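A small usage sketch with hypothetical inputs, showing the diff-style output produced when an expected regex line does not match:
expected = [r"version \d+\.\d+", r"status: (ok|ready)"]
actual = ["version 1.2", "status: failed"]
for line in diff_re(expected, actual):
    print(line)
# 2c2
# < 'status: (ok|ready)'
# ---
# > 'status: failed'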
def storeAgent(sess, agentObj):
"""
INPUT : session object
OUTPUT : Updated agent Onject
DESCRIPTION : Updates the agent object in that session
"""
currAgents = getCurrGen(sess)
lock(sess)
try:
if(sess.mode == 'SAFE'):
tpfp = open(GA_UTIL_DIR+"/utilFiles/tmp"+str(agentObj.sessID)+"/dnaPool/dna"
+str(agentObj.agentID)+".dna", "wb")
pickle.dump(agentObj, tpfp)
tpfp.close()
currAgents.add(agentObj.agentID)
else:
sess.agentBasket[agentObj.agentID] = agentObj
currAgents.add(agentObj.agentID)
except Exception:
print("error in store agent, couldnt wb")
return(0)
setCurrGen( sess, currAgents)
unlock(sess)
return(agentObj.agentID) | 2,107 |
def info(device):
"""
Get filesystem geometry information.
CLI Example:
.. code-block:: bash
salt '*' xfs.info /dev/sda1
"""
out = __salt__["cmd.run_all"]("xfs_info {}".format(device))
if out.get("stderr"):
raise CommandExecutionError(out["stderr"].replace("xfs_info:", "").strip())
return _parse_xfs_info(out["stdout"]) | 2,108 |
def serialize_framework_build_config(dict_: Union[Dict[str, str], str]) -> Tuple[Any, ...]:
"""Serialize a dict to a hashable tuple.
Parameters
----------
dict_: Dict[str, str]
Returns
-------
hashable_tuple: Tuple[Any, ...]
A hashable tuple.
"""
if isinstance(dict_, dict):
return tuple(sorted(list(dict_.items())))
return (dict_,) | 2,109 |
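A brief illustration with hypothetical values, showing how the returned tuple can serve as a dictionary or cache key:
config = {"framework": "pytorch", "version": "1.9", "cuda": "11.1"}
key = serialize_framework_build_config(config)
print(key)   # (('cuda', '11.1'), ('framework', 'pytorch'), ('version', '1.9'))
cache = {key: "build artifacts"}                        # tuples of strings are hashable
print(serialize_framework_build_config("cpu-only"))     # ('cpu-only',)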
def _check_columns(data: pd.DataFrame,
features: list) -> pd.DataFrame:
"""
Given a dataframe and a list of expected features, print missing columns and return new dataframe
with only valid features
Parameters
-----------
data: Pandas.DataFrame
DataFrame for checking
features: list
list of features (column names)
Returns
---------
Pandas.DataFrame
new 'valid' DataFrame
"""
valid_features = [f for f in features if f in data.columns]
if len(valid_features) < len(features):
print(f'The following features are missing from the training data and will be excluded from the '
f'model {list(set(features) - set(valid_features))}')
return data[valid_features] | 2,110 |
def get_confidence(imgfilename):
"""
1003_c60.jpg -> c6
"""
if not imgfilename:
return ''
# Take the digit that follows "_c" in the basename, per the docstring example above.
return 'c' + imgfilename.split('/')[-1].split('_c')[-1][0:1] | 2,111 |
def test_connection_proxy_api_wrong_certs(app):
"""Connecting to the proxy api fails without correct certs"""
with pytest.raises(SSLError):
kwargs = {'verify': False}
r = yield async_requests.get(app.proxy.api_url, **kwargs)
r.raise_for_status() | 2,112 |
def get_connection(hostname: str,
port: int,
username: str,
password: str):
"""
DBへのコネクションを取得します。
Returns:
Connection: コネクション
"""
return pymysql.connect(
host=hostname,
port=port,
user=username,
password=password,
cursorclass=cursors.DictCursor
) | 2,113 |
def test_generate_init_open_alchemy():
"""
GIVEN name and version
WHEN generate_init_open_alchemy is called with the name and version
THEN the __init__.py file contents with the name and version are returned.
"""
returned_contents = build.generate_init_open_alchemy()
expected_contents = """import pathlib
from open_alchemy import init_json
parent_path = pathlib.Path(__file__).parent.absolute()
init_json(parent_path / "spec.json")"""
assert returned_contents == expected_contents | 2,114 |
def get_alleles_existing_alleleinterpretation(
session, allele_filter, user=None, page=None, per_page=None
):
"""
Returns allele_ids that has connected AlleleInterpretations,
given allele_filter from argument.
Supports pagination.
"""
# Apply filter using Allele table as base
allele_ids = session.query(allele.Allele.id).filter(allele_filter)
# Now get the ones that are actually connected to AlleleInterpretation
# (distinct allele_ids sorted by date_last_update)
alleleinterpretation_allele_ids = (
session.query(workflow.AlleleInterpretation.allele_id)
.filter(workflow.AlleleInterpretation.allele_id.in_(allele_ids))
.group_by(workflow.AlleleInterpretation.allele_id)
.order_by(func.max(workflow.AlleleInterpretation.date_last_update).desc())
)
count = alleleinterpretation_allele_ids.count()
if page and per_page:
start = (page - 1) * per_page
end = page * per_page
alleleinterpretation_allele_ids = alleleinterpretation_allele_ids.slice(start, end)
alleleinterpretation_allele_ids = alleleinterpretation_allele_ids.all()
return alleleinterpretation_allele_ids, count | 2,115 |
def _patched_copy_file(
src_file, dest_file, hide_listing=True, preserve_mode=True
):
"""Copy ``src_file`` to ``dest_file`` ensuring parent directory exists.
By default, message like `creating directory /path/to/package` and
`copying directory /src/path/to/package -> path/to/package` are displayed
on standard output. Setting ``hide_listing`` to False avoids message from
being displayed.
NB: Patched here to not follow symbolic links
"""
# Create directory if needed
dest_dir = os.path.dirname(dest_file)
if dest_dir != "" and not os.path.exists(dest_dir):
if not hide_listing:
print("creating directory {}".format(dest_dir))
skbuild.utils.mkdir_p(dest_dir)
# Copy file
if not hide_listing:
print("copying {} -> {}".format(src_file, dest_file))
shutil.copyfile(src_file, dest_file, follow_symlinks=False)
shutil.copymode(src_file, dest_file, follow_symlinks=False) | 2,116 |
def _get_session(db_uri, use_batch_mode=True, echo=False):
"""Helper to get an SQLAlchemy DB session"""
# `use_batch_mode` is experimental currently, but needed for `executemany`
#engine = create_engine(db_uri, use_batch_mode=use_batch_mode, echo=echo)
engine = create_engine(db_uri, echo=echo)
Base.metadata.create_all(engine)
Session = sessionmaker(bind=engine)
session = Session()
try:
connection = session.connection()
logging.info('Successfully connected to database.')
except Exception as exc:
raise RuntimeError(f'Couldn\'t connect to db: {db_uri}') from exc
return session | 2,117 |
def has_extension(experiment: Experiment, name: str) -> bool:
"""
Check if an extension is declared in this experiment.
"""
return get_extension(experiment, name) is not None | 2,118 |
def test_get_plaid_accounts(lunch_money_obj: LunchMoney):
"""
Get Plaid Account and Assert it's a Plaid Account
"""
plaid_accounts = lunch_money_obj.get_plaid_accounts()
assert len(plaid_accounts) >= 1
for plaid_account in plaid_accounts:
assert isinstance(plaid_account, PlaidAccountObject)
logger.info("%s Plaid Accounts returned", len(plaid_accounts)) | 2,119 |
def machine_is_valid(cloud_machine, accounts):
"""
As the criteria for "what makes a glance image an atmosphere ProviderMachine" changes, we can use this function to hook out to external plugins, etc.
Filters out:
- ChromoSnapShot, eri-, eki-
- Private images not shared with atmosphere accounts
- Domain-specific image catalog(?)
"""
provider = accounts.core_provider
# If the name of the machine indicates that it is a Ramdisk, Kernel, or Chromogenic Snapshot, skip it.
if any(cloud_machine.name.startswith(prefix) for prefix in ['eri-','eki-', 'ChromoSnapShot']):
celery_logger.info("Skipping cloud machine %s" % cloud_machine)
return False
# If the metadata 'skip_atmosphere' is found, do not add the machine.
if cloud_machine.get('skip_atmosphere', False):
celery_logger.info("Skipping cloud machine %s - Includes 'skip_atmosphere' metadata" % cloud_machine)
return False
# If the metadata indicates that the image-type is snapshot -- skip it.
if cloud_machine.get('image_type', 'image') == 'snapshot':
celery_logger.info("Skipping cloud machine %s - Image type indicates a snapshot" % cloud_machine)
return False
owner_project = _get_owner(accounts, cloud_machine)
# If the image is private, ensure that an owner can be found inside the system.
if cloud_machine.get('visibility', '') == 'private':
shared_with_projects = accounts.shared_images_for(cloud_machine.id)
shared_with_projects.append(owner_project)
project_names = [p.name for p in shared_with_projects if p] # TODO: better error handling here
identity_matches = provider.identity_set.filter(
credential__key='ex_project_name', credential__value__in=project_names).count() > 0
if not identity_matches:
celery_logger.info("Skipping private machine %s - The owner does not exist in Atmosphere" % cloud_machine)
return False
if accounts.provider_creds.get('ex_force_auth_version', '2.0_password') != '3.x_password':
return True
# NOTE: Potentially if we wanted to do 'domain-restrictions' *inside* of atmosphere,
# we could do that (based on the domain of the image owner) here.
domain_id = owner_project.domain_id
config_domain = accounts.get_config('user', 'domain', 'default')
owner_domain = accounts.openstack_sdk.identity.get_domain(domain_id)
account_domain = accounts.openstack_sdk.identity.get_domain(config_domain)
if owner_domain.id != account_domain.id: # and if FLAG FOR DOMAIN-SPECIFIC ATMOSPHERE
celery_logger.info("Skipping private machine %s - The owner belongs to a different domain (%s)" % (cloud_machine, owner_domain))
return False
return True | 2,120 |
def remove_dups(list, key):
"""Returns a new list without duplicates.
Given two elements e1, e2 from list, e1 is considered to be a duplicate of e2
if key(e1) == key(e2).
Args:
list: the list to read from.
key: a function that receives an element from list and returns its key.
Yields:
unique elements of the given list
"""
keys = set()
for a in list:
k_a = key(a)
if k_a not in keys:
keys.add(k_a)
yield a | 2,121 |
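A short usage sketch (hypothetical records); note the function is a generator, so the result is wrapped in `list`:
records = [{"id": 1, "name": "a"}, {"id": 2, "name": "b"}, {"id": 1, "name": "a-dup"}]
unique = list(remove_dups(records, key=lambda r: r["id"]))
print([r["name"] for r in unique])   # ['a', 'b'] - later duplicates (by id) are dropped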
def ReadPickledNetworkxGraphs() -> Iterable[Tuple[str, nx.MultiDiGraph]]:
"""Read the pickled networkx graphs."""
with NETWORKX_GRAPHS_ARCHIVE as pickled_dir:
for path in pickled_dir.iterdir():
with open(path, "rb") as f:
yield path.name, pickle.load(f) | 2,122 |
def min_max_median(lst):
""" a function that takes a simple list of numbers lst as a parameter and returns a list with the min, max, and the median of lst. """
s = sorted(lst)
n = len(s)
return [ s[0], s[-1], s[n//2] if n % 2 == 1 else (s[n//2 - 1] + s[n//2]) / 2] | 2,123 |
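Two quick examples of the even/odd median handling:
print(min_max_median([7, 1, 5, 3]))   # [1, 7, 4.0] - even length: mean of the two middle values
print(min_max_median([9, 2, 4]))      # [2, 9, 4]   - odd length: the middle value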
def Tree(tree=tree):
"""the main recursive fonction that is responsible of reading the tree and deciding witch node is next
This fonction takes the cuurent position in the tree (current node), do the processing and end up with a recursive call
with the next node
Args:
tree (obj): a node of the tree (start by default)
"""
# make sure no temp file is left in _data
clean_cache()
# if tree is a tag, search it
if type(tree)==str:
tree= get_tree_by_tag(tree)
if not tree:
return
# check for screen mode
if not str_to_bool(config['screen_mode']) and "screen" in tree['config']:
text_to_speech("Cette action n'est pas possible car le mode écran est désactivé")
Tree("start")
# Do a random choices of sentences to say
text= tree['text']
say=""
for choices in text:
choice= random.choice(choices)
say+= analyse_var(choice,tree)
# add an end of sentence point where needed
if say.strip()[-1]!="?" and say.strip()[-1]!="!":
say+=","
text_to_speech(say)
# Choose the next step based on the request of the user
action= tree['action']
step = take_action(action,tree)
# stop the fonction if the user asked for
if step==EXIT_TREE:
return
else:
Tree(step) | 2,124 |
def _CreateRJavaSourceFile(srcjar_dir, package, resources_by_type,
rjava_build_options):
"""Generates an R.java source file."""
package_r_java_dir = os.path.join(srcjar_dir, *package.split('.'))
build_utils.MakeDirectory(package_r_java_dir)
package_r_java_path = os.path.join(package_r_java_dir, 'R.java')
java_file_contents = _RenderRJavaSource(package, resources_by_type,
rjava_build_options)
with open(package_r_java_path, 'w') as f:
f.write(java_file_contents) | 2,125 |
async def test_unaviable_on_update_failure(hass: HomeAssistant) -> None:
"""Test entity unaviable on update failure."""
await setup_uptimerobot_integration(hass)
entity = hass.states.get(UPTIMEROBOT_SENSOR_TEST_ENTITY)
assert entity.state == STATE_UP
with patch(
"pyuptimerobot.UptimeRobot.async_get_monitors",
side_effect=UptimeRobotAuthenticationException,
):
async_fire_time_changed(hass, dt.utcnow() + COORDINATOR_UPDATE_INTERVAL)
await hass.async_block_till_done()
entity = hass.states.get(UPTIMEROBOT_SENSOR_TEST_ENTITY)
assert entity.state == STATE_UNAVAILABLE | 2,126 |
def _extract_decimal_with_text_az(tokens, short_scale, ordinals):
"""
Extract decimal numbers from a string.
This function handles text such as '2 nöqtə 5'.
Notes:
While this is a helper for extractnumber_az, it also depends on
extractnumber_az, to parse out the components of the decimal.
This does not currently handle things like:
number dot number number number
Args:
tokens [Token]: The text to parse.
short_scale boolean:
ordinals boolean:
Returns:
(float, [Token])
The value found and relevant tokens.
(None, None) if no decimal value is found.
"""
for c in _DECIMAL_MARKER_AZ:
partitions = partition_list(tokens, lambda t: t.word == c)
if len(partitions) == 3:
numbers1 = \
_extract_numbers_with_text_az(partitions[0], short_scale,
ordinals, fractional_numbers=False)
numbers2 = \
_extract_numbers_with_text_az(partitions[2], short_scale,
ordinals, fractional_numbers=False)
if not numbers1 or not numbers2:
return None, None
number = numbers1[-1]
decimal = numbers2[0]
# TODO handle number dot number number number
if "." not in str(decimal.text):
return number.value + float('0.' + str(decimal.value)), \
number.tokens + partitions[1] + decimal.tokens
return None, None | 2,127 |
def assign_read_kmers_to_contigs_new(kmer_ii, ambiguous_kmer_counts, unambiguous_contig_counts, contig_abundances):
"""
Assign ambiguous read k-mers based on contig averages counts.
"""
contig_counts = copy.deepcopy(unambiguous_contig_counts)
contig_location_tuples = []
total_abundance = 0
# Cycle through all ambiguous k-mers and assign them.
for kmer in ambiguous_kmer_counts.keys():
# and randomly assign the count to one of the items.
contig_location_tuples = kmer_ii[kmer]
#print 'Kmer:\t' + kmer
#print 'Count:\t' + str(ambiguous_kmer_counts[kmer])
#print 'Contig_locations:'
#pprint.pprint(contig_location_tuples)
# and randomly assign the count to one of the items.
#contigs_containing_kmer = accumulate(kmer_ii[kmer])
#print kmer +'\t',
contigs_containing_kmer = list(accumulate(contig_location_tuples))
#print contigs_containing_kmer
# Calculate total abundance
for contig in contigs_containing_kmer:
total_abundance += contig_abundances[contig[0]]
# Assign fractional counts based on total abundances.
for contig in contigs_containing_kmer:
#total_abundance += contig_abundances[contig[0]]
#print 'Assigning\t' + str(contig_abundances[contig[0]] * ambiguous_kmer_counts[kmer] / total_abundance) + '\tto\t' + contig[0]
contig_counts[contig[0]] += (contig_abundances[contig[0]] * ambiguous_kmer_counts[kmer] / total_abundance)
total_abundance = 0
#for i in xrange(0, ambiguous_kmer_counts[kmer]):
# contig = random.choice(contig_location_tuples)[0]
# #print "Selecting contig:\t" + contig
# contig_counts[contig] += 1
return contig_counts | 2,128 |
def test_file_analyser(
tmpdir: "StrPathLike",
contents: str,
expected_n_lines: int,
expected_n_definitions: int,
expected_avg_lines: float,
) -> None:
"""Tests the results of a file analyser.
Args:
contents: Source code to be analysed
expected_n_lines: Expected total number of lines
expected_n_definitions: Expected total number of definitions
expected_avg_lines: Expected average number of lines per definition
tmpdir: temporary directory
"""
from funk_lines.core.analysers import file_analyser
tmp_fp = pathlib.Path(tmpdir) / "foo.py"
tmp_fp.write_text(contents)
analyser = file_analyser.FileAnalyser(tmp_fp)
results = analyser.analyse()
assert results.total_lines == expected_n_lines
assert results.nbr_definitions == expected_n_definitions
assert results.lines_per_function == expected_avg_lines | 2,129 |
def cremi_scores(seg, gt, border_threshold=None, return_all=True):
"""
Compute the cremi scores (Average of adapted rand error, vi-split, vi-merge)
Parameters
----------
seg: np.ndarray - the candidate segmentation
gt: np.ndarray - the groundtruth
border_threshold: value by which the border is eroded (default: None = no erosion)
Returns
-------
cremi-score: average of rand error, vi-split, vi-merge
vi-split: variation of information, split score
vi-merge: variation of information, merge score
adapted rand: adapted rand error
"""
assert seg.shape == gt.shape, "%s, %s" % (str(seg.shape), str(gt.shape))
# compute border threshold if specified
if border_threshold is not None:
xy_resolution = 4.
gt_ = create_border_mask(gt, border_threshold / xy_resolution, np.uint64(-1))
# add 1 to map back to 0 as lowest label
gt_ += 1
else:
gt_ = gt
## Try except because sometimes both have nothing in them.
try:
vi_s, vi_m = voi(seg, gt_)
are = adapted_rand(seg, gt_)
cs = (vi_s + vi_m + are) / 3
except:
cs = np.nan
vi_s = np.nan
vi_m = np.nan
are = np.nan
if return_all:
return {'cremi-score': cs, 'vi-split': vi_s, 'vi-merge': vi_m, 'adapted_rand': are}
else:
return cs | 2,130 |
def run_testlauncher(package):
"""Run test launcher"""
from qtpy.QtWidgets import QApplication
app = QApplication([])
win = TestLauncherWindow(package)
win.show()
app.exec_() | 2,131 |
def test_force_single_line_imports() -> None:
"""Test to ensure forcing imports to each have their own line works as expected."""
test_input = (
"from third_party import lib1, lib2, \\\n"
" lib3, lib4, lib5, lib6, lib7, \\\n"
" lib8, lib9, lib10, lib11, lib12, \\\n"
" lib13, lib14, lib15, lib16, lib17, \\\n"
" lib18, lib20, lib21, lib22\n"
)
test_output = isort.code(
code=test_input, multi_line_output=WrapModes.GRID, line_length=40, force_single_line=True
)
assert test_output == (
"from third_party import lib1\n"
"from third_party import lib2\n"
"from third_party import lib3\n"
"from third_party import lib4\n"
"from third_party import lib5\n"
"from third_party import lib6\n"
"from third_party import lib7\n"
"from third_party import lib8\n"
"from third_party import lib9\n"
"from third_party import lib10\n"
"from third_party import lib11\n"
"from third_party import lib12\n"
"from third_party import lib13\n"
"from third_party import lib14\n"
"from third_party import lib15\n"
"from third_party import lib16\n"
"from third_party import lib17\n"
"from third_party import lib18\n"
"from third_party import lib20\n"
"from third_party import lib21\n"
"from third_party import lib22\n"
)
test_input = (
"from third_party import lib_a, lib_b, lib_d\n" "from third_party.lib_c import lib1\n"
)
test_output = isort.code(
code=test_input, multi_line_output=WrapModes.GRID, line_length=40, force_single_line=True
)
assert test_output == (
"from third_party import lib_a\n"
"from third_party import lib_b\n"
"from third_party import lib_d\n"
"from third_party.lib_c import lib1\n"
) | 2,132 |
def process_work_records(max_rows=20, record_id=None):
"""Process uploaded work records."""
set_server_name()
task_ids = set()
work_ids = set()
"""This query is to retrieve Tasks associated with work records, which are not processed but are active"""
tasks = (Task.select(
Task, WorkRecord, WorkInvitee, User,
UserInvitation.id.alias("invitation_id"), OrcidToken).where(
WorkRecord.processed_at.is_null(), WorkInvitee.processed_at.is_null(),
WorkRecord.is_active,
(OrcidToken.id.is_null(False)
| ((WorkInvitee.status.is_null())
| (WorkInvitee.status.contains("sent").__invert__())))).join(
WorkRecord, on=(Task.id == WorkRecord.task_id).alias("record")).join(
WorkInvitee,
on=(WorkRecord.id == WorkInvitee.record_id).alias("invitee")).join(
User,
JOIN.LEFT_OUTER,
on=((User.email == WorkInvitee.email)
| ((User.orcid == WorkInvitee.orcid)
& (User.organisation_id == Task.org_id)))).join(
Organisation,
JOIN.LEFT_OUTER,
on=(Organisation.id == Task.org_id)).join(
UserOrg,
JOIN.LEFT_OUTER,
on=((UserOrg.user_id == User.id)
& (UserOrg.org_id == Organisation.id))).
join(UserInvitation,
JOIN.LEFT_OUTER,
on=((UserInvitation.email == WorkInvitee.email)
& (UserInvitation.task_id == Task.id))).join(
OrcidToken,
JOIN.LEFT_OUTER,
on=((OrcidToken.user_id == User.id)
& (OrcidToken.org_id == Organisation.id)
& (OrcidToken.scopes.contains("/activities/update")))))
if record_id:
tasks = tasks.where(WorkRecord.id == record_id)
tasks = tasks.order_by(Task.id, Task.org_id, WorkRecord.id, User.id).limit(max_rows)
tasks = list(tasks)
for (task_id, org_id, record_id, user), tasks_by_user in groupby(tasks, lambda t: (
t.id,
t.org_id,
t.record.id,
t.record.invitee.user,)):
# If we have the token associated to the user then update the work record,
# otherwise send him an invite
if (user.id is None or user.orcid is None or not OrcidToken.select().where(
(OrcidToken.user_id == user.id) & (OrcidToken.org_id == org_id)
& (OrcidToken.scopes.contains("/activities/update"))).exists()): # noqa: E127, E129
for k, tasks in groupby(
tasks_by_user,
lambda t: (
t.created_by,
t.org,
t.record.invitee.email,
t.record.invitee.first_name,
t.record.invitee.last_name, )
): # noqa: E501
email = k[2]
token_expiry_in_sec = 2600000
status = "The invitation sent at " + datetime.utcnow().isoformat(
timespec="seconds")
try:
# For researcher invitation the expiry is 30 days, if it is reset then it is 2 weeks.
if WorkInvitee.select().where(
WorkInvitee.email == email, WorkInvitee.status ** "%reset%").count() != 0:
token_expiry_in_sec = 1300000
send_user_invitation(
*k,
task_id=task_id,
token_expiry_in_sec=token_expiry_in_sec)
(WorkInvitee.update(status=WorkInvitee.status + "\n" + status).where(
WorkInvitee.status.is_null(False), WorkInvitee.email == email).execute())
(WorkInvitee.update(status=status).where(
WorkInvitee.status.is_null(), WorkInvitee.email == email).execute())
except Exception as ex:
(WorkInvitee.update(
processed_at=datetime.utcnow(),
status=f"Failed to send an invitation: {ex}.").where(
WorkInvitee.email == email,
WorkInvitee.processed_at.is_null())).execute()
else:
create_or_update_work(user, org_id, tasks_by_user)
task_ids.add(task_id)
work_ids.add(record_id)
for record in WorkRecord.select().where(WorkRecord.id << work_ids):
# The Work record is processed for all invitees
if not (WorkInvitee.select().where(
WorkInvitee.record_id == record.id,
WorkInvitee.processed_at.is_null()).exists()):
record.processed_at = datetime.utcnow()
if not record.status or "error" not in record.status:
record.add_status_line("Work record is processed.")
record.save()
for task in Task.select().where(Task.id << task_ids):
# The task is completed (Once all records are processed):
if not (WorkRecord.select().where(WorkRecord.task_id == task.id, WorkRecord.processed_at.is_null()).exists()):
task.completed_at = datetime.utcnow()
task.save()
error_count = WorkRecord.select().where(
WorkRecord.task_id == task.id, WorkRecord.status**"%error%").count()
row_count = task.record_count
with app.app_context():
export_url = flask.url_for(
"workrecord.export",
export_type="json",
_scheme="http" if EXTERNAL_SP else "https",
task_id=task.id,
_external=True)
send_email(
"email/work_task_completed.html",
subject="Work Process Update",
recipient=(task.created_by.name, task.created_by.email),
error_count=error_count,
row_count=row_count,
export_url=export_url,
task_name="Work",
filename=task.filename) | 2,133 |
def test_IterateOverTuples():
"""
@brief Demonstrates that we can iterate over a tuple.
"""
test_on_data_1 = IterateOverTuples.get_data_test() == (1, 7, 2)
test_on_data_2 = IterateOverTuples.get_data_tswift() == (2008, 2014, 5)
assert(test_on_data_1 and test_on_data_2) | 2,134 |
def _coio_rebase(helper_module):
"""Rebase classes `tasklet' and `bomb' from those in the helper_module."""
global tasklet
global bomb
global current
global main
global _tasklets_created
is_tasklet_ok = list(tasklet.__bases__) == [helper_module.tasklet]
if is_tasklet_ok and bomb is helper_module.bomb:
return
if main is not current:
raise ImportTooLateError
if main.next is not main:
raise ImportTooLateError
# We should check for the number of bombs as well, but that would be too
# much work.
if _tasklets_created != 1:
raise ImportTooLateError
if not is_tasklet_ok:
# This would be easier: tasklet.__bases__ = (helper_module.tasklet,)
# But it doesn't work: TypeError("__bases__ assignment: 'tasklet' deallocator differs from 'object'")
dict_obj = dict(tasklet.__dict__)
dict_obj['__slots__'] = _process_slots(
dict_obj['__slots__'], helper_module.tasklet, dict_obj)
#old_tasklet = tasklet
tasklet.__new__ = classmethod(_new_too_late)
tasklet = type(tasklet.__name__, (helper_module.tasklet,), dict_obj)
current = main = _get_new_main()
_tasklets_created = 1
assert type(main) is tasklet
#del old_tasklet
if bomb is not helper_module.bomb:
bomb.__new__ = classmethod(_new_too_late)
bomb = helper_module.bomb | 2,135 |
def sign_transaction(transaction_dict, private_key) -> SignedTransaction:
"""
Sign a (non-staking) transaction dictionary with the specified private key
Parameters
----------
transaction_dict: :obj:`dict` with the following keys
nonce: :obj:`int` Transaction nonce
gasPrice: :obj:`int` Transaction gas price in Atto
gas: :obj:`int` Gas limit in Atto
to: :obj:`str` Destination address
value: :obj:`int` Amount to be transferred in Atto
data: :obj:`str` Transaction data, used for smart contracts
from: :obj:`str` From address, optional (if passed, must match the
public key address generated from private_key)
chainId: :obj:`int` One of util.chainIds.keys(), optional
If you want to replay your transaction across networks, do not pass it
shardID: :obj:`int` Originating shard ID, optional (needed for cx shard transaction)
toShardID: :obj:`int` Destination shard ID, optional (needed for cx shard transaction)
r: :obj:`int` First 32 bytes of the signature, optional
s: :obj:`int` Next 32 bytes of the signature, optional
v: :obj:`int` Recovery value, optional
private_key: :obj:`str` The private key
Returns
-------
A SignedTransaction object, which is a named tuple
rawTransaction: :obj:`str` Hex bytes of the raw transaction
hash: :obj:`str` Hex bytes of the transaction hash
r: :obj:`int` First 32 bytes of the signature
s: :obj:`int` Next 32 bytes of the signature
v: :obj:`int` Recovery value
Raises
------
TypeError, if the from address specified is not the same
one as derived from the private key
AssertionError, if the fields for the transaction are missing,
or if the chainId supplied is not a string,
or if the chainId is not a key in util.py
API Reference
-------------
https://readthedocs.org/projects/eth-account/downloads/pdf/stable/
"""
account, sanitized_transaction = sanitize_transaction(transaction_dict, private_key)
if 'to' in sanitized_transaction and sanitized_transaction[ 'to' ] is not None:
sanitized_transaction[ 'to' ] = convert_one_to_hex( sanitized_transaction[ 'to' ] )
filled_transaction = pipe( # https://github.com/ethereum/eth-account/blob/00e7b10005c5fa7090086fcef37a76296c524e17/eth_account/_utils/transactions.py#L39
sanitized_transaction,
dict,
partial(merge, TRANSACTION_DEFAULTS),
chain_id_to_v,
apply_formatters_to_dict(HARMONY_FORMATTERS)
)
unsigned_transaction = serialize_transaction(filled_transaction)
transaction_hash = unsigned_transaction.hash()
if isinstance(unsigned_transaction, (UnsignedEthereumTxData, UnsignedHarmonyTxData)):
chain_id = None # https://github.com/ethereum/eth-account/blob/00e7b10005c5fa7090086fcef37a76296c524e17/eth_account/_utils/signing.py#L26
else:
chain_id = unsigned_transaction.v
(v, r, s) = sign_transaction_hash(
account._key_obj, transaction_hash, chain_id)
encoded_transaction = encode_transaction(unsigned_transaction, vrs=(v, r, s))
signed_transaction_hash = keccak(encoded_transaction)
return SignedTransaction(
rawTransaction=HexBytes(encoded_transaction),
hash=HexBytes(signed_transaction_hash),
r=r,
s=s,
v=v,
) | 2,136 |
def integralHesapla(denklem):
"""
Polinom kullanarak integral hesaplar.
:param denklem: İntegrali hesaplanacak polinom.
"""
a,b=5,len(anaVeriler)
deltax = 0.1
integral = 0
n = int((b - a) / deltax)
for i in range(n):
integral += deltax * (denklem.subs({x:a}) + denklem.subs({x:a+deltax})) / 2
a += deltax
return integral | 2,137 |
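The function applies the composite trapezoidal rule with step 0.1 between 5 and `len(anaVeriler)`. A self-contained sketch of the same rule (hypothetical polynomial and bounds, since `x` and `anaVeriler` are module-level names in the original):
import sympy as sp

x = sp.Symbol('x')
denklem = x**2 + 1                  # hypothetical polynomial
a, b, deltax = 5.0, 8.0, 0.1        # hypothetical bounds and step
integral = 0.0
for _ in range(int(round((b - a) / deltax))):
    # one trapezoid: dx * (f(a) + f(a + dx)) / 2
    integral += deltax * (denklem.subs({x: a}) + denklem.subs({x: a + deltax})) / 2
    a += deltax
print(float(integral))                          # ~132.005 (trapezoidal estimate)
print(float(sp.integrate(denklem, (x, 5, 8))))  # 132.0 (exact)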
def fmt(text,*args,**kwargs):
"""
String formatting made easy
text - pattern
Examples
fmt("The is one = %ld", 1)
fmt("The is text = %s", 1.3)
fmt("Using keywords: one=%(one)d, two=%(two)d", two=2, one=1)
"""
return _fmt(text,args,kwargs) | 2,138 |
def test_mask_bad_input_color_img(threshold_test_data):
"""Test for PlantCV."""
# Read in test data
bad_img = cv2.imread(threshold_test_data.small_rgb_img)
with pytest.raises(RuntimeError):
_ = mask_bad(bad_img, bad_type='nan') | 2,139 |
def value_to_class(v):
"""
Return the label of the pixel patch, by comparing the ratio of foreground
to FOREGROUND_THRESHOLD
Input:
patch (numpy.ndarray): patch of a groundtruth image
size:(PATCH_SIZE, PATCH_SIZE)
Output:
the label of the patch:
1: foreground
0: background
"""
df = np.sum(v)
if df > FOREGROUND_THRESHOLD:
return 1
else:
return 0 | 2,140 |
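A minimal illustration, assuming it runs in the same module as `value_to_class` (the threshold value here is hypothetical; `FOREGROUND_THRESHOLD` is a module-level constant in the original):
import numpy as np

FOREGROUND_THRESHOLD = 0.25              # hypothetical value
v = np.full((4, 4), 1.0 / 16)            # patch whose values sum to 1.0
print(value_to_class(v))                 # 1 - sum 1.0 exceeds the threshold
print(value_to_class(np.zeros((4, 4))))  # 0 - no foreground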
def namelist_path(output_dir):
"""Given directory containing TC results, return path to `*.in` file."""
file_paths = [os.path.join(output_dir, y) for x in os.walk(output_dir) for y in glob(os.path.join(x[0], '*.in'))]
if len(file_paths) > 1:
raise Exception("Multiple *.in files found in directory.")
return file_paths[0] | 2,141 |
def mc(cfg):
""" Return the MC (multi-corpus) AAI model, trained on the dysarthric and cross corpora.
3 BLSTM layers, and two single linear regression output layers to obtain articulatory trajectories corresponding to each corpus.
Parameters
----------
cfg: main.Configuration
user configuration file
Returns
-------
Model
"""
mdninput_Lstm = keras.Input(shape = (None, cfg.mfcc_dim))
lstm_1 = Bidirectional(CuDNNLSTM(cfg.hyperparameters['BLSTM_units'],
return_sequences=True))(mdninput_Lstm)
lstm_2a = Bidirectional(CuDNNLSTM(cfg.hyperparameters['BLSTM_units'],
return_sequences=True))(lstm_1)
lstm_2 = Bidirectional(CuDNNLSTM(cfg.hyperparameters['BLSTM_units'],
return_sequences=True))(lstm_2a)
output_1 = TimeDistributed(Dense(cfg.ema_dim,
activation='linear'))(lstm_2)
output_2 = TimeDistributed(Dense(cfg.ema_dim,
activation='linear'))(lstm_2)
model = keras.models.Model(mdninput_Lstm, [output_1, output_2])
return model | 2,142 |
def gradient_dxyz(fxyz: tf.Tensor, fn: Callable) -> tf.Tensor:
"""
Function to calculate gradients on x,y,z-axis of a tensor using central finite difference.
It calculates the gradient along x, y, z separately then stack them together
:param fxyz: shape = (..., 3)
:param fn: function to call
:return: shape = (..., 3)
"""
return tf.stack([fn(fxyz[..., i]) for i in [0, 1, 2]], axis=4) | 2,143 |
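A hedged sketch with a hypothetical central-difference helper, showing the expected call pattern and shapes (the real `fn` comes from the caller):
import tensorflow as tf

def central_diff_x(t):
    # Hypothetical helper: central difference along the x axis for a (batch, x, y, z) tensor.
    return (t[:, 2:, ...] - t[:, :-2, ...]) / 2.0

fxyz = tf.random.normal((1, 8, 8, 8, 3))
grads = gradient_dxyz(fxyz, central_diff_x)
print(grads.shape)   # (1, 6, 8, 8, 3)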
def get_chunk_range():
"""
Get the range of partitions to try.
"""
n_chunks = multiprocessing.cpu_count()
if n_chunks > 128:
raise NotImplementedError('Currently we consider the num. procs in machine to '
'be < 128')
chunk_range = [n_chunks]
while n_chunks < 128:
n_chunks *= 2
chunk_range += [n_chunks]
return chunk_range | 2,144 |
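For illustration, the same doubling logic parameterised by core count (a hypothetical helper, since `get_chunk_range` reads the real CPU count):
def _chunk_range_for(n_chunks):
    # Same doubling loop as get_chunk_range, but with an explicit starting value.
    chunk_range = [n_chunks]
    while n_chunks < 128:
        n_chunks *= 2
        chunk_range += [n_chunks]
    return chunk_range

print(_chunk_range_for(8))   # [8, 16, 32, 64, 128]
print(_chunk_range_for(6))   # [6, 12, 24, 48, 96, 192] - the last entry may exceed 128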
def test_iou(boxes_1, boxes_2, expected_iou):
"""Test IOU calculation."""
boxes_1 = torch.tensor(boxes_1)
boxes_2 = torch.tensor(boxes_2)
expected_iou = torch.tensor(expected_iou)
assert (
torch.round(100 * iou(boxes_1, boxes_2)) == torch.round(100 * expected_iou)
).all() | 2,145 |
def probabilityEval(Mub,Mbu,PSD,ID_basal,Mub_notBleached=None,Mbu_notBleached=None,ID_notBleached=None):
"""Returns the updated PSD Matrix and the corresponding number of receptors that got bound and unbound. To types, "basal" and "not bleached" can be considered, which is necessary when simulation FRAP.
Parameters
----------
Mub : array_like
Matrix containing binding probabilities for the type "basal".
Mbu : array_like
Matrix containing unbinding probabilities for the type "basal".
Mub_notBleached : array_like, optional
By default None. Matrix containing binding probabilities for the type "not bleached".
Mbu_notBleached : array_like, optional
By default None. Matrix containing unbinding probabilities for the type "not bleached".
PSD : array_like
Matrix representing the PSD grid and its bound receptors.
ID_basal : float
Receptor ID of the basal pool.
ID_notBleached: float
Receptor ID of the not bleached pool.
Returns
-------
out: float, float, float, float, array_like
Number of receptors that got bound and unbound of the two types "basal" and "not bleached" and the updated PSD matrix.
Examples
--------
Import libraries:
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> import ampartrafficking.stochastic_model as sm
Set parameters:
>>> U=10
>>> U_notBleached=10
>>> kUB=0.005
>>> kBU=1
>>> N=10
>>> ID_basal=1
>>> ID_notBleached=2
>>> dt=0.5
Create and populate grid and calculate nearest neighbour matrix:
>>> PSD=np.zeros((N,N))
>>> while np.sum(PSD)<20*ID_basal:
>>> i=np.random.randint(0,N)
>>> j=np.random.randint(0,N)
>>> if PSD[i,j]==0:
>>> PSD[i,j]=ID_basal
>>>
>>> while np.sum(PSD)<20*ID_basal+20*ID_notBleached:
>>> i=np.random.randint(0,N)
>>> j=np.random.randint(0,N)
>>> if PSD[i,j]==0:
>>> PSD[i,j]=ID_notBleached
>>>
>>> NN=sm.nearestNeighbours(PSD)
Plot PSD:
>>> plt.figure()
>>> plt.imshow(PSD)
>>> plt.colorbar()
Calculate probability Matrices and update the PSD Matrix:
>>> Mbu=sm.kBUcoop(kBU, NN, PSD, ID_basal)*dt
>>> Mub=sm.kUBcoop(kUB*U, NN, PSD)*dt
>>> Mbu_notBleached=sm.kBUcoop(kBU, NN, PSD, ID_notBleached)*dt
>>> Mub_notBleached=sm.kUBcoop(kUB*U_notBleached, NN, PSD)*dt
>>>
>>> PSD,dBoff,dBon,dBoff_notBleached,dBon_notBleached=sm.probabilityEval(Mub,Mbu,PSD,ID_basal,Mub_notBleached,Mbu_notBleached,ID_notBleached)
Plot PSD:
>>> plt.figure()
>>> plt.imshow(PSD)
>>> plt.colorbar()
Output: (left: before, right: after)
.. image:: images/example1_probabilityEval.png
:width: 45%
.. image:: images/example2_probabilityEval.png
:width: 45%
"""
n=np.shape(PSD)[0]
m=np.shape(PSD)[1]
R=np.random.rand(n,m)
Mask_ub=R<Mub
Mask_bu=R<Mbu
if Mub_notBleached is not None:
R=np.random.rand(n,m)
Mask_ub_notBleached=R<Mub_notBleached
Mask_bu_notBleached=R<Mbu_notBleached
if Mub_notBleached is not None:
R2=np.random.rand(n,m)
ii_basal=np.where((Mask_ub==True)&(Mask_ub_notBleached==True)&(R2<0.5))
ii_notBleached=np.where((Mask_ub==True)&(Mask_ub_notBleached==True)&(R2>=0.5))
Mask_ub[ii_basal]=False
Mask_ub_notBleached[ii_notBleached]=False
dBoff=np.sum(Mask_bu)
dBon=np.sum(Mask_ub)
if Mub_notBleached is not None:
dBoff_notBleached=np.sum(Mask_bu_notBleached)
dBon_notBleached=np.sum(Mask_ub_notBleached)
PSD[Mask_ub]=ID_basal
PSD[Mask_bu]=0
if Mub_notBleached is not None:
PSD[Mask_ub_notBleached]=ID_notBleached
PSD[Mask_bu_notBleached]=0
if Mub_notBleached is not None:
return PSD,dBoff, dBon, dBoff_notBleached, dBon_notBleached
else:
return PSD,dBoff, dBon | 2,146 |
def train_on_data_once(
model_path,
cv_folds=0,
frames_path=None,
annotations_path=None,
species=None,
fold=0,
fraction=None,
perform_evaluation=True,
debug=0,
):
"""Performs training for the segmentation moduel of SIPEC (SIPEC:SegNet).
Parameters
----------
model_path : str
Path to model, can be either where a new model should be stored or a path to an existing model to be retrained.
cv_folds : int
Number of cross_validation folds, use 0 for a normal train/test split.
frames_path : str
Path to the frames used for training.
annotations_path : str
Path to the annotations used for training.
species : str
Species to perform segmentation on (can be any species, but "mouse" or "primate" have more specialised parameters). If your species is neither "mouse" nor "primate", use "default".
fold : int
If cv_folds > 1, fold is the number of fold to be tested on.
fraction : float
Factor by which to decimate the training data points.
perform_evaluation : bool
Perform subsequent evaluation of the model
debug : bool
Debug verbosity.
Returns
-------
model
SIPEC:SegNet model
mean_ap
Mean average precision score achieved by this model
"""
dataset_train, dataset_val = get_segmentation_data(
frames_path=frames_path,
annotations_path=annotations_path,
name=species,
cv_folds=cv_folds,
fold=fold,
fraction=fraction,
)
# initiate mouse model
model = SegModel(species)
# initiate training
model.init_training(model_path=model_path, init_with="coco")
model.init_augmentation()
# start training
print("training on #NUM images : ", str(len(dataset_train.image_ids)))
model.train(dataset_train, dataset_val)
# evaluate model
if perform_evaluation:
model = SegModel(species)
model_path = model.set_inference(model_path=model_path)
mean_ap = model.evaluate(dataset_val)
# if species == "primate" or species == "mouse":
# debug = 1
if debug:
helper = model_path.split("mask_rcnn_primate_0")
epochs = [
"010",
"020",
"030",
]
print(helper)
print(helper[0] + "mask_rcnn_primate_0" + "001" + ".h5")
for epoch in epochs:
model = SegModel("primate")
model.set_inference(
model_path=helper[0] + "mask_rcnn_primate_0" + epoch + ".h5"
)
mean_ap = model.evaluate(dataset_val)
print(epoch)
print(mean_ap)
return model, mean_ap | 2,147 |
def is_dict_like(obj: Dict[Literal["day", "month", "year"], List[int]]):
"""
usage.modin: 2
"""
... | 2,148 |
def train(data_base_path, output_dir, label_vocab_path, hparams_set_name,
train_fold, eval_fold):
"""Constructs trains, and evaluates a model on the given input data.
Args:
data_base_path: str. Directory path containing tfrecords named like "train",
"dev" and "test"
output_dir: str. Path to save checkpoints.
label_vocab_path: str. Path to tsv file containing columns
_VOCAB_ITEM_COLUMN_NAME and _VOCAB_INDEX_COLUMN_NAME. See
testdata/label_vocab.tsv for an example.
hparams_set_name: name of a function in the hparams module which returns a
tf.contrib.training.HParams object.
train_fold: fold to use for training data (one of
protein_dataset.DATA_FOLD_VALUES)
eval_fold: fold to use for evaluation data (one of
protein_dataset.DATA_FOLD_VALUES)
Returns:
A tuple of the evaluation metrics, and the exported objects from Estimator.
"""
hparams = get_hparams(hparams_set_name)
label_vocab = parse_label_vocab(label_vocab_path)
(estimator, train_spec, eval_spec) = _make_estimator_and_inputs(
hparams=hparams,
label_vocab=label_vocab,
data_base_path=data_base_path,
output_dir=output_dir,
train_fold=train_fold,
eval_fold=eval_fold)
return tf.estimator.train_and_evaluate(
estimator=estimator, train_spec=train_spec, eval_spec=eval_spec) | 2,149 |
def get_seven_seg_light_pattern(img, seven_seg_pts, base=[0, 0]):
"""入力画像imgに対してseg_ptsで指定した座標群"""
ptn = 0x00
# TODO: Precomputing all of these coordinate sets in advance could speed this up further.
seven_seg_pts_based = [
np.array(seven_seg_pts[0]) + np.array(base),
np.array(seven_seg_pts[1]) + np.array(base),
np.array(seven_seg_pts[2]) + np.array(base),
np.array(seven_seg_pts[3]) + np.array(base),
np.array(seven_seg_pts[4]) + np.array(base),
np.array(seven_seg_pts[5]) + np.array(base),
np.array(seven_seg_pts[6]) + np.array(base),
np.array(seven_seg_pts[7]) + np.array(base),
]
for i in range(8):
if (is_seg_light(img, seven_seg_pts_based[i])):
bit = 1
else:
bit = 0
ptn |= (bit << (7 - i))
return ptn | 2,150 |
def _enzyme_path_to_sequence(path, graph, enzymes_sites):
"""Converts a path of successive enzymes into a sequence."""
return "".join(
[enzymes_sites[path[0]]]
+ [graph[(n1, n2)]["diff"] for n1, n2 in zip(path, path[1:])]
) | 2,151 |
def profile_to_section(profile_name):
"""Converts a profile name to a section header to be used in the config."""
if any(c in _WHITESPACE for c in profile_name):
profile_name = shlex_quote(profile_name)
return 'profile %s' % profile_name | 2,152 |
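Two quick examples, showing that whitespace in the profile name triggers shell-style quoting:
print(profile_to_section("dev"))          # profile dev
print(profile_to_section("my profile"))   # profile 'my profile'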
def BytesFromFile(filename: str) -> ByteList:
"""Read the EDID from binary blob form into list form.
Args:
filename: The name of the binary blob.
Returns:
The list of bytes that make up the EDID.
"""
with open(filename, "rb") as f:
chunk = f.read()
return [int(x) for x in bytes(chunk)] | 2,153 |
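A self-contained usage sketch (hypothetical path, with a fake 8-byte header written first so the call has something to read):
with open("/tmp/sample_edid.bin", "wb") as f:
    f.write(bytes([0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00]))   # EDID-style header bytes
print(BytesFromFile("/tmp/sample_edid.bin"))   # [0, 255, 255, 255, 255, 255, 255, 0]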
def array_to_patches(arr, patch_shape=(3,3,3), extraction_step=1, normalization=False):
# Make use of the sklearn function extract_patches
# https://github.com/scikit-learn/scikit-learn/blob/51a765a/sklearn/feature_extraction/image.py
"""Extracts patches of any n-dimensional array in place using strides.
Given an n-dimensional array it will return a 2n-dimensional array with
the first n dimensions indexing patch position and the last n indexing
the patch content.
Parameters
----------
arr : 3darray
3-dimensional array of which patches are to be extracted
patch_shape : integer or tuple of length arr.ndim
Indicates the shape of the patches to be extracted. If an
integer is given, the shape will be a hypercube of
sidelength given by its value.
extraction_step : integer or tuple of length arr.ndim
Indicates step size at which extraction shall be performed.
If integer is given, then the step is uniform in all dimensions.
Returns
-------
patches : strided ndarray
2n-dimensional array indexing patches on first n dimensions and
containing patches on the last n dimensions. These dimensions
are fake, but this way no data is copied. A simple reshape invokes
a copying operation to obtain a list of patches:
result.reshape([-1] + list(patch_shape))
"""
patches = extract_patches(arr, patch_shape, extraction_step)
patches = patches.reshape(-1, patch_shape[0],patch_shape[1],patch_shape[2])
# patches = patches.reshape(patches.shape[0], -1)
if normalization==True:
patches -= np.mean(patches, axis=0)
patches /= np.std(patches, axis=0)
print('%d patches have been extracted' % patches.shape[0])
return patches | 2,154 |
def directory_select(message: str, default: Optional[str] = None, cli_flag: Optional[str] = None,
force_interactive: bool = False) -> Tuple[int, str]:
"""Display a directory selection screen.
:param str message: prompt to give the user
:param default: default value to return (if one exists)
:param str cli_flag: option used to set this value with the CLI
:param bool force_interactive: True if it's safe to prompt the user
because it won't cause any workflow regressions
:returns: tuple of the form (`code`, `string`) where
`code` - display exit code
`string` - input entered by the user
"""
return obj.get_display().directory_select(message, default=default, cli_flag=cli_flag,
force_interactive=force_interactive) | 2,155 |
def trade_from_kraken(kraken_trade):
"""Turn a kraken trade returned from kraken trade history to our common trade
history format"""
currency_pair = kraken_to_world_pair(kraken_trade['pair'])
quote_currency = get_pair_position(currency_pair, 'second')
return Trade(
# Kraken timestamps have floating point ...
timestamp=convert_to_int(kraken_trade['time'], accept_only_exact=False),
pair=currency_pair,
type=kraken_trade['type'],
rate=FVal(kraken_trade['price']),
cost=FVal(kraken_trade['cost']),
cost_currency=quote_currency,
fee=FVal(kraken_trade['fee']),
fee_currency=quote_currency,
amount=FVal(kraken_trade['vol']),
location='kraken'
) | 2,156 |
def load_ipython_extension(ipython):
"""Register extension with IPython."""
ipython.register_magics(PDCache) | 2,157 |
def _create_instancer_mesh(positions: np.ndarray, name="mesh_points", *, bpy):
"""Create mesh with where each point is a pseudo face
(three vertices at the same position.
"""
assert positions.ndim == 2
assert positions.shape[1] == 3
if name in bpy.data.meshes:
raise RuntimeError("Mesh '{}' already exists.".format(name))
mesh = bpy.data.meshes.new(name=name)
num_vertices = len(positions)
mesh.vertices.add(num_vertices * 3)
mesh.vertices.foreach_set("co", np.repeat(positions, 3, axis=0).reshape((-1)))
mesh.loops.add(num_vertices * 3)
mesh.loops.foreach_set("vertex_index", np.arange(0, 3 * num_vertices))
loop_start = np.arange(0, 3 * num_vertices, 3, np.int32)
loop_total = np.full(fill_value=3, shape=(num_vertices,), dtype=np.int32)
num_loops = loop_start.shape[0]
mesh.polygons.add(num_loops)
mesh.polygons.foreach_set("loop_start", loop_start)
mesh.polygons.foreach_set("loop_total", loop_total)
mesh.update()
mesh.validate()
logger.info("Created instancer mesh with {} vertices.".format(len(positions)))
return mesh | 2,158 |
def bound_to_nitorch(bound, as_type='str'):
"""Convert boundary type to niTorch's convention.
Parameters
----------
bound : [list of] str or bound_like
Boundary condition in any convention
as_type : {'str', 'enum', 'int'}, default='str'
Return BoundType or int rather than str
Returns
-------
bound : [list of] str or BoundType
Boundary condition in NITorch's convention
"""
intype = type(bound)
if not isinstance(bound, (list, tuple)):
bound = [bound]
obound = []
for b in bound:
b = b.lower() if isinstance(b, str) else b
if b in ('replicate', 'repeat', 'border', 'nearest', BoundType.replicate):
obound.append('replicate')
elif b in ('zero', 'zeros', 'constant', BoundType.zero):
obound.append('zero')
elif b in ('dct2', 'reflect', 'reflection', 'neumann', BoundType.dct2):
obound.append('dct2')
elif b in ('dct1', 'mirror', BoundType.dct1):
obound.append('dct1')
elif b in ('dft', 'wrap', 'circular', BoundType.dft):
obound.append('dft')
elif b in ('dst2', 'antireflect', 'dirichlet', BoundType.dst2):
obound.append('dst2')
elif b in ('dst1', 'antimirror', BoundType.dst1):
obound.append('dst1')
else:
raise ValueError(f'Unknown boundary condition {b}')
if as_type in ('enum', 'int', int):
obound = list(map(lambda b: getattr(BoundType, b), obound))
if as_type in ('int', int):
obound = [b.value for b in obound]
if issubclass(intype, (list, tuple)):
obound = intype(obound)
else:
obound = obound[0]
return obound | 2,159 |
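A few usage examples of the name normalisation (string in, string out; tuple in, tuple out):
print(bound_to_nitorch('reflect'))            # 'dct2'
print(bound_to_nitorch('zeros'))              # 'zero'
print(bound_to_nitorch(('mirror', 'wrap')))   # ('dct1', 'dft')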
def distance_transform_edt(
base_region_raster_path_band, target_distance_raster_path,
sampling_distance=(1., 1.), working_dir=None,
raster_driver_creation_tuple=DEFAULT_GTIFF_CREATION_TUPLE_OPTIONS):
"""Calculate the euclidean distance transform on base raster.
Calculates the euclidean distance transform on the base raster in units of
pixels multiplied by an optional scalar constant. The implementation is
based off the algorithm described in: Meijster, Arnold, Jos BTM Roerdink,
and Wim H. Hesselink. "A general algorithm for computing distance
transforms in linear time." Mathematical Morphology and its applications
to image and signal processing. Springer, Boston, MA, 2002. 331-340.
The base mask raster represents the area to distance transform from as
any pixel that is not 0 or nodata. It is computationally convenient to
calculate the distance transform on the entire raster irrespective of
nodata placement and thus produces a raster that will have distance
transform values even in pixels that are nodata in the base.
Args:
base_region_raster_path_band (tuple): a tuple including file path to a
raster and the band index to define the base region pixels. Any
pixel that is not 0 and nodata are considered to be part of the
region.
target_distance_raster_path (string): path to the target raster that
is the exact euclidean distance transform from any pixel in the
base raster that is not nodata and not 0. The units are in
``(pixel distance * sampling_distance)``.
sampling_distance (tuple/list): an optional parameter used to scale
the pixel distances when calculating the distance transform.
Defaults to (1.0, 1.0). First element indicates the distance
traveled in the x direction when changing a column index, and the
second element in y when changing a row index. Both values must
be > 0.
working_dir (string): If not None, indicates where temporary files
should be created during this run.
raster_driver_creation_tuple (tuple): a tuple containing a GDAL driver
name string as the first element and a GDAL creation options
tuple/list as the second. Defaults to a GTiff driver tuple
defined at geoprocessing.DEFAULT_GTIFF_CREATION_TUPLE_OPTIONS.
Return:
None
"""
working_raster_paths = {}
for raster_prefix in ['region_mask_raster', 'g_raster']:
with tempfile.NamedTemporaryFile(
prefix=raster_prefix, suffix='.tif', delete=False,
dir=working_dir) as tmp_file:
working_raster_paths[raster_prefix] = tmp_file.name
nodata = (get_raster_info(base_region_raster_path_band[0])['nodata'])[
base_region_raster_path_band[1]-1]
nodata_out = 255
def mask_op(base_array):
"""Convert base_array to 1 if not 0 and nodata, 0 otherwise."""
if nodata is not None:
return ~numpy.isclose(base_array, nodata) & (base_array != 0)
else:
return base_array != 0
if not isinstance(sampling_distance, (tuple, list)):
raise ValueError(
"`sampling_distance` should be a tuple/list, instead it's %s" % (
type(sampling_distance)))
sample_d_x, sample_d_y = sampling_distance
if sample_d_x <= 0. or sample_d_y <= 0.:
raise ValueError(
"Sample distances must be > 0.0, instead got %s",
sampling_distance)
raster_calculator(
[base_region_raster_path_band], mask_op,
working_raster_paths['region_mask_raster'], gdal.GDT_Byte, nodata_out,
calc_raster_stats=False,
raster_driver_creation_tuple=raster_driver_creation_tuple)
geoprocessing_core._distance_transform_edt(
working_raster_paths['region_mask_raster'],
working_raster_paths['g_raster'], sampling_distance[0],
sampling_distance[1], target_distance_raster_path,
raster_driver_creation_tuple)
for path in working_raster_paths.values():
try:
os.remove(path)
except OSError:
LOGGER.warning("couldn't remove file %s", path) | 2,160 |
def test_encoder_on_nonfull(numerical, cyclical, categorical):
"""The test checks that if the dataframe has more columns than columns map - extra columns are ignored."""
columns_map = defaultdict(list)
result = {}
for feature, category_type in [(numerical, "numerical"), (cyclical, "cyclical"), (categorical, "categorical")]:
for i in range(feature.shape[1]):
column_name = f"{category_type} {i}"
result[column_name] = feature[:, i]
if category_type == "numerical":
result[column_name] = feature[:, i]
element = column_name
elif category_type == "categorical":
result[column_name] = feature[:, i]
element = column_name
else:
element = (column_name, MAX_VALUE - MIN_VALUE)
columns_map[category_type] += [element]
for category_type in columns_map:
columns_map[category_type].pop()
df = pd.DataFrame(result)
encoder = GeneralEncoder(columns_map)
transformed = encoder.fit_transform(df)
# We add "unknow category" => number of categories in encoder should be +1 to the ones in df
for column in columns_map["categorical"]:
assert df[column].nunique() + 1 == len(encoder.encoders[column].set_classes)
for category_type in encoder.category_types:
for encoded in transformed[category_type]:
if category_type == "cyclical":
assert (df.shape[0], 2) == encoded.shape, f"{category_type} {encoded.shape}"
else:
assert (df.shape[0],) == encoded.shape, f"{category_type} {encoded.shape} {df.values.shape}"
assert set(columns_map.keys()) == set(transformed.keys()), f"{transformed.keys()} {columns_map.keys()}"
inverse_transform = encoder.inverse_transform(transformed)
assert len(df.columns) == len(inverse_transform.columns) + len(columns_map) | 2,161 |
def Amp(f: jnp.ndarray, theta: jnp.ndarray) -> jnp.ndarray:
"""
Computes the Taylor F2 Frequency domain strain waveform with non-standard
spin-induced quadrupole moment for object two.
Note that this waveform assumes object 1 is a BH and therefore uses the
chi * M_total relation to find C
Note that this waveform also assumes that object one is the more massive.
Therefore the more massive object is always considered a BH
Returns:
Strain (array):
"""
# (
# th0,
# th3,
# _,
# _,
# _,
# _,
# ) = theta
# M_chirp = (
# 1 / (16 * pi * f[0]) * (125 / (2 * th0 ** 3)) ** (1 / 5) * C ** 3 / G
# ) / MSUN
# eta = (16 * pi ** 5 / 25 * th0 ** 2 / th3 ** 5) ** (1 / 3)
# Mt = M_chirp / eta ** (3 / 5)
# (
# Mt,
# eta,
# _,
# _,
# ) = theta
m1, m2, _, _ = theta
Mt = m1 + m2
eta = m1 * m2 / (m1 + m2) ** 2
distance = 1.0
pre = 3.6686934875530996e-19 # (GN*Msun/c^3)^(5/6)/Hz^(7/6)*c/Mpc/sec
Mchirp = Mt * eta ** 0.6
A0 = (
Mchirp ** (5.0 / 6.0)
/ (f + 1e-100) ** (7.0 / 6.0)
/ distance
/ pi ** (2.0 / 3.0)
* jnp.sqrt(5.0 / 24.0)
)
return pre * A0 | 2,162 |
def get_forecastgroup_path(dscript_name):
"""
get forecast group from dispatcher init file
:param dscript_name: filepath of dispatcher init file
:return: string containing the path of the forecast group
"""
# create object to represent init file
try:
df = DispatcherInitFile(dscript_name)
except RuntimeError:
print("Warning: Could not create Dispatcher script object from script using configFile={}." \
.format(dscript_name))
if debug:
raise
return None
# extract from init file
try:
dcf_name = df.elementValue(DispatcherInitFile.ForecastGroupElement)
except RuntimeError:
print("Warning: Could not extract ForecastGroup from Dispatcher config file {}".format(dscript_name))
if debug:
raise
return None
return dcf_name | 2,163 |
def spol(f, g):
"""
Compute the S-polynomial of f and g.
INPUT:
- ``f, g`` -- polynomials
OUTPUT: the S-polynomial of f and g
EXAMPLES::
sage: R.<x,y,z> = PolynomialRing(QQ)
sage: from sage.rings.polynomial.toy_buchberger import spol
sage: spol(x^2 - z - 1, z^2 - y - 1)
x^2*y - z^3 + x^2 - z^2
"""
fg_lcm = LCM(LM(f), LM(g))
return fg_lcm//LT(f)*f - fg_lcm//LT(g)*g | 2,164 |
def test_create_resource():
"""Should create resource successfully when no error is caught."""
response = execution.create_resource(MagicMock(), echo=True)
assert response.symbol == "+"
assert response.reason == "Created" | 2,165 |
def evaluate_baselines(experiment,
seed,
num_pairs,
samples_per_pair,
loop_size=None):
"""Helper function to evaluate the set of baselines."""
gumbel_max_joint_fn = functools.partial(
coupling_util.joint_from_samples,
coupling_util.gumbel_max_sampler,
num_samples=samples_per_pair,
loop_size=loop_size)
return {
"Independent":
evaluate_joint(
lambda p, q, _: coupling_util.independent_coupling(p, q),
experiment, seed, num_pairs),
"ICDF":
evaluate_joint(
lambda p, q, _: coupling_util.inverse_cdf_coupling(p, q),
experiment, seed, num_pairs),
"ICDF (permuted)":
evaluate_joint(
lambda p, q, _: coupling_util.permuted_inverse_cdf_coupling(p, q),
experiment, seed, num_pairs),
"Gumbel-max":
evaluate_joint(
gumbel_max_joint_fn,
experiment,
seed,
num_pairs,
joint_correction_num_samples=samples_per_pair),
} | 2,166 |
def add_command(subparsers):
"""Add specific subcommands that the action "create" can use"""
parser = subparsers.add_parser('create', help=create.__doc__)
parser.add_argument('-r', '--recreate', action='store_true', help='If set, I\'ll first erase the current database')
parser.add_argument('-v', '--verbose', action='count', help='Increase verbosity?')
parser.add_argument('-d', '--image-dir', default='/idiap/resource/database/cbsr_nir_vis_2/', help="Change the relative path to the directory containing the images of the CBSR NIR VIS 2 database.")
    parser.add_argument('-a', '--annotations-dir', default='/idiap/resource/database/cbsr_nir_vis_2/annotations/', help="Change the relative path to the directory containing the annotations of the CBSR NIR VIS 2 database.")
parser.set_defaults(func=create) | 2,167 |
def generate_template_mask(protein):
"""Generate template mask."""
protein['template_mask'] = np.ones(shape_list(protein['template_domain_names']),
dtype=np.float32)
return protein | 2,168 |
def minimal_rotation(R, t, iterations=2):
"""Adjust frame so that there is no rotation about z' axis
The output of this function is a frame that rotates the z axis onto the same z' axis as the
input frame, but with minimal rotation about that axis. This is done by pre-composing the input
rotation with a rotation about the z axis through an angle gamma, where
dgamma/dt = 2*(dR/dt * z * R.conjugate()).w
This ensures that the angular velocity has no component along the z' axis.
Note that this condition becomes easier to impose the closer the input rotation is to a
minimally rotating frame, which means that repeated application of this function improves its
accuracy. By default, this function is iterated twice, though a few more iterations may be
called for.
Parameters
==========
R: quaternion array
Time series describing rotation
t: float array
Corresponding times at which R is measured
iterations: int [defaults to 2]
Repeat the minimization to refine the result
"""
from scipy.interpolate import InterpolatedUnivariateSpline as spline
if iterations == 0:
return R
R = quaternion.as_float_array(R)
Rdot = np.empty_like(R)
for i in range(4):
Rdot[:, i] = spline(t, R[:, i]).derivative()(t)
R = quaternion.from_float_array(R)
Rdot = quaternion.from_float_array(Rdot)
halfgammadot = quaternion.as_float_array(Rdot * quaternion.z * R.conjugate())[:, 0]
halfgamma = spline(t, halfgammadot).antiderivative()(t)
Rgamma = np.exp(quaternion.z * halfgamma)
return minimal_rotation(R * Rgamma, t, iterations=iterations-1) | 2,169 |
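# Usage sketch, assuming the numpy-quaternion package (imported as `quaternion`) and
# scipy are installed, as the function itself requires. A frame that spins purely
# about z should be reduced to an (almost) non-spinning frame.
import numpy as np
import quaternion

t = np.linspace(0.0, 10.0, 1000)
R = np.array([np.quaternion(np.cos(0.15 * s), 0.0, 0.0, np.sin(0.15 * s)) for s in t])
R_min = minimal_rotation(R, t, iterations=2)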
def test_sigterm_log(create_project):
"""Test that a log file is generated when a SIGTERM is received."""
with create_project(
'''
import sys
import time
def nothing():
"""usage: say nothing"""
print('ready')
sys.stdout.flush()
while 1:
time.sleep(1)
'''
) as project:
process = subprocess.Popen(
["say", "nothing"],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
)
end = time.time() + 30
try:
while "ready" not in process.stdout.read(5) and time.time() < end:
pass
process.terminate()
assert process.wait()
finally:
process.stdout.close()
process.stderr.close()
logs = glob.glob(str(project / r"say*.log"))
assert logs
for log in logs:
with open(log) as log_file:
contents = log_file.read()
assert "DEBUG" in contents
assert "ERROR" in contents | 2,170 |
def get_info(api_key: hug.types.text, hug_timer=20):
"""Return 'getinfo' data from the Gridcoin Research client!"""
if (api_key == api_auth_key):
# Valid API Key!
response = request_json("getinfo", None)
        if response is None:
return {'success': False, 'api_key': True}
else:
return {'success': True, 'api_key': True, 'result': response, 'time_taken': hug_timer}
else:
# Invalid API Key!
return {'success': False, 'api_key': False} | 2,171 |
def clean_all():
"""清理所有二维码图片"""
for file_name in os.listdir(QR_CODE_PATH):
file_path = os.path.join(QR_CODE_PATH, file_name)
if os.path.isfile(file_path):
os.remove(file_path) | 2,172 |
def stats_proc(optlist, hduids, hdulist):
"""print statistics for region according to options
"""
# Process each HDU in the list "hduids"
for hduid in hduids:
hdu = hdulist[hduid]
pix = hdu.data
name = hdu.name
if optlist.bias:
iu.subtract_bias(optlist.bias, optlist.btype, hdu)
slices = []
(datasec, soscan, poscan) = iu.get_data_oscan_slices(hdu)
if optlist.datasec:
slices.append(datasec)
if optlist.overscan:
slices.append(soscan)
if optlist.poverscan:
slices.append(poscan)
if optlist.region:
for reg in optlist.region: # if there are regions
logging.debug('processing %s', reg)
slice_spec = iu.parse_region(reg)
if slice_spec:
slices.append(slice_spec)
else:
logging.error('skipping region %s', reg)
if len(slices) == 0:
stats_print(optlist, hduid, name, pix, None)
for slice_spec in slices:
y1, y2 = slice_spec[0].start + 1, slice_spec[0].stop
x1, x2 = slice_spec[1].start + 1, slice_spec[1].stop
reg = "{}:{},{}:{}".format(x1, x2, y1, y2)
stats_print(optlist, hduid, name,
pix[slice_spec], reg) | 2,173 |
def get_output_col_names(perils, factors):
"""Column names of the output data frame that contains `perils` and `factors`"""
return (
PCon.RAW_STRUCT['stem']['col_names'] +
[per + PCon.OUTPUT_DEFAULTS['pf_sep'] + fac
for per, fac in pd.MultiIndex.from_product(
[perils, [PCon.RAW_STRUCT['bp_name']] + factors]
)]
) | 2,174 |
def item_chosen(button, choice):
"""Respond to button clicks and enter or space presses."""
# from pudb import set_trace; set_trace()
mark_menu_item(cli.main_menu)
advance_menu(cli.main_menu)
send_wrapper(choice, cli.tty) | 2,175 |
def _get_real_path(workspace_path):
"""Converts the given workspace path into an absolute path.
A tuple of a real path and an error is returned. In this tuple, either
the real path or error is present. The error is present in the returned tuple
either if no workspace dir is given or the generated real path is not under
the working directory.
"""
if not workspace_path:
return (None, 'No path is given')
root_dir = _get_root_dir(trailing_separator=False)
path = _to_real_path(root_dir, workspace_path)
return (path, None) if path.startswith(root_dir) else (None, 'Not authorized') | 2,176 |
def create_alpha_graph(experiments, filename, alphas, just_plot=False):
"""
Save experiment results to CSV file and graph
:param experiments: List of (data, dictionary) for experiments ('no_normalization', 'd_normalization', 'x_normalization', 'full_normalization')
:param filename: filename for the CSV and graph
    :param alphas: tuple of (start, end, denominator). The graph will be over the range (start / denominator, end / denominator).
:param just_plot: just_plot: Bool. Use cached results if True, else recalculate results.
"""
if not just_plot:
with open('{}.csv'.format(filename), 'w') as f:
title_row = 'Alpha,' \
'no_normalization_score,no_normalization_std,no_normalization_percentile,' \
'd_normalization_score,d_normalization_std,d_normalization_percentile,' \
'x_normalization_score,x_normalization_std,x_normalization_percentile,' \
'full_normalization_score,full_normalization_std,full_normalization_percentile\n'
f.write(title_row)
alphas = list([float(a) / alphas[2] for a in range(alphas[0], alphas[1])])
for alpha in alphas:
process_alpha(alpha, experiments, filename)
create_result_graph(filename) | 2,177 |
async def ws_send(websocket: WebSocket, chat_info: dict) -> None:
"""
wait for new items on chat stream and
send data from server to client over a WebSocket
:param websocket:
:type websocket:
:param chat_info:
:type chat_info:
"""
rprint("ws_send first line")
pool = await get_redis_pool()
latest_ids = ['$']
ws_connected = True
first_run = True
while pool and ws_connected:
try:
rprint("ws_send first line loop")
if first_run:
rprint("ws_send first_run")
# fetch some previous chat history
events = await pool.xrevrange(
stream=cvar_tenant.get() + ":stream",
count=NUM_PREVIOUS,
start='+',
stop='-'
)
first_run = False
events.reverse()
for e_id, e in events:
e['e_id'] = e_id
rprint("ws_send first_run" + str(e))
await websocket.send_json(e)
else:
events = await pool.xread(
streams=[cvar_tenant.get() + ":stream"],
count=XREAD_COUNT,
timeout=XREAD_TIMEOUT,
latest_ids=latest_ids
)
# just for testing purposes
rprint("ENVIANDO MENSAJES A: "+cvar_chat_info.get()['username'])
####################################
for _, e_id, e in events:
e['e_id'] = e_id
rprint("ws_send e=" + str(e))
await websocket.send_json(e)
latest_ids = [e_id]
rprint("ws_send latest_ids = " + str(latest_ids))
rprint("ws_send last line loop")
#rprint('################contextvar ', cvar_tenant.get())
except ConnectionClosedError:
ws_connected = False
except ConnectionClosedOK:
ws_connected = False
except ServerConnectionClosedError:
rprint('redis server connection closed')
return
pool.close() | 2,178 |
def simple_caesar(txt, rot=7):
"""Caesar cipher through ASCII manipulation, lowercase only."""
alphabet = string.ascii_lowercase # pick alphabet
shifted_alphabet = alphabet[rot:] + alphabet[:rot] # shift it
table = str.maketrans(alphabet, shifted_alphabet) # create mapping table
return txt.lower().translate(table) # apply | 2,179 |
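# With the default rotation of 7, 'a' maps to 'h'; the input is lower-cased first
# and non-letter characters pass through unchanged.
assert simple_caesar("hello") == "olssv"
assert simple_caesar("Abc!", rot=1) == "bcd!"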
def target(streamlines, target_mask, voxel_size):
"""Retain tracks that pass though target_mask
This function loops over the streamlines and returns streamlines that pass
though target_mask.
Parameters:
-----------
streamlines : iterable
        A sequence of streamlines. Each streamline should be a (N, 3) array,
where N is the length of the streamline.
target_mask : array-like
A mask used as a target
voxel_size
Size of the voxels in the target_mask
Returns:
streamlines : generator
A sequence of streamlines that pass though target_mask
Raises:
-------
IndexError
When the points of the streamlines lie outside of the target_mask
    See Also:
---------
density_map
"""
voxel_size = asarray(voxel_size, 'float')
for sl in streamlines:
ind = sl // voxel_size
if ind.min() < 0:
raise IndexError('streamline has negative values, these values ' +
'are outside target_mask')
i, j, k = ind.T.astype('int')
try:
state = target_mask[i, j, k]
except IndexError:
volume_size = tuple(voxel_size * target_mask.shape)
raise IndexError('streamline has values greater than the size of ' +
'the target mask, ' + str(volume_size))
if state.any():
yield sl | 2,180 |
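# Illustrative check, assuming numpy's asarray is imported at module level as the
# function body expects: a streamline passing through the single marked voxel of a
# toy 5x5x5 mask is kept.
import numpy as np

mask = np.zeros((5, 5, 5), dtype=bool)
mask[2, 2, 2] = True
sl_example = np.array([[0.5, 0.5, 0.5], [2.5, 2.5, 2.5]])
kept = list(target([sl_example], mask, voxel_size=(1.0, 1.0, 1.0)))
assert len(kept) == 1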
def weave(devicePairs):
"""
"""
routers = [x[0] for x in devicePairs if x[0][1] == "router.PNG"]
selected = []
for devicePair in devicePairs:
starterDevice = devicePair[0]
if starterDevice[1] == "router.PNG":
continue
starterPosition = maths.getCenter(tuple(starterDevice[0]))
distances = []
for (endPosition, endDevice) in devicePair[1:]:
distances.append(maths.getDistance(starterPosition, maths.getCenter(endPosition)))
#if starterDevice[1] == "router.PNG":
# distances[distances.index(min(distances))] = np.Infinity
closestIndex = distances.index(min(distances))
closestDevice = devicePair[closestIndex + 1]
selected.append((starterDevice, closestDevice))
return selected | 2,181 |
def plot_xmhm(result, gals_bf, halos_bf, bf_chi2):
"""
Plot SMHM from data, best fit param values, param values corresponding to
68th percentile 100 lowest chi^2 values.
Parameters
----------
result: multidimensional array
Array of SMF, blue fraction and SMHM information
gals_bf: array
Array of y-axis stellar mass values for best fit SMHM
halos_bf: array
Array of x-axis halo mass values for best fit SMHM
bf_chi2: float
Chi-squared value associated with the best-fit model
Returns
---------
Plot displayed on screen.
"""
if survey == 'resolvea':
line_label = 'RESOLVE-A'
elif survey == 'resolveb':
line_label = 'RESOLVE-B'
elif survey == 'eco':
line_label = 'ECO'
# x_bf,y_bf,y_std_bf,y_std_err_bf = Stats_one_arr(halos_bf,\
# gals_bf,base=0.4,bin_statval='center')
y_bf,x_bf,binnum = bs(halos_bf,\
gals_bf,'mean',bins=np.linspace(10, 15, 7))
    mod_x_arr = []
    mod_y_arr = []
    # Accumulate binned SMHM relations for each of the five model sets in `result`.
    for i_outer in range(5):
        for idx in range(len(result[i_outer][0])):
            mod_x_ii = result[i_outer][19][idx]
            mod_y_ii = result[i_outer][18][idx]
            y, x, binnum = bs(mod_x_ii, mod_y_ii, 'mean',
                              bins=np.linspace(10, 15, 7))
            mod_x_arr.append(x)
            mod_y_arr.append(y)
y_max = np.nanmax(mod_y_arr, axis=0)
y_min = np.nanmin(mod_y_arr, axis=0)
# for idx in range(len(result[0][0])):
# x_model,y_model,y_std_model,y_std_err_model = \
# Stats_one_arr(result[0][19][idx],result[0][18][idx],base=0.4,\
# bin_statval='center')
# plt.plot(x_model,y_model,color='lightgray',linestyle='-',alpha=0.5,\
# zorder=0,label='Models')
# for idx in range(len(result[1][0])):
# x_model,y_model,y_std_model,y_std_err_model = \
# Stats_one_arr(result[1][19][idx],result[1][18][idx],base=0.4,\
# bin_statval='center')
# plt.plot(x_model,y_model,color='lightgray',linestyle='-',alpha=0.5,\
# zorder=1)
# for idx in range(len(result[2][0])):
# x_model,y_model,y_std_model,y_std_err_model = \
# Stats_one_arr(result[2][19][idx],result[2][18][idx],base=0.4,\
# bin_statval='center')
# plt.plot(x_model,y_model,color='lightgray',linestyle='-',alpha=0.5,\
# zorder=2)
# for idx in range(len(result[3][0])):
# x_model,y_model,y_std_model,y_std_err_model = \
# Stats_one_arr(result[3][19][idx],result[3][18][idx],base=0.4,\
# bin_statval='center')
# plt.plot(x_model,y_model,color='lightgray',linestyle='-',alpha=0.5,\
# zorder=3)
# for idx in range(len(result[4][0])):
# x_model,y_model,y_std_model,y_std_err_model = \
# Stats_one_arr(result[4][19][idx],result[4][18][idx],base=0.4,\
# bin_statval='center')
# plt.plot(x_model,y_model,color='lightgray',linestyle='-',alpha=0.5,\
# zorder=4)
fig1 = plt.figure(figsize=(10,10))
x_cen = 0.5 * (mod_x_arr[0][1:] + mod_x_arr[0][:-1])
plt.fill_between(x=x_cen, y1=y_max,
y2=y_min, color='lightgray',alpha=0.4,label='Models')
x_cen = 0.5 * (x_bf[1:] + x_bf[:-1])
plt.plot(x_cen, y_bf, color='k', lw=4, label='Best-fit', zorder=10)
plt.fill([13.5, plt.gca().get_xlim()[1], plt.gca().get_xlim()[1], 13.5],
[plt.gca().get_ylim()[0], plt.gca().get_ylim()[0],
plt.gca().get_ylim()[1], plt.gca().get_ylim()[1]], fill=False,
hatch='\\')
if survey == 'resolvea' and mf_type == 'smf':
plt.xlim(10,14)
else:
plt.xlim(10,14.5)
plt.xlabel(r'\boldmath$\log_{10}\ M_{h} \left[\mathrm{M_\odot}\, \mathrm{h}^{-1} \right]$',fontsize=30)
if mf_type == 'smf':
if survey == 'eco' and quenching == 'hybrid':
plt.ylim(np.log10((10**8.9)/2.041),11.9)
elif survey == 'eco' and quenching == 'halo':
plt.ylim(np.log10((10**8.9)/2.041),11.56)
elif survey == 'resolvea':
plt.ylim(np.log10((10**8.9)/2.041),13)
elif survey == 'resolveb':
plt.ylim(np.log10((10**8.7)/2.041),)
plt.ylabel(r'\boldmath$\log_{10}\ M_\star \left[\mathrm{M_\odot}\, \mathrm{h}^{-1} \right]$',fontsize=30)
elif mf_type == 'bmf':
if survey == 'eco' or survey == 'resolvea':
plt.ylim(np.log10((10**9.4)/2.041),)
elif survey == 'resolveb':
plt.ylim(np.log10((10**9.1)/2.041),)
plt.ylabel(r'\boldmath$\log_{10}\ M_{b} \left[\mathrm{M_\odot}\, \mathrm{h}^{-1} \right]$',fontsize=30)
handles, labels = plt.gca().get_legend_handles_labels()
by_label = OrderedDict(zip(labels, handles))
plt.legend(by_label.values(), by_label.keys(), loc='best',prop={'size': 30})
plt.annotate(r'$\boldsymbol\chi ^2 / dof \approx$ {0}'.
format(np.round(bf_chi2/dof,2)),
xy=(0.02, 0.8), xycoords='axes fraction', bbox=dict(boxstyle="square",
ec='k', fc='lightgray', alpha=0.5), size=25)
if quenching == 'hybrid':
plt.title('Hybrid quenching model | ECO')
elif quenching == 'halo':
plt.title('Halo quenching model | ECO')
plt.show() | 2,182 |
def change_image_ani(image: _Surface,
name: _Optional[str] = None,
id_: _Optional[int] = None) -> _TextureAni:
"""
    change_image_ani(image, name=None, id_=None)
Type: function
Description: returns a TextureAni that simply changes the image of
an AniElement
Args:
'image' (pygame.Surface): the image to change the element to
'name' (str?): the name of the animation, defaults to None
'id_' (int?): the ID of the animation, defaults to None
Return type: TextureAni
"""
return _TextureAni(
name=name,
frames=[image],
time=0,
id_=id_,
reset_on_end=False
) | 2,183 |
def import_string(dotted_path):
"""
Import a dotted module path and return the attribute/class designated by the
last name in the path. Raise ImportError if the import failed.
"""
try:
module_path, class_name = dotted_path.rsplit('.', 1)
except ValueError as err:
raise ImportError("%s doesn't look like a module path" % dotted_path) from err
module = import_module(module_path)
try:
return getattr(module, class_name)
except AttributeError as err:
raise ImportError('Module "%s" does not define a "%s" attribute/class' % (
module_path, class_name)
) from err | 2,184 |
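# For example, resolving a dotted path into an attribute of a standard-library
# module returns that attribute directly.
pi_value = import_string("math.pi")
assert abs(pi_value - 3.141592653589793) < 1e-12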
def read_u16(f):
"""Reads a two byte unsigned value from the file object f.
"""
temp = f.read(2)
if not temp:
raise EOFError("EOF")
return int.from_bytes(temp, byteorder='little', signed=False) | 2,185 |
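# The two bytes are read little-endian, so b'\x01\x02' decodes to 0x0201 (513);
# any object with a .read() method works, e.g. an in-memory buffer.
import io

assert read_u16(io.BytesIO(b"\x01\x02")) == 0x0201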
def encrypt_file(input_file, output_file, write_mode, encryption_key, scrypt_n=14, scrypt_r=8, scrypt_p=1,
remove_input=True):
"""Taking an input file as well as the ASCII encryption key, the file is encrypted with AES-256, and outputted to
output_file. The input file is removed by default.
"""
if write_mode == 'write':
mode = 'wb'
elif write_mode == 'append':
mode = 'ab'
else:
raise ValueError('\'write\' and \'append\' are the only allowed strings for write_mode.')
backend = default_backend()
initialization_vector = os.urandom(AES.block_size // 8)
salt = os.urandom(AES.block_size // 8)
key = _derive_key(encryption_key, salt, scrypt_n, scrypt_r, scrypt_p, backend)
with open(output_file, mode) as encrypted:
with open(input_file, 'rb') as decrypted:
encrypted.write(initialization_vector)
encrypted.write(salt)
chunk_size = 1048576
while True:
chunk = decrypted.read(chunk_size)
if chunk:
encrypted.write(_encrypt_bytes_chunk(key, initialization_vector, chunk, backend))
else:
break
if remove_input:
os.remove(input_file) | 2,186 |
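# Hypothetical usage sketch: the file names and passphrase are placeholders, and the
# call relies on this module's private _derive_key/_encrypt_bytes_chunk helpers plus
# the `cryptography` package backing default_backend() and AES.
import os
import tempfile

work_dir = tempfile.mkdtemp()
plain_path = os.path.join(work_dir, "payload.bin")
with open(plain_path, "wb") as handle:
    handle.write(b"secret payload")
encrypt_file(plain_path, plain_path + ".enc", "write", "a strong passphrase",
             remove_input=False)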
def new_figure_manager_given_figure(num, figure):
"""
Create a new figure manager instance for the given figure.
"""
canvas = FigureCanvasQTAgg(figure)
return FigureManagerQT(canvas, num) | 2,187 |
def process_image(sample, settings, mode, color_jitter, rotate):
""" process_image """
mean = settings.image_mean
std = settings.image_std
crop_size = settings.crop_size
img_path = sample[0]
img = cv2.imread(img_path)
if mode == 'train':
if rotate:
img = rotate_image(img)
if crop_size > 0:
img = random_crop(
img, crop_size, settings, interpolation=settings.interpolation)
if color_jitter:
img = distort_color(img)
if np.random.randint(0, 2) == 1:
img = img[:, ::-1, :]
else:
if crop_size > 0:
target_size = settings.resize_short_size
img = resize_short(
img, target_size, interpolation=settings.interpolation)
img = crop_image(img, target_size=crop_size, center=True)
img = img[:, :, ::-1]
if 'use_aa' in settings and settings.use_aa and mode == 'train':
img = np.ascontiguousarray(img)
img = Image.fromarray(img)
img = policy(img)
img = np.asarray(img)
img = img.astype('float32').transpose((2, 0, 1)) / 255
img_mean = np.array(mean).reshape((3, 1, 1))
img_std = np.array(std).reshape((3, 1, 1))
img -= img_mean
img /= img_std
if mode == 'train' or mode == 'val':
return (img, sample[1])
elif mode == 'test':
return (img, ) | 2,188 |
def flag_spot(states: np.ndarray, x: int, y: int,
annotations: np.ndarray) -> None:
"""Put a flag in (x, y) coordinate."""
global _marked_mines_count
x = x + 1
y = y + 1
if is_marked(states[y, x]):
states[y, x] = states_id.CLOSED
annotations[y, x] = ""
_marked_mines_count -= 1
elif is_closed(states[y, x]):
states[y, x] = states_id.MARKED
annotations[y, x] = "F"
_marked_mines_count += 1 | 2,189 |
def loglinear(F, X, confmeth, conftype=1, alpha=0.75, t_star=0.0): # pylint: disable=C0103
"""
Function to estimate the parameters (gamma0 and gamma1) of the NHPP
loglinear model. There is no regression function for this model.
:param list F: list of failure counts.
:param list X: list of individual failures times.
:param int confmeth: the method for calculating confidence bounds.
:param int conftype: the confidence level type
1 = lower one-sided
2 = upper one-sided
3 = two-sided (default)
:param float alpha: the confidence level.
:param float t_star: the end of the observation period for time terminated,
or Type I, tests. Defaults to 0.0.
:return: [_gamma0_lower, _gamma0_hat, _gamma0_upper],
[_gamma1_lower, _gamma1_hat, _gamma1_upper]
:rtype: tuple of lists
"""
# Define the function that will be set equal to zero and solved for gamma1.
def _gamma1(gamma1, T, r, Ta): # pylint: disable=C0103
"""
Function for estimating the gamma1 value.
:param float gamma1:
:param float T: the sum of individual failure times.
:param int r: the total number of failures observed.
:param float Ta: the latest observed failure time.
:return: _g1; the starting estimate of the gamma1 parameter.
:rtype: float
"""
# Calculate interim values.
_a = r / gamma1
_b = r * Ta * np.exp(gamma1 * Ta)
_c = np.exp(gamma1 * Ta) - 1.0
_g1 = T + _a - (_b / _c)
return _g1
# Initialize variables.
_g0 = [0.0, 0.0, 0.0]
_g1 = [0.0, 0.0, 0.0]
_typeii = False
# Ensure failure times are of type float.
X = [float(x) for x in X]
# Ensure the confidence level is expressed as a decimal, then find the
# standard normal and student-t critical values for constructing
# confidence bounds on the parameters.
if alpha > 1.0:
alpha = alpha / 100.0
# If no observation time was passed, use the maximum failure time and set
# the _typeii variable True to indicate this is a failure truncated
# dataset.
if t_star == 0.0:
t_star = sum(X)
_typeii = True
if not _typeii:
_N = sum(F) - 1
else:
_N = sum(F) - 2
_T = sum(X)
# Calculate the Loglinear parameters.
_g1[1] = fsolve(_gamma1, 0.001, args=(_T, _N, t_star))[0]
_g0[1] = np.log((_N * _g1[1]) / (np.exp(_g1[1] * t_star) - 1.0))
# TODO: Add support for one-sided bounds.
#if confmeth == 1: # Crow bounds.
#elif confmeth == 3: # Fisher matrix bounds.
    print(_g0, _g1)
    return (_g0, _g1) | 2,190
def convert_pressures(a, from_units, to_units):
"""Converts values in numpy array (or a scalar) from one pressure unit to another, in situ if array.
arguments:
a (numpy float array, or float): array of pressure values to undergo unit conversion in situ, or a scalar
from_units (string): the units of the data before conversion
to_units (string): the required units
returns:
a after unit conversion
note:
To see supported units, use: `valid_uoms(quantity='pressure')`
"""
return convert(a, from_units, to_units, quantity = 'pressure', inplace = True) | 2,191 |
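# Hedged sketch: the unit strings below are assumptions and must be among the uoms
# accepted by the underlying convert() helper (see valid_uoms(quantity='pressure')).
# Note the conversion happens in situ for array input.
import numpy as np

pressures = np.array([1.0, 2.5])                 # values in bar
pressures_kpa = convert_pressures(pressures, 'bar', 'kPa')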
def extend(best):
""" Extend current best sum for next triangle row offering best value from previous row.
E.g. for triangle: 1 / 2, 3 / 4, 1, 5 best values second row are: 3, 4. Emit 3, 4, 4 for
updating third row which gives best values as: 7, 5, 9, which gives 7, 7, 9, 9 for next row.
:param best: current best value sum for previous row
:return: select best value from previous row for current row
"""
prev = 0
for num in best:
yield max(prev, num)
prev = num
yield prev | 2,192 |
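# Reproducing the docstring's example: best sums [3, 4] on the second row expand to
# [3, 4, 4] candidates for updating the third row.
assert list(extend([3, 4])) == [3, 4, 4]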
def split_train_hdf(size_SB=4000):
"""
    Split the master hdf5 file into smaller hdf5 files so that they can be loaded completely into
    memory with latd_generator, which was originally created for loading the cached augmented data.
    By splitting the larger hdf5, we can load non-augmented files into memory and run the network
    more quickly than loading a batch of size 15 each time.
    :param size_SB: number of examples written to each smaller hdf5 file.
:return:
"""
hdf5_file_train = h5py.File(HDF5_PATH_TRAIN, "r")
data_num_train = hdf5_file_train["train_img"].shape[0]
    data_num_train = list(range(0, data_num_train))  # random.shuffle needs a mutable sequence
random.shuffle(data_num_train)
dt = h5py.special_dtype(vlen=str)
for k in range(0, int(len(data_num_train)), int(size_SB)):
image_accumulator = []
label_accumulator = []
acn_accumulator = []
report_accumulator = []
path_accumulator = []
for i in range(0, int(size_SB), int(BATCH_SIZE)):
i = i + k
batch_indices = data_num_train[i:i + BATCH_SIZE]
batch_indices.sort()
images_train = HDF5_FILE_TRAIN["train_img"][batch_indices, ...]
labels_train = HDF5_FILE_TRAIN["train_labels"][batch_indices]
acns_train = HDF5_FILE_TRAIN["train_acns"][batch_indices, ...]
reports_train = HDF5_FILE_TRAIN["train_reports"][batch_indices, ...]
paths_train = HDF5_FILE_TRAIN["train_paths"][batch_indices, ...]
image_accumulator.append(images_train)
label_accumulator.append(labels_train)
acn_accumulator.append(acns_train)
report_accumulator.append(reports_train)
path_accumulator.append(paths_train)
image_accumulator = np.concatenate(image_accumulator, axis=0)
label_accumulator = np.concatenate(label_accumulator, axis=0)
acn_accumulator = np.concatenate(acn_accumulator, axis=0)
report_accumulator = np.concatenate(report_accumulator, axis=0)
path_accumulator = np.concatenate(path_accumulator, axis=0)
filename = ORIG_DATA_TEMPLATE.format(k)
with h5py.File(filename, mode='w') as the_file:
# NOTE: this might be a good place to coerce the images to a specific dtype
the_file.create_dataset(ORIG_DATA_IMAGE_NAME, data=image_accumulator)
the_file.create_dataset(ORIG_DATA_LABEL_NAME, data=label_accumulator)
the_file.create_dataset(ORIG_DATA_ACN_NAME, data=acn_accumulator)
the_file.create_dataset(ORIG_DATA_REPORTS_NAME, data=report_accumulator, dtype=dt)
the_file.create_dataset(ORIG_DATA_PATHS_NAME, data=path_accumulator, dtype=dt) | 2,193 |
def amovie(stream: Stream, *args, **kwargs) -> FilterableStream:
"""https://ffmpeg.org/ffmpeg-filters.html#amovie"""
return filter(stream, amovie.__name__, *args, **kwargs) | 2,194 |
def kutta_condition(A_source, B_vortex):
"""
Builds the Kutta condition array.
Parameters
----------
A_source: 2D Numpy array of floats
Source contribution matrix for the normal velocity.
B_vortex: 2D Numpy array of floats
Vortex contribution matrix for the normal velocity.
Returns
-------
b: 1D Numpy array of floats
The left-hand side of the Kutta-condition equation.
"""
b = numpy.empty(A_source.shape[0]+1, dtype=float)
# matrix of source contribution on tangential velocity
# is the same than
# matrix of vortex contribution on normal velocity
b[:-1] = B_vortex[0, :] + B_vortex[-1, :]
# matrix of vortex contribution on tangential velocity
# is the opposite of
# matrix of source contribution on normal velocity
b[-1] = - numpy.sum(A_source[0, :] + A_source[-1, :])
print(b)
return b | 2,195 |
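# Toy shape check with 3 panels (3x3 influence matrices, values arbitrary): the
# returned Kutta-condition vector has one more entry than there are panels.
import numpy

A_source_toy = numpy.ones((3, 3))
B_vortex_toy = numpy.zeros((3, 3))
b_toy = kutta_condition(A_source_toy, B_vortex_toy)
assert b_toy.shape == (4,)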
def absorption_sinogram(p, anglelist):
"""Generates the absorption sinogram for absorption by the full
elemental content of the Phantom2d object.
Parameters
----------
p : Phantom2d object
anglelist : list of float
Ordered list of sinogram projection angles in degrees.
Returns
-------
array of float
Sinogram of requested scattering or fluorescence.
This is a 2d x-theta map of dimensionless values.
"""
sinogram = np.empty((p.cols, len(anglelist)))
if config.show_progress:
pbar = ProgressBar(maxval=max(1, len(anglelist)-1), term_width=80).start()
for i, angle in enumerate(anglelist):
if config.show_progress:
pbar.update(i)
increasing_ix = True # Set True to accumulate cmam along increasing y
n_map = irradiance_map(p, angle, n0=1.0, increasing_ix=increasing_ix)
if increasing_ix:
sinogram[:, i] = np.log(n_map[0] / n_map[-1])
else:
sinogram[:, i] = np.log(n_map[-1] / n_map[0])
return sinogram | 2,196 |
def trace_feature_vector_from_nodes(embeddings, traces, dimension):
"""
Computes average feature vector for each trace
Parameters
-----------------------
embeddings,
Text-based model containing the computed encodings
traces: List,
List of traces treated as sentences by the model
Returns
-----------------------
vectors: List
list of vector encodings for each trace
"""
vectors_average, vectors_max = [], []
for trace in traces:
trace_vector = []
for token in trace:
try:
trace_vector.append(embeddings[token])
except KeyError:
pass
if len(trace_vector) == 0:
trace_vector.append(np.zeros(dimension))
vectors_average.append(np.array(trace_vector).mean(axis=0))
vectors_max.append(np.array(trace_vector).max(axis=0))
return vectors_average, vectors_max | 2,197 |
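# A plain dict can stand in for the trained embedding model here; tokens missing from
# it (like 'c') are skipped by the KeyError branch, and a trace with no known tokens
# falls back to a zero vector of the requested dimension.
import numpy as np

toy_embeddings = {"a": np.array([1.0, 0.0]), "b": np.array([0.0, 1.0])}
toy_traces = [["a", "b", "c"], ["c"]]
avg_vecs, max_vecs = trace_feature_vector_from_nodes(toy_embeddings, toy_traces, dimension=2)
assert np.allclose(avg_vecs[0], [0.5, 0.5]) and np.allclose(max_vecs[1], [0.0, 0.0])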
def jitter_rotate(drawing, sigma=0.2):
"""
Rotate an entire drawing about 0,0 by a random gaussian.
"""
rotation = np.random.randn(1) * sigma
matrix = create_rotation_matrix(rotation)
return [np.dot(stroke, matrix).squeeze() for stroke in drawing] | 2,198 |
def reset_matrix(node):
"""Reset the offsetParentMatrix attribute to identity.
Arguments:
node (str): The node to reset.
"""
matrix_ = OpenMaya.MMatrix()
cmds.setAttr(node + ".offsetParentMatrix", matrix_, type="matrix") | 2,199 |