def process_singletask(
tf_path,
dnase_path,
genome_path,
data_path,
experiment,
window=200,
alphabet="ACGT",
compression="gzip",
max_len=300,
gc_match=True,
valid_frac=0.1,
test_frac=0.2,
):
"""Preprocess data for a single-class task."""
# remove extremely large peaks
tf_filtered_path = os.path.join(data_path, experiment + "_pos_filtered.bed")
wrangle.filter_max_length(tf_path, tf_filtered_path, max_len)
# create new bed file with window size enforced
pos_bed_path = os.path.join(data_path, experiment + "_pos_" + str(window) + ".bed")
wrangle.enforce_constant_size(tf_filtered_path, pos_bed_path, window)
# extract sequences from bed file and save to fasta file
pos_fasta_path = os.path.join(data_path, experiment + "_pos.fa")
wrangle.bedtools_getfasta(
pos_bed_path, genome_path, output_path=pos_fasta_path, strand=True
)
# parse sequence from fasta file
pos_seq, pos_names = wrangle.parse_fasta(pos_fasta_path)
# filter sequences with absent nucleotides
pos_seq, good_index = wrangle.filter_nonsense_sequences(pos_seq)
pos_names = pos_names[good_index]
# convert filtered sequences to one-hot representation
pos_one_hot = wrangle.convert_one_hot(pos_seq, alphabet)
# get non-overlap between pos peaks and neg peaks
neg_bed_path = os.path.join(data_path, experiment + "_nonoverlap.bed")
wrangle.bedtools_intersect(
dnase_path, tf_path, neg_bed_path, write_a=True, nonoverlap=True
)
# create new bed file with window enforced
neg_bed_path2 = os.path.join(data_path, experiment + "_neg_" + str(window) + ".bed")
wrangle.enforce_constant_size(neg_bed_path, neg_bed_path2, window)
# extract sequences from bed file and save to fasta file
neg_fasta_path = os.path.join(data_path, experiment + "_neg.fa")
wrangle.bedtools_getfasta(
neg_bed_path2, genome_path, output_path=neg_fasta_path, strand=True
)
# parse sequence and chromosome from fasta file
neg_seq, neg_names = wrangle.parse_fasta(neg_fasta_path)
# filter sequences with absent nucleotides
neg_seq, good_index = wrangle.filter_nonsense_sequences(neg_seq)
neg_names = neg_names[good_index]
# convert filtered sequences to one-hot representation
neg_one_hot = wrangle.convert_one_hot(neg_seq, alphabet)
if len(neg_one_hot) > len(pos_one_hot):
# subselect background sequences according to gc-content
if gc_match:
# subsample negative sequences so their GC-content distribution matches the positives:
f_pos = np.mean(pos_one_hot, axis=1)
f_neg = np.mean(neg_one_hot, axis=1)
# get GC content for pos and neg sequences
gc_pos = np.sum(f_pos[:, 1:3], axis=1)
gc_neg = np.sum(f_neg[:, 1:3], axis=1)
index = wrangle.sample_b_matched_to_a(gc_pos, gc_neg)
else:
index = np.random.permutation(len(neg_one_hot))[: len(pos_one_hot)]
neg_one_hot = neg_one_hot[index]
neg_names = neg_names[index]
# merge positive and negative labels
one_hot = np.vstack([pos_one_hot, neg_one_hot])
labels = np.vstack(
[np.ones((len(pos_one_hot), 1)), np.zeros((len(neg_one_hot), 1))]
)
names = np.concatenate([pos_names, neg_names])
names = names.astype("S")
# shuffle indices for train, validation, and test sets
train, valid, test, indices = wrangle.split_dataset(
one_hot, labels, valid_frac=valid_frac, test_frac=test_frac
)
# save to hdf5 file
file_path = os.path.join(data_path, experiment + "_" + str(window) + ".h5")
with h5py.File(file_path, "w") as fout:
fout.create_dataset("x_train", data=train[0], compression="gzip")
fout.create_dataset("y_train", data=train[1], compression="gzip")
fout.create_dataset("train_names", data=names[indices[0]], compression="gzip")
fout.create_dataset("x_valid", data=valid[0], compression="gzip")
fout.create_dataset("y_valid", data=valid[1], compression="gzip")
fout.create_dataset("valid_names", data=names[indices[1]], compression="gzip")
fout.create_dataset("x_test", data=test[0], compression="gzip")
fout.create_dataset("y_test", data=test[1], compression="gzip")
fout.create_dataset("test_names", data=names[indices[2]], compression="gzip")
print("Saved to: " + file_path) | 2,000 |
def update_topic_rule_destination(arn=None, status=None):
"""
Updates a topic rule destination. You use this to change the status, endpoint URL, or confirmation URL of the destination.
See also: AWS API Documentation
Exceptions
:example: response = client.update_topic_rule_destination(
arn='string',
status='ENABLED'|'IN_PROGRESS'|'DISABLED'|'ERROR'
)
:type arn: string
:param arn: [REQUIRED]\nThe ARN of the topic rule destination.\n
:type status: string
:param status: [REQUIRED]\nThe status of the topic rule destination. Valid values are:\n\nIN_PROGRESS\nA topic rule destination was created but has not been confirmed. You can set status to IN_PROGRESS by calling UpdateTopicRuleDestination . Calling UpdateTopicRuleDestination causes a new confirmation challenge to be sent to your confirmation endpoint.\n\nENABLED\nConfirmation was completed, and traffic to this destination is allowed. You can set status to DISABLED by calling UpdateTopicRuleDestination .\n\nDISABLED\nConfirmation was completed, and traffic to this destination is not allowed. You can set status to ENABLED by calling UpdateTopicRuleDestination .\n\nERROR\nConfirmation could not be completed, for example if the confirmation timed out. You can call GetTopicRuleDestination for details about the error. You can set status to IN_PROGRESS by calling UpdateTopicRuleDestination . Calling UpdateTopicRuleDestination causes a new confirmation challenge to be sent to your confirmation endpoint.\n
:rtype: dict
Returns
Response Syntax
{}
Response Structure
(dict) --
Exceptions
IoT.Client.exceptions.InternalException
IoT.Client.exceptions.InvalidRequestException
IoT.Client.exceptions.ServiceUnavailableException
IoT.Client.exceptions.UnauthorizedException
IoT.Client.exceptions.ConflictingResourceUpdateException
:return: {}
:returns:
(dict) --
"""
pass | 2,001 |
def test_server():
"""Testing the server executable"""
asyncio.get_event_loop().run_until_complete(server.handler()) | 2,002 |
def _get_dataset_domain(
dataset_folder: str,
is_periodic: bool,
spotlight_id: Optional[Union[str, List]] = None,
time_unit: Optional[str] = "day",
):
"""
Returns a domain for a given dataset as identified by a folder. If a
time_unit is passed as a function parameter, the function will assume
that the domain is periodic and with only return the min/max dates,
otherwise ALL dates available for that dataset/spotlight will be returned.
Params:
------
dataset_folder (str): dataset folder to search within
time_unit (Optional[str]): time_unit from the dataset's metadata json file
spotlight_id (Optional[str]): a dictionary containing the
`spotlight_id` of a spotlight to restrict the
domain search to.
time_unit (Optional[str] - one of ["day", "month"]):
Wether the {date} object in the S3 filenames should be matched
to YYYY_MM_DD (day) or YYYYMM (month)
Return:
------
List[datetime]
"""
s3_keys_args: Dict[str, Any] = {"prefix": dataset_folder}
if spotlight_id:
s3_keys_args["spotlight_id"] = spotlight_id
keys = _gather_s3_keys(**s3_keys_args)
if not keys:
raise NoKeysFoundForSpotlight
dates = []
for key in keys:
# matches either dates like: YYYYMM or YYYY_MM_DD
pattern = re.compile(
r"[^a-zA-Z0-9]((?P<YEAR>\d{4})_(?P<MONTH>\d{2})_(?P<DAY>\d{2}))[^a-zA-Z0-9]",
re.IGNORECASE,
)
if time_unit == "month":
pattern = re.compile(
r"[^a-zA-Z0-9](?P<YEAR>(\d{4}))(?P<MONTH>(\d{2}))[^a-zA-Z0-9]",
re.IGNORECASE,
)
# re.Pattern.search takes a start position as its second argument, not flags,
# so the IGNORECASE flag is applied at compile time instead
result = pattern.search(key)
if not result:
continue
date = None
try:
date = datetime.datetime(
int(result.group("YEAR")),
int(result.group("MONTH")),
int(result.groupdict().get("DAY", 1)),
)
except ValueError:
# Invalid date value matched - skip date
continue
# Some files happen to have 6 consecutive digits (likely an ID of sorts)
# that sometimes gets matched as a date. This further restriction of
# matched timestamps will reduce the number of "false" positives (although
# ID's between 201011 and 203011 will slip by)
if not datetime.datetime(2010, 1, 1) < date < datetime.datetime(2030, 1, 1):
continue
dates.append(date.strftime("%Y-%m-%dT%H:%M:%SZ"))
if is_periodic and len(dates):
return [min(dates), max(dates)]
return sorted(set(dates)) | 2,003 |
def randomize_quaternion_along_z(
mujoco_simulation: RearrangeSimulationInterface, random_state: RandomState
):
""" Rotate goal along z axis and return the rotated quat of the goal """
quat = _random_quat_along_z(mujoco_simulation.num_objects, random_state)
return rotation.quat_mul(quat, mujoco_simulation.get_target_quat(pad=False)) | 2,004 |
def init(model):
"""
Initialize the server. Loads pyfunc model from the path.
"""
app = flask.Flask(__name__)
@app.route("/ping", methods=["GET"])
def ping(): # pylint: disable=unused-variable
"""
Determine if the container is working and healthy.
We declare it healthy if we can load the model successfully.
"""
health = model is not None
status = 200 if health else 404
return flask.Response(response="\n", status=status, mimetype="application/json")
@app.route("/invocations", methods=["POST"])
def transformation(): # pylint: disable=unused-variable
"""
Do an inference on a single batch of data. In this sample server,
we take data as CSV or json, convert it to a Pandas DataFrame,
generate predictions and convert them back to CSV.
"""
# Convert from CSV to pandas
if flask.request.content_type == CONTENT_TYPE_CSV:
data = flask.request.data.decode("utf-8")
csv_input = StringIO(data)
data = parse_csv_input(csv_input=csv_input)
elif flask.request.content_type == CONTENT_TYPE_JSON:
global logged_pandas_records_format_warning
if not logged_pandas_records_format_warning:
_logger.warning(
"**IMPORTANT UPDATE**: Starting in MLflow 0.9.0, requests received with a"
" `Content-Type` header value of `%s` will be interpreted"
" as JSON-serialized Pandas DataFrames with the `split` orientation, instead"
" of the `records` orientation. The `records` orientation is unsafe because"
" it may not preserve column ordering. Client code should be updated to"
" either send serialized DataFrames with the `split` orientation and the"
" `%s` content type (recommended) or use the `%s` content type with the"
" `records` orientation. For more information, see"
" https://www.mlflow.org/docs/latest/models.html#pyfunc-deployment.\n",
CONTENT_TYPE_JSON,
CONTENT_TYPE_JSON_SPLIT_ORIENTED,
CONTENT_TYPE_JSON_RECORDS_ORIENTED,
)
logged_pandas_records_format_warning = True
data = parse_json_input(
json_input=flask.request.data.decode("utf-8"), orientation="records"
)
elif flask.request.content_type == CONTENT_TYPE_JSON_RECORDS_ORIENTED:
data = parse_json_input(
json_input=flask.request.data.decode("utf-8"), orientation="records"
)
elif flask.request.content_type == CONTENT_TYPE_JSON_SPLIT_ORIENTED:
data = parse_json_input(
json_input=flask.request.data.decode("utf-8"), orientation="split"
)
else:
return flask.Response(
response=(
"This predictor only supports the following content types,"
" {supported_content_types}. Got '{received_content_type}'.".format(
supported_content_types=CONTENT_TYPES,
received_content_type=flask.request.content_type,
)
),
status=415,
mimetype="text/plain",
)
# Do the prediction
# pylint: disable=broad-except
try:
raw_predictions = model.predict(data)
except Exception:
_handle_serving_error(
error_message=(
"Encountered an unexpected error while evaluating the model. Verify"
" that the serialized input Dataframe is compatible with the model for"
" inference."
)
)
predictions = get_jsonable_obj(raw_predictions, pandas_orientation="records")
result = json.dumps(predictions, cls=NumpyEncoder)
return flask.Response(response=result, status=200, mimetype="application/json")
return app | 2,005 |
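To make the request flow above concrete, here is a hedged client-side sketch against the /invocations route. The host, port, payload columns, and the exact value of the CSV content-type constant are assumptions, not taken from the source.

import requests

csv_payload = "x,y\n1,2\n3,4\n"
resp = requests.post(
    "http://localhost:5000/invocations",   # assumed local deployment
    data=csv_payload,
    headers={"Content-Type": "text/csv"},  # assumed value of CONTENT_TYPE_CSV
)
# On success the server returns the predictions serialized with NumpyEncoder.
print(resp.status_code, resp.json())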
def sample_tag(user, name='Comedy'):
"""Creates a sample Tag"""
return Tag.objects.create(user=user, name=name) | 2,006 |
def _middle_point(p1: np.ndarray, p2: np.ndarray) -> Tuple[int, int]:
"""Returns the middle point (x,y) between two points
Arguments:
p1 (np.ndarray): First point
p2 (np.ndarray): Second point
"""
return tuple((p1 + p2) // 2) | 2,007 |
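A minimal usage sketch for `_middle_point` above, assuming numpy points; note that the midpoint is computed with integer floor division.

import numpy as np

p1 = np.array([2, 4])
p2 = np.array([6, 10])
# Element-wise floor division gives the integer midpoint (4, 7).
assert _middle_point(p1, p2) == (4, 7)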
def filter_with_prefixes(value, prefixes):
"""
Returns true if at least one of the prefixes exists in the value.
Arguments:
value -- string to validate
prefixes -- list of string prefixes to validate at the beginning of the value
"""
for prefix in prefixes:
if value.startswith(prefix):
return False
return True | 2,008 |
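A short usage sketch of the prefix filter above; the paths and prefixes are illustrative only.

prefixes = ["tmp/", "cache/"]
assert filter_with_prefixes("images/logo.png", prefixes) is True   # kept
assert filter_with_prefixes("tmp/scratch.txt", prefixes) is False  # filtered out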
def get_idl_parser(*, allow_cache=True):
"""Get the global IdlParser object."""
# Singleton pattern
global _parser
if _parser and allow_cache:
return _parser
# Get source
with open(os.path.join(lib_dir, "resources", "webgpu.idl"), "rb") as f:
source = f.read().decode()
# Create parser
idl = IdlParser(source)
idl.parse()
_parser = idl
return idl | 2,009 |
def delete_database_entry():
"""
Wrapper function for database.delete_entry(), since the database has to be set first
:return:
"""
global database
if not isinstance(database, db_manager.Database):
print("Database not initialized yet! Please select a database to read from from the database menu!")
input("\nEnter to continue")
return
database.show_all_entries(database.delete_entry) | 2,010 |
def model_training_full_experiment():
"""
This creates the plot for figure 5B in the Montague paper. Figure 5B shows
the 'entire time course of model responses (trials 1-150).' The setup is
the same as in Figure 5A, except that training begins at trial 10.
"""
sample = pnl.TransferMechanism(
default_variable=np.zeros(60),
name=pnl.SAMPLE
)
action_selection = pnl.TransferMechanism(
default_variable=np.zeros(60),
function=pnl.Linear(slope=1.0, intercept=1.0),
name='Action Selection'
)
stimulus_onset = 41
reward_delivery = 54
samples = np.zeros(60)
samples[stimulus_onset:] = 1
samples = np.tile(samples, (120, 1))
targets = np.zeros(60)
targets[reward_delivery] = 1
targets = np.tile(targets, (120, 1))
# training begins at trial 11
# no reward given every 15 trials to simulate a wrong response
no_reward_trials = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 14, 29, 44, 59, 74,
89, 104, 119]
for t in no_reward_trials:
targets[t][reward_delivery] = 0
pnl.MappingProjection(
sender=sample,
receiver=action_selection,
matrix=np.zeros((60, 60))
)
learning_projection = pnl.LearningProjection(
learning_function=pnl.TDLearning(learning_rate=0.3)
)
p = pnl.Process(
default_variable=np.zeros(60),
pathway=[sample, action_selection],
learning=learning_projection,
size=60,
target=np.zeros(60)
)
trial = 0
def print_header():
nonlocal trial
print("\n\n*** EPISODE: {}".format(trial))
def store_delta_vals():
nonlocal trial
delta_vals[trial] = s.mechanisms[2].value
trial += 1
input_list = {
sample: samples
}
target_list = {
action_selection: targets
}
s = pnl.System(processes=[p])
delta_vals = np.zeros((120, 60))
s.run(
num_trials=120,
inputs=input_list,
targets=target_list,
learning=True,
call_before_trial=print_header,
call_after_trial=store_delta_vals
)
with plt.style.context('seaborn'):
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
x_vals, y_vals = np.meshgrid(np.arange(120), np.arange(40, 60, step=1))
ax.plot_surface(x_vals, y_vals, delta_vals[:, 40:60].transpose())
ax.invert_yaxis()
ax.set_xlabel("Trial")
ax.set_ylabel("Timestep")
ax.set_zlabel("∂")
ax.set_title("Montague et. al. (1996) -- Figure 5B")
plt.show() | 2,011 |
def swarm_post_deploy(deploy_results, swarm_id, swarm_trace_id):
"""
Chord callback run after deployments. Should check for exceptions, then
launch uptests.
"""
if any(isinstance(r, Exception) for r in deploy_results):
swarm = Swarm.objects.get(id=swarm_id)
msg = "Error in deployments for swarm %s" % swarm
send_event('Swarm %s aborted' % swarm, msg,
tags=['failed'], swarm_id=swarm_trace_id)
raise Exception(msg)
swarm_assign_uptests(swarm_id, swarm_trace_id) | 2,012 |
def insert_tables(cur, conn):
"""
Inserts data into tables using the queries in `insert_table_queries` list.
"""
for query in insert_table_queries:
try:
cur.execute(query)
conn.commit()
except psycopg2.Error as e:
print('Failed to execute the query: {}'.format(query))
print(e) | 2,013 |
def is_seq(x, step=1):
"""Checks if the elements in a list-like object are increasing by step
Parameters
----------
x: list-like
step
Returns
-------
True if elements increase by step, else false and the index at which the condition is violated.
"""
for i in range(1, len(x)):
if not x[i] == (x[i - 1] + step):
print('Not seq at: ', i)
return False
return True | 2,014 |
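A usage sketch for `is_seq`; the values are illustrative.

assert is_seq([3, 4, 5, 6]) is True
assert is_seq([0, 2, 4, 6], step=2) is True
assert is_seq([1, 2, 4]) is False  # also prints the violating index (2)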
def test_lambda_leaf():
""" Tests correct structure of lambda leaf.
"""
my_lambda = LambdaLeaf(leaf_title='MyLambda',
tree_name='testtree',
template=template,
dependencies=['MyDb:5432'],
lambda_config=lambda_config,
availability_zones=availability_zones,
public_cidr=public_cidr
)
assert_equals(my_lambda.trop_lambda_function.Code.S3Bucket, 'bucket_name')
assert_equals(my_lambda.trop_lambda_function.Code.S3Key, 'key_name')
assert_equals(my_lambda.trop_lambda_function.Description, 'blah')
assert_is(type(my_lambda.trop_lambda_function.FunctionName), Join)
assert_equals(my_lambda.trop_lambda_function.Handler, 'main')
assert_equals(my_lambda.trop_lambda_function.MemorySize, 128)
assert_equals(my_lambda.trop_lambda_function.Role, 'test_arn')
assert_equals(my_lambda.trop_lambda_function.Runtime, 'python2.7')
assert_equals(my_lambda.trop_lambda_function.Timeout, 1)
assert_equals(my_lambda.trop_cw_rule.ScheduleExpression, 'cron(0/5 * * * ? *)')
assert_equals(len(my_lambda.trop_lambda_function.VpcConfig.SubnetIds), 3)
assert_equals(len(my_lambda.trop_lambda_function.VpcConfig.SecurityGroupIds), 1) | 2,015 |
def imatmul(a, b): # real signature unknown; restored from __doc__
""" a = imatmul(a, b) -- Same as a @= b. """
pass | 2,016 |
def create_patient(record: dict) -> tuple:
""" Returns a FHIR Patient resource entry and reference. """
gender = map_sex(record["sex_new"] or record["sex"])
patient_id = generate_patient_hash(
names = participant_names(record),
gender = gender,
birth_date = record['birthday'],
postal_code = participant_zipcode(record))
if not patient_id:
# Some piece of information was missing, so we couldn't generate a
# hash. Fallback to treating this individual as always unique by using
# the REDCap record id.
patient_id = generate_hash(f"{REDCAP_URL}{PROJECT_ID}/{record['record_id']}")
LOG.debug(f"Generated individual identifier {patient_id}")
patient_identifier = create_identifier(f"{SFS}/individual",patient_id)
patient_resource = create_patient_resource([patient_identifier], gender)
return create_entry_and_reference(patient_resource, "Patient") | 2,017 |
def read_files(filenames):
"""Returns an iterator over files in a list of files"""
for filename in filenames:
with open(filename, 'r') as filehandle:
yield filehandle.read() | 2,018 |
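A self-contained sketch of the generator above using temporary files created on the fly.

import os
import tempfile

paths = []
for text in ("alpha", "beta"):
    handle = tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False)
    handle.write(text)
    handle.close()
    paths.append(handle.name)

# The generator yields each file's full contents lazily, in order.
assert list(read_files(paths)) == ["alpha", "beta"]

for path in paths:
    os.remove(path)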
def test_post_cve_id_wrong_header(org_admin_headers):
""" org_admin_headers cannot post for 'mitre' org """
res = requests.post(
f'{env.AWG_BASE_URL}{CVE_ID_URL}',
headers=org_admin_headers,
params={
'amount': '10',
'batch_type': 'sequential',
'cve_year': f'{utils.CURRENT_YEAR}',
'short_name': 'mitre'
}
)
# NOTE: this error also occurs when short_name is empty, expected error is 'NO_ORG_SHORTNAME'
# and when short_name is invalid, expected error is 'ORG_DNE'
assert res.status_code == 403
response_contains_json(res, 'error', 'ORG_CANNOT_RESERVE_FOR_OTHER') | 2,019 |
def delete_editor(userid):
"""
:param userid: a string representing the user's UW NetID
:return: True if request is successful, False otherwise.
raise DataFailureException or a corresponding TrumbaException
if the request failed or an error code has been returned.
"""
url = _make_del_account_url(userid)
return _process_resp(url,
get_sea_resource(url),
_is_editor_deleted
) | 2,020 |
def sanity_check(file):
"""Runs a few checks to ensure data quality and integrity
Args:
file (str): Path to a directory of .npy files containing the transformed data.
"""
# Here you must add all the checks you consider important regarding the
# state of the data
# assert names_df.columns.tolist() == ["First Name", "Last Name"], "Column mismatch"
# assert names_df["First Name"].isna().sum() == 0, "There are empty fields"
# assert names_df["Last Name"].isna().sum() == 0, "There are empty fields"
files_df = os.listdir(file)
synapse_i = os.path.join(file, files_df[0])
synapse_i = np.load(synapse_i)
# def check_label(i, l):
print("Checking Label Dimensions:")
print(synapse_i.shape)
# print(l.shape)
print("Checking image view")
for index in range(synapse_i.shape[0]):
if np.sum(synapse_i[index, :, :] * 255) > 0:
print(index)
fig, ax = plt.subplots(figsize=(10, 10))
# plt.imshow(i[index, :, :]*255, cmap='gray')
plt.imshow(synapse_i[index, :, :], cmap="jet", alpha=0.5)
plt.show() | 2,021 |
def ecio_quality_rating(value, unit):
"""
ECIO (Ec/Io) - Energy to Interference Ratio (3G, CDMA/UMTS/EV-DO)
"""
if unit != "dBm":
raise ValueError("Unsupported unit '{:}'".format(unit))
rating = 0
if value > -2:
rating = 4
elif -2 >= value > -5:
rating = 3
elif -5 >= value > -10:
rating = 2
elif value <= -10:
rating = 1
return rating | 2,022 |
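A few spot checks of the rating thresholds above; the unit string simply has to match the function's check.

assert ecio_quality_rating(-1, "dBm") == 4   # better than -2
assert ecio_quality_rating(-7, "dBm") == 2   # between -5 and -10
assert ecio_quality_rating(-15, "dBm") == 1  # -10 or worse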
def normalizeFilename(filename):
"""normalizeFilename(filename)
Replace characters that are illegal in the Windows environment"""
res = filename
rep = { "*":"_", "\"":"\'", "/":" per ", "\\":"_", ",":"_", "|":"_", ":":";" }
for frm, to in rep.items():
res = res.replace(frm, to)
return res.strip() | 2,023 |
def __virtual__():
"""
Check if macOS and PyObjC is available
"""
if not salt.utils.platform.is_darwin():
return (False, 'module: mac_wifi only available on macOS.')
if not PYOBJC:
return (False, 'PyObjC not available.')
return __virtualname__ | 2,024 |
def test_data_non_df():
"""Test the raise of error of check data."""
dataloader = DummySoccerDataLoader().set_data([4, 5])
with pytest.raises(
TypeError,
match='Data should be a pandas dataframe. Got list instead.',
):
dataloader.extract_train_data() | 2,025 |
def rearrange_kernel(kernel, data_shape=None):
"""Rearrange kernel
This method rearranges the input kernel elements for vector multiplication.
The input kernel is padded with zeroes to match the image size.
Parameters
----------
kernel : np.ndarray
Input kernel array
data_shape : tuple
Shape of the data
Returns
-------
numpy.ndarray
Rearranged matrix of kernel elements
"""
# Define kernel shape.
kernel_shape = np.array(kernel.shape)
# Set data shape if not provided.
if isinstance(data_shape, type(None)):
data_shape = kernel_shape
else:
data_shape = np.array(data_shape)
# Set the length of the output matrix rows.
vec_length = np.prod(data_shape)
# Find the difference between the shape of the data and the kernel.
shape_diff = data_shape - kernel_shape
if np.any(shape_diff < 0):
raise ValueError('Kernel shape must be less than or equal to the '
'data shape')
# Set the kernel radius.
kernel_rad = kernel_shape // 2
# Rotate, pad and roll the input kernel.
kernel_rot = np.pad(np.rot90(kernel, 2), ((0, shape_diff[0]),
(0, shape_diff[1])), 'constant')
kernel_rot = np.roll(np.roll(kernel_rot, -kernel_rad[1], axis=1),
-kernel_rad[0], axis=0)
return np.array([np.roll(np.roll(kernel_rot, i, axis=0), j,
axis=1).reshape(vec_length) for i in range(data_shape[0])
for j in range(data_shape[1])]) | 2,026 |
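A small shape check for the routine above: each data pixel gets one row holding the rolled, padded kernel flattened to the data length.

import numpy as np

kernel = np.ones((3, 3))
mat = rearrange_kernel(kernel, data_shape=(4, 4))
# One row per data pixel, each of length prod(data_shape).
assert mat.shape == (16, 16)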
def exportTable(request_id, params):
"""Starts a table export task running.
This is a low-level method. The higher-level ee.batch.Export.table object
is generally preferred for initiating table exports.
Args:
request_id (string): A unique ID for the task, from newTaskId. If you are
using the cloud API, this does not need to be from newTaskId, (though
that's a good idea, as it's a good source of unique strings). It can also
be empty, but in that case the request is more likely to fail as it cannot
be safely retried.
params: The object that describes the export task. If you are using the
cloud API, this should be an ExportTableRequest. However, the "expression"
parameter can be the actual FeatureCollection to be exported, not its
serialized form.
Returns:
A dict with information about the created task.
If you are using the cloud API, this will be an Operation.
"""
params = params.copy()
return _prepare_and_run_export(
request_id, params,
_get_cloud_api_resource().projects().table().export) | 2,027 |
def add_parents_to_frame(qs):
"""
There seems to be a bug in the django-pandas api where self-foreign keys are not returned properly
This is a workaround
:param qs:
:return:
"""
tn_parent_ids = qs.values_list("tn_parent_id", flat=True).all()
df = read_frame(qs.all(), fieldnames=get_standard_field_names(), verbose=True)
df['tn_parent_id'] = tn_parent_ids
df['tn_parent_id'] = df['tn_parent_id'].astype('Int64')
df['in_reply_to_user_id'] = df['in_reply_to_user_id'].astype('Int64')
return df | 2,028 |
def template_check(value):
"""Check if a rendered template string equals true.
If value is not a string, return value as is.
"""
if isinstance(value, str):
return value.lower() == "true"
return value | 2,029 |
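A quick sketch of the behaviour: only the string "true" (case-insensitive) maps to True, and non-string values pass through unchanged.

assert template_check("True") is True
assert template_check("false") is False
assert template_check(1) == 1  # non-strings are returned as-is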
def print_top_20_disallow():
"""Makes file with top 20 disallows from files"""
print_text = ""
item_list = Counter(disallow).most_common(20)
file_name = "top_20_robot_txt_disallows.txt"
for item in item_list:
item_text = "Item: " + item[0] + " :: Count: " + str(item[1])
print_text = print_text + item_text + "\n"
write_to_file(file_name, print_text) | 2,030 |
async def test_if_fires_on_change_with_for_0_advanced(hass, start_ha, calls):
"""Test for firing on change with for: 0 advanced."""
context = Context()
await hass.async_block_till_done()
hass.states.async_set("test.entity", "world", context=context)
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].context.parent_id == context.id
assert calls[0].data["some"] == "template - test.entity - hello - world - 0:00:00" | 2,031 |
def return_npc(mcc, mnc):
"""
Format MCC and MNC into a NPC.
:param mcc: Country code.
:type mcc: int
:param mnc: Network code.
:type mnc: int
"""
return "{0}{1}30".format(str(mcc).zfill(3), str(mnc).zfill(3)) | 2,032 |
def class_to_bps(bw_cls):
"""
Convert a SIBRA bandwidth class to bps (Bits Per Second). Class 0 is a
special case, and is mapped to 0bps.
:param float bw_cls: SIBRA bandwidth class.
:returns: Kbps of bandwidth class
:rtype: float
"""
if bw_cls == 0:
return 0
bw_base = math.sqrt(pow(2, bw_cls - 1))
return SIBRA_BW_FACTOR * bw_base | 2,033 |
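A sketch of the geometric scaling used above: each bandwidth class multiplies the base by sqrt(2), and the result is SIBRA_BW_FACTOR times the base shown here (the constant itself is defined elsewhere in the module).

import math

for bw_cls, base in [(1, 1.0), (3, 2.0), (5, 4.0)]:
    assert math.isclose(math.sqrt(2 ** (bw_cls - 1)), base)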
def _get_capacity():
"""Return constant values for dam level capacities.
Storage capacity values are measured in million cubic metres
i.e. Megalitres or Ml.
Source: https://en.wikipedia.org/wiki/Western_Cape_Water_Supply_System
@return capacity: Dict object containing maximum capacities of Western
Cape dams. Includes aggregate values for small dams, big six dams
and all dams.
"""
big_six_capacity = {
'Theewaterskloof': 480188,
'Wemmershoek': 58644,
'Steenbras Lower': 33517,
'Steenbras Upper': 31757,
'Voëlvlei': 164095,
'Berg River': 130010,
}
small_capacity = {
'Hely-Hutchinson': 925,
'Woodhead': 954,
'Victoria': 128,
'De Villiers': 243,
'Kleinplaats': 1368,
'Lewis Gay': 182,
'Land-en-Zeezicht': 451,
}
capacity = {**big_six_capacity, **small_capacity}
capacity['Big Six Dams'] = sum(big_six_capacity.values())
capacity['Small Dams'] = sum(small_capacity.values())
capacity['All Dams'] = capacity['Small Dams'] + capacity['Big Six Dams']
return capacity | 2,034 |
def create_NISMOD1_data(path_to_zip_file, path_out, path_geography):
"""
Arguments
----------
path_to_zip_file : path to the zipped NISMOD population data
path_out : output folder where the data is extracted and prepared
path_geography : path to the MSOA baseline geography file
"""
print("... start running initialisation scripts", flush=True)
# Extract NISMOD population data
path_extraction = os.path.join(path_out, "MISTRAL_pop_gva")
zip_ref = zipfile.ZipFile(path_to_zip_file, 'r')
zip_ref.extractall(path_extraction)
zip_ref.close()
# Complete gva and pop data for every sector
path_pop = os.path.join(path_extraction, "data")
geography_name = "lad_uk_2016"
# All MISTRAL scenarios to prepare with correct config
scenarios_to_generate = [
'pop-baseline16_econ-c16_fuel-c16',
'pop-f_econ-c_fuel-c',
'pop-d_econ-c_fuel-c',]
script_data_preparation_MISTRAL_pop_gva.run(
path_to_folder=path_pop,
path_MSOA_baseline=path_geography,
MSOA_calculations=False,
geography_name=geography_name,
scenarios_to_generate=scenarios_to_generate)
print("... successfully finished setup")
return | 2,035 |
def login_required(f):
"""
Decorator to use if a view needs to be protected by a login.
"""
@wraps(f)
def decorated_function(*args, **kwargs):
if 'username' not in session:
return redirect(url_for('login'))
return f(*args, **kwargs)
return decorated_function | 2,036 |
def test_bare_except() -> None:
"""Bare `except` to handle any uncaught exceptions."""
def reciprocal_of(value: float) -> float:
try:
return 1 / value
except ZeroDivisionError:
raise
except:
raise
pytest.raises(TypeError, reciprocal_of, "a") | 2,037 |
def _get_image_blob(im):
"""Converts an image into a network input.
Arguments:
im (ndarray): a color image in BGR order
Returns:
blob (ndarray): a data blob holding an image pyramid
im_scale_factors (list): list of image scales (relative to im) used
in the image pyramid
"""
im_orig = im.astype(np.float32, copy=True)
im_orig -= cfg.PIXEL_MEANS
im_shape = im_orig.shape
im_size_min = np.min(im_shape[0:2])
im_size_max = np.max(im_shape[0:2])
processed_ims = []
im_scale_factors = []
# print('cfg.TEST.SCALES: {}'.format(cfg.TEST.SCALES)),
for target_size in cfg.TEST.SCALES:
im_scale = float(target_size) / float(im_size_min)
# Prevent the biggest axis from being more than MAX_SIZE
if np.round(im_scale * im_size_max) > cfg.TEST.MAX_SIZE:
im_scale = float(cfg.TEST.MAX_SIZE) / float(im_size_max)
im = cv2.resize(im_orig, None, None, fx=im_scale, fy=im_scale,
interpolation=cv2.INTER_LINEAR)
im_scale_factors.append(im_scale)
processed_ims.append(im)
# Create a blob to hold the input images
blob = im_list_to_blob(processed_ims)
# blob /= 255.0
return blob, np.array(im_scale_factors) | 2,038 |
def check_args():
"""Checks the arguments passed by the command line
By passing one or more parameters, you can disable a single module source.
Actual parameters allowed are:
* `-no-instagram`: disables Instagram source
* `-no-youtube`: disables YouTube source
* `-no-spotify`: disables Spotify source
* `-no-birthday`: disables birthdays events source
* `-no-twitter`: disables Twitter source (used for reposting)
Remember that `-no-twitter` is different than `-no-tweet`:
`-no-tweet` actually prevents the bot from tweeting any update from the enabled sources. The output will still be visible on the console. This is really useful for **testing**.
Returns:
A dictionary that contains all the sources and their state (enabled or disabled, True or False)
"""
source = {"instagram": True, "youtube": True, "spotify": True, "birthday": True, "twitter": True, "billboard": True}
if len(sys.argv) > 1:
for arg in sys.argv:
if arg == "-no-tweet":
print("-no-tweet parameter passed!\nTest mode enabled: the bot won't tweet anything")
set_test_mode()
if arg == "-no-instagram":
print("-no-instagram parameter passed!")
source["instagram"] = False
if arg == "-no-spotify":
print("-no-spotify parameter passed!")
source["spotify"] = False
if arg == "-no-youtube":
print("-no-youtube parameter passed!")
source["youtube"] = False
if arg == "-no-birthday":
print("-no-birthday parameter passed!")
source["birthday"] = False
if arg == "-no-billboard":
print("-no-billboard parameter passed!")
source["billboard"] = False
if arg == "-no-twitter":
print("-no-twitter parameter passed!")
source["twitter"] = False
print()
return source | 2,039 |
def toiter(x):
"""Convert to iterable. If input is iterable, returns it. Otherwise returns it in a list.
Useful when you want to iterate over something (like in a for loop),
and you don't want to have to do type checking or handle exceptions
when it isn't a sequence"""
if iterable(x):
return x
else:
return [x] | 2,040 |
def ap_date(value):
"""
Converts a date string in m/d/yyyy format into AP style.
"""
if not value:
return ''
bits = unicode(value).split('/')
month, day, year = bits
output = AP_MONTHS[int(month) - 1]
output += ' ' + unicode(int(day))
output += ', ' + year
return output | 2,041 |
def start_gui():
"""Initialize graphical interface in order to render board.
You must first call this function once before making calls to :py:func:`blokus.blokus_env.display_board`.
See Also
--------
blokus.blokus_env.display_board
blokus.blokus_env.terminate_gui
"""
gui.start_gui() | 2,042 |
def dump_sql(fp, query: str, encoding="utf8"):
"""
Write a given query to an open file object (opened in binary mode).
"""
query = ljustify_sql(query)
for line in query:
fp.write(bytes(line, encoding=encoding))
return fp | 2,043 |
def make_design_matrix(stim, d=25):
"""Create time-lag design matrix from stimulus intensity vector.
Args:
stim (1D array): Stimulus intensity at each time point.
d (number): Number of time lags to use.
Returns
X (2D array): GLM design matrix with shape T, d
"""
padded_stim = np.concatenate([np.zeros(d - 1), stim])
T = len(stim)
X = np.zeros((T, d))
for t in range(T):
X[t] = padded_stim[t:t + d]
return X | 2,044 |
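A small worked example of the lagged design matrix above: with d=2, row t holds the zero-padded stimulus values [stim[t-1], stim[t]].

import numpy as np

stim = np.array([1.0, 2.0, 3.0, 4.0])
X = make_design_matrix(stim, d=2)
expected = np.array([[0.0, 1.0],
                     [1.0, 2.0],
                     [2.0, 3.0],
                     [3.0, 4.0]])
assert np.allclose(X, expected)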
def delete_user_word(word_list: list, instance):
"""
Delete words from the temporary dictionary; more information is given in :func:`import_dict`
:param instance: instance to execute the function
:param word_list: list of words want to delete
"""
if not hasattr(instance, "del_usr_word"):
raise NLPIRException("This instance not support this method")
for word in word_list:
instance.del_usr_word(word) | 2,045 |
def publisher():
"""Publishes simulated event messages to a Redis Channel"""
# Create our connection object
r = redis.StrictRedis(host=redis_host, port=redis_port, password=redis_password, decode_responses=True)
for id in range(50):
# create the event as an dict with fields type and id
event = {}
event['type'] = event_types[ random.randint(0, len(event_types) - 1) ]
event['id'] = id
# convert the event to json and log to the console
event_message = json.dumps(event)
channel = event_channels[event['type']]
print("Sending message {} -> {}".format(event_message, channel))
cnt = r.publish(channel, event_message)
print("Delivered message to {} subscribers".format(cnt))
time.sleep(0.001)
# send a terminate message to all clients
term = { 'type': 'terminate'}
term_message = json.dumps(term)
channel = 'process:terminate'
print("Sending terminate message")
cnt = r.publish(channel, term_message)
print("Delivered message to {} subscribers".format(cnt)) | 2,046 |
def _fit_subpixel_2d(image, coord, radius, voxel_size_yx, psf_yx):
"""Fit a gaussian in a 2-d image.
Parameters
----------
image : np.ndarray
Image with shape (y, x).
coord : np.ndarray, np.int64
Coordinate of the spot detected, with shape (2,). One coordinate per
dimension (yx coordinates).
radius : Tuple[float]
Radius in pixels of the detected spots, one element per dimension.
voxel_size_yx : int or float
Size of a voxel on the yx plan, in nanometer.
psf_yx : int or float
Theoretical size of the PSF emitted by a spot in the yx plan,
in nanometer.
Returns
-------
new_coord : List[float]
Coordinates of the spot centroid with a subpixel accuracy (one element
per dimension).
"""
# extract spot image
image_spot, bbox_low = _get_spot_surface(
image, coord[0], coord[1], radius[0])
# fit gaussian
try:
parameters = modelize_spot(image_spot,
voxel_size_z=None,
voxel_size_yx=voxel_size_yx,
psf_z=None, psf_yx=psf_yx,
return_coord=True)
# format coordinates and ensure it is fitted within the spot image
y_max, x_max = image_spot.shape
coord_y = parameters[0] / voxel_size_yx
if coord_y < 0 or coord_y > y_max:
coord_y = coord[0]
else:
coord_y += bbox_low[0]
coord_x = parameters[1] / voxel_size_yx
if coord_x < 0 or coord_x > x_max:
coord_x = coord[1]
else:
coord_x += bbox_low[1]
new_coord = [coord_y, coord_x]
# if a spot is ill-conditioned, we simply keep its original coordinates
except RuntimeError:
new_coord = list(coord)
return new_coord | 2,047 |
def density(sisal,temp,pres,salt=None,dliq=None,chkvals=False,
chktol=_CHKTOL,salt0=None,dliq0=None,chkbnd=False,useext=False,
mathargs=None):
"""Calculate sea-ice total density.
Calculate the total density of a sea-ice parcel.
:arg float sisal: Total sea-ice salinity in kg/kg.
:arg float temp: Temperature in K.
:arg float pres: Pressure in Pa.
:arg salt: Seawater salinity in kg/kg. If unknown, pass None
(default) and it will be calculated.
:type salt: float or None
:arg dliq: Seawater liquid water density in kg/m3. If unknown, pass
None (default) and it will be calculated.
:type dliq: float or None
:arg bool chkvals: If True (default False) and all values are given,
this function will calculate the disequilibrium and raise a
warning if the results are not within a given tolerance.
:arg float chktol: Tolerance to use when checking values (default
_CHKTOL).
:arg salt0: Initial guess for the seawater salinity in kg/kg. If
None (default) then `_approx_tp` is used.
:type salt0: float or None
:arg dliq0: Initial guess for the seawater liquid water density in
kg/m3. If None (default) then `flu3a._dliq_default` is used.
:type dliq0: float or None
:arg bool chkbnd: If True then warnings are raised when the given
values are valid but outside the recommended bounds (default
False).
:arg bool useext: If False (default) then the salt contribution is
calculated from _GSCOEFFS; if True, from _GSCOEFFS_EXT.
:arg mathargs: Keyword arguments to the root-finder
:func:`_newton <maths3.newton>` (e.g. maxiter, rtol). If None
(default) then no arguments are passed and default parameters
will be used.
:returns: Density in kg/m3.
:raises RuntimeWarning: If the relative disequilibrium is more than
chktol, if chkvals is True and all values are given.
:raises RuntimeWarning: If the equilibrium seawater salinity is
lower than the total parcel salinity.
:Examples:
>>> density(0.035,270.,1e5)
993.156434117
"""
g_p = seaice_g(0,0,1,sisal,temp,pres,salt=salt,dliq=dliq,chkvals=chkvals,
chktol=chktol,salt0=salt0,dliq0=dliq0,chkbnd=chkbnd,useext=useext,
mathargs=mathargs)
rho = g_p**(-1)
return rho | 2,048 |
def prod_non_zero_diag(x):
"""Compute product of nonzero elements from matrix diagonal.
input:
x -- 2-d numpy array
output:
product -- integer number
Not vectorized implementation.
"""
n = len(x)
m = len(x[0])
res = 1
for i in range(min(n, m)):
if (x[i][i] != 0):
res *= x[i][i]
return res | 2,049 |
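A usage sketch for the diagonal product above; zeros on the diagonal are skipped rather than zeroing the result.

x = [[1, 0, 3],
     [4, 5, 6],
     [7, 8, 0]]
# Diagonal is (1, 5, 0); the zero is ignored, so the product is 5.
assert prod_non_zero_diag(x) == 5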
def LoadSparse(inputfile, verbose=False):
"""Loads a sparse matrix stored as npz file to its dense represent."""
npzfile = np.load(inputfile)
mat = sp.csr_matrix((npzfile['data'], npzfile['indices'],
npzfile['indptr']),
shape=tuple(list(npzfile['shape'])))
if verbose:
print('Loaded sparse matrix from %s of shape %s' % (inputfile,
mat.shape.__str__()))
return mat.todense() | 2,050 |
def _get_sw_loader_logger():
""" Setup a new logger with passed skywalking CLI env vars,
don't import from skywalking, it may not be on sys.path
if user misuses the CLI to run programs out of scope
"""
from logging import getLogger
logger = getLogger('skywalking-loader')
ch = logging.StreamHandler()
formatter = logging.Formatter('%(name)s [%(threadName)s] [%(levelname)s] %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
logger.propagate = False
if os.environ.get('SW_PYTHON_CLI_DEBUG_ENABLED') == 'True': # set from the original CLI runner
logger.setLevel(level=logging.DEBUG)
return logger | 2,051 |
def chain_decomposition(G, root=None):
"""Return the chain decomposition of a graph.
The *chain decomposition* of a graph with respect a depth-first
search tree is a set of cycles or paths derived from the set of
fundamental cycles of the tree in the following manner. Consider
each fundamental cycle with respect to the given tree, represented
as a list of edges beginning with the nontree edge oriented away
from the root of the tree. For each fundamental cycle, if it
overlaps with any previous fundamental cycle, just take the initial
non-overlapping segment, which is a path instead of a cycle. Each
cycle or path is called a *chain*. For more information, see [1]_.
Parameters
----------
G : undirected graph
root : node (optional)
A node in the graph `G`. If specified, only the chain
decomposition for the connected component containing this node
will be returned. This node indicates the root of the depth-first
search tree.
Yields
------
chain : list
A list of edges representing a chain. There is no guarantee on
the orientation of the edges in each chain (for example, if a
chain includes the edge joining nodes 1 and 2, the chain may
include either (1, 2) or (2, 1)).
Raises
------
NodeNotFound
If `root` is not in the graph `G`.
Notes
-----
The worst-case running time of this implementation is linear in the
number of nodes and number of edges [1]_.
References
----------
.. [1] Jens M. Schmidt (2013). "A simple test on 2-vertex-
and 2-edge-connectivity." *Information Processing Letters*,
113, 241–244. Elsevier. <https://doi.org/10.1016/j.ipl.2013.01.016>
"""
def _dfs_cycle_forest(G, root=None):
"""Builds a directed graph composed of cycles from the given graph.
`G` is an undirected simple graph. `root` is a node in the graph
from which the depth-first search is started.
This function returns both the depth-first search cycle graph
(as a :class:`~cynetworkx.DiGraph`) and the list of nodes in
depth-first preorder. The depth-first search cycle graph is a
directed graph whose edges are the edges of `G` oriented toward
the root if the edge is a tree edge and away from the root if
the edge is a non-tree edge. If `root` is not specified, this
performs a depth-first search on each connected component of `G`
and returns a directed forest instead.
If `root` is not in the graph, this raises :exc:`KeyError`.
"""
# Create a directed graph from the depth-first search tree with
# root node `root` in which tree edges are directed toward the
# root and nontree edges are directed away from the root. For
# each node with an incident nontree edge, this creates a
# directed cycle starting with the nontree edge and returning to
# that node.
#
# The `parent` node attribute stores the parent of each node in
# the DFS tree. The `nontree` edge attribute indicates whether
# the edge is a tree edge or a nontree edge.
#
# We also store the order of the nodes found in the depth-first
# search in the `nodes` list.
H = nx.DiGraph()
nodes = []
for u, v, d in nx.dfs_labeled_edges(G, source=root):
if d == 'forward':
# `dfs_labeled_edges()` yields (root, root, 'forward')
# if it is beginning the search on a new connected
# component.
if u == v:
H.add_node(v, parent=None)
nodes.append(v)
else:
H.add_node(v, parent=u)
H.add_edge(v, u, nontree=False)
nodes.append(v)
# `dfs_labeled_edges` considers nontree edges in both
# orientations, so we need to not add the edge if its
# other orientation has already been added.
elif d == 'nontree' and v not in H[u]:
H.add_edge(v, u, nontree=True)
else:
# Do nothing on 'reverse' edges; we only care about
# forward and nontree edges.
pass
return H, nodes
def _build_chain(G, u, v, visited):
"""Generate the chain starting from the given nontree edge.
`G` is a DFS cycle graph as constructed by
:func:`_dfs_cycle_graph`. The edge (`u`, `v`) is a nontree edge
that begins a chain. `visited` is a set representing the nodes
in `G` that have already been visited.
This function yields the edges in an initial segment of the
fundamental cycle of `G` starting with the nontree edge (`u`,
`v`) that includes all the edges up until the first node that
appears in `visited`. The tree edges are given by the 'parent'
node attribute. The `visited` set is updated to add each node in
an edge yielded by this function.
"""
while v not in visited:
yield u, v
visited.add(v)
u, v = v, G.nodes[v]['parent']
yield u, v
# Create a directed version of H that has the DFS edges directed
# toward the root and the nontree edges directed away from the root
# (in each connected component).
H, nodes = _dfs_cycle_forest(G, root)
# Visit the nodes again in DFS order. For each node, and for each
# nontree edge leaving that node, compute the fundamental cycle for
# that nontree edge starting with that edge. If the fundamental
# cycle overlaps with any visited nodes, just take the prefix of the
# cycle up to the point of visited nodes.
#
# We repeat this process for each connected component (implicitly,
# since `nodes` already has a list of the nodes grouped by connected
# component).
visited = set()
for u in nodes:
visited.add(u)
# For each nontree edge going out of node u...
edges = ((u, v) for u, v, d in H.out_edges(u, data='nontree') if d)
for u, v in edges:
# Create the cycle or cycle prefix starting with the
# nontree edge.
chain = list(_build_chain(H, u, v, visited))
yield chain | 2,052 |
def test_init_params(
data,
creator,
dtype,
numpy_dtype,
ndmin: int,
copy: Optional[bool],
):
"""Check for bad combinations of init parameters leading to unexpected behavior"""
elements = (
(lambda x, y: st.floats(x, y, width=8 * np.dtype(numpy_dtype).itemsize))
if np.issubdtype(numpy_dtype, np.floating)
else st.integers
)
array_strat_args = dict(
shape=hnp.array_shapes(max_side=3, max_dims=5),
dtype=numpy_dtype,
elements=elements(-100, 100),
)
a = data.draw(
hnp.arrays(**array_strat_args) | tensors(**array_strat_args),
label="a",
)
arr = np.array(a, dtype=dtype, ndmin=ndmin)
constant = data.draw(valid_constant_arg(arr.dtype), label="constant")
tensor = Tensor(
a,
_creator=creator,
constant=constant,
dtype=dtype,
ndmin=ndmin,
copy=copy,
)
if constant is None:
constant = issubclass(tensor.dtype.type, np.integer)
assert tensor.creator is creator
assert tensor.constant is constant
assert tensor.dtype is arr.dtype
assert_equal(tensor.data, arr)
assert tensor.grad is None
assert tensor.base is None
assert tensor.ndim >= ndmin | 2,053 |
def managed(flag=True):
"""
Puts the transaction manager into a manual state: managed transactions have
to be committed explicitly by the user. If you switch off transaction
management and there is a pending commit/rollback, the data will be
committed.
"""
thread_ident = thread.get_ident()
top = state.get(thread_ident, None)
if top:
top[-1] = flag
if not flag and is_dirty():
connection._commit()
set_clean()
else:
raise TransactionManagementError("This code isn't under transaction management") | 2,054 |
def dup_lcm(f, g, K):
"""Computes polynomial LCM of `f` and `g` in `K[x]`. """
if K.has_Field or not K.is_Exact:
return dup_ff_lcm(f, g, K)
else:
return dup_rr_lcm(f, g, K) | 2,055 |
def test_dice_rolln(dice, n):
"""Test rolling many dice."""
rolls = dice.rolln(n)
assert len(rolls) == n
assert isinstance(rolls, list)
assert isinstance(rolls[0], int)
# Sum between n and s*n
assert sum(rolls) <= n * dice.sides
assert sum(rolls) >= n * 1 | 2,056 |
def alignment_guide(path, lut):
"""
Generate image alignment guide and save to disk.
"""
image = Image.new('RGBA',(lut.image_size, lut.image_size),(255,0,0,0))
draw = ImageDraw.Draw(image)
for i in range(lut.swatch_count):
image.putpixel(lut.cell_center(i),(255,0,0))
draw.rectangle(lut.cell_bounds(i), outline=(0,0,255,255))
draw.rectangle(lut.cell_center(i, 6), outline=(0,255,0))
image.save(path,'PNG') | 2,057 |
def add_tag_translation(request, tag_id, lang, text):
"""Adds a translation to the given Tag."""
tag = get_object_or_404(Tag, id=tag_id)
text = urllib.unquote(text)
data = {}
langs = tag.site.get_languages(lang)
if len(langs) == 0:
data['error'] = 'No languages defined'
else:
TagTranslation.objects.create(tag=tag, language=langs[0], text=text)
return HttpResponse(json.dumps(data), content_type='application/json') | 2,058 |
def etc_hosts_update(output_file=None, **kwargs):
"""Update /etc/hosts with all nodes available in configured projects
:param output_file: destination file, default is /etc/hosts
"""
update_etc_hosts_file(etc_hosts_generator(**kwargs), output_file) | 2,059 |
def simpson(x, with_replacement=False):
"""For computing simpson index directly from counts (or frequencies, if with_replacement=True)
Parameters
----------
x :
with_replacement :
(Default value = False)
Returns
-------
"""
total = np.sum(x)
if with_replacement:
return np.sum([(y / total) * (y / total) for y in x])
else:
return np.sum([(y / total) * ((y - 1) / (total - 1)) for y in x]) | 2,060 |
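Two worked values for the Simpson index above, using two equally sized categories.

counts = [5, 5]
# With replacement: (5/10)**2 + (5/10)**2 = 0.5
assert abs(simpson(counts, with_replacement=True) - 0.5) < 1e-12
# Without replacement: 2 * (5/10) * (4/9) = 4/9
assert abs(simpson(counts) - 4 / 9) < 1e-12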
async def setup_automation(hass, device_id, trigger_type):
"""Set up an automation trigger for testing triggering."""
return await async_setup_component(
hass,
AUTOMATION_DOMAIN,
{
AUTOMATION_DOMAIN: [
{
"trigger": {
CONF_PLATFORM: "device",
CONF_DOMAIN: DOMAIN,
CONF_DEVICE_ID: device_id,
CONF_TYPE: trigger_type,
},
"action": {
"service": "test.automation",
"data": DATA_MESSAGE,
},
},
]
},
) | 2,061 |
def generate_html_tutor_constraints(sai):
"""
Given an SAI, this finds a set of constraints for the SAI, so it doesn't
fire in nonsensical situations.
"""
constraints = set()
args = get_vars(sai)
# selection constraints, you can only select something that has an
# empty string value.
if len(args) == 0:
return frozenset()
# get action
action = sai[2]
if action == "ButtonPressed":
# Constrain the selection to be of type button
# constraints.add(('type', selection, 'MAIN::button'))
selection = args[0]
constraints.add(('id', selection, 'done'))
else:
# print("SAI", sai)
# print("ARGS", args)
selection = args[0]
constraints.add(('contentEditable', selection, True))
# constraints.add(('value', selection, '?selection-value'))
# constraints.add((is_empty_string, '?selection-value'))
# value constraints, don't select empty values
for i, arg in enumerate(args[1:]):
constraints.add(('value', arg, '?foa%ival' % (i+1)))
constraints.add((is_not_empty_string, '?foa%ival' % (i+1)))
# constraints.add(('type', a, 'MAIN::cell'))
return frozenset(constraints) | 2,062 |
def main():
"""Entry point."""
flags = _parse_args(sys.argv[1:])
logging.basicConfig(level=flags.log_level.upper())
run_experiment(flags) | 2,063 |
def run_test(request):
"""
Run test cases.
:param request:
:return:
"""
kwargs = {
"failfast": False,
}
runner = HttpRunner(**kwargs)
# path to the test case directory
test_case_dir_path = os.path.join(os.getcwd(), "suite")
test_case_dir_path = os.path.join(test_case_dir_path, get_time_stamp())
if request.is_ajax():
kwargs = json.loads(request.body.decode('utf-8'))
test_id = kwargs.pop('id')
base_url = kwargs.pop('env_name')
test_type = kwargs.pop('type')
run_test_by_type(test_id, base_url, test_case_dir_path, test_type)
report_name = kwargs.get('report_name', None)
main_hrun.delay(test_case_dir_path, report_name)
return HttpResponse('Test cases are running; please check the report later (reports are named with a timestamp by default).')
else:
test_id = request.POST.get('id')
base_url = request.POST.get('env_name')
test_type = request.POST.get('type', 'test')
run_test_by_type(test_id, base_url, test_case_dir_path, test_type)
runner.run(test_case_dir_path)
shutil.rmtree(test_case_dir_path)
runner.summary = timestamp_to_datetime(runner.summary, data_type=False)
return render_to_response('report_template.html', runner.summary) | 2,064 |
def set_up_CB(inp_d):
"""
Setting up directories and altered files
In this function we create all the needed directories and transfer
the files that were changed in order to get the program to work in KBase
into the PaperBLAST directory.
inp_d (input dict) must contain the following keys:
pb_home
genome_protein_filepath
genome_nucleotide_filepath
genome_dir
"""
pb_home = inp_d['pb_home']
tmp_dir = os.path.join(pb_home, "tmp")
fbrowse_data_dir = os.path.join(pb_home,"fbrowse_data")
data_dir = os.path.join(pb_home, "data")
private_dir = os.path.join(pb_home,"private")
genome_dir = os.path.join(pb_home, inp_d['genome_dir'])
blast_dir = os.path.join(pb_home, "bin/blast")
alt_file_dir = '/kb/module/lib/curated_blast/altered_files'
# Creating Directories in PaperBLAST directory
for d in [tmp_dir, fbrowse_data_dir, data_dir, private_dir, genome_dir,
blast_dir]:
if not os.path.exists(d):
os.mkdir(d)
else:
logging.warning("Directory {} already exists".format(d))
# Copying Files Over to PaperBLAST directory
pb_bin = os.path.join(pb_home, "bin")
for base_file in ["clear_dir.py","fastx_findorfs.py", "main.py", "usearch"]:
copyfile(os.path.join(alt_file_dir, base_file ),os.path.join(pb_bin, base_file))
os.chmod(os.path.join(pb_bin, base_file), 0o111)
# Copying the main CGI file
copyfile(os.path.join(alt_file_dir, 'dbg_genomeSearch.cgi' ),
os.path.join(pb_home,"cgi/dbg_genomeSearch.cgi"))
os.chmod(os.path.join(pb_home,"cgi/dbg_genomeSearch.cgi"), 0o111)
# Changing file mode
for fn in ["bl2seq", "blast/fastacmd", "blast/blastall", "blast/formatdb"]:
os.chmod(os.path.join(pb_bin, fn), 0o111)
#Copying Altered PaperBLAST files to appropriate directories within PaperBLAST
logging.debug("Altered files Dir: ")
new_files = os.listdir(alt_file_dir)
logging.debug(new_files)
#Removing current FetchAssembly (from github) and replacing with newer version
os.unlink(os.path.join(pb_home, "lib/FetchAssembly.pm"))
copyfile(os.path.join(alt_file_dir, 'FetchAssembly.pm'), os.path.join(pb_home, "lib/FetchAssembly.pm"))
os.chmod(os.path.join(pb_home, "lib/FetchAssembly.pm"), 0o111)
#We copy the genome files to their location within PaperBLAST
genome_p_location_pb = os.path.join(genome_dir,"faa")
genome_n_location_pb = os.path.join(genome_dir, "fna")
copyfile(inp_d['genome_protein_filepath'], genome_p_location_pb)
copyfile(inp_d['genome_nucleotide_filepath'], genome_n_location_pb)
#CODE
#We copy the reference data in the Docker data directory
data_dir = "/data"
pb_data_dir = os.path.join(pb_home, "data")
for f in os.listdir(data_dir):
# We only copy files and not directories
if os.path.isfile(os.path.join(data_dir,f)):
copyfile(os.path.join(data_dir, f),os.path.join(pb_data_dir,f))
logging.info("Succesfully completed creation of dirs and transfer of files")
return None | 2,065 |
def info(email):
"""Information about a specific email."""
with db_session() as db:
user = db.query(User).filter(User.email == email).first()
if user:
return [user.email, user.api_key, user.grabs]
else:
return None | 2,066 |
def TetrahedralGraph():
"""
Returns a tetrahedral graph (with 4 nodes).
A tetrahedron is a 4-sided triangular pyramid. The tetrahedral
graph corresponds to the connectivity of the vertices of the
tetrahedron. This graph is equivalent to a wheel graph with 4 nodes
and also a complete graph on four nodes. (See examples below).
PLOTTING: The tetrahedral graph should be viewed in 3 dimensions.
We chose to use the default spring-layout algorithm here, so that
multiple iterations might yield a different point of reference for
the user. We hope to add rotatable, 3-dimensional viewing in the
future. In such a case, a string argument will be added to select
the flat spring-layout over a future implementation.
EXAMPLES: Construct and show a Tetrahedral graph
::
sage: g = graphs.TetrahedralGraph()
sage: g.show() # long time
The following example requires networkx::
sage: import networkx as NX
Compare this Tetrahedral, Wheel(4), Complete(4), and the
Tetrahedral plotted with the spring-layout algorithm below in a
Sage graphics array::
sage: tetra_pos = graphs.TetrahedralGraph()
sage: tetra_spring = Graph(NX.tetrahedral_graph())
sage: wheel = graphs.WheelGraph(4)
sage: complete = graphs.CompleteGraph(4)
sage: g = [tetra_pos, tetra_spring, wheel, complete]
sage: j = []
sage: for i in range(2):
....: n = []
....: for m in range(2):
....: n.append(g[i + m].plot(vertex_size=50, vertex_labels=False))
....: j.append(n)
sage: G = graphics_array(j)
sage: G.show() # long time
"""
import networkx
G = networkx.tetrahedral_graph()
return Graph(G, name="Tetrahedron", pos =
{ 0 : (0, 0),
1 : (0, 1),
2 : (cos(3.5*pi/3), sin(3.5*pi/3)),
3 : (cos(5.5*pi/3), sin(5.5*pi/3))}
) | 2,067 |
def build_resnet_v1(input_shape, depth, num_classes, pfac, use_frn=False,
use_internal_bias=True):
"""Builds ResNet v1.
Args:
input_shape: tf.Tensor.
depth: ResNet depth.
num_classes: Number of output classes.
pfac: priorfactory.PriorFactory class.
use_frn: if True, then use Filter Response Normalization (FRN) instead of
batchnorm.
use_internal_bias: if True, use biases in all Conv layers.
If False, only use a bias in the final Dense layer.
Returns:
tf.keras.Model.
"""
def resnet_layer(inputs,
filters,
kernel_size=3,
strides=1,
activation=None,
pfac=None,
use_frn=False,
use_bias=True):
"""2D Convolution-Batch Normalization-Activation stack builder.
Args:
inputs: tf.Tensor.
filters: Number of filters for Conv2D.
kernel_size: Kernel dimensions for Conv2D.
strides: Stride dimensions for Conv2D.
activation: tf.keras.activations.Activation.
pfac: prior.PriorFactory object.
use_frn: if True, use Filter Response Normalization (FRN) layer
use_bias: if True, use biases in Conv layers.
Returns:
tf.Tensor.
"""
x = inputs
logging.info('Applying conv layer.')
x = pfac(tf.keras.layers.Conv2D(
filters,
kernel_size=kernel_size,
strides=strides,
padding='same',
kernel_initializer='he_normal',
use_bias=use_bias))(x)
if use_frn:
x = pfac(frn.FRN())(x)
else:
x = tf.keras.layers.BatchNormalization()(x)
if activation is not None:
x = tf.keras.layers.Activation(activation)(x)
return x
# Main network code
num_res_blocks = (depth - 2) // 6
filters = 16
if (depth - 2) % 6 != 0:
raise ValueError('depth must be 6n+2 (e.g. 20, 32, 44).')
logging.info('Starting ResNet build.')
inputs = tf.keras.layers.Input(shape=input_shape)
x = resnet_layer(inputs,
filters=filters,
activation='relu',
pfac=pfac,
use_frn=use_frn,
use_bias=use_internal_bias)
for stack in range(3):
for res_block in range(num_res_blocks):
logging.info('Starting ResNet stack #%d block #%d.', stack, res_block)
strides = 1
if stack > 0 and res_block == 0: # first layer but not first stack
strides = 2 # downsample
y = resnet_layer(x,
filters=filters,
strides=strides,
activation='relu',
pfac=pfac,
use_frn=use_frn,
use_bias=use_internal_bias)
y = resnet_layer(y,
filters=filters,
activation=None,
pfac=pfac,
use_frn=use_frn,
use_bias=use_internal_bias)
if stack > 0 and res_block == 0: # first layer but not first stack
# linear projection residual shortcut connection to match changed dims
x = resnet_layer(x,
filters=filters,
kernel_size=1,
strides=strides,
activation=None,
pfac=pfac,
use_frn=use_frn,
use_bias=use_internal_bias)
x = tf.keras.layers.add([x, y])
if use_frn:
x = pfac(frn.TLU())(x)
else:
x = tf.keras.layers.Activation('relu')(x)
filters *= 2
# v1 does not use BN after last shortcut connection-ReLU
x = tf.keras.layers.AveragePooling2D(pool_size=8)(x)
x = tf.keras.layers.Flatten()(x)
x = pfac(tf.keras.layers.Dense(
num_classes,
kernel_initializer='he_normal'))(x)
logging.info('ResNet successfully built.')
return tf.keras.models.Model(inputs=inputs, outputs=x) | 2,068 |
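# Hedged usage sketch (not from the original source): build a ResNet-20 for
# 32x32 RGB inputs. The real `pfac` argument comes from a priorfactory module
# that is not shown here; the identity stand-in below is an assumption made
# only so the call is concrete.
def identity_pfac(layer):
    # Stand-in prior factory that simply returns the wrapped layer unchanged.
    return layer

model = build_resnet_v1(input_shape=(32, 32, 3), depth=20, num_classes=10,
                        pfac=identity_pfac)
model.summary()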
def lr_recover_l1(invecs, intensities, nonneg=True, **kwargs):
"""Computes the low-rank matrix reconstruction using l1-minimisation
.. math::
\min_Z \sum_i \vert \langle a_i| Z | a_i \rangle - y_i \vert \\
\mathrm{s.t.} Z \ge 0
where :math:`a_i` are the input vectors and :math:`y_i` are the measured
intensities.
For the arguments not listed see :func:`recover`
:param bool nonneg: Enforce the constraint Z >= 0 (default True)
:param kwargs: Additional arguments passed to `cvx.Problem.solve`
:returns: array of shape (dim, dim); Low-rank matrix approximation for
given measurements
"""
dim = invecs.shape[1]
# we have to manually convert the convex program to real form since cvxpy
# does not support complex programs
z, mat_cons = _semidef_complex_as_real(dim) if nonneg else \
_hermitian_as_real(dim)
invecs_real = np.concatenate((invecs.real, invecs.imag), axis=1)
obj = cvx.Minimize(sum(cvx.abs(cvx.quad_form(a, z) - y)
for a, y in zip(invecs_real, intensities)))
prob = cvx.Problem(obj, mat_cons)
prob.solve(**kwargs)
if prob.status not in ['optimal', 'optimal_inaccurate']:
raise RuntimeError("Optimization did not converge: " + prob.status)
return z.value[:dim, :dim] + 1.j * z.value[dim:, :dim] | 2,069 |
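# Hedged usage sketch (not from the original source): recover a random rank-1
# "ground truth" matrix from noiseless intensity measurements. Assumes numpy
# is available and that cvxpy plus a suitable SDP solver are installed.
dim, n_meas = 4, 40
rng = np.random.RandomState(0)
psi = rng.randn(dim) + 1j * rng.randn(dim)
truth = np.outer(psi, psi.conj())                          # rank-1 target
a = rng.randn(n_meas, dim) + 1j * rng.randn(n_meas, dim)   # measurement vectors
y = np.real(np.einsum('ij,jk,ik->i', a.conj(), truth, a))  # intensities <a_i|Z|a_i>
z_hat = lr_recover_l1(a, y)
print(np.linalg.norm(z_hat - truth))  # should be small for exact measurements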
def fetch_function_names() -> str:
"""Returns a list of cloud function names"""
functions = fetch_functions_in_json()
logs.debug(f"Fetched {len(functions)} cloud functions")
return "Temp holder until I figure out how to get a function's name" | 2,070 |
def fieldset_experiment(objparent):
"""
:param objparent:
"""
objparent.id()
objparent.assigned_to_id()
objparent.conclusion()
objparent.created_at()
objparent.description()
# objparent.due_on() # failed this field
objparent.effort_actual()
objparent.effort_estimated()
objparent.hypothesis()
objparent.notes()
objparent.observation()
# objparent.started_on() # failed this field
objparent.title()
objparent.updated_at()
# research_plan
# experiment_comments
# experiment_results
# training_runs
# research_plan | 2,071 |
def start_view_data(trans_id):
"""
This method is used to execute a query using an asynchronous connection.
Args:
trans_id: unique transaction id
"""
limit = -1
# Check the transaction and connection status
status, error_msg, conn, trans_obj, session_obj = \
check_transaction_status(trans_id)
if error_msg == ERROR_MSG_TRANS_ID_NOT_FOUND:
return make_json_response(success=0, errormsg=error_msg,
info='DATAGRID_TRANSACTION_REQUIRED',
status=404)
# The connection attached to the transaction id holds the cursor with the
# query result, so we cannot reuse it to execute another query without
# losing that result; use the default connection as the current connection.
try:
manager = get_driver(PG_DEFAULT_DRIVER).connection_manager(
trans_obj.sid)
default_conn = manager.connection(did=trans_obj.did)
except (ConnectionLost, SSHTunnelConnectionLost) as e:
raise
except Exception as e:
current_app.logger.error(e)
return internal_server_error(errormsg=str(e))
# Connect to the Server if not connected.
if not default_conn.connected():
status, msg = default_conn.connect()
if not status:
return make_json_response(
data={'status': status, 'result': "{}".format(msg)}
)
if status and conn is not None and \
trans_obj is not None and session_obj is not None:
# set fetched row count to 0 as we are executing query again.
trans_obj.update_fetched_row_cnt(0)
# Fetch the sql and primary_keys from the object
sql = trans_obj.get_sql(default_conn)
pk_names, primary_keys = trans_obj.get_primary_keys(default_conn)
session_obj['command_obj'] = pickle.dumps(trans_obj, -1)
has_oids = False
if trans_obj.object_type == 'table':
# Fetch OIDs status
has_oids = trans_obj.has_oids(default_conn)
# Fetch the applied filter.
filter_applied = trans_obj.is_filter_applied()
# Fetch the limit for the SQL query
limit = trans_obj.get_limit()
can_edit = trans_obj.can_edit()
can_filter = trans_obj.can_filter()
# Store the primary keys to the session object
session_obj['primary_keys'] = primary_keys
# Store the OIDs status into session object
session_obj['has_oids'] = has_oids
update_session_grid_transaction(trans_id, session_obj)
# Execute sql asynchronously
status, result = conn.execute_async(sql)
else:
status = False
result = error_msg
filter_applied = False
can_edit = False
can_filter = False
sql = None
return make_json_response(
data={
'status': status, 'result': result,
'filter_applied': filter_applied,
'limit': limit, 'can_edit': can_edit,
'can_filter': can_filter, 'sql': sql,
'info_notifier_timeout': blueprint.info_notifier_timeout.get()
}
) | 2,072 |
def fft_resize(images, resize=False, new_size=None):
"""Function for applying DFT and resizing.
This function takes in an array of images, applies the 2-d fourier transform
and resizes them according to new_size, keeping the frequencies that overlap
between the two sizes.
Args:
images: a numpy array with shape
[batch_size, height, width, num_channels]
resize: boolean, whether or not to resize
new_size: a tuple (size, size), with height and width the same
Returns:
im_fft_downsampled: a numpy array with shape
[batch_size, (new) height, (new) width, num_channels]
"""
assert len(images.shape) == 4, ("expecting images to be "
"[batch_size, height, width, num_channels]")
im_complex = images.astype("complex64")
im_fft = np.fft.fft2(im_complex, axes=(1, 2))
# resizing images
if resize:
# get fourier frequencies to threshold
assert (im_fft.shape[1] == im_fft.shape[2]), ("Need images to have same "
"height and width")
# downsample by threshold
width = im_fft.shape[2]
new_width = new_size[0]
freqs = np.fft.fftfreq(width, d=1.0 / width)
idxs = np.flatnonzero((freqs >= -new_width / 2.0) & (freqs <
new_width / 2.0))
im_fft_downsampled = im_fft[:, :, idxs, :][:, idxs, :, :]
else:
im_fft_downsampled = im_fft
return im_fft_downsampled | 2,073 |
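# Hedged usage sketch (not from the original source): downsample a batch of
# 8 random 32x32 RGB images to 16x16 in the frequency domain.
batch = np.random.rand(8, 32, 32, 3)
spectrum = fft_resize(batch, resize=True, new_size=(16, 16))
print(spectrum.shape)  # expected: (8, 16, 16, 3)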
def install_build(zipfile_name, ignore_if_exists=False):
"""
Install server build on local drive. 'zipfile_name' is the name of the
zip file in 'BSD_TEMP_FOLDER'.
The function returns the folder name of the battleserver build image.
If 'ignore_if_exists' is True, then the function returns immediately if the
build is already installed on local drive.
Details:
The build is installed into a subfolder in BSD_BATTLESERVER_FOLDER using
the same name as the zip file (sans the .zip ending). The contents of the
zip file are first extracted to a temporary folder, then that folder is
renamed to the final name. This is to ensure an atomic publishing of the
build. If the target folder already exists, it will be removed first.
"""
head, tail = os.path.split(zipfile_name)
image_name, ext = os.path.splitext(tail)
# The final destination of the build
dest_folder = os.path.join(config.BSD_BATTLESERVER_FOLDER, image_name)
dest_folder = os.path.abspath(dest_folder)
if ignore_if_exists and os.path.exists(dest_folder):
return image_name
zipfile_path = os.path.join(config.BSD_TEMP_FOLDER, zipfile_name)
zipfile_path = os.path.abspath(zipfile_path)
if not os.path.exists(zipfile_path):
raise RuntimeError("Zipfile '{}' not found!".format(zipfile_path))
with ZipFile(zipfile_path) as zipfile:
update_state(
state='PROGRESS',
meta={'file': tail, 'step': 'unzipping'},
)
# Extract to a staging folder
staging_folder = dest_folder + ".temp"
try:
logger.info("Unzipping %s to %s", zipfile_path, staging_folder)
zipfile.extractall(staging_folder)
# Publish the build
update_state(
state='PROGRESS',
meta={'file': tail, 'step': 'publishing'},
)
if os.path.exists(dest_folder):
logger.info("Removing previous install at %s", dest_folder)
shutil.rmtree(dest_folder, ignore_errors=False)
logger.info("Publishing %s to %s", staging_folder, dest_folder)
os.rename(staging_folder, dest_folder)
finally:
# Remove staging folder, if needed.
if os.path.exists(staging_folder):
logger.debug("Removing staging folder %s", staging_folder)
shutil.rmtree(staging_folder)
return image_name | 2,074 |
def vaccine_percentage(df):
"""Plot the percentage of the vaccinated population over time.
Args:
df (DataFrame): Requires data returned by get_uk_data
or get_national_data methods
Returns:
Plot of total percentage of population vaccinated
"""
df['date'] = df['date'].astype('datetime64[ns]')
plt.figure(figsize=(14, 7))
plot1 = sns.lineplot(x='date', y='vac_total_perc', data=df)
plt.ylim(0, 100)
plot1.set_xlabel("Covid pandemic, up to date", fontsize=12)
plot1.set_ylabel("Percentage", fontsize=12)
plot1.set_title('Percentage of the vaccinated population over time',
fontsize=14)
# print(plot1)
| 2,075 |
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
"""3x3 convolution with padding"""
return nn.Sequential(nn.ReplicationPad2d(1), nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=0, groups=groups, bias=False, dilation=dilation)) | 2,076 |
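# Hedged usage sketch (not from the original source): the replication-padded
# 3x3 block keeps spatial size at stride 1 and halves it at stride 2.
import torch

block = conv3x3(in_planes=16, out_planes=32, stride=2)
x = torch.randn(1, 16, 64, 64)
print(block(x).shape)  # expected: torch.Size([1, 32, 32, 32])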
def get_inbound_layers_without_params(layer):
"""Return inbound layers.
Parameters
----------
layer: Keras.layers
A Keras layer.
Returns
-------
: list[Keras.layers]
List of inbound layers.
"""
return [layer for layer in get_inbound_layers(layer)
if not has_weights(layer)] | 2,077 |
def orders_matchresults(symbol, types=None, start_date=None, end_date=None, _from=None, direct=None, size=None):
"""
:param symbol:
:param types: allowed values {buy-market: market buy, sell-market: market sell, buy-limit: limit buy, sell-limit: limit sell}
:param start_date:
:param end_date:
:param _from:
:param direct: allowed values {prev: forward, next: backward}
:param size:
:return:
"""
params = {'symbol': symbol}
if types:
params['types'] = types
if start_date:
params['start-date'] = start_date
if end_date:
params['end-date'] = end_date
if _from:
params['from'] = _from
if direct:
params['direct'] = direct
if size:
params['size'] = size
url = '/v1/order/matchresults'
return api_key_get(params, url) | 2,078 |
def precision(y_true, y_pred):
"""Precision metric.
Only computes a batch-wise average of precision.
Computes the precision, a metric for multi-label classification of
how many selected items are relevant.
"""
true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
precision = true_positives / (predicted_positives + K.epsilon())
return precision | 2,079 |
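# Hedged usage sketch (not from the original source): evaluate the metric on a
# small batch of binary labels, assuming the Keras backend is available as K
# (imported here from tf.keras; the original module may import it differently).
import numpy as np
from tensorflow.keras import backend as K

y_true = K.constant(np.array([[1.0], [0.0], [1.0], [1.0]]))
y_pred = K.constant(np.array([[0.9], [0.8], [0.2], [0.7]]))
print(float(precision(y_true, y_pred)))  # 2 true positives / 3 predicted positives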
def check_error_in_status(retcode, statuses):
"""Check possible errors in status.
Parameters
----------
retcode : int
Return code of an MPI function
statuses : MPI_Status[]
Array of MPI_Status objects.
Raises
------
MPIStatusErrors
If there are errors in status
MPIError
For any other errors
"""
if retcode == lib.MPI_SUCCESS:
return
if retcode == lib.MPI_ERR_IN_STATUS:
erridxs, errcodes = [], []
for idx in range(len(statuses)):
if statuses[idx].MPI_ERROR != lib.MPI_SUCCESS:
erridxs.append(idx)
errcodes.append(statuses[idx].MPI_ERROR)
raise MPIStatusErrors(erridxs, errcodes)
raise MPIError(retcode) | 2,080 |
def _is_grpc_unary_method(attr):
"""Check if attribute is a grpc method that returns unary."""
return isinstance(attr, (grpc.UnaryUnaryMultiCallable, grpc.StreamUnaryMultiCallable)) | 2,081 |
def addPhoto(fileName, personName):
"""
Load a supplied photo and add detected facial encoding to the database
"""
#Check if image is a jpg
if (fileName[-4:] != ".jpg"):
print("\n[!] File extenstion must be .jpg!\n")
return
#Check image exists
if (not os.path.isfile(fileName)):
print("\n[!] File does not exist!\n")
return
#Check no illegal characters in file name
for c in ILLEGAL_FILE_NAMES:
if (c in personName):
print("\n[!] Provided name contains an illegal argument\n")
return
#Load image
image = face_recognition.load_image_file(fileName)
#Use the name in the filename as the identity key
identity = os.path.splitext(os.path.basename(fileName))[0]
#Get the face location
locationsHog = hogDetectFaceLocations(image)
locationsHaar = haarDetectFaceLocations(image)
#Get the face encoding
encodingsHaar = face_recognition.face_encodings(image, locationsHaar)
encodingsHog = face_recognition.face_encodings(image, locationsHog)
#check if exactly one face is in the photo
if ((len(encodingsHaar) == 0) or (len(encodingsHog) == 0)):
print("\n[!] No face detected in the provided photo\n")
return
elif ((len(encodingsHaar) > 1) or (len(encodingsHog) > 1)):
print("\n[!] More than one face detected in the provided photo\n")
return
#Set path to respective dataset
directoryToAddTo = DATABASE_PATH + personName
#Look for directory
exists = False
for subdir, dirs, files in os.walk(DATABASE_PATH):
if (subdir == directoryToAddTo):
exists = True
#If directory doesn't exist, make it
if (not exists):
os.mkdir(directoryToAddTo)
#Save data to file
np.savetxt((directoryToAddTo + "/" + identity + "Haar.txt"),
encodingsHaar[0])
np.savetxt((directoryToAddTo + "/" + identity + "Hog.txt"),
encodingsHog[0])
print("\n[*] Face successfully added!\n") | 2,082 |
def gridcorner(
D,
xyz,
labels=None,
projection="max_slice",
max_n_ticks=4,
factor=2,
whspace=0.05,
showDvals=True,
lines=None,
label_offset=0.4,
**kwargs
):
"""Generate a grid corner plot
Parameters
----------
D: array_like
N-dimensional data to plot, `D.shape` should be `(n1, n2,..., nN)`,
where `N`, is the number of grid points along dimension `i`.
xyz: list
List of 1-dimensional arrays of coordinates. `xyz[i]` should have
length `N` (see help for `D`).
labels: list
N+1 length list of labels; the first N correspond to the coordinates
labels, the final label is for the dependent (D) variable.
projection: str or func
If a string, one of `{"log_mean", "max_slice"}` to use inbuilt functions
to calculate either the logged mean or maximum slice projection. Else
a function to use for projection, must take an `axis` argument. Default
is `gridcorner.max_slice()`, to project out a slice along the
maximum.
max_n_ticks: int
Number of ticks for x and y axis of the `pcolormesh` plots.
factor: float
Controls the size of one window.
showDvals: bool
If true (default) show the D values on the right-hand-side of the
1D plots and add a label.
lines: array_like
N-dimensional list of values to delineate.
Returns
-------
fig, axes:
The figure and NxN set of axes
"""
ndim = D.ndim
fig, axes = _get_fig_and_axes(ndim, factor, whspace)
if type(projection) == str:
if projection in ["log_mean"]:
projection = log_mean
elif projection in ["max_slice"]:
projection = max_slice
else:
raise ValueError("Projection {} not understood".format(projection))
for i in range(ndim):
projection_1D(
axes[i, i],
xyz[i],
D,
i,
projection=projection,
showDvals=showDvals,
lines=lines,
**kwargs
)
for j in range(ndim):
ax = axes[i, j]
if j > i:
ax.set_frame_on(False)
ax.set_xticks([])
ax.set_yticks([])
continue
ax.get_shared_x_axes().join(axes[ndim - 1, j], ax)
if i < ndim - 1:
ax.set_xticklabels([])
if j < i:
ax.get_shared_y_axes().join(axes[i, i - 1], ax)
if j > 0:
ax.set_yticklabels([])
if j == i:
continue
ax.xaxis.set_major_locator(MaxNLocator(max_n_ticks, prune="upper"))
ax.yaxis.set_major_locator(MaxNLocator(max_n_ticks, prune="upper"))
ax, pax = projection_2D(
ax,
xyz[i],
xyz[j],
D,
i,
j,
lines=lines,
projection=projection,
**kwargs
)
if labels:
for i in range(ndim):
axes[-1, i].set_xlabel(labels[i])
if i > 0:
axes[i, 0].set_ylabel(labels[i])
if showDvals:
axes[i, i].set_ylabel(labels[-1])
for ax in axes[:, 0]:
ax.yaxis.set_label_coords(-label_offset, 0.5)
for ax in axes[-1, :]:
ax.xaxis.set_label_coords(0.5, -label_offset)
return fig, axes | 2,083 |
def read_dig_hpts(fname, unit='mm'):
"""Read historical .hpts mne-c files.
Parameters
----------
fname : str
The filepath of .hpts file.
unit : 'm' | 'cm' | 'mm'
Unit of the positions. Defaults to 'mm'.
Returns
-------
montage : instance of DigMontage
The montage.
See Also
--------
DigMontage
read_dig_captrak
read_dig_dat
read_dig_egi
read_dig_fif
read_dig_polhemus_isotrak
make_dig_montage
Notes
-----
The hpts format digitizer data file may contain comment lines starting
with the pound sign (#) and data lines of the form::
<*category*> <*identifier*> <*x/mm*> <*y/mm*> <*z/mm*>
where:
``<*category*>``
defines the type of points. Allowed categories are: ``hpi``,
``cardinal`` (fiducial), ``eeg``, and ``extra`` corresponding to
head-position indicator coil locations, cardinal landmarks, EEG
electrode locations, and additional head surface points,
respectively.
``<*identifier*>``
identifies the point. The identifiers are usually sequential
numbers. For cardinal landmarks, 1 = left auricular point,
2 = nasion, and 3 = right auricular point. For EEG electrodes,
identifier = 0 signifies the reference electrode.
``<*x/mm*> , <*y/mm*> , <*z/mm*>``
Location of the point, usually in the head coordinate system
in millimeters. If your points are in [m] then unit parameter can
be changed.
For example::
cardinal 2 -5.6729 -12.3873 -30.3671
cardinal 1 -37.6782 -10.4957 91.5228
cardinal 3 -131.3127 9.3976 -22.2363
hpi 1 -30.4493 -11.8450 83.3601
hpi 2 -122.5353 9.2232 -28.6828
hpi 3 -6.8518 -47.0697 -37.0829
hpi 4 7.3744 -50.6297 -12.1376
hpi 5 -33.4264 -43.7352 -57.7756
eeg FP1 3.8676 -77.0439 -13.0212
eeg FP2 -31.9297 -70.6852 -57.4881
eeg F7 -6.1042 -68.2969 45.4939
...
"""
from ._standard_montage_utils import _str_names, _str
_scale = _check_unit_and_get_scaling(unit)
out = np.genfromtxt(fname, comments='#',
dtype=(_str, _str, 'f8', 'f8', 'f8'))
kind, label = _str_names(out['f0']), _str_names(out['f1'])
kind = [k.lower() for k in kind]
xyz = np.array([out['f%d' % ii] for ii in range(2, 5)]).T
xyz *= _scale
del _scale
fid_idx_to_label = {'1': 'lpa', '2': 'nasion', '3': 'rpa'}
fid = {fid_idx_to_label[label[ii]]: this_xyz
for ii, this_xyz in enumerate(xyz) if kind[ii] == 'cardinal'}
ch_pos = {label[ii]: this_xyz
for ii, this_xyz in enumerate(xyz) if kind[ii] == 'eeg'}
hpi = np.array([this_xyz for ii, this_xyz in enumerate(xyz)
if kind[ii] == 'hpi'])
hpi.shape = (-1, 3) # in case it's empty
hsp = np.array([this_xyz for ii, this_xyz in enumerate(xyz)
if kind[ii] == 'extra'])
hsp.shape = (-1, 3) # in case it's empty
return make_dig_montage(ch_pos=ch_pos, **fid, hpi=hpi, hsp=hsp) | 2,084 |
def GetTaskAttr( fname, attrName, defaultVal = None ):
"""Return the specified attribute of a task, or the specified default value if the task does not have this attribute."""
for line in SlurpFile( fname ).rstrip('\n').split('\n'):
arg, val = line.split('\t')
if arg == attrName: return coerceVal( val )
return defaultVal | 2,085 |
def print_board(white, black):
"""Produce GnuGO like output to verify board position.
Args:
white (np.array): array with 1's for white
black (np.array): array with 1's for black
Returns:
str: gnugo like output (without legend)
"""
s = ''
for x in range(19):
for y in range(19):
if white[x][y] == 1:
s += '0 '
elif black[x][y] == 1:
s += 'X '
else:
s += '. '
s += '\n'
return s | 2,086 |
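# Hedged usage sketch (not from the original source): an otherwise empty board
# with one white stone and one black stone.
import numpy as np

white = np.zeros((19, 19), dtype=int)
black = np.zeros((19, 19), dtype=int)
white[3][3] = 1
black[15][15] = 1
print(print_board(white, black))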
def phaseCorrelate(src1, src2, window=None):
""" phaseCorrelate(src1, src2[, window]) -> retval, response """
pass | 2,087 |
def get_repositories_containing_graph(name: str) -> List[str]:
"""Returns the repositories containing a graph with the given graph name.
Parameters
----------------------------
name: str,
The name of the graph to retrieve.
Returns
----------------------------
List of repository names.
"""
return [
repository
for repository in get_available_repositories()
if name in get_available_graphs_from_repository(repository)
] | 2,088 |
def build_query(dct):
"""Build SQL with '?' and value tuples from clause dictionary"""
if (dct):
str_clauses = ''
tpl_values = ()
bln_start = True
#print dct
for str_field, dct_op_val in dct.items():
if (str_field is not None):
if (bln_start):
str_open = ' ('
bln_start = False
else:
str_open = ' and ('
str_clauses = ''.join([str_clauses, str_open, str_field, ' ', \
dct_op_val['logic'], ' ?)'])
var_val = dct_op_val['value']
if (str(var_val).lower() == 'null'):
var_val = None
tpl_values = tpl_values + (var_val, )
else: # simple 1 or 0 (ALL records or NO records) ...
# trumps all other clauses, so lets exit the loop
str_clauses = ' ?'
tpl_values = (dct_op_val['value'],)
break
return (tpl_values, str_clauses)
else:
return ((), " 1") | 2,089 |
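# Hedged usage sketch (not from the original source): two filter clauses turned
# into a parameterized WHERE fragment; clause order follows dict iteration order.
clauses = {
    'age': {'logic': '>=', 'value': 21},
    'city': {'logic': '=', 'value': 'Oslo'},
}
values, where = build_query(clauses)
# where  -> ' (age >= ?) and (city = ?)'
# values -> (21, 'Oslo')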
def load_secrets(fn=".env", prefix="DJANGO_ENV_", **kwargs):
"""Load a list of configuration variables.
Return a dictionary of configuration variables, as loaded from a
configuration file or the environment. Values passed in as
``args`` or as the value in ``kwargs`` will be used as the
configuration variable's default value if one is not found in the
configuration file or environment.
Parameters
----------
fn : string, default=".env"
Configuration filename, defaults to ``.env``. May be in TOML,
JSON, YAML, or BespON formats. Formats will be attempted in this
order.
prefix : string, default="DJANGO_ENV_"
Prefix for environment variables. This prefix will be
prepended to all variable names before searching for them in
the environment.
kwargs : dict, optional
Dictionary with configuration variables as keys and default
values as values.
Returns
-------
dict
A dictionary of configuration variables and their values.
"""
return merge(kwargs, load_file(fn), load_environment(prefix)) | 2,090 |
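# Hedged usage sketch (not from the original source): defaults given as keyword
# arguments are overridden by values found in .env or in DJANGO_ENV_-prefixed
# environment variables; the variable names below are illustrative only.
secrets = load_secrets(DEBUG=False, DATABASE_URL="sqlite:///db.sqlite3")
DEBUG = secrets["DEBUG"]
DATABASE_URL = secrets["DATABASE_URL"]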
def dependency_chain(pkgname):
"""Return an ordered list of dependencies for a package"""
depends = recurse_depends(pkgname)
return set(list(depends.keys()) + list(itertools.chain.from_iterable(depends.values()))) | 2,091 |
def with_input(func: Callable) -> Callable:
"""
Attaches a "source" argument to the command.
"""
return click.argument(
"source", type=click.Path(exists=True), required=True
)(func) | 2,092 |
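# Hedged usage sketch (not from the original source): the decorator adds a
# required SOURCE path argument to a click command.
import click

@click.command()
@with_input
def convert(source):
    """Convert the file at SOURCE."""
    click.echo(f"converting {source}")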
def check_loop_validity(inst_list):
""" Given a list of instructions, check whether they can form a valid loop.
This means checking for anything that could create an infinite loop.
We are also disallowing double loops right now."""
for i, c in enumerate(inst_list):
if c in [5, 6, 16, 25]:
return False, i
return True, -1 | 2,093 |
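# Hedged usage sketch (not from the original source): instruction 16 is one of
# the disallowed opcodes, so the second program is rejected at index 2.
print(check_loop_validity([1, 2, 3]))      # (True, -1)
print(check_loop_validity([1, 2, 16, 3]))  # (False, 2)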
def target_channel_id_name_list(
conversations_list: list=None, including_archived: bool=False):
"""extract targeted channels id list from conversations_list response.
Returns:
id_list, name_list
"""
id_list = []
name_list = []
for ch in conversations_list:
if including_archived is False:
if ch['is_archived'] is True:
continue
id_list.append(ch['id'])
name_list.append(ch['name'])
return id_list, name_list | 2,094 |
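# Hedged usage sketch (not from the original source): archived channels are
# skipped unless including_archived is set.
channels = [
    {'id': 'C001', 'name': 'general', 'is_archived': False},
    {'id': 'C002', 'name': 'old-project', 'is_archived': True},
]
ids, names = target_channel_id_name_list(channels)
# ids -> ['C001'], names -> ['general']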
def NETWORKDAYS(*args) -> Function:
"""
Returns the number of net working days between two provided days.
Learn more: https://support.google.com/docs/answer/3092979
"""
return Function("NETWORKDAYS", args) | 2,095 |
def calc_high_outlier(values) -> float:
"""Calculates the high outlier from a pandas Series"""
q1, q3 = [values.quantile(x, 'midpoint') for x in (0.25, 0.75)]
return q3 + 1.5 * (q3 - q1) | 2,096 |
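# Hedged usage sketch (not from the original source): Q1=2 and Q3=4 here, so
# the high-outlier threshold is 4 + 1.5 * 2 = 7.
import pandas as pd

values = pd.Series([1, 2, 3, 4, 5])
print(calc_high_outlier(values))  # 7.0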
def test_api():
"""Test some aux methods."""
assert iemre.get_dailyc_mrms_ncname() is not None
assert iemre.get_dailyc_ncname() is not None | 2,097 |
def get_sql_delete_by_ids(table: str, ids_length: int):
"""
获取添加数据的字符串
:param table:
:return:
"""
# 校验数据
if not table:
raise ParamError(f"table 参数错误:table={table}")
if not ids_length or not isinstance(ids_length, int):
raise ParamError(f"ids_length 参数错误:ids_length={ids_length}")
# 准备参数
ids = ["%s" for _ in range(ids_length)]
ids_str = ", ".join(ids)
# 准备sql
s = f"delete from {table} where id in ({ids_str});"
return s | 2,098 |
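# Hedged usage sketch (not from the original source): the statement keeps "%s"
# placeholders so the ids can be passed separately to a DB-API cursor.
sql = get_sql_delete_by_ids("user", 3)
# sql -> 'delete from user where id in (%s, %s, %s);'
# e.g. cursor.execute(sql, (1, 2, 3))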
def save_results_numpy(dir, filename, results):
"""Save the given dictionary of results into a numpy file.
Args:
results (dict): Dictionary of results
"""
file_path = os.path.join(dir, filename)
assert not os.path.exists(file_path), 'File ' + str(file_path) + ' already exists!'
if not os.path.exists(dir):
os.makedirs(dir)
np.save(file_path, results) | 2,099 |
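# Hedged usage sketch (not from the original source): persist a results dict
# and reload it; the target file must not already exist, and allow_pickle is
# needed on load because the payload is a dict stored as an object array.
results = {"accuracy": 0.93, "loss": 0.21}
save_results_numpy("outputs", "run1.npy", results)
loaded = np.load(os.path.join("outputs", "run1.npy"), allow_pickle=True).item()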