content (string, lengths 22–815k) | id (int64, 0–4.91M)
---|---
def rand_email(domain=None):
"""Generate a random zone name
:return: a random zone name e.g. example.org.
:rtype: string
"""
domain = domain or rand_zone_name()
return 'example@%s' % domain.rstrip('.') | 5,358,000 |
def wordcount_for_reddit(data, search_word):
"""Return the number of times a word has been used."""
count = 0
for result in data:  # do something with each result from the scrape
for key in result:
stringed_list = str(result[key])
text_list = stringed_list.split()
for word in text_list:
if search_word == 'Go':
if word == search_word:
count += 1
elif word.lower() == search_word.lower():
count += 1
return count | 5,358,001 |
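# Hedged usage sketch (not part of the original row): assuming wordcount_for_reddit
# from above is in scope, `posts` mimics a list of scraped reddit dicts. Searching
# for 'Go' is case-sensitive by design; any other word matches case-insensitively.
posts = [{"title": "Go is great", "selftext": "I love go and golang"}]
print(wordcount_for_reddit(posts, "Go"))    # 1 (only the exact 'Go')
print(wordcount_for_reddit(posts, "love"))  # 1 (case-insensitive match)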
def openeo_to_eodatareaders(process_graph_json_in: Union[dict, str], job_data: str,
process_defs: Union[dict, list, str], vrt_only: bool = False,
existing_node_ids: List[Tuple] = None) \
-> Tuple[List[Tuple[str, List[str], Optional[str], List[str], str]], Graph]:
"""
This function translates an OpenEO process graph into a sequence of calls to EODataProcessor,
one for each node of the process graph.
Each openEO process is wrapped into an apply/reduce call using EODataProcessor methods.
"""
# Translate openEO PG to traversable object
if isinstance(process_graph_json_in, dict):
process_graph_json = deepcopy(process_graph_json_in)
else:
process_graph_json = process_graph_json_in
graph = translate_process_graph(process_graph_json, process_defs=process_defs).sort(by='dependency')
# Get wrapper processes -> TODO: is this really needed?
wrapper_processes = get_wrapper_processes()
nodes = []
N_nodes = len(graph.ids)
last_node = False
for k, node_id in enumerate(graph.ids):
cur_node = graph[node_id]
wrapper_name = None
wrapper_dimension = None
node_dependencies = None
if k + 1 == N_nodes:
last_node = True
if cur_node.is_reducer:
# Current process is classified as "reducer" in its process definition
if cur_node.parent_process:
# Current process has parent, must be an embedded process graph
wrapper_name = cur_node.parent_process.process_id
wrapper_dimension = cur_node.parent_process.dimension
else:
# Current process is of type "reducer" but has no parent, must be one of these processes:
# "reduce_dimension", "reduce_dimension_binary"
wrapper_name = cur_node.process_id
wrapper_dimension = cur_node.dimension
else:
wrapper_name = cur_node.process_id
reducer_dimension = None  # for clarity, this will be needed when 'apply_dimension' is also supported by EODataProcessor
# Workaround for process "array_element" until it has the category "reducer" set
# TODO remove when the process definition is updated
if (not cur_node.is_reducer) and (cur_node.parent_process):
# Current process has parent, must be an embedded process graph
wrapper_name = cur_node.parent_process.process_id
wrapper_dimension = cur_node.parent_process.dimension
# NB find better solution
if wrapper_dimension:
wrapper_dimension = check_dim_name(wrapper_dimension)
if cur_node.content['process_id'] == 'run_udf':
operator = "UdfExec"
params = map_udf(cur_node.content, job_data, cur_node.id)
else:
operator = "EODataProcessor"
params = map_process(
cur_node.content,
cur_node.id,
cur_node.is_result,
job_data,
wrapper_name=wrapper_name,
wrapper_dimension=wrapper_dimension,
vrt_only=vrt_only,
last_node=last_node
)
# Get dependencies
if cur_node.result_process and (cur_node.process_id in wrapper_processes):
# The current process is a wrapper process, which embeds a process graph
# Its only dependency is the node in the embedded process graph with 'result' set to True.
node_dependencies = [cur_node.result_process.id]
else:
node_dependencies = list(cur_node.dependencies.ids)
# Add to nodes list
nodes.append((cur_node.id, params, node_dependencies, operator))
return nodes, graph | 5,358,002 |
def main():
"""
This function is executed automatically when the module is run directly.
"""
hats = []
# Define the channel list for each HAT device
chans = [
{0, 1},
{0, 1}
]
# Define the options for each HAT device
options = [
OptionFlags.EXTTRIGGER,
OptionFlags.EXTCLOCK
]
samples_per_channel = 10000
sample_rate = 1000.0 # Samples per second
trigger_mode = TriggerModes.RISING_EDGE
try:
# Get an instance of the selected hat device object.
hats = select_hat_devices(HatIDs.MCC_118, DEVICE_COUNT)
# Validate the selected channels.
for i, hat in enumerate(hats):
validate_channels(chans[i], hat.info().NUM_AI_CHANNELS)
# Set the trigger mode for the master device.
hats[MASTER].trigger_mode(trigger_mode)
# Calculate the actual sample rate.
actual_rate = hats[MASTER].a_in_scan_actual_rate(len(chans[MASTER]),
sample_rate)
print('MCC 118 multiple HAT example using external clock and',
'external trigger options')
print(' Functions demonstrated:')
print(' mcc118.trigger_mode')
print(' mcc118.a_in_scan_start')
print(' mcc118.a_in_scan_status')
print(' mcc118.a_in_scan_read')
print(' Samples per channel:', samples_per_channel)
print(' Requested Sample Rate: {:.3f} Hz'.format(sample_rate))
print(' Actual Sample Rate: {:.3f} Hz'.format(actual_rate))
print(' Trigger type:', trigger_mode.name)
for i, hat in enumerate(hats):
print(' HAT {}:'.format(i))
print(' Address:', hat.address())
print(' Channels: ', end='')
print(', '.join([str(chan) for chan in chans[i]]))
options_str = enum_mask_to_string(OptionFlags, options[i])
print(' Options:', options_str)
print('\n*NOTE: Connect the CLK terminals together on each MCC 118')
print(' HAT device being used. Connect a trigger source')
print(' to the TRIG input terminal on HAT 0.')
try:
input("\nPress 'Enter' to continue")
except (NameError, SyntaxError):
pass
# Start the scan.
for i, hat in enumerate(hats):
chan_mask = chan_list_to_mask(chans[i])
hat.a_in_scan_start(chan_mask, samples_per_channel, sample_rate,
options[i])
print('\nWaiting for trigger ... Press Ctrl-C to stop scan\n')
try:
# Monitor the trigger status on the master device.
wait_for_trigger(hats[MASTER])
# Read and display data for all devices until scan completes
# or overrun is detected.
read_and_display_data(hats, chans)
except KeyboardInterrupt:
# Clear the '^C' from the display.
print(CURSOR_BACK_2, ERASE_TO_END_OF_LINE, '\nAborted\n')
except (HatError, ValueError) as error:
print('\n', error)
finally:
for hat in hats:
hat.a_in_scan_stop()
hat.a_in_scan_cleanup() | 5,358,003 |
def calc_precision(gnd_assignments, pred_assignments):
"""
gnd_clusters should be a torch tensor of longs, containing
the assignment to each cluster
assumes that cluster assignments are 0-based, and no 'holes'
"""
precision_sum = 0
assert len(gnd_assignments.size()) == 1
assert len(pred_assignments.size()) == 1
assert pred_assignments.size(0) == gnd_assignments.size(0)
N = gnd_assignments.size(0)
K_gnd = gnd_assignments.max().item() + 1
K_pred = pred_assignments.max().item() + 1
for k_pred in range(K_pred):
mask = pred_assignments == k_pred
gnd = gnd_assignments[mask.nonzero().long().view(-1)]
max_intersect = 0
for k_gnd in range(K_gnd):
intersect = (gnd == k_gnd).long().sum().item()
max_intersect = max(max_intersect, intersect)
precision_sum += max_intersect
precision = precision_sum / N
return precision | 5,358,004 |
def scheduler_job_output_route():
"""receive output from assigned job"""
try:
jsonschema.validate(request.json, schema=sner.agent.protocol.output)
job_id = request.json['id']
retval = request.json['retval']
output = base64.b64decode(request.json['output'])
except (jsonschema.exceptions.ValidationError, binascii.Error):
return jsonify({'title': 'Invalid request'}), HTTPStatus.BAD_REQUEST
job = Job.query.filter(Job.id == job_id).one_or_none()
if job and (not job.retval):
# requests for invalid, deleted, repeated or clashing job ids are discarded
# agent should delete the output on its side as well
job.retval = retval
os.makedirs(os.path.dirname(job.output_abspath), exist_ok=True)
with open(job.output_abspath, 'wb') as ftmp:
ftmp.write(output)
job.time_end = datetime.utcnow()
db.session.commit()
return '', HTTPStatus.OK | 5,358,005 |
def __get_app_package_path(package_type, app_or_model_class):
"""
:param package_type:
:return:
"""
models_path = []
found = False
if isinstance(app_or_model_class, str):
app_path_str = app_or_model_class
elif hasattr(app_or_model_class, '__module__'):
app_path_str = app_or_model_class.__module__
else:
raise RuntimeError('Unable to get module path.')
for item in app_path_str.split('.'):
if item in ['models', 'admin']:
models_path.append(package_type)
found = True
break
else:
models_path.append(item)
if not found:
models_path.append(package_type)
return '.'.join(models_path) | 5,358,006 |
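# Hedged usage sketch (illustrative only): assuming __get_app_package_path from above
# is in scope, the dotted path is rewritten at the first 'models'/'admin' segment,
# or the package_type is appended if no such segment is found.
print(__get_app_package_path('admin', 'shop.models.product'))  # shop.admin
print(__get_app_package_path('admin', 'shop.views'))           # shop.views.admin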
def generate_colors(history, config):
"""
This is some old code to generate colors for the ray plotting
in _add_ray_history.
For the moment I am just saving the old code here, but this
will not work with the new history dictionary. (Only minor
adjustments are necessary to make it work, but the
implementation was not very general.)
"""
raise NotImplementedError()
color_masks = []
color_list = []
# This is a good pink color for un-reflected rays.
# color_unref = [1.0, 0.5, 0.5, 0.2]
if False:
color_masks.append(output[0]['mask'].copy())
color_list.append((1.0, 0.0, 0.0, 0.5))
# Color by vertical position in Plasma.
if False:
norm = matplotlib.colors.Normalize(0.0, 1.0)
cm = matplotlib.cm.ScalarMappable(norm=norm, cmap='gist_rainbow')
color_masks.append((output[0]['origin'][:, 2] > 0.1))
color_list.append(cm.to_rgba(0.0, alpha=0.5))
color_masks.append((output[0]['origin'][:, 2] > 0.05) & (output[0]['origin'][:, 2] < 0.1))
color_list.append(cm.to_rgba(1 / 5, alpha=0.5))
color_masks.append((output[0]['origin'][:, 2] > 0.0) & (output[0]['origin'][:, 2] < 0.05))
color_list.append(cm.to_rgba(2 / 5, alpha=0.5))
color_masks.append((output[0]['origin'][:, 2] > -0.05) & (output[0]['origin'][:, 2] < 0))
color_list.append(cm.to_rgba(3 / 5, alpha=0.5))
color_masks.append((output[0]['origin'][:, 2] > -0.1) & (output[0]['origin'][:, 2] < -0.05))
color_list.append(cm.to_rgba(4 / 5, alpha=0.5))
color_masks.append((output[0]['origin'][:, 2] < -0.1))
color_list.append(cm.to_rgba(5 / 5, alpha=0.5))
if True:
norm = matplotlib.colors.Normalize(0.0, 1.0)
cm = matplotlib.cm.ScalarMappable(norm=norm, cmap='gist_rainbow')
delta = 0.005
num_wave = 5
for ii in range(num_wave):
wave_0 = inputs['source_input']['wavelength'] - 1 * delta * (num_wave - 1) / 2 + delta * ii
wave_min = wave_0 - delta / 2
wave_max = wave_0 + delta / 2
mask_temp = (output[0]['wavelength'][:] > wave_min) & (output[0]['wavelength'][:] < wave_max)
print('{:7.4f} {:7.4f} {:7.4f} {}'.format(wave_0, wave_min, wave_max, np.sum(mask_temp)))
color_masks.append(mask_temp)
color_list.append(cm.to_rgba(ii / num_wave, alpha=0.5)) | 5,358,007 |
def dict_from_payload(base64_input: str, fport: int = None):
""" Decodes a base64-encoded binary payload into JSON.
Parameters
----------
base64_input : str
Base64-encoded binary payload
fport: int
FPort as provided in the metadata. Please note the fport is optional and can have value "None", if not provided by the LNS or invoking function.
If fport is None and the binary decoder cannot proceed because of that, it should raise an exception.
Returns
-------
JSON object with key/value pairs of decoded attributes
"""
bytes = base64.b64decode(base64_input)
value= (bytes[0] << 8 | bytes[1]) & 0x3FFF
battery = value/1000
door_open_status = 0
water_leak_status = 0
if bytes[0] & 0x40:
water_leak_status = 1
if bytes[0] & 0x80:
door_open_status = 1
mod = bytes[2]
if mod == 1:
open_times = bytes[3] << 16 | bytes[4] << 8 | bytes[5]
open_duration = bytes[6] << 16 | bytes[7] << 8 | bytes[8]
result = {
"mod": mod,
"battery": battery,
"door_open_status": door_open_status,
"open_times": open_times,
"open_duration": open_duration
}
return result
if mod == 2:
leak_times = bytes[3] << 16 | bytes[4] << 8 | bytes[5]
leak_duration = bytes[6] << 16 | bytes[7] << 8 | bytes[8]
result = {
"mod": mod,
"battery": battery,
"leak_times": leak_times,
"leak_duration": leak_duration
}
return result
result = {
"battery": battery,
"mod": mod
}
return result | 5,358,008 |
def pack(circles, x, y, padding=2, exclude=[]):
""" Circle-packing algorithm.
Groups the given list of Circle objects around (x,y) in an organic way.
"""
# Ported from Sean McCullough's Processing code:
# http://www.cricketschirping.com/processing/CirclePacking1/
# See also: http://en.wiki.mcneel.com/default.aspx/McNeel/2DCirclePacking
# Repulsive force: move away from intersecting circles.
for i, circle1 in enumerate(circles):
for circle2 in circles[i+1:]:
d = distance(circle1.x, circle1.y, circle2.x, circle2.y)
r = circle1.radius + circle2.radius + padding
if d < r - 0.01:
dx = circle2.x - circle1.x
dy = circle2.y - circle1.y
vx = (dx / d) * (r-d) * 0.5
vy = (dy / d) * (r-d) * 0.5
if circle1 not in exclude:
circle1.x -= vx
circle1.y -= vy
if circle2 not in exclude:
circle2.x += vx
circle2.y += vy
# Attractive force: move all circles to center.
for circle in circles:
circle.goal.x = x
circle.goal.y = y
if circle not in exclude:
damping = circle.radius ** 3 * 0.000001 # Big ones in the middle.
vx = (circle.x - x) * damping
vy = (circle.y - y) * damping
circle.x -= vx
circle.y -= vy | 5,358,009 |
def test_call_bluesky(daq):
"""
These are things that bluesky uses. Let's check them.
"""
logger.debug('test_call_bluesky')
daq.describe()
daq.describe_configuration()
daq.stage()
daq.begin(duration=10)
# unstage should end the run and we don't time out
daq.unstage() | 5,358,010 |
def handle(req):
"""POST"""
im = Image.open(BytesIO(req.files[list(req.files.keys())[0]].body))
w, h = im.size
im2 = ImageOps.mirror(im.crop((0, 0, w / 2, h)))
im.paste(im2, (int(w / 2), 0))
io = BytesIO()
im.save(io, format='PNG')
return req.Response(
body=io.getvalue(), mime_type='image/png', encoding='UTF-8') | 5,358,011 |
def read_sachs_all(folder_path):
"""Reads all the sachs data specified in the folder_path.
Args:
folder_path: str specifying the folder containing the sachs data
Returns:
An np.array containing all the sachs data
"""
sachs_data = list()
# Divides the Sachs dataset into environments.
for _, file in enumerate(glob.glob(f'{folder_path}*.xls')):
sachs_df = pd.read_excel(file)
sachs_array = sachs_df.to_numpy()
sachs_data.append(sachs_array)
sachs_data_envs = np.vstack(sachs_data)
return sachs_data_envs | 5,358,012 |
def test_axes_map():
"""
map from Axes([aaa, bbb]) to Axes([zzz, bbb]) via AxesMap {aaa: zzz}
"""
a = ng.make_axis(1, name='aaa')
b = ng.make_axis(2, name='bbb')
z = ng.make_axis(1, name='zzz')
axes_before = ng.make_axes([a, b])
axes_after = ng.make_axes([z, b])
axes_map = AxesMap({a.name: z.name})
assert axes_after == axes_map.map_axes(axes_before) | 5,358,013 |
def populate_job_directories():
""" -function to populate or update job directory tree
with job scripts that are located in /Setup_and_Config """
JobStreams, Replicates, BaseDirNames, JobBaseNames, Runs, \
nJobStreams, nReplicates, nBaseNames = check_job_structure()
mcf = read_master_config_file()
ljdf_t = read_local_job_details()
cwd=os.getcwd()
ljdf_t[ 'BASE_DIR' ] = cwd
ljdf_t[ 'CurrentRound' ] = mcf["Round"]
ljdf_t[ 'Account' ] = mcf["Account"]
ljdf_t[ 'Nodes' ] = mcf["nodes"]
ljdf_t[ 'ntpn' ] = mcf["ntpn"]
ljdf_t[ 'ppn' ] = mcf["ppn"]
ljdf_t[ 'Walltime' ] = mcf["Walltime"]
ljdf_t[ 'JobFailTime' ] = mcf["JobFailTime"]
ljdf_t[ 'DiskSpaceCutOff' ] = mcf["DiskSpaceCutOff"]
Flavour = mcf["Flavour"]
OptScript = mcf["EquilibrateConfScript"]
ProdScript = mcf["ProductionConfScript"]
ModuleFile = mcf["ModuleFile"]
startscript = mcf["SbatchEquilibrateScript"]
productionscript = mcf["SbatchProductionScript"]
## list files to transfer:
print(("{}Job Files to transfer from /Setup_and_Config:{}"\
.format(GREEN, DEFAULT)))
print(("{} {}\n {}".format(BLUE, startscript,\
productionscript)))
print(" local_job_details.json ")
for pyfile in glob.glob(r'Setup_and_Config/*.py' ):
print((" " + pyfile[17:]))
for conffile in glob.glob(r'Setup_and_Config/*.conf' ):
print((" " + conffile[17:]))
## descend through job structure and populate job directories:
for i in range(0, nJobStreams):
TargetJobDir = cwd + "/" + JobStreams[i]
print(("{}\nPopulating JobStream: {} {}".format( GREEN,
TargetJobDir, DEFAULT)))
## check to see if there actually are any job directories to fill:
if not os.path.exists( TargetJobDir ):
print(("Job directory {} not found. Have you initialized?"\
.format(TargetJobDir)))
sys.exit(error)
## create temporary sbatch scripts:
sb_start_template = "Setup_and_Config/" + startscript + ".template"
sb_prod_template = "Setup_and_Config/" + productionscript + ".template"
if not os.path.exists( sb_start_template ) \
or not os.path.exists( sb_prod_template ):
print("Can't find sbatch template files in Settup_and_Config. Exiting.")
sys.exit(error)
## modify replicate elements in staging dictionary file:
ljdf_t['JOB_STREAM_DIR'] = JobStreams[i]
ljdf_t['CurrentRun'] = 0
ljdf_t['TotalRuns'] = int( Runs[i] )
ljdf_t['JobBaseName'] = JobBaseNames[i]
nnodes = "#SBATCH --nodes=" + mcf["nodes"]
ntime = "#SBATCH --time=" + mcf["Walltime"]
naccount = "#SBATCH --account=" + mcf["Account"]
nntpn = "ntpn=" + mcf["ntpn"]
nppn = "ppn=" + mcf["ppn"]
nmodule = "module load " + ModuleFile
nopt = "optimize_script=" + OptScript
nprod = "production_script=" + ProdScript
shutil.copy( sb_start_template, 'sb_start_temp')
shutil.copy( sb_prod_template, 'sb_prod_temp' )
## replace lines in sbatch files:
for f in ["sb_start_temp", "sb_prod_temp"]:
for line in fileinput.FileInput( f, inplace=True ):
line = line.replace('#SBATCH --nodes=X', nnodes )
line = line.replace('#SBATCH --time=X', ntime )
line = line.replace('#SBATCH --account=X', naccount)
line = line.replace('ntpn=X', nntpn )
line = line.replace('ppn=X', nppn )
line = line.replace('module load X', nmodule )
line = line.replace('optimize_script=X', nopt )
line = line.replace('production_script=X', nprod )
sys.stdout.write(line)
## update local job details file:
jobdirlist = get_current_dir_list(JobStreams[i])
for j in jobdirlist:
print(("{} -populating: {}{}".format(BLUE, j, DEFAULT)))
ljdf_t['JobDirName'] = j
ljdfile = JobStreams[i] + "/" + j + "/local_job_details.json"
if not os.path.isfile(ljdfile):
with open(ljdfile, 'w') as outfile:
json.dump(ljdf_t, outfile, indent=2)
outfile.close()
else:
print(" skipping local_details_file: already exists ")
## copy across python scripts from /Setup_and_Config:
jobpath = JobStreams[i] + "/" + j + "/"
sbs_path = jobpath + "/" + startscript
sbp_path = jobpath + "/" + productionscript
shutil.copy('sb_start_temp', sbs_path)
shutil.copy('sb_prod_temp' , sbp_path)
for pyfile in glob.glob(r'Setup_and_Config/*.py' ):
shutil.copy2( pyfile, jobpath )
for conffile in glob.glob(r'Setup_and_Config/*.conf' ):
shutil.copy2(conffile, jobpath)
## remove tempfiles.
os.remove('sb_start_temp')
os.remove('sb_prod_temp')
print("\n -done populating directories") | 5,358,014 |
def user_permitted_tree(user):
"""Generate a dictionary of the representing a folder tree composed of
the elements the user is allowed to acccess.
"""
# Init
user_tree = {}
# Dynamically collect permission to avoid hardcoding
# Note: Any permission to an element is the same as read permission so
# they are all included.
file_perm_list = [
f'data_driven_acquisition.{perm}' for perm in
get_perms_for_model('data_driven_acquisition.File').values_list(
'codename', flat=True)
]
folder_perm_list = [
f'data_driven_acquisition.{perm}' for perm in
get_perms_for_model('data_driven_acquisition.Folder').values_list(
'codename', flat=True)
]
# Collect all permitted elements
permitted_folders = get_objects_for_user(
user,
folder_perm_list,
any_perm=True).all()
permitted_files = get_objects_for_user(
user,
file_perm_list,
any_perm=True).all()
# Add all permitted folders to the user tree with their content and parents.
for folder_obj in permitted_folders:
# Get the folder content as tree
tree = get_folder_tree(folder_obj)
# Try to place the tree in the user tree
if not place_folder_in_tree(user_tree, folder_obj, tree):
# The parent is not in the user tree.
# Create the parent folder at root level and then complete the
# climb to the package level, merging as needed.
user_tree[folder_obj] = tree
user_tree = climb_to_package(user_tree, folder_obj)
# Add all permitted files to the user tree with theirs parents.
for file_obj in permitted_files:
# Add to the user tree if the parent folder is already there.
if not place_file_in_tree(user_tree, file_obj):
# Could not find the parent folder in the tree.
# Create a base tree with the parent folder and
# the file at root level, then climb up to the Package,
# merging when required.
tree = {
"files": [file_obj, ]
}
user_tree[file_obj.parent] = tree
user_tree = climb_to_package(user_tree, file_obj.parent)
return user_tree | 5,358,015 |
def upgrade():
"""
Run upgrade
"""
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index('ix_domain_proxy_logs_cbsd_serial_number', table_name='domain_proxy_logs')
op.drop_index('ix_domain_proxy_logs_created_date', table_name='domain_proxy_logs')
op.drop_index('ix_domain_proxy_logs_fcc_id', table_name='domain_proxy_logs')
op.drop_index('ix_domain_proxy_logs_response_code', table_name='domain_proxy_logs')
op.drop_table('domain_proxy_logs')
# ### end Alembic commands ### | 5,358,016 |
def __save_roc(y_true, y_pred, output_dir):
"""
Creates an ROC curve with AUC for the model
:param y_true: The actual phenotypes for the test data
:param y_pred: The predicted phenotypes for the test data
:param output_dir: The directory to save the ROC curve in
"""
fpr, tpr, thresholds = roc_curve(y_true, y_pred)
roc_auc = auc(fpr, tpr)
# Plot code referenced from http://scikit-learn.org/stable/auto_examples/model_selection/plot_roc.html
plt.figure()
lw = 2
plt.plot(fpr, tpr, color='darkorange',
lw=lw, label='ROC curve (area = %0.3f)' % roc_auc)
plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC')
plt.legend(loc="lower right")
plt.savefig(path.join(output_dir, 'roc.png'))
plt.close() | 5,358,017 |
def extract_failure(d):
"""
Returns the failure object the given deferred was errback'ed with.
If the deferred has a result that is not a failure, a `ValueError` is raised.
If the deferred has no result yet a :class:`NotCalledError` is raised.
"""
if not has_result(d):
raise NotCalledError()
else:
result = []
def callback(value):
result.append(value)
d.addBoth(callback)
result = result[0]
if isinstance(result, failure.Failure):
return result
else:
raise ValueError("Deferred was called back with a value: %r" % result) | 5,358,018 |
def convert_where_clause(clause: dict) -> str:
"""
Convert a dictionary of clauses to a string for use in a query
Parameters
----------
clause : dict
Dictionary of clauses
Returns
-------
str
A string representation of the clauses
"""
out = "{"
for key in clause.keys():
out += "{}: ".format(key)
#If the type of the right hand side is string add the string quotes around it
if type(clause[key]) == str:
out += '"{}"'.format(clause[key])
else:
out += "{}".format(clause[key])
out += ","
out += "}"
return out | 5,358,019 |
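# Hedged usage sketch: assuming convert_where_clause from above is in scope,
# string values are quoted while other values are interpolated as-is.
clause = {"token": "DAI", "amount": 100}
print(convert_where_clause(clause))  # {token: "DAI",amount: 100,}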
def updatereqs():
"""Update services using requirements.txt and requirements.sh"""
commands.updatereqs() | 5,358,020 |
def clear():
"""
Deletes the cache.
"""
cprint('Clearing cache', 'yellow')
shutil.rmtree(_cache_path, ignore_errors=True) | 5,358,021 |
def all_metadata_async():
"""Retrieves all available metadata for an instance async"""
loop = trollius.get_event_loop()
res = loop.run_until_complete(call())
return res | 5,358,022 |
def median_std_from_ma(data: np.ma.MaskedArray, axis=0):
"""On the assumption that there are bit-flips in the *data*,
attempt to find a value that might represent the standard
deviation of the 'real' data. The *data* object must be a
numpy masked array.
The value of *axis* determines which way the data are handled.
The default is 0 to scan vertically to accumulate statistics for
columns. In this case, only those columns with the most unmasked
data are evaluated. For them, the standard deviation is found
for each column, and the returned value is the median of those
standard deviations. If *axis* is 1, then this is applied to
rows, not columns.
"""
valid_points = data.count(axis=axis)
std_devs = np.std(data, axis=axis)
return median_std(valid_points, std_devs) | 5,358,023 |
def __check_value_range(x: int) -> bool:
"""
Checks if integer is in valid value range to
be a coordinate for Tic-Tac-Toe.
"""
if x < 1 or x > 3:
print(__standard_error_text +
"Coordinates have to be between 1 and 3.\n")
return False
return True | 5,358,024 |
def find_option(opt):
"""
This function checks for option defined with optcode;
it could be implemented differently - by checking entries in world.cliopts
"""
# received msg from client must not be changed - make a copy of it
tmp = world.climsg[world.clntCounter].copy()
# 0 - ether, 1 - ipv6, 2 - udp, 3 - dhcpv6, 4 - opts
if type(tmp) == Ether:
tmp = tmp.getlayer(4)
else:
tmp = tmp.getlayer(3)
while tmp:
if tmp.optcode == int(opt):
return True
tmp = tmp.payload
return False | 5,358,025 |
def generic_add_model_components(
m,
d,
reserve_zone_param,
reserve_zone_set,
reserve_generator_set,
generator_reserve_provision_variable,
total_reserve_provision_expression,
):
"""
Generic treatment of reserves. This function creates model components
related to a particular reserve requirement, including
1) an expression aggregating generator-level provision to total provision
:param m:
:param d:
:param reserve_zone_param:
:param reserve_zone_set:
:param reserve_generator_set:
:param generator_reserve_provision_variable:
:param total_reserve_provision_expression:
:return:
"""
# Reserve generators operational generators in timepoint
# This will be the intersection of the reserve generator set and the set of
# generators operational in the timepoint
op_set = str(reserve_generator_set) + "_OPERATIONAL_IN_TIMEPOINT"
setattr(
m,
op_set,
Set(
m.TMPS,
initialize=lambda mod, tmp: getattr(mod, reserve_generator_set)
& mod.OPR_PRJS_IN_TMP[tmp],
),
)
# Reserve provision
def total_reserve_rule(mod, ba, tmp):
return sum(
getattr(mod, generator_reserve_provision_variable)[g, tmp]
for g in getattr(mod, op_set)[tmp]
if getattr(mod, reserve_zone_param)[g] == ba
)
setattr(
m,
total_reserve_provision_expression,
Expression(getattr(m, reserve_zone_set), m.TMPS, rule=total_reserve_rule),
) | 5,358,026 |
def return_all_content(content):
"""Help function to return untruncated stripped content."""
return mark_safe(str(content).replace('><', '> <')) if content else None | 5,358,027 |
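# Hedged usage sketch: assuming return_all_content from above is in scope
# (it relies on django.utils.safestring.mark_safe). Adjacent tags get a space
# inserted so truncation-safe rendering keeps word boundaries.
print(return_all_content("<td>a</td><td>b</td>"))  # <td>a</td> <td>b</td>
print(return_all_content(None))                    # None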
def get_trailing_returns(uid):
""" Get trailing return chart """
connection = pymysql.connect(host=DB_SRV,
user=DB_USR,
password=DB_PWD,
db=DB_NAME,
charset='utf8mb4',
cursorclass=pymysql.cursors.DictCursor)
cursor = connection.cursor(pymysql.cursors.SSCursor)
sql = "SELECT instruments.fullname, instruments.is_benchmark, "+\
"instruments.market, instruments.symbol, instruments.asset_class "+\
"FROM instruments JOIN symbol_list ON symbol_list.symbol = instruments.symbol "+\
"WHERE symbol_list.uid=" + str(uid)
cursor.execute(sql)
res = cursor.fetchall()
for row in res:
fullname = row[0].replace("'", "")
is_benchmark = row[1]
market = row[2]
symbol_is_portf = row[3]
asset_class = row[4]
if symbol_is_portf.find(get_portf_suffix()) > -1:
sql = "SELECT date FROM chart_data WHERE uid=" + str(uid) + " ORDER BY date DESC LIMIT 1"
else:
sql = "SELECT price_instruments_data.date FROM price_instruments_data JOIN symbol_list "+\
"ON symbol_list.symbol = price_instruments_data.symbol "+\
"WHERE symbol_list.uid=" + str(uid) +" ORDER BY date DESC LIMIT 1"
cursor.execute(sql)
res = cursor.fetchall()
as_date = ''
l_as_date = ''
for row in res:
as_date = row[0]
if as_date != '':
l_as_date = 'Trailing returns as of '+ as_date.strftime("%d-%b-%Y")
font_size = 10
l_y1 = '1-Year'
l_m6 = '6-month'
l_m3 = '3-month'
l_m1 = '1-month'
l_w1 = '1-week'
minb = 0
mini = 0
maxb = 0
maxi = 0
benchmark_header = ''
benchmark_data_y1 = ''
benchmark_data_m6 = ''
benchmark_data_m3 = ''
benchmark_data_m1 = ''
benchmark_data_w1 = ''
if not is_benchmark:
sql = "SELECT symbol_list.uid, instruments.fullname "+\
"FROM symbol_list JOIN instruments "+\
"ON symbol_list.symbol = instruments.symbol "+\
"WHERE instruments.market='"+\
str(market) +"' AND instruments.asset_class='"+\
str(asset_class) +"' AND instruments.is_benchmark=1"
cursor.execute(sql)
res = cursor.fetchall()
benchmark_uid = 0
for row in res:
benchmark_uid = row[0]
benchmark_fullname = row[1].replace("'", "")
if benchmark_uid != 0:
benchmark_header = ", ' " +\
benchmark_fullname +\
" ', {type: 'string', role: 'annotation'}"
benchmark_data_y1 = ','+ get_chart_data(benchmark_uid, 'y1')
benchmark_data_m6 = ','+ get_chart_data(benchmark_uid, 'm6')
benchmark_data_m3 = ','+ get_chart_data(benchmark_uid, 'm3')
benchmark_data_m1 = ','+ get_chart_data(benchmark_uid, 'm1')
benchmark_data_w1 = ','+ get_chart_data(benchmark_uid, 'w1')
minb = get_minmax(benchmark_uid, 'min')
maxb = get_minmax(benchmark_uid, 'max')
data = ''+\
'["'+ l_y1 + '",' + get_chart_data(uid, 'y1') + benchmark_data_y1 +']' + ',' +\
'["'+ l_m6 + '",' + get_chart_data(uid, 'm6') + benchmark_data_m6 + ']' + ',' +\
'["'+ l_m3 + '",' + get_chart_data(uid, 'm3') + benchmark_data_m3 + ']' + ',' +\
'["'+ l_m1 + '",' + get_chart_data(uid, 'm1') + benchmark_data_m1 + ']' + ',' +\
'["'+ l_w1 + '",' + get_chart_data(uid, 'w1') + benchmark_data_w1 + ']'
mini = get_minmax(uid, 'min')
maxi = get_minmax(uid, 'max')
if minb < mini:
mini = minb
if maxb > maxi:
maxi = maxb
header = " ['x', ' " +\
fullname + " ', {type: 'string', role: 'annotation'}"+\
benchmark_header +" ],"
chart_content = "" +\
"<script>" +\
"google.charts.load('current', {packages: ['corechart', 'bar']});" +\
"google.charts.setOnLoadCallback(drawAnnotations);" +\
"function drawAnnotations() {" +\
" var data = google.visualization.arrayToDataTable([" +\
header +\
data +\
" ]);" +\
" var options = {" +\
" fontSize: "+ str(font_size) + "," +\
" legend: {position:'top', textStyle: {color:"+\
theme_return_this("'black'", "'white'") +"} }," +\
" title: ''," +\
" backgroundColor: 'transparent',"+\
" chartArea: {width: '50%'}," +\
" annotations: {" +\
" alwaysOutside: true," +\
" textStyle: {" +\
" auraColor: 'none'," +\
" color: '#555'" +\
" }," +\
" boxStyle: {" +\
" stroke: '#ccc'," +\
" strokeWidth: 1," +\
" gradient: {" +\
" color1: 'yellow'," +\
" color2: 'white'," +\
" x1: '0%', y1: '0%'," +\
" x2: '100%', y2: '100%'" +\
" }" +\
" }" +\
" }," +\
" series: {0:{color: "+\
theme_return_this("'blue'", "'orange'") +"}, 1:{color: '#c9d6ea'} }," +\
" chartArea: {width:'80%',height:'80%'}," +\
" hAxis: {" +\
" title: '" + l_as_date + "', " +\
" titleTextStyle:{ color:"+\
theme_return_this("'black'", "'white'") +"},"+\
" viewWindow:{min:"+\
str(mini) +",max:"+\
str(maxi) +"}," +\
" gridlines: { color: 'transparent' },"+\
" textStyle: { color: "+\
theme_return_this("'black'", "'white'") +" } "+\
" }," +\
" vAxis: {" +\
" title: '', " +\
" textStyle: { color: "+\
theme_return_this("'black'", "'white'") +" } "+\
" }" +\
" };" +\
" var chart = "+\
"new google.visualization.BarChart(document.getElementById('trail_chart'));" +\
" chart.draw(data, options);" +\
" }" +\
" </script>" +\
" <div id='trail_chart' class='sa-chart-hw-290'></div>"
cursor.close()
connection.close()
return chart_content | 5,358,028 |
def bitsNotSet(bitmask, maskbits):
"""
Given a bitmask, returns True where any of maskbits are set
and False otherwise.
Parameters
----------
bitmask : ndarray
Input bitmask.
maskbits : ndarray
Bits to check if set in the bitmask
"""
goodLocs_bool = np.zeros(bitmask.shape).astype(bool)
for m in maskbits:
bitind = bm.bit_set(m, bitmask)
goodLocs_bool[bitind] = True
return goodLocs_bool | 5,358,029 |
def plot(foo, x, y):
"""x, y are tuples of 3 values: xmin, xmax, xnum"""
np_foo = np.vectorize(foo)
x_space = np.linspace(*x)
y_space = np.linspace(*y)
xx, yy = np.meshgrid(x_space, y_space)
xx = xx.flatten()
yy = yy.flatten()
zz = np_foo(xx, yy)
num_x = x[-1]
num_y = y[-1]
points = np.array([xx, yy, zz]).T
scale = coin.SoScale()
scale.scaleFactor.setValue(1, 1, abs(x[1] - x[0]) / abs(max(zz) - min(zz)))
return [scale, simple_quad_mesh(points, num_x, num_y)] | 5,358,030 |
def _StructPackEncoder(wire_type, format):
"""Return a constructor for an encoder for a fixed-width field.
Args:
wire_type: The field's wire type, for encoding tags.
format: The format string to pass to struct.pack().
"""
value_size = struct.calcsize(format)
def SpecificEncoder(field_number, is_repeated, is_packed):
local_struct_pack = struct.pack
if is_packed:
tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED)
local_EncodeVarint = _EncodeVarint
def EncodePackedField(write, value):
write(tag_bytes)
local_EncodeVarint(write, len(value) * value_size)
for element in value:
write(local_struct_pack(format, element))
return EncodePackedField
elif is_repeated:
tag_bytes = TagBytes(field_number, wire_type)
def EncodeRepeatedField(write, value):
for element in value:
write(tag_bytes)
write(local_struct_pack(format, element))
return EncodeRepeatedField
else:
tag_bytes = TagBytes(field_number, wire_type)
def EncodeField(write, value):
write(tag_bytes)
return write(local_struct_pack(format, value))
return EncodeField
return SpecificEncoder | 5,358,031 |
def transition_temperature(wavelength):
"""
To get temperature of the transition in K
Wavelength in micros
T = h*f / kB
"""
w = u.Quantity(wavelength, u.um)
l = w.to(u.m)
c = _si.c.to(u.m / u.s)
h = _si.h.to(u.eV * u.s)
kb = _si.k_B.to(u.eV / u.K)
f = c/l
t = h*f/kb
return t | 5,358,032 |
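# Hedged usage sketch, assuming `u` is astropy.units and `_si` exposes the SI
# constants (c, h, k_B) as the function body suggests. For example, the CO(1-0)
# rotational line near 2600.76 micron corresponds to roughly 5.5 K.
t = transition_temperature(2600.76)
print(t)  # ~5.53 K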
def truncate_string(string: str, max_length: int) -> str:
"""
Truncate a string to a specified maximum length.
:param string: String to truncate.
:param max_length: Maximum length of the output string.
:return: Possibly shortened string.
"""
if len(string) <= max_length:
return string
else:
return string[:max_length] | 5,358,033 |
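# Hedged usage sketch: assuming truncate_string from above is in scope.
print(truncate_string("hello world", 5))  # hello
print(truncate_string("hi", 5))           # hi (already short enough)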
def geoinfo_from_ip(ip: str) -> dict: # pylint: disable=invalid-name
"""Looks up the geolocation of an IP address using ipinfo.io
Example ipinfo output:
{
"ip": "1.1.1.1",
"hostname": "one.one.one.one",
"anycast": true,
"city": "Miami",
"region": "Florida",
"country": "US",
"loc": "25.7867,-80.1800",
"org": "AS13335 Cloudflare, Inc.",
"postal": "33132",
"timezone": "America/New_York",
"readme": "https://ipinfo.io/missingauth"
}
"""
valid_ip = ip_address(ip)
url = f"https://ipinfo.io/{valid_ip}/json"
resp = requests.get(url)
if resp.status_code != 200:
raise Exception(f"Geo lookup failed: GET {url} returned {resp.status_code}")
geoinfo = json.loads(resp.text)
return geoinfo | 5,358,034 |
async def test_camera_image(
hass: HomeAssistant,
mock_entry: MockEntityFixture,
simple_camera: tuple[Camera, str],
):
"""Test retrieving camera image."""
mock_entry.api.get_camera_snapshot = AsyncMock()
await async_get_image(hass, simple_camera[1])
mock_entry.api.get_camera_snapshot.assert_called_once() | 5,358,035 |
def mpileup2acgt(pileup, quality, depth, reference, qlimit=53,
noend=False, nostart=False):
"""
This function was written by Francesco Favero,
from: sequenza-utils pileup2acgt
URL: https://bitbucket.org/sequenza_tools/sequenza-utils
original code were protected under GPLv3 license.
Parse the mpileup format and return the occurrence of
each nucleotides in the given positions.
pileup format:
1 chr
2 1-based coordinate
3 reference base
4 depth
5 read bases
6 base qualities
7 mapping qualities
# argument pileup = column-6
"""
nucleot_dict = {'A': 0, 'C': 0, 'G': 0, 'T': 0}
strand_dict = {'A': 0, 'C': 0, 'G': 0, 'T': 0}
n = 0
block = {'seq': '', 'length': 0}
start = False
del_ins = False
l_del_ins = ''
last_base = None
ins_del_length = 0
for base in pileup:
if block['length'] == 0:
if base == '$':
if noend:
if last_base:
nucleot_dict[last_base.upper()] -= 1
if last_base.isupper():
strand_dict[last_base.upper()] -= 1
last_base = None
elif base == '^':
start = True
block['length'] += 1
block['seq'] = base
elif base == '+' or base == '-':
del_ins = True
block['length'] += 1
block['seq'] = base
elif base == '.' or base == ',':  ## . on forward, , on reverse
if ord(quality[n]) >= qlimit:
nucleot_dict[reference] += 1
if base == '.':
strand_dict[reference] += 1
last_base = reference
else:
last_base = reference.lower()
else:
last_base = None
n += 1
elif base.upper() in nucleot_dict:
if ord(quality[n]) >= qlimit:
nucleot_dict[base.upper()] += 1
if base.isupper():
strand_dict[base.upper()] += 1
last_base = base
else:
last_base = None
n += 1
else:
n += 1
else:
if start:
block['length'] += 1
block['seq'] += base
if block['length'] == 3:
if not nostart:
if base == '.' or base == ',':
if ord(quality[n]) >= qlimit:
nucleot_dict[reference] += 1
if base == '.':
strand_dict[reference] += 1
elif base.upper() in nucleot_dict:
if ord(quality[n]) >= qlimit:
nucleot_dict[base.upper()] += 1
if base.isupper():
strand_dict[base.upper()] += 1
block['length'] = 0
block['seq'] = ''
start = False
last_base = None
n += 1
elif del_ins:
if base.isdigit():
l_del_ins += base
block['seq'] += base
block['length'] += 1
else:
ins_del_length = int(l_del_ins) + 1 + len(l_del_ins)
block['seq'] += base
block['length'] += 1
if block['length'] == ins_del_length:
block['length'] = 0
block['seq'] = ''
l_del_ins = ''
# ins_del = False
ins_del_length = 0
nucleot_dict['Z'] = [strand_dict['A'], strand_dict['C'], strand_dict['G'],
strand_dict['T']]
return nucleot_dict | 5,358,036 |
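# Hedged usage sketch: assuming mpileup2acgt from above is in scope. Three
# reference-matching bases ('.' forward, ',' reverse) with qualities above
# qlimit all count towards the reference base 'A'; 'Z' holds the per-base
# forward-strand counts.
counts = mpileup2acgt("..,", "III", 3, "A")
print(counts)  # {'A': 3, 'C': 0, 'G': 0, 'T': 0, 'Z': [2, 0, 0, 0]}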
def normalize_missing(xs):
"""Normalize missing values to avoid string 'None' inputs.
"""
if isinstance(xs, dict):
for k, v in xs.items():
xs[k] = normalize_missing(v)
elif isinstance(xs, (list, tuple)):
xs = [normalize_missing(x) for x in xs]
elif isinstance(xs, basestring):
if xs.lower() in ["none", "null"]:
xs = None
elif xs.lower() == "true":
xs = True
elif xs.lower() == "false":
xs = False
return xs | 5,358,037 |
def ensure_command_line_tools_are_installed(command):
"""
Determine if the Xcode command line tools are installed.
If they are not installed, an exception is raised; in addition, a OS dialog
will be displayed prompting the user to install Xcode.
:param command: The command that needs to perform the verification check.
:param min_version: The minimum allowed version of Xcode, specified as a
tuple of integers (e.g., (11, 2, 1)). Default: ``None``, meaning there
is no minimum version.
"""
# We determine if the command line tools are installed by running:
#
# xcode-select --install
#
# If that command exits with status 0, it means the tools are *not*
# installed; but a dialog will be displayed prompting an installation.
#
# If it returns a status code of 1, the tools are already installed
# and outputs the message "command line tools are already installed"
#
# Any other status code is a problem.
try:
command.subprocess.check_output(
['xcode-select', '--install'],
stderr=subprocess.STDOUT
)
raise BriefcaseCommandError("""
Xcode command line developer tools are not installed.
You should be shown a dialog prompting you to install Xcode and the
command line tools. Select "Install" to install the command line developer
tools.
Re-run Briefcase once that installation is complete.
""")
except subprocess.CalledProcessError as e:
if e.returncode != 1:
print("""
*************************************************************************
** WARNING: Unable to determine if Xcode is installed **
*************************************************************************
Briefcase will proceed, assuming everything is OK. If you experience
problems, this is almost certainly the cause of those problems.
Please report this as a bug at:
https://github.com/beeware/briefcase/issues/new
In your report, please include the output from running:
xcode-select --install
from the command prompt.
*************************************************************************
""") | 5,358,038 |
def ini_inventory(nhosts=10):
"""Return a .INI representation of inventory"""
output = list()
inv_list = generate_inventory(nhosts)
for group in inv_list.keys():
if group == '_meta':
continue
# output host groups
output.append('[%s]' % group)
for host in inv_list[group].get('hosts', []):
output.append(host)
output.append('') # newline
# output child groups
output.append('[%s:children]' % group)
for child in inv_list[group].get('children', []):
output.append(child)
output.append('') # newline
# output group vars
output.append('[%s:vars]' % group)
for k, v in inv_list[group].get('vars', {}).items():
output.append('%s=%s' % (k, v))
output.append('') # newline
return '\n'.join(output) | 5,358,039 |
def get_invalid_bunny_revival_dungeons():
"""
Dungeon regions that can't be bunny revived from without superbunny state.
"""
yield 'Tower of Hera (Bottom)'
yield 'Swamp Palace (Entrance)'
yield 'Turtle Rock (Entrance)'
yield 'Sanctuary' | 5,358,040 |
def get_crp_constrained_partition_counts(Z, Cd):
"""Compute effective counts at each table given dependence constraints.
Z is a dictionary mapping customer to table, and Cd is a list of lists
encoding the dependence constraints.
"""
# Compute the effective partition.
counts = defaultdict(int)
seen = set()
# Table assignment of constrained customers.
for block in Cd:
seen.update(block)
customer = block[0]
table = Z[customer]
counts[table] += 1
# Table assignment of unconstrained customers.
for customer in Z:
if customer in seen:
continue
table = Z[customer]
counts[table] += 1
return counts | 5,358,041 |
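# Hedged usage sketch: assuming get_crp_constrained_partition_counts from above
# is in scope. Customers 0 and 1 are constrained to move together, so their block
# contributes a single effective count at table 0; customers 2 and 3 count individually.
Z = {0: 0, 1: 0, 2: 1, 3: 1}
Cd = [[0, 1]]
print(dict(get_crp_constrained_partition_counts(Z, Cd)))  # {0: 1, 1: 2}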
def create_doc():
"""Test basic layer creation and node creation."""
# Stupid tokenizer
tokenizer = re.compile(r"[a-zA-Z]+|[0-9]+|[^\s]")
doc = Document()
main_text = doc.add_text("main", "This code was written in Lund, Sweden.")
# 01234567890123456789012345678901234567
# 0 1 2 3
token = doc.add_layer("token", text=main_text.spantype)
for m in tokenizer.finditer(str(main_text)):
token.add(text=main_text[m.start():m.end()])
named_entity = doc.add_layer("named_entity", text=main_text.spantype, cls=T.string)
named_entity.add(text=main_text[25:29], cls="GPE")
named_entity.add(text=main_text[31:37], cls="GPE")
return doc | 5,358,042 |
def jissue_get_chunked(jira_in, project, issue_max_count, chunks=100):
""" This method is used to get the issue list with references,
in case the number of issues is more than 1000
"""
result = []
# step and rest simple calc
step = issue_max_count // chunks  # integer division so range() receives an int
rest = issue_max_count % chunks
# iterate the issue gathering
for i in range(step):
result.extend(jissue_query(jira_in, project, chunks*i, chunks))
result.extend(jissue_query(jira_in, project, issue_max_count-rest, rest))
return result | 5,358,043 |
def negative_height_check(height):
"""Check the height return modified if negative."""
if height > 0x7FFFFFFF:
return height - 4294967296
return height | 5,358,044 |
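# Hedged usage sketch: values above 0x7FFFFFFF are reinterpreted as negative
# 32-bit integers (two's complement); everything else passes through unchanged.
print(negative_height_check(4294967295))  # -1
print(negative_height_check(100))         # 100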
def turn_off_plotting(namespace=globals()):
"""Call turn_off_plotting(globals()) to turn off all plotting."""
use(namespace['plt'], namespace, True) | 5,358,045 |
def test_basic_parse():
"""Test parsing a basic expression."""
instructions = parse(tokenise('obj allow user edit'))
assert len(instructions) == 1
assert instructions == [('obj', 'allow', 'user', 'edit')] | 5,358,046 |
def main():
""" Main function """
args=get_args()
file_args = args.file
print(args.num)
if args.num:
file_num = args.num
else:
file_num=10
for fh in file_args:
num_lines = 0
print(f"{fh.name}")
for line in fh:
print(line, end='')
num_lines += 1
if num_lines == file_num:
print()
break | 5,358,047 |
def get_visemenet(model_name=None,
pretrained=False,
root=os.path.join("~", ".tensorflow", "models"),
**kwargs):
"""
Create VisemeNet model with specific parameters.
Parameters:
----------
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
"""
net = VisemeNet(
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
in_channels = kwargs["in_channels"] if ("in_channels" in kwargs) else 3
input_shape = (1,) + (in_channels,) + net.in_size if net.data_format == "channels_first" else\
(1,) + net.in_size + (in_channels,)
net.build(input_shape=input_shape)
net.load_weights(
filepath=get_model_file(
model_name=model_name,
local_model_store_dir_path=root))
return net | 5,358,048 |
def linked_gallery_view(request, obj_uuid):
"""
View For Permalinks
"""
gallery = get_object_or_404(Gallery, uuid=obj_uuid)
images = gallery.images.all().order_by(*gallery.display_sort_string)
paginator = Paginator(images, gallery.gallery_pagination_count)
page = request.GET.get('page')
try:
imgs = paginator.page(page)
except PageNotAnInteger:
imgs = paginator.page(1)
except EmptyPage:
imgs = paginator.page(paginator.num_pages)
context = {
"images": imgs,
"gallery": gallery,
"gallery_name": gallery.title
}
return render(request, 'image_list.html', context) | 5,358,049 |
def _gm_cluster_assign_id(gm_list, track_id, num_tracks, weight_threshold,
z_dim, max_id, max_iteration=1000):
"""The cluster algorithm that assign a new ID to the track
Args:
gm_list (:obj:`list`): List of ``GaussianComponent`` representing
current multi-target PHD density.
track_id (:obj:`int`): Current track id.
num_tracks (:obj:`int`): The number of tracks that this list of Gaussian
components need to split into.
weight_threshold (:obj:`float`): Initial weight threshold for each newly
spawned track.
z_dim (:obj:`int`): The dimensionality of measurement space.
max_id (:obj:`int`): The next track ID number that can be assigned.
max_iteration (:obj:`int`): Max number of iterations in case that the
clustering algorithm does not converge and oscillates.
Returns:
A `list` of Gaussian components with updated track ID and the next track
ID that can be assigned to new tracks in the future.
"""
clusters_mean = np.random.uniform(0, 1, (num_tracks, z_dim))
previous_clusters_mean = None
cluster_gms = [[] for i in range(num_tracks)]
count = 0
while np.any(clusters_mean != previous_clusters_mean) and \
count < max_iteration:
previous_clusters_mean = np.copy(clusters_mean)
# There n_tracks means, calculate the distance between each track,
# and sorted from high to low
gm_distance_matrix = _gm_cluster_distance(gm_list=gm_list,
clusters_mean=clusters_mean,
num_tracks=num_tracks,
z_dim=z_dim)
# Assign GM to each mean where the weight of each cluster equals or
# just higher than the weight threshold.
cluster_gms = _gm_group_cluster(gm_list=gm_list,
distance_matrix=gm_distance_matrix,
weight_threshold=weight_threshold)
# Update mean
for i in range(num_tracks):
new_mean = np.zeros((z_dim,), dtype=np.float32)
new_weight = 0.
for gm in cluster_gms[i]:
new_mean += gm.mean.flatten()[0:z_dim] * gm.weight
new_weight += gm.weight
if new_weight == 0.:
new_weight = 1
clusters_mean[i, :] = new_mean / new_weight
# Update count
count += 1
# Assign ID to each cluster
for i in range(num_tracks):
# For every new track, start counting with max_id
if track_id == 0 and i == 0:
for gm in cluster_gms[i]:
gm.mean[-1, :] = max_id
max_id += 1
elif i != 0:
for gm in cluster_gms[i]:
gm.mean[-1, :] = max_id
max_id += 1
return gm_list, max_id | 5,358,050 |
def get_bprop_scatter_nd(self):
"""Generate bprop for ScatterNd"""
op = P.GatherNd()
def bprop(indices, x, shape, out, dout):
return zeros_like(indices), op(dout, indices), zeros_like(shape)
return bprop | 5,358,051 |
def myHullNumber() -> int:
"""Возвращает бортовой номер робота.""" | 5,358,052 |
def dates(bot, mask, target, args):
"""Show the planned dates within the next days
%%dates
"""
config = dates_configuration(bot)
now = datetime.utcnow().replace(hour=0,
minute=0,
second=0,
microsecond=0)
yield from _update_cache(bot)
yield from output_dates(bot,
target,
now,
now + timedelta(days=config['list_days']),
config['filter_location']) | 5,358,053 |
def progress_update_r(**kwargs):
""" Receiver to update a progressbar
"""
index = kwargs.get('index')
if index:
update_pbar(index) | 5,358,054 |
def get_md5(location: str, ignore_hidden_files: bool=True) -> Optional[str]:
"""
Gets an MD5 checksum of the file or directory at the given location.
:param location: location of file or directory
:param ignore_hidden_files: whether hidden files should be ignored when calculating a checksum for a directory
:return: the MD5 checksum or `None` if the given location does not exist
"""
if not os.path.exists(location):
return None
if os.path.isfile(location):
with open(location, "rb") as file:
content = file.read()
return hashlib.md5(content).hexdigest()
else:
return dirhash(location, "md5", ignore_hidden=ignore_hidden_files) | 5,358,055 |
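# Hedged usage sketch: assuming get_md5 from above is in scope (directory hashing
# relies on the external `dirhash` helper, which this example does not touch).
# For a single file the result is the plain MD5 of its bytes.
import os
import tempfile

with tempfile.NamedTemporaryFile("w", delete=False, suffix=".txt") as tmp:
    tmp.write("hello")
print(get_md5(tmp.name))  # 5d41402abc4b2a76b9719d911017c592
os.remove(tmp.name)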
def set_output_image_folder(folder: str) -> None:
"""
Service that sets up the location of image output folder
:param folder: location to set
:return: None
"""
config_main.APPL_SAVE_LOCATION = folder
log_setup_info_to_console('IMAGE FOLDER OUTPUT:{}'.format(os.path.join(os.getcwd(), config_main.APPL_SAVE_LOCATION))) | 5,358,056 |
def delete_policy_rule(policy_key, key, access_token):
"""
Deletes a policy rule with the given key.
Returns the response JSON.
See http://localhost:8080/docs#/Policy/delete_rule_api_v1_policy__policy_key__rule__rule_key__delete
"""
return requests.delete(
f"{FIDESOPS_URL}/api/v1/policy/{policy_key}/rule/{key}",
headers=oauth_headers(access_token=access_token),
) | 5,358,057 |
def knn_search_parallel(data, K, qin=None, qout=None, tree=None, t0=None, eps=None, leafsize=None, copy_data=False):
""" find the K nearest neighbours for data points in data,
using an O(n log n) kd-tree, exploiting all logical
processors on the computer. If eps is None, a K-nearest-neighbour query is run
for each point; otherwise a ball search with the per-point radii in eps is performed. """
# print("starting the parallel search")
if eps is not None:
assert data.shape[0]==len(eps)
# build kdtree
if copy_data:
dataCopy = data.copy()
# print('copied data')
else:
dataCopy = data
if tree is None and leafsize is None:
tree = ss.cKDTree(dataCopy)
elif tree is None:
tree = ss.cKDTree(dataCopy, leafsize=leafsize)
if t0 is not None:
print('time to tree formation: %f' %(clock()-t0))
ndata = data.shape[0]
nproc = 20
# print('made the tree')
# compute chunk size
chunk_size = int(data.shape[0] / (4*nproc))
chunk_size = 100 if chunk_size < 100 else chunk_size
if qin==None or qout==None:
# set up a pool of processes
qin = processing.Queue(maxsize=int(ndata/chunk_size))
qout = processing.Queue(maxsize=int(ndata/chunk_size))
if eps is None:
pool = [processing.Process(target=__remote_process_query,
args=(rank, qin, qout, tree, K, leafsize))
for rank in range(nproc)]
else:
pool = [processing.Process(target=__remote_process_ball,
args=(rank, qin, qout, tree, leafsize))
for rank in range(nproc)]
for p in pool: p.start()
# put data chunks in input queue
cur, nc = 0, 0
while 1:
_data = data[cur:cur+chunk_size, :]
if _data.shape[0] == 0: break
if eps is None:
qin.put((nc,_data))
else:
_eps = eps[cur:cur+chunk_size]
qin.put((nc,_data,_eps))
cur += chunk_size
nc += 1
# read output queue
knn = []
while len(knn) < nc:
knn += [qout.get()]
# avoid race condition
_knn = [n for i,n in sorted(knn)]
knn = []
for tmp in _knn:
knn += [tmp]
# terminate workers
for p in pool: p.terminate()
if eps is None:
output = np.zeros((sum([ x.shape[0] for x in knn]),knn[0].shape[1]))
else:
output = np.zeros(sum([ len(x) for x in knn]))
outputi = 0
for x in knn:
if eps is None:
nextVal = x.shape[0]
else:
nextVal = len(x)
output[outputi:(outputi+nextVal)] = x
outputi += nextVal
return output | 5,358,058 |
def find_absolute_reference(
target: str,
domain: str,
remote_url: urllib.parse.ParseResult,
https_mode: _constants.HTTPSMode = _constants.DEFAULT_HTTPS_MODE,
base: typing.Optional[urllib.parse.ParseResult] = None
) -> typing.Optional[str]:
"""
Transform the partly defined target string to a full URL
The implementation of this method is partly based
on RFC 3986, section 5.1 and 5.2 (with modifications).
:param target: anything that seems to be an URI, relative or absolute
:param domain: remote network location name (usually domain name)
:param remote_url: remote URL that was used before, i.e. the referrer
to the new target (and most likely also the origin of the reference)
:param https_mode: definition how to treat the HTTPS mode (for the scheme)
:param base: optional base URI used to correctly find absolute paths
for relative resource indicators (uses the remote URL if absent)
:return: a full URL that can be used to request further resources,
if possible and the target matched the criteria (otherwise None);
one of those criteria is the same remote netloc, which is enforced
to limit the width of our requests to not query the whole web
"""
def merge_paths(a: urllib.parse.ParseResult, b: str) -> str:
"""
Merge two paths, where `a` should be a base and `b` should be a reference
"""
if not b.startswith("/"):
b = "/" + b
if a.netloc != "" and a.path == "":
return b
return "/".join(a.path.split("/")[:-1]) + b
url = urllib.parse.urlparse(target)
scheme, netloc, path, params, query, fragment = url
# TODO: section 5.1, order of precedence
if base is None:
base = remote_url
# Unknown schemes are ignored (e.g. mailto:) and a given schema indicates
# an absolute URL which should not be processed (only filtered)
if scheme != "" and scheme.lower() not in ("http", "https"):
return
elif scheme == "":
if https_mode == _constants.HTTPSMode.DEFAULT:
scheme = remote_url.scheme
elif https_mode in (_constants.HTTPSMode.HTTPS_ONLY, _constants.HTTPSMode.HTTPS_FIRST):
scheme = "https"
elif https_mode == _constants.HTTPSMode.HTTP_ONLY:
scheme = "http"
elif netloc != "" and netloc.lower() == domain.lower():
return urllib.parse.urlunparse(
(scheme, netloc, remove_dot_segments(path), params, query, "")
)
# Other network locations are ignored (so we don't traverse the whole web)
if netloc != "" and netloc.lower() != domain.lower():
return
elif netloc != "":
return urllib.parse.urlunparse(
(scheme, netloc, remove_dot_segments(path), params, query, "")
)
netloc = domain
# Determine the new path
if path == "":
path = base.path
if query == "":
query = base.query
else:
if path.startswith("/"):
path = remove_dot_segments(path)
else:
path = remove_dot_segments(merge_paths(base, path))
return urllib.parse.urlunparse(
(scheme, netloc, remove_dot_segments(path), params, query, "")
) | 5,358,059 |
def compute_vectors_from_coordinates(
x: np.ndarray, y: np.ndarray, fps: int = 1
) -> Tuple[Vector, Vector, Vector, Vector, np.array]:
"""
Given the X and Y position at each frame -
Compute vectors:
i. velocity vector
ii. unit tangent
iii. unit norm
iv. acceleration
and scalar quantities:
i. speed
ii. curvature
See: https://stackoverflow.com/questions/28269379/curve-curvature-in-numpy
"""
# compute velocity vector
dx_dt = np.gradient(x)
dy_dt = np.gradient(y)
velocity = (
np.array([[dx_dt[i], dy_dt[i]] for i in range(dx_dt.size)]) * fps
)
# compute scalar speed at each frame
ds_dt = np.sqrt(dx_dt * dx_dt + dy_dt * dy_dt)
# get unit tangent vector
tangent = np.array([1 / ds_dt] * 2).transpose() * velocity
unit_tangent = tangent / np.apply_along_axis(
np.linalg.norm, 1, tangent
).reshape(len(tangent), 1)
# get unit normal vector
tangent_x = tangent[:, 0]
tangent_y = tangent[:, 1]
deriv_tangent_x = np.gradient(tangent_x)
deriv_tangent_y = np.gradient(tangent_y)
dT_dt = np.array(
[
[deriv_tangent_x[i], deriv_tangent_y[i]]
for i in range(deriv_tangent_x.size)
]
)
length_dT_dt = np.sqrt(
deriv_tangent_x * deriv_tangent_x + deriv_tangent_y * deriv_tangent_y
)
normal = np.array([1 / length_dT_dt] * 2).transpose() * dT_dt
# get acceleration and curvature
d2s_dt2 = np.gradient(ds_dt)
d2x_dt2 = np.gradient(dx_dt)
d2y_dt2 = np.gradient(dy_dt)
curvature = (
np.abs(d2x_dt2 * dy_dt - dx_dt * d2y_dt2)
/ (dx_dt * dx_dt + dy_dt * dy_dt) ** 1.5
)
t_component = np.array([d2s_dt2] * 2).transpose()
n_component = np.array([curvature * ds_dt * ds_dt] * 2).transpose()
acceleration = t_component * tangent + n_component * normal
return (
Vector(velocity),
Vector(tangent),
Vector(
-unit_tangent[:, 1], unit_tangent[:, 0]
), # normal as rotated tangent
Vector(acceleration),
curvature,
) | 5,358,060 |
def get_now(pair):
"""
Return last info for crypto currency pair
:param pair: ex: btc-ltc
:return:
"""
info = {'marketName': pair, 'tickInterval': 'oneMin'}
return requests.get('https://bittrex.com/Api/v2.0/pub/market/GetLatestTick', params=info).json() | 5,358,061 |
def resize_and_convert_images_to_png():
"""
For each file in the folders:
Convert to a .PNG file, preserve file size
Label it something computer-processable
"""
print("Running resize_and_convert_images_to_png()")
current_directory = './Deep Learning Plant Classifier'
# first pass - convert to png, preserving file name (except for the extension)
for (root, _, files) in os.walk(current_directory):
tif_filepaths = [rf"{root}\{f}" for f in files if f.endswith(".tif")]
with ThreadPoolExecutor(max_workers=16) as executor:
executor.map(resize_file, tif_filepaths)
# second pass - delete the tif file IF PNG EXISTS
for (root, _, files) in os.walk(current_directory):
for file_name in [f for f in files if f.endswith(".tif")]:
file_path = rf"{root}\{file_name}"
png_filepath = file_path.replace('.tif', '.png')
if os.path.isfile(png_filepath):
os.remove(file_path)
print("All done! PNG images are ready to be split into fragments now.") | 5,358,062 |
def fixture_buildchain_template_context() -> Any:
"""Emulate .in template context for buildchain."""
buildchain_path = paths.REPO_ROOT / "buildchain"
sys.path.insert(0, str(buildchain_path))
# pylint: disable=import-error,import-outside-toplevel
from buildchain import versions
# pylint: enable=import-error,import-outside-toplevel
sys.path.pop(0)
return {
"VERSION": versions.VERSION,
} | 5,358,063 |
def test_no_optional_attrs():
"""Test loading DAG with no optional attributes."""
obj = OptionalAttrs("go-basic.obo", None)
obj.chk_no_optattrs()
obj = OptionalAttrs("go-basic.obo", [])
obj.chk_no_optattrs()
obj = OptionalAttrs("go-basic.obo", set([]))
obj.chk_no_optattrs() | 5,358,064 |
def hsv(h: float, s: float, v: float) -> int:
"""Convert HSV to RGB.
:param h: Hue (0.0 to 1.0)
:param s: Saturation (0.0 to 1.0)
:param v: Value (0.0 to 1.0)
"""
return 0xFFFF | 5,358,065 |
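The function above documents an HSV-to-RGB conversion but returns a constant. For comparison only, here is a minimal sketch of what the documented behaviour could look like using the standard library; the name hsv_to_rgb24 and the packed 24-bit 0xRRGGBB return value are assumptions, not the original API.

import colorsys

def hsv_to_rgb24(h: float, s: float, v: float) -> int:
    # Sketch only: convert with colorsys and pack the channels into 0xRRGGBB.
    r, g, b = colorsys.hsv_to_rgb(h, s, v)
    return (round(r * 255) << 16) | (round(g * 255) << 8) | round(b * 255)

hsv_to_rgb24(0.0, 1.0, 1.0)  # 0xFF0000, pure red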
def transform2json(source, target):
"""
    Transform a behaviors file in TSV format to JSON for later evaluation.
    Args:
        source: path to the input behaviors TSV file
        target: path to the output JSON-lines file
"""
behaviors = pd.read_table(
source,
header=None,
names=['uid', 'time', 'clicked_news', 'impression'])
f = open(target, "w")
with tqdm(total=len(behaviors), desc="Transforming tsv to json") as pbar:
for row in behaviors.itertuples(index=False):
item = {}
item['uid'] = row.uid[1:]
item['time'] = row.time
item['impression'] = {
x.split('-')[0][1:]: int(x.split('-')[1])
for x in row.impression.split()
}
f.write(json.dumps(item) + '\n')
pbar.update(1)
f.close() | 5,358,066 |
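A small end-to-end illustration (the row below is a hypothetical MIND-style behaviors line; the [1:] slices in the function assume 'U'/'N' prefixes on the ids):

import os, tempfile

row = "U13740\t11/11/2019 9:05:58 AM\tN55189 N42782\tN55689-1 N35729-0\n"
with tempfile.NamedTemporaryFile("w", suffix=".tsv", delete=False) as src:
    src.write(row)
dst = src.name + ".json"
transform2json(src.name, dst)
print(open(dst).read().strip())
# {"uid": "13740", "time": "11/11/2019 9:05:58 AM", "impression": {"55689": 1, "35729": 0}}
os.remove(src.name)
os.remove(dst)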
def make_exponential_mask(img, locations, radius, alpha, INbreast=False):
"""Creating exponential proximity function mask.
Args:
img (np.array, 2-dim): the image, only it's size is important
locations (np.array, 2-dim): array should be (n_locs x 2) in size and
each row should correspond to a location [x,y]. Don't need to be integer,
truncation is applied.
NOTICE [x,y] where x is row number (distance from top) and y column number
(distance from left)
radius (int): radius of the exponential pattern
alpha (float): decay rate
INbreast (bool, optional): Not needed anymore, handled when parsing INbreast dataset
Returns:
mask (np.array, 0.0-1.0): Exponential proximity function
"""
# create kernel which we will be adding at locations
# Kernel has radial exponential decay form
kernel = np.zeros((2*radius+1,2*radius+1))
for i in range(0, kernel.shape[0]):
for j in range(0, kernel.shape[1]):
d = np.sqrt((i-radius)**2+(j-radius)**2)
            if d <= radius:
                kernel[i, j] = (np.exp(alpha * (1 - d / radius)) - 1) / (np.exp(alpha) - 1)
# pad original img to avoid out of bounds errors
img = np.pad(img, radius+1, 'constant').astype(float)
# update locations
locations = np.array(locations)+radius+1
locations = np.round(locations).astype(int)
# initialize mask
mask = np.zeros_like(img)
for location in locations:
if INbreast:
y, x = location
else:
x, y = location
# add kernel
        mask[x-radius:x+radius+1, y-radius:y+radius+1] = np.maximum(mask[x-radius:x+radius+1, y-radius:y+radius+1], kernel)
# unpad
mask = mask[radius+1:-radius-1,radius+1:-radius-1]
return mask | 5,358,067 |
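A quick check on a small canvas (values are illustrative): the mask peaks at 1.0 on the marked pixel and stays 0 beyond the given radius.

img = np.zeros((64, 64))
mask = make_exponential_mask(img, locations=[[32, 20]], radius=10, alpha=3.0)
assert mask[32, 20] == 1.0   # kernel centre: (exp(alpha) - 1) / (exp(alpha) - 1)
assert mask[32, 40] == 0.0   # 20 pixels away, outside the radius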
def loadTileSources(entryPointName='large_image.source', sourceDict=AvailableTileSources):
"""
Load all tilesources from entrypoints and add them to the
AvailableTileSources dictionary.
:param entryPointName: the name of the entry points to load.
:param sourceDict: a dictionary to populate with the loaded sources.
"""
for entryPoint in iter_entry_points(entryPointName):
try:
sourceClass = entryPoint.load()
if sourceClass.name and None in sourceClass.extensions:
sourceDict[entryPoint.name] = sourceClass
config.getConfig('logprint').debug('Loaded tile source %s' % entryPoint.name)
except Exception:
config.getConfig('logprint').exception(
                'Failed to load tile source %s' % entryPoint.name)
def delete(client, url: str, payload: dict) -> Tuple[dict, bool]:
"""Make DELETE requests to K8s (see `k8s_request`)."""
resp, code = request(client, 'DELETE', url, payload, headers=None)
err = (code not in (200, 202))
if err:
logit.error(f"{code} - DELETE - {url} - {resp}")
return (resp, err) | 5,358,069 |
def f(x):
"""The objective is defined as the cost + a per-demographic penalty
for each demographic not reached."""
n = len(x)
assert n == n_venues
reached = np.zeros(n_demographics, dtype=int)
cost = 0.0
for xi, ri, ci in zip(x, r, c):
if xi:
            reached = reached | ri  # element-wise OR: mark the demographics this venue reaches
cost += ci
for ri, pi in zip(reached, p):
if ri == 0:
cost += pi
return cost | 5,358,070 |
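The objective reads n_venues, n_demographics, r, c and p from the enclosing scope; the tiny instance below is hypothetical and only illustrates how coverage and penalties combine (it assumes f is defined at module level so these names resolve as globals at call time).

n_venues, n_demographics = 3, 2
r = [np.array([1, 0]), np.array([0, 1]), np.array([1, 1])]  # demographics reached by each venue
c = [10.0, 12.0, 25.0]                                      # cost of booking each venue
p = [100.0, 100.0]                                          # penalty per demographic missed
print(f([1, 1, 0]))  # 22.0  - both demographics covered, only venue costs
print(f([0, 0, 1]))  # 25.0  - venue 3 alone covers both
print(f([1, 0, 0]))  # 110.0 - cost 10 plus one 100 penalty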
def clamp(minVal, val, maxVal):
"""Clamp a `val` to be no lower than `minVal`, and no higher than `maxVal`."""
return max(minVal, min(maxVal, val)) | 5,358,071 |
def _remove_discussion_tab(course, user_id):
"""
Remove the discussion tab for the course.
user_id is passed to the modulestore as the editor of the xblock.
"""
course.tabs = [tab for tab in course.tabs if not tab.type == 'discussion']
modulestore().update_item(course, user_id) | 5,358,072 |
def generate_primes(start):
""" generate primes in increasing order, starting from
the number start.
>>> generator = generate_primes(2)
>>> [generator.next() for _ in xrange(10)] # first 10 primes
[2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
>>> generator = generate_primes(100)
>>> # first 10 primes over 100
>>> [generator.next() for _ in xrange(10)]
[101, 103, 107, 109, 113, 127, 131, 137, 139, 149]
"""
for n in itertools.count(start):
# check if n is a prime
maxdivisor = int(math.floor(math.sqrt(n))) # maximum possible divisor
for div_cand in range(2, maxdivisor+1):
if n%div_cand==0:
break # n is not a prime, continue searching
else: # no divisors -> n is a prime
yield n | 5,358,073 |
def get_closest_spot(
lat: float, lng: float, area: config.Area
) -> t.Optional[config.Spot]:
"""Return closest spot if image taken within 100 m"""
if not area.spots:
return None
distances = [
(great_circle((spot.lat, spot.lng), (lat, lng)).meters, spot)
for spot in area.spots
]
distance, closest_spot = min(distances)
return closest_spot if distance < 100 else None | 5,358,074 |
def drawBezier(
page: Page,
p1: point_like,
p2: point_like,
p3: point_like,
p4: point_like,
color: OptSeq = None,
fill: OptSeq = None,
dashes: OptStr = None,
width: float = 1,
morph: OptStr = None,
closePath: bool = False,
lineCap: int = 0,
lineJoin: int = 0,
overlay: bool = True,
stroke_opacity: float = 1,
fill_opacity: float = 1,
oc: int = 0,
) -> Point:
"""Draw a general cubic Bezier curve from p1 to p4 using control points p2 and p3."""
img = page.newShape()
Q = img.drawBezier(Point(p1), Point(p2), Point(p3), Point(p4))
img.finish(
color=color,
fill=fill,
dashes=dashes,
width=width,
lineCap=lineCap,
lineJoin=lineJoin,
morph=morph,
closePath=closePath,
stroke_opacity=stroke_opacity,
fill_opacity=fill_opacity,
oc=oc,
)
img.commit(overlay)
return Q | 5,358,075 |
def product(numbers):
"""Return the product of the numbers.
>>> product([1,2,3,4])
24
"""
return reduce(operator.mul, numbers, 1) | 5,358,076 |
def load_ref_system():
""" Returns l-phenylalanine as found in the IQMol fragment library.
All credit to https://github.com/nutjunkie/IQmol
"""
return psr.make_system("""
N 0.7060 -1.9967 -0.0757
C 1.1211 -0.6335 -0.4814
C 0.6291 0.4897 0.4485
C -0.8603 0.6071 0.4224
C -1.4999 1.1390 -0.6995
C -2.8840 1.2600 -0.7219
C -3.6384 0.8545 0.3747
C -3.0052 0.3278 1.4949
C -1.6202 0.2033 1.5209
C 2.6429 -0.5911 -0.5338
O 3.1604 -0.2029 -1.7213
O 3.4477 -0.8409 0.3447
H -0.2916 -2.0354 -0.0544
H 1.0653 -2.2124 0.8310
H 0.6990 -0.4698 -1.5067
H 1.0737 1.4535 0.1289
H 0.9896 0.3214 1.4846
H -0.9058 1.4624 -1.5623
H -3.3807 1.6765 -1.6044
H -4.7288 0.9516 0.3559
H -3.5968 0.0108 2.3601
H -1.1260 -0.2065 2.4095
H 4.1118 -0.2131 -1.6830
""") | 5,358,077 |
def parse_dependency_file(filename):
"""Parse a data file containing dependencies.
The input file is the following csv format:
name,minerals,gas,build time,dependencies
command center,400,0,100,
orbital command,150,0,35,command center|barracks
    Notice that the "dependencies" column is a list, delimited with |
    # TODO: We should lowercase everything in this file
# TODO: We should validate all names and dependencies are valid units/buildings/research
# TODO: Should we store this stuff in memory rather than reading a file? Or memcache it?
"""
reader = csv.DictReader(open(filename, 'rb'), delimiter=',', quotechar='"')
data = list(reader) # Force to a list
# Ensure values for these keys are integers
int_keys = ['minerals', 'gas', 'supply', 'build time', 'research time']
result = {}
for line in data:
# Change "thing1 |thing2 | thing3 " into ["thing1", "thing2", "thing3"]
line['dependencies'] = [s.strip() for s in line['dependencies'].split("|") if s]
for key in int_keys:
if key in line:
line[key] = int(line[key])
result[line['name']] = line
return result | 5,358,078 |
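Assuming a file dependencies.csv with exactly the format shown in the docstring (file name and contents are illustrative; note the 'rb' open implies Python 2 csv semantics), the parsed structure looks like this:

data = parse_dependency_file("dependencies.csv")
data["orbital command"]["dependencies"]  # ['command center', 'barracks']
data["orbital command"]["minerals"]      # 150
data["command center"]["dependencies"]   # []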
def main():
""" there are four cubes with concrete colors on their sides and the goal
is to place each cube in one row that way that along the row each side
presents four different colors """
# ----------- ----------- ----------- ----------- ----------- -----------
# front back left right top bottom
# ----------- ----------- ----------- ----------- ----------- -----------
cubes = [ Cube(Color.GREEN, Color.WHITE, Color.GREEN, Color.RED, Color.WHITE, Color.BLUE),
Cube(Color.WHITE, Color.RED , Color.WHITE, Color.GREEN, Color.BLUE, Color.RED),
Cube(Color.RED, Color.RED, Color.RED , Color.GREEN, Color.BLUE, Color.WHITE),
Cube(Color.BLUE, Color.RED, Color.GREEN, Color.GREEN, Color.WHITE, Color.BLUE) ]
cubesChecker = CubesChecker(cubes)
cubesChecker.calculate() | 5,358,079 |
def range_with_bounds(start: int, stop: int, interval: int) -> List[int]:
"""Return list"""
result = [int(val) for val in range(start, stop, interval)]
if not isclose(result[-1], stop):
result.append(stop)
return result | 5,358,080 |
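The endpoint is always kept, whether or not the step lands on it exactly (math.isclose and typing.List are assumed to be imported alongside the function):

range_with_bounds(0, 10, 3)  # [0, 3, 6, 9, 10]
range_with_bounds(0, 10, 5)  # [0, 5, 10]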
def insert(cursor, name, value):
""" Insert data into CrateDB with a current timestamp. """
cursor.execute("""INSERT INTO sensordata (ts, name, value) VALUES (?, ?, ?)""",
(timestamp_ms(), name, value)) | 5,358,081 |
def iou_score(box1, box2):
"""Returns the Intersection-over-Union score, defined as the area of
    the intersection divided by the area of the union of
the two bounding boxes. This measure is symmetric.
Args:
box1: The coordinates for box 1 as a list of points
box2: The coordinates for box 2 in same format as box1.
"""
if len(box1) == 2:
x1, y1 = box1[0]
x2, y2 = box1[1]
box1 = np.array([[x1, y1], [x2, y1], [x2, y2], [x1, y2]])
if len(box2) == 2:
x1, y1 = box2[0]
x2, y2 = box2[1]
box2 = np.array([[x1, y1], [x2, y1], [x2, y2], [x1, y2]])
    if any(cv2.contourArea(np.int32(box)[:, np.newaxis, :]) == 0 for box in [box1, box2]):
warnings.warn('A box with zero area was detected.')
return 0
pc = pyclipper.Pyclipper()
pc.AddPath(np.int32(box1), pyclipper.PT_SUBJECT, closed=True)
pc.AddPath(np.int32(box2), pyclipper.PT_CLIP, closed=True)
intersection_solutions = pc.Execute(pyclipper.CT_INTERSECTION, pyclipper.PFT_EVENODD,
pyclipper.PFT_EVENODD)
union_solutions = pc.Execute(pyclipper.CT_UNION, pyclipper.PFT_EVENODD, pyclipper.PFT_EVENODD)
union = sum(cv2.contourArea(np.int32(points)[:, np.newaxis, :]) for points in union_solutions)
intersection = sum(
cv2.contourArea(np.int32(points)[:, np.newaxis, :]) for points in intersection_solutions)
return intersection / union | 5,358,082 |
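A small worked example with two axis-aligned boxes given as corner pairs (requires the same cv2/pyclipper dependencies as the function): intersection area 1, union area 7.

box_a = [(0, 0), (2, 2)]
box_b = [(1, 1), (3, 3)]
iou_score(box_a, box_b)  # 1 / 7 ≈ 0.1429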
def _actually_on_chip(ra, dec, obs_md):
"""
    Take a numpy array of RA in degrees, a numpy array of Dec in degrees,
and an ObservationMetaData and return a boolean array indicating
which of the objects are actually on a chip and which are not
"""
out_arr = np.array([False]*len(ra))
d_ang = 2.11
good_radii = np.where(angularSeparation(ra, dec, obs_md.pointingRA, obs_md.pointingDec)<d_ang)
if len(good_radii[0]) > 0:
chip_names = chipNameFromRaDecLSST(ra[good_radii], dec[good_radii], obs_metadata=obs_md).astype(str)
vals = np.where(np.char.find(chip_names, 'None')==0, False, True)
out_arr[good_radii] = vals
return out_arr | 5,358,083 |
def outlier_dataset(seed=None) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
"""Generates Outliers dataset, containing 10'000 inliers and 50 outliers
Args:
seed: random seed for generating points
Returns:
Tuple containing the inlier features, inlier labels,
outlier features and outlier labels
"""
if seed is not None:
np.random.seed(seed)
inlier_feats = np.concatenate(
[np.random.normal(1, 1, 5000), np.random.normal(-1, 1, 5000)]
)
inlier_labels = np.concatenate(
[
np.ones((5000,)),
-1 * np.ones((5000,)),
]
)
outlier_feats = np.concatenate(
[np.random.normal(-200, 1, 25), np.random.normal(200, 1, 25)]
)
outlier_labels = np.concatenate(
[
np.ones((25,)),
-1 * np.ones((25,)),
]
)
return inlier_feats, inlier_labels, outlier_feats, outlier_labels | 5,358,084 |
def posts(request, payload={}, short_id=None):
"""
Posts endpoint of the example.com public api
Request with an id parameter:
/public_api/posts/1qkx8
POST JSON in the following format:
POST /public_api/posts/
{"ids":["1qkx8","ma6fz"]}
"""
Metrics.api_comment.record(request)
ids = payload.get('ids')
if short_id and not ids:
try:
comment = Comment.details_by_id(long_id(short_id), promoter=PublicAPICommentDetails)
(comment,) = CachedCall.multicall([comment])
return comment.to_client()
except (ObjectDoesNotExist, util.Base36DecodeException):
raise ServiceError("Post not found")
elif ids:
ids = [long_id(x) for x in set(ids)]
calls = [Comment.details_by_id(id, ignore_not_found=True, promoter=PublicAPICommentDetails) for id in ids]
comments = CachedCall.multicall(calls, skip_decorator=True)
return {'posts': [x.to_client() for x in comments if x]} | 5,358,085 |
def compute_bleu(reference_corpus,
translation_corpus,
max_order=4,
smooth=False):
"""Computes BLEU score of translated segments against one or more references.
Args:
reference_corpus: list of lists of references for each translation. Each
reference should be tokenized into a list of tokens.
translation_corpus: list of translations to score. Each translation
should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
smooth: Whether or not to apply Lin et al. 2004 smoothing.
Returns:
      6-tuple with the BLEU score, n-gram precisions, brevity penalty,
      length ratio, translation length and reference length.
      bleu: float, BLEU score of the translations,
      precisions: list, precision for each n-gram order,
      bp: brevity penalty factor applied to short translations,
      ratio: translation_length / reference_length,
      translation_length: int, total length of the translations,
      reference_length: int, total length of the shortest references
"""
matches_by_order = [0] * max_order
possible_matches_by_order = [0] * max_order
reference_length = 0
translation_length = 0
for (references, translation) in zip(reference_corpus, translation_corpus):
reference_length += min(len(r) for r in references)
translation_length += len(translation)
merged_ref_ngram_counts = collections.Counter()
        for reference in references:  # consider every reference at once
            merged_ref_ngram_counts |= _get_ngrams(reference, max_order)  # counter union (element-wise max)
        translation_ngram_counts = _get_ngrams(translation, max_order)
        overlap = translation_ngram_counts & merged_ref_ngram_counts  # counter intersection (element-wise min)
        # matches_by_order: {len(ngram): sum of ngram overlap counts}
        for ngram in overlap:
            matches_by_order[len(ngram) - 1] += overlap[ngram]
        # possible_matches_by_order (total matchable n-grams): {len(ngram): sum of possible matches per order}
for order in range(1, max_order + 1):
possible_matches = len(translation) - order + 1
if possible_matches > 0:
possible_matches_by_order[order - 1] += possible_matches
precisions = [0] * max_order
for i in range(0, max_order):
if smooth:
precisions[i] = ((matches_by_order[i] + 1.) /
(possible_matches_by_order[i] + 1.))
else:
if possible_matches_by_order[i] > 0:
precisions[i] = (float(matches_by_order[i]) /
possible_matches_by_order[i])
else:
precisions[i] = 0.0
if min(precisions) > 0:
p_log_sum = sum((1. / max_order) * math.log(p) for p in precisions)
geo_mean = math.exp(p_log_sum)
else:
geo_mean = 0
    # Brevity penalty: penalise short translations, which would otherwise get inflated precision scores
ratio = float(translation_length) / reference_length
if ratio > 1.0:
bp = 1.
else:
bp = math.exp(1 - 1. / ratio)
bleu = geo_mean * bp
return (bleu, precisions, bp, ratio, translation_length, reference_length) | 5,358,086 |
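Single-pair example (token lists are whatever you feed in; the _get_ngrams helper used above must be importable alongside this function). An exact match gives unit precisions, a brevity penalty of 1 and a BLEU of 1.0.

references = [[["the", "cat", "sat", "on", "the", "mat"]]]
translations = [["the", "cat", "sat", "on", "the", "mat"]]
bleu, precisions, bp, ratio, trans_len, ref_len = compute_bleu(references, translations)
print(bleu, precisions, bp)  # 1.0 [1.0, 1.0, 1.0, 1.0] 1.0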
def rf_agg_local_mean(tile_col):
"""Compute the cellwise/local mean operation between Tiles in a column."""
return _apply_column_function('rf_agg_local_mean', tile_col) | 5,358,087 |
def Shot(project, name):
"""
    Fetch shot information from the REST API.
    Returns a (dictionary, error) tuple.
"""
restURL = "http://10.0.90.251/api/shot?project=%s&name=%s" % (project, name)
try:
data = json.load(urllib2.urlopen(restURL))
except:
return {}, "RestAPI에 연결할 수 없습니다."
if "error" in data:
return {}, data["error"]
return data["data"], None | 5,358,088 |
def inversion_double(in_array):
"""
Get the input boolean array along with its element-wise logical not beside it. For error correction.
>>> inversion_double(np.array([1,0,1,1,1,0,0,1], dtype=np.bool))
array([[ True, False, True, True, True, False, False, True],
[False, True, False, False, False, True, True, False]])
"""
return np.stack((in_array, np.logical_not(in_array))) | 5,358,089 |
def query_to_csv(engine, host, user, port, password, database, query, headers=False, out_type='stdout', destination_file=None, delimiter=',', quotechar='"', print_info=1000):
""" Run a query and store the result to a CSV file """
# Get SQL connection
connection = get_connection(
engine=engine,
host=host,
user=user,
port=port,
password=password,
database=database
)
cursor = get_cursor(connection)
if out_type == 'file':
print('\n* Exporting rows...')
with open_tempfile() if out_type == 'stdout' else open_file(resolve_home_dir(destination_file)) as file_:
writer = get_writer(file_, delimiter=delimiter, quotechar=quotechar)
# Execute query
execute_query(cursor=cursor, query=query)
# Write headers if requested
if headers:
writer.writerow(fetch_headers(cursor=cursor))
# Write rows to CSV
i = 0
while True:
row = cursor.fetchone()
if not row:
break
# Increment row counter
i += 1
if out_type == 'file' and i % print_info == 0:
print(' ...%s rows written' % "{:,}".format(i))
writer.writerow(row.values())
if out_type == 'file':
print(' ...done')
print('* The result has been exported to %s.\n' %
(destination_file))
    cursor.close()
# Print stdout
if out_type == 'stdout':
file_to_stdout() | 5,358,090 |
def test_list_roles(requests_mock):
"""
Tests synapse-list-users command function.
"""
from Synapse import Client, list_roles_command
mock_response = util_load_json('test_data/list_roles.json')
requests_mock.get('https://test.com/api/v1/auth/roles', json=mock_response)
mock_roles = util_load_json('test_data/list_roles.json')
requests_mock.get('https://test.com/api/v1/auth/roles', json=mock_roles)
client = Client(
base_url='https://test.com/api/v1',
username='test',
password='test',
verify=False,
proxy=False
)
response = list_roles_command(client)
assert response.outputs_prefix == 'Synapse.Roles'
assert response.outputs_key_field == 'Iden'
assert response.outputs[0]['Iden'] == mock_response['result'][0]['iden'] | 5,358,091 |
def test_expands_blank_panes():
"""Expand blank config into full form.
Handle ``NoneType`` and 'blank'::
# nothing, None, 'blank'
'panes': [
None,
'blank'
]
# should be blank
'panes': [
'shell_command': []
]
Blank strings::
panes: [
''
]
# should output to:
panes:
'shell_command': ['']
"""
yaml_config_file = os.path.join(example_dir, 'blank-panes.yaml')
test_config = load_config(yaml_config_file)
assert config.expand(test_config) == fixtures.expand_blank.expected | 5,358,092 |
def solution(N):
"""
This is a fairly simple task.
What we need to do is:
1. Get string representation in binary form (I love formatted string literals)
2. Measure biggest gap of zeroes (pretty self explanatory)
"""
# get binary representation of number
binary_repr = f"{N:b}"
# initialise counters
current_gap, max_gap = 0, 0
for b in binary_repr:
# end of gap, update max
if b == '1':
max_gap = max(current_gap, max_gap)
current_gap = 0
# increase gap counter
else:
current_gap += 1
return max_gap | 5,358,093 |
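The classic binary-gap cases (trailing zeros are not counted, since no closing '1' ever updates max_gap):

solution(9)    # 2  (binary 1001)
solution(529)  # 4  (binary 1000010001)
solution(20)   # 1  (binary 10100)
solution(32)   # 0  (binary 100000)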
def version(verbose: bool = False) -> None:
"""Show version information"""
if not verbose:
console.print(idom.__version__)
else:
table = Table()
table.add_column("Package")
table.add_column("Version")
table.add_column("Language")
table.add_row("idom", str(idom.__version__), "Python")
for js_pkg, js_ver in _private.build_dependencies().items():
table.add_row(js_pkg, js_ver, "Javascript")
console.print(table) | 5,358,094 |
def distributed_compute_expectations(
building_blocks: Tuple[cw.ComplexDeviceArray],
operating_axes: Tuple[Tuple[int]],
pbaxisums: Tuple[Tuple[cw.ComplexDeviceArray]],
pbaxisums_operating_axes: Tuple[Tuple[Tuple[int]]],
pbaxisum_coeffs: Tuple[Tuple[float]],
num_discretes: int,
) -> ShardedDeviceArray:
"""
Compute the expectation values of several observables given in `pbaxisums`.
    This function uses a single pmap and can be memory intensive for
pbaxisums with many long prob-basis-axis-strings.
Args:
building_blocks: The building_blocks in super-matrix format (i.e. 128x128)
operating_axes: The discrete axes on which `building_blocks` act.
pbaxisums: Supermatrices of large_block representation of pauli sum
operators. A single pbaxistring is represented as an innermost list
of matrix-large_blocks. The outermost list iterates through different
prob-basis-axis-sums, the intermediate list iterates through pbaxistrings
within a pbaxisum.
pbaxisums_operating_axes: The discrete axes on which the pbaxisums act.
pbaxisum_coeffs: The coefficients of the
prob-basis-axis-strings appearing in the union of all prob-basis-axis-sum operators.
num_discretes: The number of discretes needed for the simulation.
Returns:
ShardedDeviceArray: The expectation values.
"""
num_pbaxisums = len(pbaxisums)
expectation_values = jnp.zeros(num_pbaxisums)
final_state = helpers.get_final_state(building_blocks, operating_axes, num_discretes)
for m, pbaxisum in enumerate(pbaxisums):
pbaxisum_op_axes = pbaxisums_operating_axes[m]
pbaxisum_coeff = pbaxisum_coeffs[m]
# `psi` is brought into natural discrete order
# don't forget to also align the axes here!
coeff = pbaxisum_coeff[0]
psi = helpers.apply_building_blocks(
final_state, pbaxisum[0], pbaxisum_op_axes[0]
).align_axes()
expectation_value = (
helpers.scalar_product_real(psi, final_state) * coeff
)
for n in range(1, len(pbaxisum)):
pbaxistring = pbaxisum[n]
op_axes = pbaxisum_op_axes[n]
coeff = pbaxisum_coeff[n]
psi = helpers.apply_building_blocks(
final_state, pbaxistring, op_axes
).align_axes()
expectation_value += (
helpers.scalar_product_real(psi, final_state) * coeff
)
# at this point all `psis` are in natural discrete ordering,
# with the same `labels` values as `final_state` (i.e.
# `labels = [0,1,2,3,..., num_discretes - 1]`). They also all have the
# same (sorted) `perm` ordering due to the call to `align_axes()`.
# compute the expectation values. Note that `psi` and `final_state`
# have identical `perm` and `labels`.
expectation_values = expectation_values.at[m].set(
expectation_value.real[0]
)
return expectation_values | 5,358,095 |
def write_to_pubsub(data):
"""
    Publish an English-language tweet payload to the Pub/Sub topic.
    :param data: tweet dict with "text", "user_id", "id", "created_at" and "lang" keys
    :return: None
"""
try:
if data["lang"] == "en":
publisher.publish(topic_path, data=json.dumps({
"text": data["text"],
"user_id": data["user_id"],
"id": data["id"],
"posted_at": datetime.datetime.fromtimestamp(
data["created_at"]).strftime('%Y-%m-%d %H:%M:%S')
}).encode("utf-8"), tweet_id=str(data["id"]).encode("utf-8"))
except Exception as e:
raise | 5,358,096 |
def extractStudents(filename):
"""
Pre: The list in xls file is not empty
Post: All students are extract from file
Returns students list
"""
list = []
try:
# open Excel file
wb = xlrd.open_workbook(str(filename))
except IOError:
print ("Oops! No file "+filename+ " has been found !")
else:
sh = wb.sheet_by_name(wb.sheet_names()[0])
for rownum in range(1,sh.nrows):#1 to remove title line
student = sh.row_values(rownum)
list.append(student)
return list | 5,358,097 |
def dmenu_select(num_lines, prompt="Entries", inp=""):
"""Call dmenu and return the selected entry
Args: num_lines - number of lines to display
prompt - prompt to show
inp - bytes string to pass to dmenu via STDIN
Returns: sel - string
"""
cmd = dmenu_cmd(num_lines, prompt)
sel, err = Popen(cmd,
stdin=PIPE,
stdout=PIPE,
stderr=PIPE,
env=bwm.ENV).communicate(input=inp)
if err:
cmd = [cmd[0]] + ["-dmenu"] if "rofi" in cmd[0] else [""]
Popen(cmd[0], stdin=PIPE, stdout=PIPE, env=bwm.ENV).communicate(input=err)
sys.exit()
if sel is not None:
sel = sel.decode(bwm.ENC).rstrip('\n')
return sel | 5,358,098 |
def clean_ip(ip):
"""
Cleans the ip address up, useful for removing leading zeros, e.g.::
1234:0:01:02:: -> 1234:0:1:2::
1234:0000:0000:0000:0000:0000:0000:000A -> 1234::a
1234:0000:0000:0000:0001:0000:0000:0000 -> 1234:0:0:0:1::
0000:0000:0000:0000:0001:0000:0000:0000 -> ::1:0:0:0
:type ip: string
:param ip: An IP address.
:rtype: string
:return: The cleaned up IP.
"""
theip = normalize_ip(ip)
segments = ['%x' % int(s, 16) for s in theip.split(':')]
# Find the longest consecutive sequence of zeroes.
seq = {0: 0}
start = None
count = 0
for n, segment in enumerate(segments):
if segment != '0':
start = None
count = 0
continue
if start is None:
start = n
count += 1
seq[count] = start
# Replace those zeroes by a double colon.
count = max(seq)
start = seq[count]
result = []
for n, segment in enumerate(segments):
if n == start and count > 1:
if n == 0:
result.append('')
result.append('')
if n == 7:
result.append('')
continue
elif start < n < start + count:
if n == 7:
result.append('')
continue
result.append(segment)
return ':'.join(result) | 5,358,099 |