content (string, lengths 22–815k) | id (int64, 0–4.91M)
---|---
def update_action_state():
""" :type action: dart.model.action.Action """
# we receive a list of {action_id, action_status, workflow_instance_id/status}
# We will update the database for each such entry
try:
action_status_updates = request.get_json()
_logger.info("AWS_Batch: extracted json from request: {0}".format(action_status_updates))
except Exception as err:
_logger.error("AWS_Batch: Failed to extract json from request")
return {'result': str(err)}, 500
try:
for action_status in action_status_updates:
# updating the action state
current_action = action_service().get_action(action_status['action_id'])
if should_update(action_status['action_status'], current_action.data.state):
_logger.info("AWS_Batch: Updating action={0} from {1} to state {2}".format(current_action.id, current_action.data.state, action_status['action_status']))
action_service().update_action_state(current_action, action_status['action_status'])
# if we receive a workflow_instance_id (not empty) then we need to set workflow_instance status.
# we may need to set workflow and datastore status if they need to be deactivated on failure.
if action_status.get('workflow_instance_id'):
wfs = action_status.get('workflow_instance_status')
wf_instance_status = WorkflowInstanceState.FAILED if (wfs == 'FAILED') else WorkflowInstanceState.COMPLETED
_logger.info("AWS_Batch: Updating workflow_instance={0} to state {1}".format(action_status.get('workflow_instance_id'), wf_instance_status))
# Updating workflow_instance with the status sent (success or failure).
wf_instance = workflow_service().get_workflow_instance(action_status.get('workflow_instance_id'))
workflow_service().update_workflow_instance_state(wf_instance, wf_instance_status)
# check if need to deactivate workflow and datastore.
if wf_instance_status == WorkflowInstanceState.FAILED:
workflow_id = wf_instance.data.workflow_id
master_workflow = workflow_service().get_workflow(workflow_id)
# Failed action with deactivate on_failure should deactivate the current workflow.
if current_action.data.on_failure == ActionOnFailure.HALT:
_logger.info("AWS_Batch: Action in workflow={0} failed. Halting on failure and remaining in state {2}".format(master_workflow.id, WorkflowState.ACTIVE))
elif current_action.data.on_failure == ActionOnFailure.DEACTIVATE:
_logger.info("AWS_Batch: Updating workflow={0} to state {2}".format(master_workflow.id, WorkflowState.INACTIVE))
workflow_service().update_workflow_state(master_workflow, WorkflowState.INACTIVE)
if master_workflow.data.on_failure == WorkflowOnFailure.DEACTIVATE:
datastore_id = master_workflow.data.datastore_id
_logger.info("AWS_Batch: Updating datastore={0} to state {2}".format(datastore_id, DatastoreState.INACTIVE))
datastore = datastore_service().get_datastore(datastore_id)
datastore_service().update_datastore_state(datastore, DatastoreState.INACTIVE)
except Exception as err:
_logger.error("AWS_Batch: Failed to update action state. err= {0}".format(err))
return {'result': str(err)}, 501
# if all pass we send success status (200) otherwise we will try again later.
return {'result': "OK"}, 200 | 5,357,800 |
def page_not_found():
"""Directs to error page if user is not logged in.
:return: HTML file for error page.
"""
error = 'You must be logged in to view this page.'
return render_template('error.html', error=error) | 5,357,801 |
def mtf_toy_model_parallel():
"""Set of hyperparameters."""
hparams = mtf_toy_base()
hparams.add_hparam("layout", "hidden:0")
return hparams | 5,357,802 |
def evaluate_model(model, X_test, Y_test, category_names,):
"""used to evaluate given model by using
confusion matrix and classification report
args:
- model (sklearn model)
- X_test
- Y_test
- category_names: list of 36 category names
returns:
None
"""
# predict
y_pred = model.predict(X_test)
# process confusion matrix and classification report
# for each category
for j in range(y_pred.shape[1]):
print('TARGET: ', category_names[j])
print('\t\t---- CONFUSION MATRIX----')
# get labels for current category
classes = np.unique(Y_test[:, j])
print(confusion_matrix(Y_test[:, j],
y_pred[:, j],
labels=classes))
print('\t\t----REPORT----')
print(classification_report(Y_test[:, j],
y_pred[:, j],
labels=classes))
print('===========================================', end='\n\n') | 5,357,803 |
def test_insertion(empty_stack):
"""test that we can push a val into an empty stack and increment the ._size of the stack"""
assert empty_stack.top is None
assert empty_stack.push(1).val == 1
assert empty_stack._size == 1 | 5,357,804 |
def to_dataframe(sas7bdat_file: Union[str, Path]) -> pd.DataFrame:
"""Converts a sas7bdat and/or xpt file into a pandas dataframe.
args:
sas7bdat_file: The name, including the path, for the sas7bdat file.
return:
A pandas dataframe containing the data from the sas7bdat file.
"""
df = pd.read_sas(sas7bdat_file)
# convert binary strings to utf-8
str_df = df.select_dtypes([np.dtype(object)])
if len(str_df.columns) > 0:
str_df = str_df.stack().str.decode("utf-8").unstack()
for col in str_df:
df[col] = str_df[col]
# end conversion to utf-8
return df | 5,357,805 |
def ShrinkBE(slack):
""" shrinks quota for all BE workloads by be_shrink_rate
"""
be_shrink_ratio = st.params['quota_controller']['BE_shrink_ratio']
be_shrink_rate = 1 + be_shrink_ratio * slack
min_be_quota = int(st.node.cpu * 100000 * st.params['quota_controller']['min_be_quota'])
max_be_quota = int(st.node.cpu * 100000 * st.params['quota_controller']['max_be_quota'])
aggregate_be_quota = 0
for _, pod in st.active.pods.items():
for _, cont in pod.containers.items():
if pod.wclass == 'BE':
if not cont.period == 100000:
pod.period = 100000
cont.docker.update(cpu_period=100000)
old_quota = cont.quota
cont.quota = int(be_shrink_rate * cont.quota)
if cont.quota < min_be_quota:
cont.quota = min_be_quota
if cont.quota > max_be_quota:
cont.quota = max_be_quota
try:
cont.docker.update(cpu_quota=cont.quota)
print "Main: Shrink CPU quota of BE container in pod %s from %d to %d" % (pod.name, old_quota, cont.quota)
except docker.errors.APIError as e:
print "Main:WARNING: Cannot update quota for container %s: %s" % (str(cont), e)
aggregate_be_quota += cont.quota
st.node.be_quota = aggregate_be_quota | 5,357,806 |
def display_candle(
data: pd.DataFrame,
to_symbol: str,
from_symbol: str,
ma: Optional[Iterable[int]] = None,
external_axes: Optional[List[plt.Axes]] = None,
):
"""Show candle plot for fx data.
Parameters
----------
data : pd.DataFrame
Loaded fx historical data
to_symbol : str
To forex symbol
from_symbol : str
From forex symbol
external_axes: Optional[List[plt.Axes]]
External axes (1 axis is expected in the list), by default None
"""
candle_chart_kwargs = {
"type": "candle",
"style": theme.mpf_style,
"volume": False,
"xrotation": theme.xticks_rotation,
"scale_padding": {"left": 0.3, "right": 1, "top": 0.8, "bottom": 0.8},
"update_width_config": {
"candle_linewidth": 0.6,
"candle_width": 0.8,
"volume_linewidth": 0.8,
"volume_width": 0.8,
},
"warn_too_much_data": 20000,
}
if ma:
candle_chart_kwargs["mav"] = ma
# This plot has 1 axis
if not external_axes:
candle_chart_kwargs["returnfig"] = True
candle_chart_kwargs["figratio"] = (10, 7)
candle_chart_kwargs["figscale"] = 1.10
candle_chart_kwargs["figsize"] = plot_autoscale()
fig, ax = mpf.plot(data, **candle_chart_kwargs)
fig.suptitle(
f"{from_symbol}/{to_symbol}",
x=0.055,
y=0.965,
horizontalalignment="left",
)
if ma:
# Manually construct the chart legend
colors = []
for i, _ in enumerate(ma):
colors.append(theme.get_colors()[i])
lines = [Line2D([0], [0], color=c) for c in colors]
labels = ["MA " + str(label) for label in ma]
ax[0].legend(lines, labels)
theme.visualize_output(force_tight_layout=False)
elif is_valid_axes_count(external_axes, 1):
(ax1,) = external_axes
candle_chart_kwargs["ax"] = ax1
mpf.plot(data, **candle_chart_kwargs)
else:
return | 5,357,807 |
def tensor_imshow(inp, title=None, **kwargs):
"""Imshow for Tensor."""
inp = inp.numpy().transpose((1, 2, 0))
# Mean and std for ImageNet
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
inp = std * inp + mean
inp = np.clip(inp, 0, 1)
plt.imshow(inp, **kwargs)
if title is not None:
plt.title(title) | 5,357,808 |
def shift_scale_rmsf(rmsf_double, phi, cellsize, ccomp, faraday_peak):
"""Shift and scale the RMSF, to the parameters of the found clean component.
Args:
rmsf_double (numpy array): double sized array of complex point spread
function values in Faraday space.
phi (numpy array): array of Faraday depths.
cellsize (float): advised cellsize in Faraday space.
ccomp (float): the complex-valued clean component.
faraday_peak (int): the index of the peak of the clean component.
Returns:
ccomp*rmsf_shifted: the shifted and scaled RMSF.
"""
# Calculate the integer number of pixels required to shift the RMSF:
faraday_shift = phi[faraday_peak]/cellsize
faraday_shift = faraday_shift.astype(int)
# Shift the RMSF and pad with zeros based upon its sign:
if faraday_shift > 0:
rmsf_shifted = np.roll(rmsf_double, faraday_shift)
rmsf_shifted[0:faraday_shift] = 0.0
elif faraday_shift < 0:
rmsf_shifted = np.roll(rmsf_double, faraday_shift)
rmsf_shifted[len(rmsf_shifted)+faraday_shift:len(rmsf_shifted)] = 0.0
elif faraday_shift == 0:
rmsf_shifted = np.copy(rmsf_double)
# The shifted RMSF is double the width of the sampled Faraday space
# to ensure the shifted beam is subtracted correctly.
# Truncate the RMSF so it has same dimension as sampled parameter space:
rmsf_len = len(rmsf_shifted)
rmsf_shifted = np.delete(rmsf_shifted, np.arange((3*((rmsf_len-1)//4))+1,
rmsf_len))
rmsf_shifted = np.delete(rmsf_shifted, np.arange(0, ((rmsf_len-1)//4)))
# Scale the RMSF by the magnitude of the clean component:
return ccomp*rmsf_shifted | 5,357,809 |
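# A small worked example of the truncation arithmetic above (not from the
# original source): for a double-sized RMSF of length 801, the two np.delete
# calls keep exactly the central half plus one sample.
rmsf_len = 801
keep_start = (rmsf_len - 1) // 4           # 200, first index kept
keep_stop = 3 * ((rmsf_len - 1) // 4)      # 600, last index kept (inclusive)
assert keep_stop - keep_start + 1 == (rmsf_len + 1) // 2   # 401 samples kept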
def displayCards(cards):
"""Display all cards in the cards list"""
rows = ['', '', '', '', '']
for i, card in enumerate(cards):
rows[0] += ' ___ '
if card == BACKSIDE:
rows[1] += '|## | '
rows[2] += '|###| '
rows[3] += '|_##| '
else:
rank, suit = card
rows[1] += '|{} | '.format(rank.ljust(2))
rows[2] += '| {} | '.format(suit)
rows[3] += '|_{}| '.format(rank.rjust(2, '_'))
for row in rows:
print(row) | 5,357,810 |
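# A minimal usage sketch for displayCards (not from the original source).
# BACKSIDE is assumed to be the sentinel for face-down cards used elsewhere
# in the program; the suit character is one card tuple's second element.
BACKSIDE = 'backside'
displayCards([('K', '♠'), BACKSIDE])
# Expected output (the fifth row prints as an empty line):
#  ___  ___ 
# |K  | |## | 
# | ♠ | |###| 
# |__K| |_##| 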
def region_filter(annos, annotation):
    """filter for Region annotations.
    The 'time' parameter can match either 'time' or 'timeEnd' parameters.
    """
    result = []
    time = annotation.get("time")
    timeEnd = annotation.get("timeEnd")
    for anno in annos:
        # skip annotations whose text or tags do not match the query
        if any(anno.get(key) != annotation.get(key) for key in ['text', 'tags']):
            continue
        # skip annotations that are not tied to a region
        if anno.get("regionId") == 0:
            continue
        # the annotation time must match either the start or the end time
        if anno.get("time") not in [time, timeEnd]:
            continue
        result.append(anno)
return result | 5,357,811 |
def estimator_mixt_default(sample):
"""Default estimator of mixture distribution
    This estimator returns a tuple with two non-overlapping parts of `sample`
which are estimated to come from continuous and discrete parts of mixture
distribution. Estimation is done by deciding sample element to be from
discrete part if it is present at least twice in input `sample`.
If some part of estimation has no elements, it is represented as `None` in
output.
Parameters
----------
sample : array_like
This should be a valid input to `np.asarray()` so that its output is
numeric.
Returns
-------
sample_cont, sample_disc : tuple with two elements
Elements can be `None` if estimation showed no elements from
corresponding mixture part.
"""
# Detect sample from discrete part
sample = np.asarray(sample)
vals, inverse, counts = np.unique(sample, return_inverse=True, return_counts=True)
disc_inds = np.nonzero(counts >= 2)[0]
sample_is_disc = np.isin(inverse, disc_inds)
# Return separation
if np.all(sample_is_disc):
return (None, sample)
elif np.all(~sample_is_disc):
return (sample, None)
else:
return (sample[~sample_is_disc], sample[sample_is_disc]) | 5,357,812 |
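# A quick usage sketch for estimator_mixt_default (not from the original
# source): values occurring at least twice are treated as the discrete part.
import numpy as np
sample = [1.0, 1.0, 2.5, 3.0, 3.0, 4.2]
cont, disc = estimator_mixt_default(sample)
# cont -> array([2.5, 4.2]); disc -> array([1., 1., 3., 3.])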
def modularity_clustering(graph, size_cutoff=10, deg_cutoff=0.5,
callback=None):
"""
Use the Clauset-Newman-Moore greedy modularity maximization
algorithm to partition the TN93 pairwise graph into communities.
Modularity quantifies the density of edges at the periphery of
a community relative to the density within it.
TODO: try other methods like Louvain algorithm
:param graph: networkx.Graph object from import_graph()
:param size_cutoff: int, minimum component size to consider
applying modularity community detection
    :param deg_cutoff: float, maximum edge density at which to use
      community detection.
:param callback: optional, write verbose messages
:return: list, lists of node labels
"""
if callback:
callback("Modularity clustering...")
result = []
count = 0
for component in nx.connected_components(graph):
count += 1
if len(component) > size_cutoff:
sg = graph.subgraph(component)
# retrieve list of degree sizes
deg = [d for _, d in sg.degree()]
mean_deg = sum(deg) / float(len(deg))
if mean_deg / len(deg) < deg_cutoff:
communities = list(greedy_modularity_communities(sg))
if callback:
callback(
' partitioning component of size {} into {} '
'communities'.format(len(component), len(communities))
)
result.extend(communities)
else:
# component has sufficient edge density
result.append(component)
else:
result.append(component)
if callback:
callback("Partitioned graph from {} to {} components".format(
count, len(result))
)
return result | 5,357,813 |
def get_post_processors():
"""
Loads post processors by inspecting members of the 'post_processors' package.
"""
post_processor_classes = []
for _, member in inspect.getmembers(post_processors):
if inspect.isclass(member):
post_processor_classes.append(member)
return post_processor_classes | 5,357,814 |
def apply_array_pars(arr_par_file="arr_pars.csv"):
""" a function to apply array-based multipler parameters.
Args:
arr_par_file (`str`): path to csv file detailing parameter array multipliers.
This file is written by PstFromFlopy.
Note:
Used to implement the parameterization constructed by
PstFromFlopyModel during a forward run
This function should be added to the forward_run.py script but can
be called on any correctly formatted csv
        This function uses multiprocessing, spawning one process for each
model input array (and optionally pp files). This speeds up
execution time considerably but means you need to make sure your
forward run script uses the proper multiprocessing idioms for
freeze support and main thread handling.
"""
df = pd.read_csv(arr_par_file,index_col=0)
# for fname in df.model_file:
# try:
# os.remove(fname)
# except:
# print("error removing mult array:{0}".format(fname))
if 'pp_file' in df.columns:
print("starting fac2real",datetime.now())
pp_df = df.loc[df.pp_file.notna(),
['pp_file', 'fac_file', 'mlt_file']].rename(
columns={'fac_file': 'factors_file', 'mlt_file': 'out_file'})
pp_df.loc[:, 'lower_lim'] = 1.0e-10
# don't need to process all (e.g. if const. mults apply across kper...)
pp_args = pp_df.drop_duplicates().to_dict('records')
num_ppargs = len(pp_args)
chunk_len = 50
num_chunk_floor = num_ppargs // chunk_len
main_chunks = np.array(pp_args)[:num_chunk_floor * chunk_len].reshape(
[-1, chunk_len]).tolist()
remainder = np.array(pp_args)[num_chunk_floor * chunk_len:].tolist()
chunks = main_chunks + [remainder]
procs = []
for chunk in chunks:
p = mp.Process(target=_process_chunk_fac2real, args=[chunk])
p.start()
procs.append(p)
for p in procs:
p.join()
print("finished fac2real",datetime.now())
print("starting arr mlt",datetime.now())
uniq = df.model_file.unique() # unique model input files to be produced
num_uniq = len(uniq) # number of input files to be produced
# number of files to send to each processor
    chunk_len = 50  # - this may not be the optimum number,
    # surely there is some clever way of working it out
    # lazily splitting the files to be processed into even chunks
num_chunk_floor = num_uniq // chunk_len # number of whole chunks
main_chunks = uniq[:num_chunk_floor * chunk_len].reshape(
[-1, chunk_len]).tolist() # the list of files broken down into chunks
remainder = uniq[num_chunk_floor * chunk_len:].tolist() # remaining files
chunks = main_chunks + [remainder]
procs = []
for chunk in chunks: # now only spawn processor for each chunk
p = mp.Process(target=_process_chunk_model_files, args=[chunk, df])
p.start()
procs.append(p)
for p in procs:
p.join()
print("finished arr mlt", datetime.now()) | 5,357,815 |
def vulcanize(name: str) -> str:
"""Add prefixes to names that are similar to the prefixes seen
in Vulcan characters in the Star Trek™ franchise.
:param name: The name to modify.
:return: A :class:str object.
:rtype: str
Usage:
>>> # Seed the RNG to make the example predictable. Don't do
>>> # this if you want the modification to be random.
>>> seed('spam')
>>>
>>> name = 'Bacon'
>>> vulcanize(name)
"T'Bacon"
"""
letter = 't'
if roll('1d6') > 5:
letters = 'd k l m n p s su v'.split()
index = roll(f'1d{len(letters)}') - 1
letter = letters[index]
letter = letter.title()
name = name.title()
return f"{letter}'{name}" | 5,357,816 |
def intersperse(iterable, element):
"""Generator yielding all elements of `iterable`, but with `element`
inserted between each two consecutive elements"""
iterable = iter(iterable)
yield next(iterable)
for next_from_iterable in iterable:
yield element
yield next_from_iterable | 5,357,817 |
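# A quick usage sketch for intersperse (not from the original source).
assert list(intersperse([1, 2, 3], 0)) == [1, 0, 2, 0, 3]
# Note: an empty iterable raises an error from the initial next() call.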
def B(j, p, x, knots):
""" Compute B-splines using recursive definition. """
if p == 0:
if knots[j] <= x < knots[j+1]:
return 1.0
else:
return 0.0
else:
left = special_div((x-knots[j])*B(j,p-1,x,knots), knots[j+p]-knots[j])
right = special_div((knots[j+1+p]-x)*B(j+1,p-1,x,knots), knots[j+1+p]-knots[j+1])
return left + right | 5,357,818 |
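# A minimal worked example for B (not from the original source). It assumes a
# helper special_div that returns 0 for the 0/0 case of repeated knots; the
# original special_div is defined elsewhere in the source file.
def special_div(numerator, denominator):
    return 0.0 if denominator == 0 else numerator / denominator

knots = [0, 1, 2, 3]
assert B(0, 0, 0.5, knots) == 1.0   # degree-0 B-spline is 1 on [knots[0], knots[1])
assert B(0, 1, 1.5, knots) == 0.5   # linear "hat" function evaluated at x = 1.5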
def not_falsy(item: T, item_name: str) -> T:
"""
Check if a value is falsy and throw an exception if so.
:param item: the item to check for falsiness.
:param item_name: the name of the item to include in any exception.
:raises ValueError: if the item is falsy.
:returns: the item.
"""
if not item:
raise ValueError(f"{item_name} cannot be a value that evaluates to false")
return item | 5,357,819 |
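# A quick usage sketch for not_falsy (not from the original source).
assert not_falsy("token", "api_token") == "token"
# not_falsy("", "api_token") would raise ValueError instead of returning.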
def _apply_limit_abs_unit(x, lim, unit):
"""Return one limit with applied unit(abs(x)). See get_limits."""
if unit is None:
return lim
unit = unit.lower()
if unit == 'near':
return lim * np.nanmin(np.abs(x))
if unit == 'far':
return lim * np.nanmax(np.abs(x))
elif unit == 'median':
return lim * np.nanmedian(np.abs(x))
elif unit == 'mean':
return lim * np.nanmean(np.abs(x))
else:
raise ValueError("Unknown unit %s"%unit) | 5,357,820 |
def load_fits(name):
""" Open a fits file image
Inputs:
name: name of the .fits file (str).
Output:
image:
"""
while True:
try:
file = fits.open(name)
image = file.copy()
return image, name
except FileNotFoundError:
print(f"File {name} not found")
name = input('Please enter a different file name: ') | 5,357,821 |
def run_flow(command, contents):
"""Run Flow command on a given contents."""
read, write = os.pipe()
os.write(write, str.encode(contents))
os.close(write)
try:
output = subprocess.check_output(
command, stderr=subprocess.STDOUT, stdin=read
)
decoded_output = output.decode("utf-8")
clean_output = decoded_output[decoded_output.find('{"') :]
result = json.loads(clean_output)
os.close(read)
return result
except subprocess.CalledProcessError as err:
raise err | 5,357,822 |
async def __fetch_img_data(session: ClientSession, tile_data, image_tiles) -> None:
"""Get a cv2 image from a URL and insert it into the full-size image."""
start_y = tile_data['y']
end_y = start_y + tile_data['height']
start_x = tile_data['x']
end_x = start_x + tile_data['width']
async with session.get(tile_data['url']) as response:
image_data = np.asarray(bytearray(await response.read()), dtype="uint8")
image_tiles[start_y:end_y, start_x:end_x] = cv2.imdecode(image_data, cv2.IMREAD_COLOR) | 5,357,823 |
def cleanup():
"""Clean up resoruces in use by implementation.
Clean up any resources that have been allocated by the RPC implementation.
This is typically open connections to a messaging service. This function
would get called before an application using this API exits to allow
connections to get torn down cleanly.
:returns: None
"""
return _get_impl().cleanup() | 5,357,824 |
def plot_part1(avg_face, face_hog):
"""plot average face and hog representatitons of face."""
plt.subplot(1, 2, 1)
plt.imshow(avg_face)
plt.axis('off')
plt.title('average face image')
plt.subplot(1, 2, 2)
plt.imshow(face_hog)
plt.title('hog representation of face')
plt.axis('off')
plt.show() | 5,357,825 |
def social_bonus_count(user, count):
"""Returns True if the number of social bonus the user received equals to count."""
return user.actionmember_set.filter(social_bonus_awarded=True).count() >= count | 5,357,826 |
def del_category_tag_lib(self,c_uuid,t_uuid):
"""04删除便签或分类"""
if c_uuid:
category = Category.by_uuid(c_uuid)
if category is None:
            flash(self, 'Category does not exist', 'error')
return {'status':False}
if category.articles:
            flash(self, 'There are articles under this category; please delete them first', 'error')
return {'status': False}
self.db.delete(category)
self.db.commit()
        flash(self, 'Category deleted successfully', 'success')
return {'status':True}
if t_uuid:
tag = Tag.by_uuid(t_uuid)
if tag is None:
            flash(self, 'Tag does not exist', 'error')
return {'status':False}
if tag.articles:
            flash(self, 'There are articles under this tag; please delete them first', 'error')
return {'status': False}
self.db.delete(tag)
self.db.commit()
        flash(self, 'Tag deleted successfully', 'success')
return {'status':True}
    flash(self, 'Please provide a tag or category', 'error')
return {'status': False} | 5,357,827 |
def get_distance_metres(aLocation1, aLocation2):
"""
Returns the ground distance in metres between two `LocationGlobal` or `LocationGlobalRelative` objects.
This method is an approximation, and will not be accurate over large distances and close to the
earth's poles. It comes from the ArduPilot test code:
https://github.com/diydrones/ardupilot/blob/master/Tools/autotest/common.py
"""
dlat = aLocation2.lat - aLocation1.lat
dlong = aLocation2.lon - aLocation1.lon
return math.sqrt((dlat*dlat) + (dlong*dlong)) * 1.113195e5 | 5,357,828 |
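# A minimal usage sketch (not from the original source): any object with .lat
# and .lon attributes stands in for LocationGlobal / LocationGlobalRelative,
# and math is assumed imported as in the surrounding module.
from collections import namedtuple
Location = namedtuple("Location", ["lat", "lon"])
d = get_distance_metres(Location(52.0000, 4.0000), Location(52.0010, 4.0000))
# d is roughly 111.3 metres (0.001 degrees of latitude)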
def pd_series_overload(data=None, index=None, dtype=None, name=None, copy=False, fastpath=False):
"""
Intel Scalable Dataframe Compiler User Guide
********************************************
Pandas API: pandas.Series
Limitations
-----------
- Parameters ``dtype`` and ``copy`` are currently unsupported.
- Types iterable and dict as ``data`` parameter are currently unsupported.
- Categorical types (i.e. 'category' and ``CategoricalDtype``) are supported in ``dtype``
only if they are provided as constants in jitted code.
Examples
--------
Create Series with data [1, 2, 3] and index ['A', 'B', 'C'].
>>> pd.Series([1, 2, 3], ['A', 'B', 'C'])
Create Series with categorical data:
>>> pd.Series([1, 2, 3], dtype='category')
>>> pd.Series([1, 2, 3], dtype=CategoricalDtype([1, 2, 3]))
.. seealso::
:ref:`DataFrame <pandas.DataFrame>`
DataFrame constructor.
"""
is_index_none = isinstance(index, types.NoneType) or index is None
if is_categoricaldtype(dtype):
return _Series_category(data, index, dtype, name, copy, fastpath)
def hpat_pandas_series_ctor_impl(data=None, index=None, dtype=None, name=None, copy=False, fastpath=False):
'''' use binop here as otherwise Numba's dead branch pruning doesn't work
TODO: replace with 'if not is_index_none' when resolved '''
if is_index_none == False: # noqa
fix_index = sdc.hiframes.api.fix_df_array(index)
else:
fix_index = index
return sdc.hiframes.api.init_series(sdc.hiframes.api.fix_df_array(data), fix_index, name)
return hpat_pandas_series_ctor_impl | 5,357,829 |
def get_export_summary(results):
"""Prints to screen the exporting results of example programs.
Args:
        results - results of the compilation stage, which is the output of export_repos()
    Returns: Number of failed results
"""
pass_table = PrettyTable()
pass_table.field_names = ["EXAMPLE NAME", "TARGET", "IDE", "EXPORT RESULT", "BUILD RESULT"]
pass_table.align["EXAMPLE NAME"] = "l"
fail_table = copy.deepcopy(pass_table)
failure_counter = 0
for exp, status in list(results.items()):
for summary in status[2]:
pass_table.add_row([summary["name"], summary["target"], summary["ide"], "PASSED", "PASSED"])
for summary in status[3]:
fail_table.add_row([summary["name"], summary["target"], summary["ide"], "FAILED", ""])
failure_counter+=1
for summary in status[4]:
fail_table.add_row([summary["name"], summary["target"], summary["ide"], "PASSED", "FAILED"])
failure_counter+=1
for summary in status[5]:
pass_table.add_row([summary["name"], summary["target"], summary["ide"], "PASSED", "SKIPPED"])
print("\n\nPassed Example Exporting:")
print(pass_table)
if (failure_counter > 0):
print("\n\nFailed Example Exporting:")
print(fail_table)
print("Number of failures = %d" % failure_counter)
return failure_counter | 5,357,830 |
def _misfitfunc(data, predicted):
"""
Calculate the total data misfit function between the observed and predicted
data.
"""
result = 0.
for d, p, in zip(data, predicted):
residuals = d.observed - p
result += sqrt(numpy.dot(d.weights*residuals, residuals))/d.norm
return result | 5,357,831 |
def lammps_calc_rdf(job):
"""Create an rdf from the gsd file using Freud analysis scripts."""
import mbuild as mb
import MDAnalysis as mda
traj = mda.coordinates.XTC.XTCReader("prod.xtc")
top = mda.topology.LAMMPSParser.DATAParser("box.lammps")
u = mda.Universe(top, traj)
u.trajectory.next(-1)
parmed_structure = u.convert_to("PARMED")
mb.formats.gsdwriter.write_gsd(parmed_structure, "prod.gsd")
# TODO: Use freud rdf PR to create an RDF from the gsd file
return | 5,357,832 |
def stream_doi(app, doi):
"""Returns tuple of URL string and a urlopen() return value."""
apikey = app.cfg.get_or_die('api-keys', 'crossref')
url = ('http://crossref.org/openurl/?id=%s&noredirect=true&pid=%s&'
'format=unixref' % (wu.urlquote(doi), wu.urlquote(apikey)))
return url, wu.urlopen(url) | 5,357,833 |
def system_resource_repo(class_ini, class_configurator): # redefining ini, class_configurator pylint: disable=W0621
"""
Like `resource_repo`, but yields the system repository instead of the
default repository.
"""
with ResourceCreatorContextManager(class_ini, class_configurator,
repo_name=REPOSITORY_DOMAINS.SYSTEM) \
as repo:
yield repo | 5,357,834 |
def do_evaluation(
*,
input_path,
training_path: Optional[str] = None,
testing_path: Optional[str] = None,
method,
prediction_task,
dimensions: int = 300,
number_walks: int = 8,
walk_length: int = 8,
window_size: int = 4,
p: float = 1.5,
q: float = 2.1,
alpha: float = 0.1,
beta: float = 4,
epochs: int = 5,
kstep: int = 4,
order: int = 3,
embeddings_path: Optional[str] = None,
predictive_model_path: Optional[str] = None,
training_model_path: Optional[str] = None,
evaluation_file: Optional[str] = None,
classifier_type: Optional[str] = None,
weighted: bool = False,
labels_file: Optional[str] = None,
):
"""Train and evaluate an NRL model."""
if prediction_task == 'link_prediction':
node_list = None
labels = None
graph, graph_train, testing_pos_edges, train_graph_filename = create_graphs(
input_path=input_path,
training_path=training_path,
testing_path=testing_path,
weighted=weighted,
)
else:
if not labels_file:
raise ValueError("No input label file. Exit.")
node_list, labels = read_node_labels(labels_file)
train_graph_filename = input_path
graph, graph_train, testing_pos_edges = None, None, None
model = embedding_training(
train_graph_filename=train_graph_filename,
method=method,
dimensions=dimensions,
number_walks=number_walks,
walk_length=walk_length,
window_size=window_size,
p=p,
q=q,
alpha=alpha,
beta=beta,
epochs=epochs,
kstep=kstep,
order=order,
weighted=weighted,
)
if training_model_path is not None:
model.save_model(training_model_path)
if embeddings_path is not None:
model.save_embeddings(embeddings_path)
if method == 'LINE':
embeddings = model.get_embeddings_train()
else:
embeddings = model.get_embeddings()
_results = dict(
input=input_path,
method=method,
dimension=dimensions,
user=getpass.getuser(),
date=datetime.datetime.now().strftime('%Y-%m-%d-%H%M%S'),
)
if prediction_task == 'link_prediction':
auc_roc, auc_pr, accuracy, f1, mcc = do_link_prediction(
embeddings=embeddings,
original_graph=graph,
train_graph=graph_train,
test_pos_edges=testing_pos_edges,
save_model=predictive_model_path,
classifier_type=classifier_type,
)
_results['results'] = dict(
auc_roc=auc_roc,
auc_pr=auc_pr,
accuracy=accuracy,
f1=f1,
mcc=mcc,
)
else:
accuracy, macro_f1, micro_f1, mcc = do_node_classification(
embeddings=embeddings,
node_list=node_list,
labels=labels,
save_model=predictive_model_path,
classifier_type=classifier_type,
)
_results['results'] = dict(
accuracy=accuracy,
macro_f1=macro_f1,
micro_f1=micro_f1,
mcc=mcc,
)
if evaluation_file is not None:
json.dump(_results, evaluation_file, sort_keys=True, indent=2)
return _results | 5,357,835 |
def test_structure_fatal_deformities(good_structure, deformity):
"""Make specific checks upon performing single invalidating deformations
of the data of a good structure.
"""
import re
if deformity is None:
return StructureResource(**good_structure)
deformity, message = deformity
good_structure["attributes"].update(deformity)
with pytest.raises(ValidationError, match=fr".*{re.escape(message)}.*"):
StructureResource(**good_structure) | 5,357,836 |
def displayTCP(tcp):
"""Display the TCP header."""
print "[TCP Header]"
print "\t Source Port: " + str(tcp.sport)
print "\t Destination Port: " + str(tcp.dport)
print "\t Sequence Number: " + str(tcp.seq)
print "\t Acknowledgment Number: " + str(tcp.ack)
print "\t Data Offset: " + str(tcp.dataofs)
print "\t Reserved: " + str(tcp.reserved)
print "\t Flags: " + tcp.underlayer.sprintf("%TCP.flags%")
print "\t Window Size: " + str(tcp.window)
print "\t Checksum: " + str(tcp.chksum)
if (tcp.flags & URG):
print "\t Urgent Pointer: " + str(tcp.window)
if (tcp.dataofs > 5):
print "\t Options: " + str(tcp.options) | 5,357,837 |
def edit_user():
""" 返回待编辑用户信息 """
data = request.json
user_id = data.get('id')
_edit = User.query.filter_by(id=user_id).first()
_data = {'account': _edit.account, 'name': _edit.name, 'role_id': _edit.role_id}
return jsonify({'data': _data, 'status': 1}) | 5,357,838 |
def fast_infer(test_data, trg_idx2word):
"""
Inference by beam search decoder based solely on Fluid operators.
"""
place = fluid.CUDAPlace(0) if InferTaskConfig.use_gpu else fluid.CPUPlace()
exe = fluid.Executor(place)
out_ids, out_scores = fast_decoder(
ModelHyperParams.src_vocab_size, ModelHyperParams.trg_vocab_size,
ModelHyperParams.max_length + 1, ModelHyperParams.n_layer,
ModelHyperParams.n_head, ModelHyperParams.d_key,
ModelHyperParams.d_value, ModelHyperParams.d_model,
ModelHyperParams.d_inner_hid, ModelHyperParams.prepostprocess_dropout,
ModelHyperParams.attention_dropout, ModelHyperParams.relu_dropout,
ModelHyperParams.preprocess_cmd, ModelHyperParams.postprocess_cmd,
ModelHyperParams.weight_sharing, InferTaskConfig.beam_size,
InferTaskConfig.max_out_len, ModelHyperParams.eos_idx)
fluid.io.load_vars(
exe,
InferTaskConfig.model_path,
vars=[
var for var in fluid.default_main_program().list_vars()
if isinstance(var, fluid.framework.Parameter)
])
# This is used here to set dropout to the test mode.
infer_program = fluid.default_main_program().clone(for_test=True)
for batch_id, data in enumerate(test_data.batch_generator()):
data_input = prepare_batch_input(
data, encoder_data_input_fields + fast_decoder_data_input_fields,
ModelHyperParams.eos_idx, ModelHyperParams.bos_idx,
ModelHyperParams.n_head, ModelHyperParams.d_model, place)
seq_ids, seq_scores = exe.run(infer_program,
feed=data_input,
fetch_list=[out_ids, out_scores],
return_numpy=False)
# How to parse the results:
# Suppose the lod of seq_ids is:
# [[0, 3, 6], [0, 12, 24, 40, 54, 67, 82]]
# then from lod[0]:
# there are 2 source sentences, beam width is 3.
# from lod[1]:
# the first source sentence has 3 hyps; the lengths are 12, 12, 16
# the second source sentence has 3 hyps; the lengths are 14, 13, 15
hyps = [[] for i in range(len(data))]
scores = [[] for i in range(len(data))]
for i in range(len(seq_ids.lod()[0]) - 1): # for each source sentence
start = seq_ids.lod()[0][i]
end = seq_ids.lod()[0][i + 1]
for j in range(end - start): # for each candidate
sub_start = seq_ids.lod()[1][start + j]
sub_end = seq_ids.lod()[1][start + j + 1]
hyps[i].append(" ".join([
trg_idx2word[idx]
for idx in post_process_seq(
np.array(seq_ids)[sub_start:sub_end])
]))
scores[i].append(np.array(seq_scores)[sub_end - 1])
print(hyps[i][-1])
if len(hyps[i]) >= InferTaskConfig.n_best:
break | 5,357,839 |
def transactions(request):
"""See all transactions that have been contained in blocks."""
vote_list = Vote.objects.all().order_by('timestamp')
paginator = Paginator(vote_list, 100, orphans=20, allow_empty_first_page=True)
page = request.GET.get('page')
votes = paginator.get_page(page)
hashes = [SHA3_256.new(str(v).encode('utf-8')).hexdigest() for v in votes]
# This happens if you don't use foreign key
block_hashes = []
for i in range(0, len(votes)):
try:
b = Block.objects.get(id=votes[i].block_id)
h = b.h
except:
h = 404
block_hashes.append(h)
# zip the three iters
votes_pg = votes # for pagination
votes = zip(votes, hashes, block_hashes)
# Calculate the voting result of 3 cands, the ugly way
result = []
for i in range(0, 3):
try:
r = Vote.objects.filter(vote=i+1).count()
except:
r = 0
result.append(r)
context = {
'votes': votes,
'result': result,
'votes_pg': votes_pg,
}
return render(request, 'simulation/transactions.html', context) | 5,357,840 |
def evaluate_all_models():
"""Evalate all models via cross validation."""
# retrieve data from model
x = get_feature_matrix()
y = get_labels()
# evaluate models
try:
for name in classifiers.keys():
evaluate_model(name, classifiers[name], x, y)
evaluate_model('VotingClassifier', vc, x, y)
except:
print("\nError at model evaluation: \n%s" % sys.exc_info()[1]) | 5,357,841 |
def adjust_images():
"""Adjust image colors."""
albums = get_albums()
for album in albums:
path = ADJUSTED_DIR / album[0]['album']
path.mkdir(exist_ok=True)
with multiprocessing.Pool(processes=PROCESSES) as pool:
results = [pool.apply_async(adjust_album, (a,)) for a in albums]
results = [r.get() for r in results]
found = sum(r for r in results)
total = sum(len(a) for a in albums)
print(f'Adjusted {found} / {total}') | 5,357,842 |
def count(pred_tokens, gold_tokens, ngram, result):
"""
    Count n-gram overlap between pred_tokens and gold_tokens and accumulate the covered/total counts into result
"""
cover_count, total_count = result
pred_dict = get_dict(pred_tokens, ngram)
gold_dict = get_dict(gold_tokens, ngram)
cur_cover_count = 0
cur_total_count = 0
for token, freq in pred_dict.items():
if gold_dict.get(token) is not None:
gold_freq = gold_dict[token]
cur_cover_count += min(freq, gold_freq)
cur_total_count += freq
result[0] += cur_cover_count
result[1] += cur_total_count | 5,357,843 |
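# A small worked example for count() (not from the original source). It assumes
# get_dict(tokens, ngram) returns an n-gram -> frequency mapping, which is what
# the function above relies on; a minimal stand-in is sketched here.
def get_dict(tokens, ngram):
    d = {}
    for i in range(len(tokens) - ngram + 1):
        key = tuple(tokens[i:i + ngram])
        d[key] = d.get(key, 0) + 1
    return d

result = [0, 0]
count("the cat the cat".split(), "the cat sat".split(), 1, result)
# result == [2, 4]: 2 of the 4 predicted unigrams are covered by the reference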
def elections_vote_places_geo(source="xd", folder=".", fLOG=noLOG):
"""
Retrieves data vote places (bureaux de vote in French)
with geocodes.
@param source should be None unless you want to use the backup plan ("xd")
@param folder where to download
@param fLOG logging function
@return list of dataframe
"""
if source is None:
raise NotImplementedError("use source='xd'")
url = source
file = "bureauxvotegeo.zip"
data = download_data(file, website=url, whereTo=folder, fLOG=fLOG)
for d in data:
if d.endswith(".txt"):
df = pandas.read_csv(d, sep="\t", encoding="utf-8")
return df
raise DataNotAvailableError(
"Unable to find any csv file in '{0}'".format(file)) | 5,357,844 |
def erfc(
x: oneflow._oneflow_internal.BlobDesc, name: Optional[str] = None
) -> oneflow._oneflow_internal.BlobDesc:
"""This operator computes the :math:`1-erf(x)`, for more details of `erf` function
please refer to `math.erf`.
Args:
x (oneflow._oneflow_internal.BlobDesc): A Blob
name (Optional[str], optional): The name for the operation. Defaults to None.
Returns:
oneflow._oneflow_internal.BlobDesc: The result Blob
For example:
.. code-block:: python
import oneflow.compatible.single_client as flow
import numpy as np
import oneflow.compatible.single_client.typing as tp
@flow.global_function()
def erfc_Job(x: tp.Numpy.Placeholder((3,))
) -> tp.Numpy:
return flow.math.erfc(x)
x = np.array([1, 2, 3]).astype(np.float32)
out = erfc_Job(x)
# out [1.5729921e-01 4.6777353e-03 2.2090495e-05]
"""
return build_unary_elemwise_math_op("erfc", x, name) | 5,357,845 |
def ner_manual_tokenizers_bert(
dataset: str,
source: Union[str, Iterable[dict]],
loader: Optional[str] = None,
label: Optional[List[str]] = None,
tokenizer_vocab: Optional[str] = None,
lowercase: bool = False,
hide_special: bool = False,
hide_wp_prefix: bool = False,
) -> Dict[str, Any]:
"""Example recipe that shows how to use model-specific tokenizers like the
BERT word piece tokenizer to preprocess your incoming text for fast and
efficient NER annotation and to make sure that all annotations you collect
always map to tokens and can be used to train and fine-tune your model
(even if the tokenization isn't that intuitive, because word pieces). The
selection automatically snaps to the token boundaries and you can double-click
single tokens to select them.
Setting "honor_token_whitespace": true will ensure that whitespace between
tokens is only shown if whitespace is present in the original text. This
keeps the text readable.
    Requires Prodigy v1.10+ and uses the HuggingFace tokenizers library."""
stream = get_stream(source, loader=loader, input_key="text")
# You can replace this with other tokenizers if needed
tokenizer = BertWordPieceTokenizer(tokenizer_vocab, lowercase=lowercase)
sep_token = tokenizer._parameters.get("sep_token")
cls_token = tokenizer._parameters.get("cls_token")
special_tokens = (sep_token, cls_token)
wp_prefix = tokenizer._parameters.get("wordpieces_prefix")
def add_tokens(stream):
for eg in stream:
tokens = tokenizer.encode(eg["text"])
eg_tokens = []
idx = 0
for (text, (start, end), tid) in zip(
tokens.tokens, tokens.offsets, tokens.ids
):
# If we don't want to see special tokens, don't add them
if hide_special and text in special_tokens:
continue
# If we want to strip out word piece prefix, remove it from text
if hide_wp_prefix and wp_prefix is not None:
if text.startswith(wp_prefix):
text = text[len(wp_prefix) :]
token = {
"text": text,
"id": idx,
"start": start,
"end": end,
# This is the encoded ID returned by the tokenizer
"tokenizer_id": tid,
                    # Don't allow selecting special SEP/CLS tokens
"disabled": text in special_tokens,
}
eg_tokens.append(token)
idx += 1
for i, token in enumerate(eg_tokens):
# If the next start offset != the current end offset, we
# assume there's whitespace in between
if i < len(eg_tokens) - 1 and token["text"] not in special_tokens:
next_token = eg_tokens[i + 1]
token["ws"] = (
next_token["start"] > token["end"]
or next_token["text"] in special_tokens
)
else:
token["ws"] = True
eg["tokens"] = eg_tokens
yield eg
stream = add_tokens(stream)
return {
"dataset": dataset,
"stream": stream,
"view_id": "ner_manual",
"config": {
"honor_token_whitespace": True,
"labels": label,
"exclude_by": "input",
"force_stream_order": True,
},
} | 5,357,846 |
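# Illustrative only (not from the original source): a hypothetical eg["tokens"]
# entry produced by add_tokens() for the text "playing", assuming the word
# piece tokenizer splits it into "play" + "##ing" and that hide_special=True
# and hide_wp_prefix=True; the tokenizer_id values are made up.
example_tokens = [
    {"text": "play", "id": 0, "start": 0, "end": 4, "tokenizer_id": 2377,
     "disabled": False, "ws": False},   # no whitespace before the next piece
    {"text": "ing", "id": 1, "start": 4, "end": 7, "tokenizer_id": 2075,
     "disabled": False, "ws": True},
]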
def print_image(image, position, scale=1):
"""
    Display an image at a given position.
    Parameters
    ----------
    image : string
        The path to the image file
    position : int * int
        The coordinates of the image
    scale : int
        The scaling factor
"""
img = ut.load_image(image)
w, h = img.get_rect().size
img = ut.resize(img, (w * scale, h * scale))
cf.DISPLAYSURF.blit(img, position) | 5,357,847 |
def test_nested_simple_condition() -> None:
"""
Iterates and maps expressions over a complex Condition:
(A=B OR A=B) AND (A=B OR A=B)
"""
c1 = Column(None, "t1", "c1")
c2 = Column(None, "t1", "c2")
co1 = binary_condition(None, ConditionFunctions.EQ, c1, c2)
c3 = Column(None, "t1", "c1")
c4 = Column(None, "t1", "c2")
co2 = binary_condition(None, ConditionFunctions.EQ, c3, c4)
or1 = binary_condition(None, BooleanFunctions.OR, co1, co2)
c5 = Column(None, "t1", "c1")
c6 = Column(None, "t1", "c2")
co4 = binary_condition(None, ConditionFunctions.EQ, c5, c6)
c7 = Column(None, "t1", "c1")
c8 = Column(None, "t1", "c2")
co5 = binary_condition(None, ConditionFunctions.EQ, c7, c8)
or2 = binary_condition(None, BooleanFunctions.OR, co4, co5)
and1 = binary_condition(None, BooleanFunctions.AND, or1, or2)
ret = list(and1)
expected = [c1, c2, co1, c3, c4, co2, or1, c5, c6, co4, c7, c8, co5, or2, and1]
assert ret == expected
cX = Column(None, "t1", "cX")
co1_b = binary_condition(None, ConditionFunctions.EQ, c1, cX)
co2_b = binary_condition(None, ConditionFunctions.EQ, c3, cX)
or1_b = binary_condition(None, BooleanFunctions.OR, co1_b, co2_b)
co4_b = binary_condition(None, ConditionFunctions.EQ, c5, cX)
co5_b = binary_condition(None, ConditionFunctions.EQ, c7, cX)
or2_b = binary_condition(None, BooleanFunctions.OR, co4_b, co5_b)
and1_b = binary_condition(None, BooleanFunctions.AND, or1_b, or2_b)
def replace_col(e: Expression) -> Expression:
if isinstance(e, Column) and e.column_name == "c2":
return cX
return e
and1 = and1.transform(replace_col)
ret = list(and1)
expected = [
c1,
cX,
co1_b,
c3,
cX,
co2_b,
or1_b,
c5,
cX,
co4_b,
c7,
cX,
co5_b,
or2_b,
and1_b,
]
assert ret == expected | 5,357,848 |
def test_buy_one_investor(chain: TesterChain, web3: Web3, ico: Contract, uncapped_token: Contract, customer: str, preico_token_price, preico_starts_at, team_multisig):
"""Can buy when crowdsale is running."""
original_balance = web3.eth.getBalance(team_multisig)
wei_value = to_wei(1, "ether")
buys_tokens = wei_value // preico_token_price
assert buys_tokens > 0
time_travel(chain, preico_starts_at + 1)
assert ico.functions.getState().call() == CrowdsaleState.Funding
assert ico.functions.investorCount().call() == 0
assert ico.functions.investedAmountOf(customer).call() == 0
ico.functions.buy().transact({"from": customer, "value": wei_value})
#
# See everything was correctly credited
#
# Tokens on every account
assert uncapped_token.functions.balanceOf(customer).call() == buys_tokens
assert uncapped_token.functions.totalSupply().call() == buys_tokens
assert ico.functions.tokensSold().call() == buys_tokens
assert ico.functions.investorCount().call() == 1
# Ether on every account
assert ico.functions.weiRaised().call() == wei_value
assert ico.functions.investedAmountOf(customer).call() == wei_value
balance_diff = web3.eth.getBalance(team_multisig) - original_balance
assert balance_diff == wei_value
# Investors
assert ico.functions.investorCount().call() == 1
#
# Events
#
# Crowdsale
events = ico.events.Invested().createFilter(fromBlock=0).get_all_entries()
assert len(events) == 1
e = events[0]
assert e["args"]["investor"] == customer
assert e["args"]["weiAmount"] == wei_value
assert e["args"]["tokenAmount"] == buys_tokens
# ERC-20
events = uncapped_token.events.Transfer().createFilter(fromBlock=0).get_all_entries()
assert len(events) == 1
e = events[0]
assert e["args"]["from"] == "0x0000000000000000000000000000000000000000"
assert e["args"]["to"] == customer
assert e["args"]["value"] == buys_tokens | 5,357,849 |
def draw_list(draw_info, color_positions={}, clear_bg=False):
"""
    Find each bar's height and width, then give them different shades so they can be seen properly
"""
lst = draw_info.lst
if clear_bg:
clear_rect = (draw_info.SIDE_PAD//2, draw_info.TOP_PAD,
draw_info.width - draw_info.SIDE_PAD, draw_info.height - draw_info.TOP_PAD)
pygame.draw.rect(draw_info.window,
draw_info.BACKGROUND_COLOR, clear_rect)
for i, val in enumerate(lst):
x = draw_info.start_x + i * draw_info.block_width
y = draw_info.height - \
(val - draw_info.min_val) * draw_info.block_height
color = draw_info.GRADIENTS[i % 3]
if i in color_positions:
color = color_positions[i]
pygame.draw.rect(draw_info.window, color,
(x, y, draw_info.block_width, draw_info.height))
if clear_bg:
pygame.display.update() | 5,357,850 |
def print_file_cats(lines, category, categorizations):
"""
Debug printing
:param lines:
:param category:
:return:
"""
print(category)
for line_number, line in enumerate(lines):
stripped_line = line.strip("\n")
        # look up the requested category attribute dynamically instead of building code for exec()
        print(str(line_number) + " " + str(getattr(categorizations[line_number], category)) + " " + stripped_line) | 5,357,851 |
def get_file_manager(ext=None, callback=None):
"""Get file manager.
Context manager to temporarily set `ext` and `callback` of file_manager.
"""
app = MDApp.get_running_app()
if getattr(app, "file_manager") is None:
app.file_manager = MDFileManager()
file_manager = app.file_manager
try:
file_manager.close()
except AttributeError:
pass
file_manager.ext, ext = ext, app.file_manager.ext
file_manager.select_path = partial(
close_and_callback,
file_manager=file_manager,
callback=callback,
old_callback=file_manager.select_path,
)
try:
yield file_manager
finally:
file_manager.ext, ext = ext, app.file_manager.ext | 5,357,852 |
def _deep_setattr(obj, key, val):
"""
Set an attribute `key` on the object. If any of the prefix attributes do
not exist, they are set to :class:`~pyro.nn.PyroModule`.
"""
def _getattr(obj, attr):
obj_next = getattr(obj, attr, None)
if obj_next is not None:
return obj_next
setattr(obj, attr, PyroModule())
return getattr(obj, attr)
lpart, _, rpart = key.rpartition(".")
# Recursive getattr while setting any prefix attributes to PyroModule
if lpart:
obj = functools.reduce(_getattr, [obj] + lpart.split("."))
setattr(obj, rpart, val) | 5,357,853 |
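# A minimal usage sketch for _deep_setattr (not from the original source),
# assuming pyro and torch are available as in the surrounding module.
import torch
from pyro.nn import PyroModule

root = PyroModule()
_deep_setattr(root, "block.linear.weight", torch.zeros(3))
assert isinstance(root.block, PyroModule)         # created on the fly
assert isinstance(root.block.linear, PyroModule)  # created on the fly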
def rename_columns(data_dict: dict, file_out: str):
"""
    Given a GRASP input excel file, renames the columns and index names, so that these are standardized and don't cause
problems with functions in this package.
If the number of columns in the given excel file sheet is less than it should be, it adds columns filled with 0s.
Args:
data_dict: dictionary representing GRASP input excel file.
file_out: path to excel input file to be outputed.
Returns:
None
"""
sheet_column_names = {'mets': ['Metabolite name', 'balanced?'],
'rxns': ['reaction name', 'transport reaction?', 'isoenzymes'],
'thermoRxns': ['∆Gr\'_min (kJ/mol)', '∆Gr\'_max (kJ/mol)'],
'thermoMets': ['min (M)', 'max (M)'],
'measRates': ['vref_mean (mmol/L/h)', 'vref_std (mmol/L/h)'],
'protData': ['lower_bound', 'mean', 'upper_bound'],
'metsData': ['lower_bound', 'mean', 'upper_bound'],
'kinetics1': ['kinetic mechanism', 'substrate order', 'product order', 'promiscuous',
'inhibitors', 'activators', 'negative effectors', 'positive effectors',
'allosteric', 'subunits', 'mechanism_refs_type', 'mechanism_refs',
'inhibitors_refs_type', 'inhibitors_refs', 'activators_refs_type',
'activators_refs', 'negative_effectors_refs_type', 'negative_effectors_refs',
'positive_effectors_refs_type', 'positive_effectors_refs', 'subunits_refs_type',
'subunits_refs', 'comments']}
sheet_index_name = {'stoic': 'rxn ID',
'mets': 'metabolite ID',
'rxns': 'reaction ID',
'splitRatios': 'reaction ID',
'poolConst': 'metabolite ID',
'thermo_ineq_constraints': 'metabolite ID',
'thermoRxns': 'reaction ID',
'thermoMets': 'metabolite ID',
'measRates': 'reaction ID',
'protData': 'reaction/enzyme ID',
'metsData': 'metabolite ID',
'kinetics1': 'reaction ID'}
writer = pd.ExcelWriter(file_out, engine='xlsxwriter')
for sheet in data_dict.keys():
if sheet in sheet_column_names:
if len(data_dict[sheet].columns) < len(sheet_column_names[sheet]):
for col_i in range(len(data_dict[sheet].columns), len(sheet_column_names[sheet])):
data_dict[sheet][col_i] = np.zeros([len(data_dict[sheet].index), 1])
elif len(data_dict[sheet].columns) > len(sheet_column_names[sheet]):
for col_i in range(len(sheet_column_names[sheet]), len(data_dict[sheet].columns)):
sheet_column_names[sheet].append(data_dict[sheet].columns[col_i])
data_dict[sheet].columns = sheet_column_names[sheet]
if sheet in sheet_index_name:
data_dict[sheet].index.name = sheet_index_name[sheet]
data_dict[sheet].to_excel(writer, sheet_name=sheet)
writer.save() | 5,357,854 |
def sunnynet_train_model(train_path, save_folder, save_file, model_type='SunnyNet_3x3',
loss_function='MSELoss', alpha=1e-3, cuda=True):
"""
Trains a SunnyNet neural network model to be used to predict non-LTE populations.
Needs a "train" file prepared with build_training_set(). Common options can
be entered as keywords. More advanced options can be edited on the dictionary
'config' below.
Parameters
----------
train_path : str
Filename of saved training data, obtained after running
build_training_set() from populations from a 3D atmosphere. The
format is HDF5.
save_folder : str
Folder where to place the output files.
save_file : str
Name of output file where to save the file. Usually has .pt extension.
model_type : str, optional
Type of network to use. The available types are the names of
SunnyNet_* classes in networkUtilities/modelArchitectures.py.
Currently supported networks are:
- 'SunnyNet_1x1' : 6 levels, 400 depth points, 1x1 window size
- 'SunnyNet_3x3' : 6 levels, 400 depth points, 3x3 window size
- 'SunnyNet_5x5' : 6 levels, 400 depth points, 5x5 window size
- 'SunnyNet_7x7' : 6 levels, 400 depth points, 7x7 window size
Should be consistent with choice of channels and ndep.
loss_function : str, optional
Type of loss function to use. Could be a class name of pytorch
loss functions (e.g. 'MSELoss', the default), or a class name
from networkUtils/lossFunctions.py.
alpha : float or None, optional
Weight in loss calculation between mass conservation and cell by
cell error. Default is 0.2. To switch off entirely set to None.
cuda : bool, optional
Whether to use GPU acceleration through CUDA (default True).
"""
if os.path.exists(os.path.join(save_folder, save_file)):
raise IOError("Output file already exists, refusing to overwrite.")
if not os.path.isdir(save_folder):
os.mkdir(save_folder)
train_size, test_size, channels, ndep, pad = read_train_params(train_path)
if not check_model_compat(model_type, pad, channels):
raise ValueError(f"Incompatible sizes between model {model_type} "
f"and training set (pad={pad}, channels={channels})")
params = {
'model': model_type, # pick one from networkUtilities/modelArchitectures.py
# only works with Adam right now,
# can add others from torch.optim to networkUtils/modelWrapper.py:
'optimizer': 'Adam',
'loss_fxn': loss_function,
'learn_rate': 1e-3,
'channels': channels,
'features': ndep,
'cuda': {'use_cuda': cuda, 'multi_gpu': False},
'mode': 'training'
}
# training configuration
config = {
'data_path': train_path,
'save_folder': save_folder,
'model_save': save_file,
'early_stopping': 5, # iterations without lower loss before breaking training loop
'num_epochs': 50, # training epochs
'train_size': train_size, # manually calculate from your train / test split
'batch_size_train': 128,
'val_size': test_size, # manually calculate from your train / test split
'batch_size_val': 128,
'num_workers': 64, # CPU threads
'alpha': alpha # weight in loss calc. between mass conservation and cell by cell error
}
print('Python VERSION:', sys.version)
print('pyTorch VERSION:', torch.__version__)
print('CUDA VERSION: ', torch.version.cuda)
print(f'CUDA available: {torch.cuda.is_available()}')
if torch.cuda.is_available():
print('GPU name:', torch.cuda.get_device_name())
print(f'Number of GPUS: {torch.cuda.device_count()}')
print(f"Using {params['model']} architecture...")
print('Creating dataset...')
tr_data = PopulationDataset3d(config['data_path'], train=True)
params['height_vector'] = tr_data.z
val_data = PopulationDataset3d(config['data_path'], train=False)
print('Creating data loaders...')
loader_dict = {}
train_loader = DataLoader(
tr_data,
batch_size = config['batch_size_train'],
pin_memory = True,
num_workers = config['num_workers']
)
val_loader = DataLoader(
val_data,
batch_size = config['batch_size_val'],
pin_memory = True,
num_workers = config['num_workers']
)
loader_dict['train'] = train_loader
loader_dict['val'] = val_loader
h_model = Model(params)
epoch_loss = train(config, h_model, loader_dict)
# save epoch losses for plotting
with open(f"{config['save_folder']}{config['model_save'][:-3]}_loss.txt", "w") as f:
for i in range(len(epoch_loss['train'])):
f.write(str(epoch_loss['train'][i]) + ' ' + str(epoch_loss['val'][i]) + '\n') | 5,357,855 |
def icon16(self):
"""
TOWRITE
"""
qDebug("icon16()")
self.iconResize(16) | 5,357,856 |
def inverse_max_dcg(labels,
gain_fn=lambda labels: tf.pow(2.0, labels) - 1.,
rank_discount_fn=lambda rank: 1. / tf.math.log1p(rank),
topn=None):
"""Computes the inverse of max DCG.
Args:
labels: A `Tensor` with shape [batch_size, list_size]. Each value is the
graded relevance of the corresponding item.
gain_fn: A gain function. By default this is set to: 2^label - 1.
rank_discount_fn: A discount function. By default this is set to:
1/log(1+rank).
topn: An integer as the cutoff of examples in the sorted list.
Returns:
A `Tensor` with shape [batch_size, 1].
"""
ideal_sorted_labels, = sort_by_scores(labels, [labels], topn=topn)
rank = tf.range(tf.shape(input=ideal_sorted_labels)[1]) + 1
discounted_gain = gain_fn(ideal_sorted_labels) * rank_discount_fn(
tf.cast(rank, dtype=tf.float32))
discounted_gain = tf.reduce_sum(
input_tensor=discounted_gain, axis=1, keepdims=True)
return tf.compat.v1.where(
tf.greater(discounted_gain, 0.), 1. / discounted_gain,
tf.zeros_like(discounted_gain)) | 5,357,857 |
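# A worked numeric check of the formula above (not from the original source),
# done in plain numpy for a single list with labels [3, 1, 2]:
import numpy as np
labels = np.array([3.0, 1.0, 2.0])
ideal = np.sort(labels)[::-1]                   # [3, 2, 1]
gains = 2.0 ** ideal - 1.0                      # [7, 3, 1]
discounts = 1.0 / np.log1p(np.arange(1, 4))     # 1/ln2, 1/ln3, 1/ln4
max_dcg = np.sum(gains * discounts)             # ~13.55
inverse = 1.0 / max_dcg                         # ~0.0738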
def get(url):
"""
    Request the url with GET and return the response; 301 redirects are followed.
    :param url:
    :return: status_code, headers, body
"""
protocol, host, port, path = parsed_url(url)
s = socket_by_protocol(protocol)
s.connect((host, port))
request = 'GET {} HTTP/1.1\r\nhost: {}\r\nConnection: close\r\n\r\n'.format(path, host)
encoding = 'utf-8'
s.send(request.encode(encoding))
response = response_by_socket(s)
r = response.decode(encoding)
status_code, headers, body = parsed_response(r)
if status_code == 301:
url = headers['Location']
return get(url)
else:
return status_code, headers, body | 5,357,858 |
def make_rule(frontier_pair, amr, tree, align, next_index):
"""
Creates a new rule with the given parts, and collapses these parts in the
original graph and tree.
"""
constituent, amr_fragment = frontier_pair
outside_edges = [e for e in amr.triples() if e not in amr_fragment.triples()]
root_label = amr_fragment.root_edges()[0][1]
if isinstance(root_label, NonterminalLabel):
symbol = root_label.label
m = re.match(r'(.+)_(.+)_(\d+)', symbol)
role = m.group(1)
else:
if ':' in root_label:
role, concept = root_label.split(':')
else:
role = root_label
external_nodes = amr.find_external_nodes(amr_fragment)
if len(external_nodes) == 0:
external_nodes = [amr_fragment.find_leaves()[0]]
# WARNING: destructive. Unfortunately we can't make the change any earlier.
# TODO why?
amr_fragment.external_nodes = external_nodes
symbol = '%s_%s_%d' % (role, constituent.node, len(external_nodes))
label = NonterminalLabel(symbol, next_index)
new_triple = (amr_fragment.roots[0], label, tuple(external_nodes))
new_amr = amr.collapse_fragment(amr_fragment, label)
assert new_triple in new_amr.triples()
new_tree = collapse_constituent(tree, constituent, label)
new_alignments = collapse_alignments(align, amr_fragment, new_triple)
rule = Rule(0, symbol, 1, amr_fragment, constituent, original_index =
next_index)
return rule, new_amr, new_tree, new_alignments, next_index+1 | 5,357,859 |
def NotEqual(data1, data2, target=utils.CCE):
"""
    Check whether data1 is not equal to data2.
Args:
data1 (tvm.tensor.Tensor): Tensor.
data2 (tvm.tensor.Tensor): Tensor.
Returns:
        tvm.tensor.Tensor. If data1 is not equal to data2, returns True; otherwise returns False.
Supported Platforms:
'Ascend', 'GPU', 'CPU'
"""
utils.check_supported_target(target)
if target == utils.CCE:
return _not_equal_ascend(data1, data2)
else:
return _not_equal(data1, data2) | 5,357,860 |
def read_labels(labels_path):
"""Reads list of labels from a file"""
with open(labels_path, 'rb') as f:
return [w.strip() for w in f.readlines()] | 5,357,861 |
def test_double_binding_raises(binder_and_pool: Tuple[FunctionBinder, MockConnectionPool]):
"""Tests that binding a function more than once results in an error."""
binder, _ = binder_and_pool
match = "has already been bounded by"
with pytest.raises(FunctionAlreadyBoundError, match=match):
@binder.execute("UPDATE table SET col = #{arg1}")
@binder.execute("INSERT INTO TABLE (#{arg1})")
def should_raise_1(arg1: str):
pass # pragma: no cover
with pytest.raises(FunctionAlreadyBoundError, match=match):
@binder.execute("UPDATE table SET col = #{arg1}")
@binder.query("SELECT * FROM table WHERE col = #{arg1})")
def should_raise_2(arg1: str):
pass # pragma: no cover
with pytest.raises(FunctionAlreadyBoundError, match=match):
@binder.execute("UPDATE table SET col = #{arg1}")
@binder.transaction()
def should_raise_3(arg1: str):
pass | 5,357,862 |
def ensure_hours_unique(sender, instance, raw, using, update_fields, **kwargs):
"""Some DB's don't consider multiple rows which contain the same columns
and include null to violate unique contraints so we do our own check"""
if instance.id is None:
try:
Hours.objects.get(service_event=instance.service_event, third_party=instance.third_party, user=instance.user)
except Hours.DoesNotExist:
pass
else:
# not a unique Hours object
raise IntegrityError | 5,357,863 |
def targeted_neurogenesis(weights, n_replace, targeted_portion, is_training):
"""
    Takes a weight matrix and applies targeted dropout based on weight
    importance (from Gomez et al. 2019; https://for.ai/blog/targeted-dropout/)
    Args:
    weights - the output-by-input matrix of weights
    n_replace - int, the number of targeted neurons to drop out
    targeted_portion - the proportion of neurons/weights to consider 'unimportant',
    from which the n_replace targets are drawn
    is_training - bool, whether the model is training or being evaluated
"""
# get the input vs output size
weights_shape = weights.shape
# l1-norm of neurons based on input weights to sort by importance
importance = torch.norm(weights, p=1, dim=1)
# chose number of indices to remove of the output neurons
idx = round(targeted_portion * weights_shape[0]) - 1
# when sorting the abs valued weights largest to smallest
# take the index of the targeted portion to get a threshold
importance_threshold = torch.sort(importance)[0][-idx] # TODO -idx
# only weights below threshold will be set to None
unimportance_mask = importance > importance_threshold #TODO > change < regular
# during evaluation, only use important weights, without dropout threshold
if not is_training:
weights = torch.reshape(weights, weights_shape)
return weights
# difference between dropout_rate and unimportance_mask (i.e. threshold)
idx_drop = np.random.choice(np.where(unimportance_mask)[0], size=n_replace, replace=False)
dropout_mask = torch.zeros_like(unimportance_mask)
dropout_mask[idx_drop] = 1
# delete dropped out units
weights = weights[~dropout_mask]
return weights, dropout_mask | 5,357,864 |
def calc_distance_two_points(long_from, lat_from, long_to, lat_to):
"""Calculate distance between two points
Parameters
----------
    long_from : float
        Longitude coordinate of the origin point
    lat_from : float
        Latitude coordinate of the origin point
    long_to : float
        Longitude coordinate of the destination point
    lat_to : float
        Latitude coordinate of the destination point
Return
------
distance : float
Distance
"""
distance_in_km = haversine(
(long_from, lat_from),
(long_to, lat_to),
miles=False)
return distance_in_km | 5,357,865 |
def modified_precision(reference_max_counts, hypothesis, n):
"""
Calculate modified ngram precision.
The normal precision method may lead to some wrong translations with
high-precision, e.g., the translation, in which a word of reference
repeats several times, has very high precision.
This function only returns the Fraction object that contains the numerator
and denominator necessary to calculate the corpus-level precision.
To calculate the modified precision for a single pair of hypothesis and
references, cast the Fraction object into a float.
The famous "the the the ... " example shows that you can get BLEU precision
by duplicating high frequency words.
>>> reference1 = 'the cat is on the mat'.split()
>>> reference2 = 'there is a cat on the mat'.split()
>>> hypothesis1 = 'the the the the the the the'.split()
>>> references = [reference1, reference2]
>>> float(modified_precision(references, hypothesis1, n=1)) # doctest: +ELLIPSIS
0.2857...
In the modified n-gram precision, a reference word will be considered
exhausted after a matching hypothesis word is identified, e.g.
>>> reference1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'military', 'will',
... 'forever', 'heed', 'Party', 'commands']
>>> reference2 = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'military', 'forces', 'always',
... 'being', 'under', 'the', 'command', 'of', 'the',
... 'Party']
>>> reference3 = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'army', 'always', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'party']
>>> hypothesis = 'of the'.split()
>>> references = [reference1, reference2, reference3]
>>> float(modified_precision(references, hypothesis, n=1))
1.0
>>> float(modified_precision(references, hypothesis, n=2))
1.0
An example of a normal machine translation hypothesis:
>>> hypothesis1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'military', 'always',
... 'obeys', 'the', 'commands', 'of', 'the', 'party']
>>> hypothesis2 = ['It', 'is', 'to', 'insure', 'the', 'troops',
... 'forever', 'hearing', 'the', 'activity', 'guidebook',
... 'that', 'party', 'direct']
>>> reference1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'military', 'will',
... 'forever', 'heed', 'Party', 'commands']
>>> reference2 = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'military', 'forces', 'always',
... 'being', 'under', 'the', 'command', 'of', 'the',
... 'Party']
>>> reference3 = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'army', 'always', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'party']
>>> references = [reference1, reference2, reference3]
>>> float(modified_precision(references, hypothesis1, n=1)) # doctest: +ELLIPSIS
0.9444...
>>> float(modified_precision(references, hypothesis2, n=1)) # doctest: +ELLIPSIS
0.5714...
>>> float(modified_precision(references, hypothesis1, n=2)) # doctest: +ELLIPSIS
0.5882352941176471
>>> float(modified_precision(references, hypothesis2, n=2)) # doctest: +ELLIPSIS
0.07692...
    :param reference_max_counts: Precomputed maximum ngram counts over the references,
        one Counter per ngram order (index n-1 holds the counts for order n).
    :type reference_max_counts: list(collections.Counter)
:param hypothesis: A hypothesis translation.
:type hypothesis: list(str)
:param n: The ngram order.
:type n: int
:return: BLEU's modified precision for the nth order ngram.
:rtype: Fraction
"""
# Extracts all ngrams in hypothesis
# Set an empty Counter if hypothesis is empty.
counts = Counter(ngrams(hypothesis, n)) if len(hypothesis) >= n else Counter()
# Extract a union of references' counts.
# max_counts = reduce(or_, [Counter(ngrams(ref, n)) for ref in references])
max_counts = reference_max_counts[n - 1]
# Assigns the intersection between hypothesis and references' counts.
clipped_counts = {
ngram: min(count, max_counts.get(ngram, 0)) for ngram, count in counts.items()
}
numerator = sum(clipped_counts.values())
# Ensures that denominator is minimum 1 to avoid ZeroDivisionError.
# Usually this happens when the ngram order is > len(reference).
denominator = max(1, sum(counts.values()))
return Fraction(numerator, denominator, _normalize=False) | 5,357,866 |
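Because this variant takes precomputed per-order maximum reference counts instead of the raw references, a short sketch of how reference_max_counts might be assembled (using the same Counter and ngrams helpers the function relies on) is given below; the sentences are taken from the doctest above.
from collections import Counter
from nltk.util import ngrams

references = ['the cat is on the mat'.split(), 'there is a cat on the mat'.split()]
hypothesis = 'the the the the the the the'.split()

max_order = 2
reference_max_counts = []
for n in range(1, max_order + 1):
    merged = Counter()
    for ref in references:
        merged |= Counter(ngrams(ref, n))  # per-ngram maximum over all references
    reference_max_counts.append(merged)

print(float(modified_precision(reference_max_counts, hypothesis, n=1)))  # ~0.2857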
def bin_spectrum(bin_width, wavelength, doppler_shift, flux, flux_uncertainty,
final_uncertainty='combine'):
"""
Args:
wavelength:
doppler_shift:
flux:
flux_uncertainty:
Returns:
"""
bw = bin_width
wv = wavelength
ds = doppler_shift
f = flux
u = flux_uncertainty
v_bins = np.arange(min(ds), max(ds) + bw, bw)
binned_data, edges, inds = binned_statistic(ds, [wv, ds, f], bins=v_bins,
statistic='mean')
wv_bin = binned_data[0]
v_bin = binned_data[1]
f_bin = binned_data[2]
# Combine uncertainties assuming Gaussian regime
if final_uncertainty == 'combine':
u_bin, edges, inds = binned_statistic(ds, u ** 2, bins=v_bins,
statistic='sum')
u_count, edges, inds = binned_statistic(ds, u ** 2, bins=v_bins,
statistic='count')
u_bin = u_bin ** 0.5 / u_count ** 0.5
elif final_uncertainty == 'poisson':
confidence_interval = poisson_conf_interval(f_bin)
u_bin = np.mean(confidence_interval, axis=0)
else:
raise ValueError('This final uncertainty type is not implemented.')
return wv_bin, v_bin, f_bin, u_bin | 5,357,867 |
def ppo(env_fn,
# by default, use the neural network mlp we define in core
actor_critic=core.mlp_actor_critic,
ac_kwargs=dict(),
seed=0,
steps_per_epoch=4000,
epochs=50,
gamma=0.99,
clip_ratio=0.2,
pi_lr=3e-4,
vf_lr=1e-3,
train_pi_iters=80,
train_v_iters=80,
lam=0.97,
max_ep_len=1000,
target_kl=0.01,
logger_kwargs=dict(),
save_freq=10):
"""
"Args:
env_fn: A function which creates a copy of the environment.
The environment must satisfy the OpenAI Gym API.
        actor_critic: A function which takes in placeholder symbols
for state, ``x_ph``, and action ``a_ph``, and returns the main
outputs from the agent's Tensorflow computation graph:
=========== ================ ======================================
Symbol Shape Description
=========== ================ ======================================
``pi`` (batch, act_dim) | Samples actions from policy given states.
``logp`` (batch,) | Gives log probability according to
| the policy, of taking actions ``a_ph``
| in states ``x_ph``.
``logp_pi`` (batch,) | Gives log probability, according to
| the policy, of the action sampled by ``pi``.
``v`` (batch,) | Gives the value estimate for states
| in ``x_ph``. (Critical: make sure
| to flatten this!)
=========== ================ ======================================" -OpenAI
Okay, quick interruption to OpenAI documentation here.
actor_critic is the function which interfaces with tensorflow. It takes in
``x_ph`` (x placeholder), ie. a representation of the current state, and
``a_ph``, a representation of the some actions. (TODO: document
*what* these actions are).
actor_critic runs these inputs through the tensorflow graph and returns several
pieces of information that are relevant to PPO; these are described above.
Back to OpenAI:
"
ac_kwargs (dict): Any kwargs appropriate for actor_critic function
you provided to PPO.
seed (int): Seed for random number generators.
        steps_per_epoch (int): Number of steps of interaction (state-action pairs)
for the agent and the environment in each epoch.
epochs (int): Number of epochs of interaction (equivalent to
number of policy updates) to perform.
gamma (float): Discount factor. (Always between 0 and 1.)
clip_ratio (float): Hyperparameter for clipping in the policy objective.
Roughly: how far can the new policy go from the old policy while
still profiting (improving the objective function)? The new policy
can still go farther than the clip_ratio says, but it doesn't help
on the objective anymore. (Usually small, 0.1 to 0.3.)
pi_lr (float): Learning rate for policy optimizer.
vf_lr (float): Learning rate for value function optimizer.
train_pi_iters (int): Maximum number of gradient descent steps to take
on policy loss per epoch. (Early stopping may cause optimizer
to take fewer than this.)
train_v_iters (int): Number of gradient descent steps to take on
            value function per epoch.
lam (float): Lambda for GAE-Lambda. (Always between 0 and 1,
close to 1).
max_ep_len (int): Maximum length of trajectory / episode / rollout.
target_kl (float): Roughly what KL divergence we think is appropriate
between new and old policies after an update. This will get used
for early stopping. (Usually small, 0.01 or 0.05.)
logger_kwargs (dict): Keyword args for EpochLogger.
save_freq (int): How often (in terms of gap between epochs) to save
the current policy and value function." - OpenAI
"""
logger = EpochLogger(**logger_kwargs)
logger.save_config(locals())
# modify the seed based on the process so if
# we run this in multiple processes
# simultaneously we don't do the
# exact same thing
seed += 10000 * proc_id()
# set up our random stuff with this seed
tf.set_random_seed(seed)
np.random.seed(seed)
# create the environment
env = env_fn()
obs_dim = env.observation_space.shape
act_dim = env.action_space.shape
# tell the policy (implemented in actor_critic function) what the action space is
ac_kwargs['action_space'] = env.action_space
# "Inputs to computation graph" -OpenAI
# create tensorflow placeholders for observations (x_ph), actions (a_ph),
# advantages (adv_ph), returns (ret_ph), log probabilities
# in the current state of the policy (logp_old_ph)
# (old since this is used compared to the newer version of the policy
# we are creating in the optimization step, comparing to this "old" version)
x_ph, a_ph = core.placeholders_from_spaces(env.observation_space, env.action_space)
adv_ph, ret_ph, logp_old_ph = core.placeholders(None, None, None)
# "Main outputs from computation graph" -OpenAI
# essentially here we fill in the tensorflow graph so we can compute
# the pi, logp, logp_pi, and v tensors based on the
# x_ph and a_ph we created above
pi, logp, logp_pi, v = actor_critic(x_ph, a_ph, **ac_kwargs)
# "Need all placeholders in *this* order later (to zip with data from buffer)" -OpenAI
all_phs = [x_ph, a_ph, adv_ph, ret_ph, logp_old_ph]
# "Every step, get: action, value, and logprob" -OpenAI
# we later feed this list into tf.session.run()
# to tell it to compute the value of pi, v, logp_pi
# using the tensorflow graph we have created
get_action_ops = [pi, v, logp_pi]
# Experience buffer
# number of steps per epoch per process
local_steps_per_epoch = int(steps_per_epoch / num_procs())
buf = PPOBuffer(obs_dim, act_dim, local_steps_per_epoch, gamma, lam)
# Count the number of parameters we are gonna be training,
# both for the policy and for the value function
var_counts = tuple(core.count_vars(scope) for scope in ['pi', 'v'])
logger.log('\nNumber of parameters: \t pi: %d, \t v: %d\n'%var_counts)
# PPO objectives
# ratio is the ratio of two probabilities:
# pi(a|s) / pi_old(a|s)
# where pi(a|s) is the probability of performing action a
# given state s GIVEN THE POLICY WHOSE PARAMETERS WE ARE CHANGING
# DURING THE OPTIMIZATION STEP
# and pi_old(a|s) is the probability of the policy,
# with fixed mlp parameters after the last update,
# performing a given state s
# we essentially use math to find the gradient of pi(a|s) with respect
# to the parameters of the mlp, and this is the core of how we calculate
# the gradient of the objective function for gradient descent
ratio = tf.exp(logp - logp_old_ph) # "pi(a|s) / pi_old(a|s)"-OpenAI
# this min_adv, along with the tf.minimum call in the next line of code,
# implement the PPO-clip functionality
# NOTE: calling this `min_adv` is a bit confusing; if advantage is negative
# this is the min value we allow the gradient descent to consider as the advantage;
# but it is the MAX value if advantage is positive.
min_adv = tf.where(adv_ph > 0, (1 + clip_ratio) * adv_ph, (1 - clip_ratio) * adv_ph)
# create the functions whose gradients we wish to use for gradient descent
# during optimization
# for our policy optimization, it is the PPO objective;
# for the value function it is simply an error-squared
# note that reduce_mean just calculates the mean of the values in the tensor;
# ie. this gives the expected value of the loss given the experimental values we have
pi_loss = -tf.reduce_mean(tf.minimum(ratio * adv_ph, min_adv))
v_loss = tf.reduce_mean((ret_ph - v) ** 2)
# Info (useful to watch during learning)
approx_kl = tf.reduce_mean(logp_old_ph - logp) # "a sample estimate for KL-divergence, easy to compute" -OpenAI
approx_ent = tf.reduce_mean(-logp) # "a sample estimate for entropy, also easy to compute" -OpenAI
clipped = tf.logical_or(ratio > (1 + clip_ratio), ratio < (1 - clip_ratio))
clipfrac = tf.reduce_mean(tf.cast(clipped, tf.float32)) # what fraction of advantages are clipped
# Optimizers
# These use gradient descent with the gradient of the objective
# functions we defined above to improve parameters for pi and v
train_pi = MpiAdamOptimizer(learning_rate=pi_lr).minimize(pi_loss)
train_v = MpiAdamOptimizer(learning_rate=vf_lr).minimize(v_loss)
# initialize the tensorflow computation graph's parameters
# with values
sess = tf.Session()
sess.run(tf.global_variables_initializer())
# "Sync params across processes" -OpenAI
sess.run(sync_all_params())
# Setup model saving
logger.setup_tf_saver(sess, inputs={'x': x_ph}, outputs={'pi': pi, 'v': v})
def update():
# create a dictionary of values, which specify to tensorflow what
# to input for the placeholders: tensors containing the data from
# the trajectory we have stored in buf
inputs = {k:v for k, v in zip(all_phs, buf.get())}
# calculate these for logging later
pi_l_old, v_l_old, ent = sess.run([pi_loss, v_loss, approx_ent], feed_dict=inputs)
# Training
for i in range(train_pi_iters):
# run a training step for the policy, and estimate the kl-divergence
# (ie. how much the policy changed) on this step
_, kl = sess.run([train_pi, approx_kl], feed_dict=inputs)
kl = mpi_avg(kl)
# if the kl divergence is too high, stop training on this step
# TODO: understand better why it is important to do this
if kl > 1.5 * target_kl:
logger.log('Early stopping at step %d due to reaching max kl.'%i)
break
logger.store(StopIter=i)
# train our value function mlp
for _ in range(train_v_iters):
sess.run(train_v, feed_dict=inputs)
# "Log changes from update" -OpenAI
# TODO: This could be made a bit more computationally efficient by not recalculating pi_l_old each loop
# after having calculated the same thing as pi_l_new the previous run through the loop!
# Plus, does it really make the most sense to output pi_l_old and v_l_old as LossPi and LossV
# instead of pi_l_new and v_l_new?
pi_l_new, v_l_new, kl, cf = sess.run([pi_loss, v_loss, approx_kl, clipfrac], feed_dict=inputs)
logger.store(LossPi=pi_l_old, LossV=v_l_old,
KL=kl, Entropy=ent, ClipFrac=cf,
DeltaLossPi=(pi_l_new - pi_l_old),
DeltaLossV=(v_l_new - v_l_old))
start_time = time.time()
# initialize the variables we use while training
# o = observation (env.reset() returns initial observation)
# r = reward = (starts as 0)
# d = done? (whether current episode in env is over)
# ep_ret = episode return
# ep_len = length of episode so far
o, r, d, ep_ret, ep_len = env.reset(), 0, False, 0, 0
# "Main loop: collect experience in env and update/log each epoch"
for epoch in range(epochs):
for t in range(local_steps_per_epoch):
# run the computation of the action, value function, and probability of the action
# using the most recent observation in the x_ph slot
a, v_t, logp_t = sess.run(get_action_ops, feed_dict={x_ph: o.reshape(1,-1)})
# save and log
buf.store(o, a, r, v_t, logp_t)
logger.store(VVals=v_t)
# take the action we computed and advance the environment
o, r, d, _ = env.step(a[0])
ep_ret += r
ep_len += 1
terminal = d or (ep_len == max_ep_len)
if terminal or (t==local_steps_per_epoch - 1):
if not terminal:
print('Warning: trajectory cut off by epoch at %d steps'%ep_len)
# "if trajectory didn't reach terminal state, bootstrap value target" -OpenAI
# in other words, if the we are stopping this trajectory due to a termination
# signal from the env, last_val = the reward from the last step, r
# otherwise we stopped because we reached the max episode length or max local_steps_per_epoch,
                # in which case we set last_val = estimate of the value of current state based on v function
# we are training
last_val = r if d else sess.run(v, feed_dict={x_ph: o.reshape(1, -1)})
buf.finish_path(last_val)
# "only store EpRet / EpLen if trajectory finished" -OpenAI
if terminal:
logger.store(EpRet=ep_ret, EpLen=ep_len)
# reset our training variables and the training environment
o, r, d, ep_ret, ep_len = env.reset(), 0, False, 0, 0
# every save_freq epochs,
# save the state of the environment
# also save the current state of our value function model
# and policy
# these are automatically saved by the save_state function
# since we have already called logger.setup_tf_saver
if (epoch % save_freq == 0) or (epoch == epochs - 1):
logger.save_state({'env': env}, None)
# "Perform PPO update!"
update()
# "Log info about epoch"
logger.log_tabular('Epoch', epoch)
try:
logger.log_tabular('EpRet', with_min_and_max=True)
logger.log_tabular('EpLen', average_only=True)
except:
pass
logger.log_tabular('VVals', with_min_and_max=True)
logger.log_tabular('TotalEnvInteracts', (epoch + 1) * steps_per_epoch)
logger.log_tabular('LossPi', average_only=True)
logger.log_tabular('LossV', average_only=True)
logger.log_tabular('DeltaLossPi', average_only=True)
logger.log_tabular('DeltaLossV', average_only=True)
logger.log_tabular('Entropy', average_only=True)
logger.log_tabular('KL', average_only=True)
logger.log_tabular('ClipFrac', average_only=True)
logger.log_tabular('StopIter', average_only=True)
logger.log_tabular('Time', time.time() - start_time)
logger.dump_tabular() | 5,357,868 |
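A minimal invocation sketch in the spinning-up style; the environment name, hidden sizes, and logger directory are placeholders, and gym plus the core and EpochLogger modules imported above are assumed available.
import gym

ppo(lambda: gym.make("CartPole-v1"),
    ac_kwargs=dict(hidden_sizes=(64, 64)),
    steps_per_epoch=4000,
    epochs=10,
    logger_kwargs=dict(output_dir="/tmp/ppo_cartpole", exp_name="ppo_cartpole"))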
def test_double_smirks():
"""Test filtering based on 2 different smirks patterns."""
molecules = []
for i in [0, 1, 2, 3, 5]:
molecules.append(
Molecule.from_file(get_data_file_path(f'1-validate_and_assign_graphs_and_confs/BBB-0000{i}-00.sdf'), "sdf"))
# filter P should only be one molecule, and F should also be one molecule
result = smirks_filter(input_molecules=molecules, filtered_smirks=["[P:1]", "[F:1]"], processors=1)
assert result.n_filtered == 2
assert result.n_molecules == 3 | 5,357,869 |
def scrub(old_fs: Vfs, new_fs: Vfs) -> Vfs:
"""Try to eliminate files which were previously installed but are no longer used."""
old_fs = old_fs.copy()
new_fs = new_fs.copy()
# Look for files in the old log which are no longer present in the new log
for txn in old_fs._log:
if txn[0] == "link" and txn not in new_fs._log:
new_fs.unlink(txn[2])
elif txn[0] == "mkdir" and txn not in new_fs._log:
new_fs.unlink(txn[1])
return new_fs | 5,357,870 |
def test_renamed_objects(pipeline, clean_db):
"""
Verify that we can dump and restore renamed CQs and streams
"""
pipeline.create_stream('s', x='int')
q = """
SELECT x, count(*) FROM s GROUP BY x;
"""
pipeline.create_cv('cv_0', q)
q = """
SELECT (new).x, combine((delta).count) AS count FROM output_of('cv_0') GROUP BY x
"""
pipeline.create_cv('combine_cv_0', q)
q = """
SELECT (new).count + 41 AS v FROM output_of('combine_cv_0')
"""
pipeline.create_ct('transform_combine_cv_0', q)
q = """
SELECT max(v), count(*) FROM output_of('transform_combine_cv_0')
"""
pipeline.create_cv('max_transform_combine_cv_0', q)
rows = [(x,) for x in range(1000)]
pipeline.insert('s', ('x',), rows)
result = pipeline.execute('SELECT combine(count) FROM cv_0')[0]
assert result['combine'] == 1000
pipeline.execute('ALTER VIEW cv_0 RENAME TO cv_0_renamed')
pipeline.execute('ALTER VIEW combine_cv_0 RENAME TO combine_cv_0_renamed')
pipeline.execute('ALTER VIEW transform_combine_cv_0 RENAME TO transform_combine_cv_0_renamed')
pipeline.execute('ALTER VIEW max_transform_combine_cv_0 RENAME TO max_transform_combine_cv_0_renamed')
pipeline.execute('ALTER FOREIGN TABLE s RENAME TO s_renamed')
result = pipeline.execute('SELECT combine(count) FROM cv_0_renamed')[0]
assert result['combine'] == 1000
result = pipeline.execute('SELECT combine(count) FROM combine_cv_0_renamed')[0]
assert result['combine'] == 1000
result = pipeline.execute('SELECT max, count FROM max_transform_combine_cv_0_renamed')[0]
assert result['max'] == 42
assert result['count'] == 1000
_dump(pipeline, 'test_renamed_cqs.sql')
pipeline.execute('DROP VIEW combine_cv_0_renamed CASCADE')
pipeline.drop_all()
_restore(pipeline, 'test_renamed_cqs.sql')
result = pipeline.execute('SELECT combine(count) FROM cv_0_renamed')[0]
assert result['combine'] == 1000
result = pipeline.execute('SELECT combine(count) FROM combine_cv_0_renamed')[0]
assert result['combine'] == 1000
result = pipeline.execute('SELECT max, count FROM max_transform_combine_cv_0_renamed')[0]
assert result['max'] == 42
assert result['count'] == 1000
# Now write some more rows to verify everything updates properly
rows = [(x,) for x in range(1000)]
pipeline.insert('s_renamed', ('x',), rows)
result = pipeline.execute('SELECT combine(count) FROM cv_0_renamed')[0]
assert result['combine'] == 2000
result = pipeline.execute('SELECT combine(count) FROM combine_cv_0_renamed')[0]
assert result['combine'] == 2000
result = pipeline.execute('SELECT max, count FROM max_transform_combine_cv_0_renamed')[0]
assert result['max'] == 43
assert result['count'] == 2000
pipeline.execute('DROP VIEW combine_cv_0_renamed CASCADE') | 5,357,871 |
def get_filename(row):
"""
Assembles the name of the feature file.
Parameters
----------
row : pandas.Series
A row fom the sequence dataframe. Must have the following index values:
"sample_name", "inj_number", "batch_name", "acquisition_date_and_time".
Returns
-------
filename : str
The filename of the feature file.
"""
acquisition = row.acquisition_date_and_time
if pd.isna(acquisition):
acquisition = "1900-01-01_000000"
filename = (
"_".join(
[
str(row.sample_name),
str(row.inj_number),
str(row.batch_name),
acquisition,
]
)
+ ".featureXML"
)
return filename | 5,357,872 |
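A minimal usage sketch with a hand-built pandas row; the field values are made up for illustration.
import pandas as pd

row = pd.Series({
    "sample_name": "QC01",
    "inj_number": 3,
    "batch_name": "batch_A",
    "acquisition_date_and_time": "2021-05-17_101500",
})
print(get_filename(row))  # QC01_3_batch_A_2021-05-17_101500.featureXML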
def create_plotly_trace(data_x, data_y, namexy, chosen_mode='lines', use_gl = True, swap_xy = False):
"""
Создание одного trace по данным
:param data_x: данные для оси x
:param data_y: данные для оси y
:param namexy: название для trace
:param chosen_mode: настройка отображения 'lines', 'markers'
:return: один trace
"""
if swap_xy:
data_x, data_y = data_y, data_x
hovertemplate = namexy + ": %{x}<extra></extra>"
else:
hovertemplate = namexy + ": %{y}<extra></extra>"
if use_gl == True:
one_trace = go.Scattergl(
x=data_x,
y=data_y,
name=namexy,
mode=chosen_mode,
hovertemplate=hovertemplate
)
else:
one_trace = go.Scatter(
x=data_x,
y=data_y,
name=namexy,
mode=chosen_mode,
hovertemplate=hovertemplate
)
return one_trace | 5,357,873 |
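A minimal usage sketch, assuming plotly is installed and imported as go in the module above; the series values are illustrative only.
import plotly.graph_objects as go

trace = create_plotly_trace([1, 2, 3], [10.0, 12.5, 9.8], "pressure", chosen_mode="lines")
fig = go.Figure(data=[trace])
fig.show()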
def read_metadata() -> dict:
"""Reads and returns raw metadata."""
with open(metadata_path().resolve(), "r") as fd:
return yaml.safe_load(fd) | 5,357,874 |
def plot_plaid_contrast_tuning(bf_indices, base_contrasts, mask_contrasts, base_orientations,
mask_orientations, test_responses):
"""
Plot responses to orthogonal plaid stimulus at different base and mask contrasts
Inputs:
bf_indices: [list or array] of neuron indices to use
all indices should be less than test_responsees.shape[0]
base_contrasts: [list or array] of base contrasts.
mask_contrasts: [list or array] of mask contrasts.
each plot will have one line per mask_contrast
base_orientations: [list or array] of optimal base orientations for all neurons
should be a 1-D array with size = test_responses.shape[0]
mask_orientations: [list or array] of mask orientation values
function will compute the plaid response for orthogonal orientations
test_responses: [list or array] of responses to the base+mask stimulus
should be shape [num_neurons, num_base_contrasts, num_mask_contrasts, num_orientations]
"""
bf_indices = np.asarray(bf_indices)
mask_orientations = np.asarray(mask_orientations)
mask_contrasts = np.asarray(mask_contrasts)
num_bfs = bf_indices.size
num_orientations = mask_orientations.size
num_contrasts = mask_contrasts.size
# index of value in mask_orientations that is closest to orthogonal to base_orientations[bf_idx]
orthogonal_orientations = [base_orientations[bf_indices[bf_idx]]-(np.pi/2)
for bf_idx in range(num_bfs)]
orthogonal_orientations = np.asarray([val + np.pi if val < 0 else val
for val in orthogonal_orientations])
mask_or_idx = [np.argmin(orthogonal_orientations[bf_idx] - mask_orientations)
for bf_idx in range(num_bfs)]
cmap = plt.get_cmap('Greys')
cNorm = matplotlib.colors.Normalize(vmin=0.0, vmax=1.0)
scalarMap = matplotlib.cm.ScalarMappable(norm=cNorm, cmap=cmap)
num_plots_y = np.int32(np.ceil(np.sqrt(num_bfs)))+1
num_plots_x = np.int32(np.ceil(np.sqrt(num_bfs)))
gs_widths = [1.0,]*num_plots_x
gs_heights = [1.0,]*num_plots_y
gs = gridspec.GridSpec(num_plots_y, num_plots_x, wspace=0.5, hspace=0.7,
width_ratios=gs_widths, height_ratios=gs_heights)
fig = plt.figure(figsize=(32,32)) #TODO: Adjust fig size according to num plots
bf_idx = 0
for plot_id in np.ndindex((num_plots_y, num_plots_x)):
(y_id, x_id) = plot_id
if y_id == 0 and x_id == 0:
ax = fig.add_subplot(gs[plot_id])
#ax.set_ylabel("Normalized Activation", fontsize=16)
#ax.set_xlabel("Base Contrast", fontsize=16)
#ax.set_ylim([0.0, 1.0])
ax00 = ax
else:
ax = fig.add_subplot(gs[plot_id], sharey=ax00)
if bf_idx < num_bfs:
for co_idx, mask_contrast in enumerate(mask_contrasts):
# vary base contrast for fixed mask contrast & orthogonal mask
activity = test_responses[bf_indices[bf_idx], :, co_idx, mask_or_idx[bf_idx]]
color_val = scalarMap.to_rgba(mask_contrast)
ax.plot(base_contrasts, activity, linestyle="-", color=color_val)
ax.scatter(base_contrasts, activity, s=4, c=color_val, label=str(mask_contrast))
ax.set_xticks([base_contrasts[0], base_contrasts[-1]])
bf_idx += 1
else:
ax = clear_axis(ax, spines="none")
plt.show()
return fig | 5,357,875 |
def new_token():
"""
Generate an access token for the user.
This endpoint requires basic auth with nickname and password.
"""
return jsonify({'token': generate_token(g.current_user['id'])}) | 5,357,876 |
def get_room_info(room_real_id: int, verify: utils.Verify = None, cookies = None):
"""
    Get live room information (title, description, etc.)
    :param room_real_id: real room ID
    :param verify:
    :return:
"""
if verify is None:
verify = utils.Verify()
api = API["live"]["info"]["room_info"]
if cookies is None:
resp = utils.get(api["url"], {"room_id": room_real_id}, cookies=verify.get_cookies())
else:
resp = utils.get(api["url"], {"room_id": room_real_id}, cookies=cookies)
return resp | 5,357,877 |
def sweep_deposit_eth_accounts_balances():
"""做以太充值账户的归账操作"""
# 计算合约的一次转账操作需要的gas(可以估计一个固定值)
token_contract_addr = app.config['BLOCKLINK_ERC20_CONTRACT_ADDRESS']
    gas_limit = 100000  # TODO: different token contracts may require different gas_limit values
gas_price = 1 * (10**9)
encrypt_password = app.config['ETH_ENCRYPT_PASSWORD'].encode('utf8')
min_sweep_blocklink_token_amount = app.config['MIN_SWEEP_BLOCKLINK_TOKEN_AMOUNT']
sweep_to_eth_address = app.config['SWEEP_TO_ETH_ADDRESS']
sweep_gas_spender_eth_address = app.config['SWEEP_GAS_SPENDER_ETH_ADDRESS']
sweep_gas_spender_eth_private_key = app.config['SWEEP_GAS_SPENDER_ETH_PRIVATE_KEY']
    # TODO: also sweep the ETH held in deposit accounts (it may have come from sweep_gas_spender, so it is not returned to the user directly)
try:
eth_accounts = db.session.query(EthAccount).all()
token_balances_of_accounts = eth_helpers.query_eth_addresses_balances_of_token(
[account.address for account in eth_accounts], token_contract_addr)
eth_balances_of_accounts = eth_helpers.query_eth_addresses_balances_of_eth(
[account.address for account in eth_accounts])
print(token_balances_of_accounts, eth_balances_of_accounts)
nonce_of_sweep_gas_spender_address = eth_helpers.get_eth_address_nonce(sweep_gas_spender_eth_address)
for eth_account in eth_accounts:
eth_privatekey = eth_helpers.try_decrypt_eth_privatekey(eth_account.encrypted_private_key, encrypt_password)
            # Check that the deposit account's private key matches its address; skip the address if not
if eth_privatekey is None:
logger.info(
"found eth address %s private key error when sweeping deposit eth accounts" % str(eth_account.address))
continue
recently_sweep_history = db.session.query(EthTokenSweepTransaction) \
.filter_by(from_address=eth_account.address) \
.filter(
EthTokenSweepTransaction.created_at > (datetime.datetime.utcnow() - datetime.timedelta(hours=3))) \
.order_by(EthTokenSweepTransaction.created_at.desc()).first()
if recently_sweep_history is not None:
                # Skip this address if it was already swept within the last 3 hours
continue
token_balance = token_balances_of_accounts.get(eth_account.address,
eth_helpers.EthAccountBalance(eth_account.address, 0,
token_contract_addr))
if token_balance.balance < min_sweep_blocklink_token_amount:
                # Skip accounts whose token balance is too small to be worth sweeping
print(token_balance.balance, token_balance.simple_balance, min_sweep_blocklink_token_amount)
logger.info(
"eth account has too little blocklink ERC20 token to sweep(%s)" % str(token_balance.simple_balance))
continue
eth_balance = eth_balances_of_accounts.get(eth_account.address,
eth_helpers.EthAccountBalance(eth_account.address, 0))
if int(eth_balance.balance) <= (gas_price * gas_limit):
                # The deposit account lacks enough ETH to pay the token-transfer gas; top it up from the gas spender account
to_send_eth_amount = gas_limit * gas_price
transfer_eth_for_gas_tx_dict = {
# 'from': sweep_gas_spender_eth_address,
'to': eth_account.address,
'value': to_send_eth_amount,
                    'gas': 25200,  # gas required for a plain ETH transfer
'gasPrice': gas_price,
'nonce': nonce_of_sweep_gas_spender_address,
}
nonce_of_sweep_gas_spender_address += 1
signed_raw_tx = eth_helpers.eth_signtransaction(transfer_eth_for_gas_tx_dict,
sweep_gas_spender_eth_private_key)
logger.info("signed raw tx for send eth is: %s" % str(signed_raw_tx))
tx_id = eth_helpers.send_eth_rawtransaction_to_ether(signed_raw_tx)
logger.info(
"response of transfer gas eth from sweep address to %s is %s" % (eth_account.address, str(tx_id)))
                # Wait for the next task cycle; sweep this address once it holds enough ETH
continue
            # Build and broadcast the token transfer from the deposit account to the sweep address
account_nonce = eth_helpers.get_eth_address_nonce(eth_account.address)
transfer_token_tx_dict = eth_helpers.make_eth_call_params(eth_account.address, token_contract_addr,
gas_limit, gas_price, 0,
eth_helpers.get_eth_contract_token_transfer_signature(),
[sweep_to_eth_address, int(token_balance.balance)],
account_nonce)
signed_raw_tx = eth_helpers.eth_signtransaction(transfer_token_tx_dict, eth_privatekey)
logger.info("signed raw tx for send ERC20 token %s from %s to %s: %s" % (str(token_balance.simple_balance), eth_account.address, sweep_to_eth_address, str(signed_raw_tx)))
tx_id = eth_helpers.send_eth_rawtransaction_to_ether(signed_raw_tx)
logger.info(
"response of transfer token from %s to sweep eth address is %s" % (eth_account.address, str(tx_id)))
            # Record the sweep transaction in the database
sweep_tx = EthTokenSweepTransaction(tx_id, eth_account.address, sweep_to_eth_address, token_contract_addr,
token_balance.simple_balance)
db.session.add(sweep_tx)
db.session.commit()
logger.info("processed one token sweep(amount %s) transaction of %s to %s" % (
str(token_balance.simple_balance), eth_account.address, sweep_to_eth_address))
except Exception as e:
logger.error("sweep deposit eth accounts balances error: %s" % str(e))
db.session.rollback() | 5,357,878 |
def parse_raw(setup, id=None, first_line_is_header=(-1,0,1)):
"""Used in conjunction with lazy_import and parse_setup in order to make alterations
before parsing.
Parameters
----------
setup : dict
Result of h2o.parse_setup
id : str, optional
An id for the frame.
first_line_is_header : int, optional
-1,0,1 if the first line is to be used as the header
Returns
-------
H2OFrame
"""
if id: setup["destination_frame"] = _quoted(id).replace("%",".").replace("&",".")
if first_line_is_header != (-1,0,1):
if first_line_is_header not in (-1, 0, 1): raise ValueError("first_line_is_header should be -1, 0, or 1")
setup["check_header"] = first_line_is_header
fr = H2OFrame()
fr._parse_raw(setup)
return fr | 5,357,879 |
def import_year(year: int = None) -> bool:
"""Downloads, extracts and imports the Losungen of a given year.
The year defaults to the next year."""
session: Session = SessionMaker()
repo = TagesLosungRepository(session)
year = datetime.date.today().year + 1 if year is None else year
losungen = repo.get_by_year(year)
session.close()
if losungen:
return True # Already imported
if download_zip(year):
extract_zip()
import_xml()
logger.info("Successfully imported Losungen for %i", year)
return True
logger.warning("Failed to download zip archive for %i", year)
return False | 5,357,880 |
def isURL(url: str) -> bool:
""" Check whether a given string is a URL. """
return url is not None and re.match(urlregex, url) is not None | 5,357,881 |
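The module-level urlregex is not shown above, so the sketch below assumes a simple pattern, defined in the same module, purely for illustration.
import re

urlregex = re.compile(r"^https?://\S+$")  # assumed pattern, not necessarily the module's real one

print(isURL("https://example.com/page"))  # True
print(isURL("not a url"))                 # False
print(isURL(None))                        # False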
def rmse(Y_true, Y_hat):
"""
returns root mean squared error
Args:
Y_true : true outputs [N,(1)]
Y_hat : predicted outputs [N, (1)]
"""
if Y_true.ndim == 2:
Y_true = Y_true[:, 0]
if Y_hat.ndim == 2:
Y_hat = Y_hat[:, 0]
return np.sqrt(np.mean((Y_true - Y_hat)**2)) | 5,357,882 |
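A quick numeric check of the helper above, with numpy imported as np as in the function.
import numpy as np

y_true = np.array([1.0, 2.0, 3.0])
y_hat = np.array([[1.5], [2.0], [2.5]])  # 2-D predictions are squeezed to 1-D internally
print(rmse(y_true, y_hat))               # ~0.408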
def getAllDescWords(itemList):
"""Returns a list of "description words" for each item named in itemList."""
itemList = list(set(itemList)) # make itemList unique
descWords = []
for item in itemList:
descWords.extend(NYCitems[item][DESCWORDS])
return list(set(descWords)) | 5,357,883 |
def plot_probe_trajectory_histology(
x, y, subject_ID, axc, axs,
provenance = 'Planned',
project = 'ibl_neuropixel_brainwide_01',
gr_percentile_min=0.2, rd_percentile_min=1, rd_percentile_max=99.99,
font_size = 8, label_size = 8 ):
"""Plot slices of Histology data along the insertion at [x,y] for subject ID.
Slices made in coronal and sagittal planes.
The slices through the Histology data can be made along any of the
provenances of the probe at [x,y] for subject ID - Planned,
Micro-manipulator, Histology track, Ephys aligned histology track.
axc : AxesSubplot, None
MUST pass an AxesSubplot object for plotting to! For coronal plot.
axs : AxesSubplot, None
MUST pass an AxesSubplot object for plotting to! For sagittal plot.
"""
from one.api import ONE
import ibllib.atlas as atlas
from ibllib.atlas import Insertion
import atlaselectrophysiology.load_histology as hist
import numpy as np
from scipy import ndimage
import sys
import matplotlib.pyplot as plt
# connect to ONE
one = ONE()
# get list of all trajectories at [x,y], for project
trajs = one.alyx.rest('trajectories', 'list', x=x, y=y, project=project)
# keeping subjs and labs for look-up later if needed..
subjs = [sess['session']['subject'] for sess in trajs]
labs = [sess['session']['lab'] for sess in trajs]
#aidx = subjs.index(atlas_ID)
sidx = subjs.index(subject_ID)
# Fetch trajectory metadata for traj:
traj = one.alyx.rest('trajectories', 'list', session=trajs[sidx]['session']['id'],
probe=trajs[sidx]['probe_name'], provenance=provenance)
if traj == []:
raise Exception("No trajectory found with provenance: " + provenance)
# get insertion object from ANY (the first) trajectory
ins = Insertion.from_dict(traj[0])
axis_labels = np.array(['ml (µm)', 'dv (µm)', 'ap (µm)'])
#fig1, ax1 = plt.subplots() # new figure and axes objects - CORONAL
#fig2, ax2 = plt.subplots() # new figure and axes objects - SAGITTAL
# set axes to local variables
ax1 = axc
ax2 = axs
lab = labs[ sidx ] # this returns index in labs where subject_ID is in subjs
hist_paths = hist.download_histology_data(subject_ID, lab)
# create the brain atlases from the data
ba_gr = atlas.AllenAtlas(hist_path=hist_paths[0]) # green histology channel autofl.
ba_rd = atlas.AllenAtlas(hist_path=hist_paths[1]) # red histology channel cm-dii
# CORONAL
# implementing tilted slice here to modify its cmap
# get tilted slice of the green and red channel brain atlases
# using the .image data as this contains the signal
gr_tslice, width, height, depth = ba_gr.tilted_slice(ins.xyz, 1, volume = ba_gr.image)
rd_tslice, width, height, depth = ba_rd.tilted_slice(ins.xyz, 1, volume = ba_rd.image)
gr_tslice_roi = gr_tslice[120:240, 150:300] # isolate large slice over thalamus for max pixel value
rd_tslice_roi = rd_tslice[120:240, 150:300]
width = width * 1e6
height = height * 1e6
depth = depth * 1e6
cmap = plt.get_cmap('bone')
# get the transfer function from y-axis to squeezed axis for second axe
ab = np.linalg.solve(np.c_[height, height * 0 + 1], depth)
height * ab[0] + ab[1]
# linearly scale the values in 2d numpy arrays to between 0-255 (8bit)
# Using gr_tslice min and gr_tslice_roi max to scale autofl.
# using rd_tslice min and percentile (99.99 default) to scale CM-DiI
gr_in = np.interp(gr_tslice, (np.percentile(gr_tslice, gr_percentile_min), gr_tslice_roi.max()), (0, 255))
rd_in = np.interp(rd_tslice, (np.percentile(rd_tslice, rd_percentile_min), np.percentile(rd_tslice, rd_percentile_max)), (0, 255))
# join together red, green, blue numpy arrays to form a RGB image ALONG A NEW DIMENSION
# NOTE need a blue component, have added a set of zeros as blue channel should be BLANK
# NOTE2: converted to unit8 bit, as pyplot imshow() method only reads this format
Z = np.stack([ rd_in.astype(dtype=np.uint8),
gr_in.astype(dtype=np.uint8),
np.zeros(np.shape(gr_tslice)).astype(dtype=np.uint8) ])
    # transpose the axes so the FIRST one becomes the LAST
    # i.e. the NEW DIMENSION [3] ends up as the LAST DIMENSION
Zt = np.transpose(Z, axes=[1,2,0])
# can now add the RGB array to imshow()
ax1.imshow(Zt, interpolation='none', aspect='auto', extent=np.r_[width, height], cmap=cmap, vmin=np.min(gr_in), vmax=np.max(gr_in) )
sec_ax = ax1.secondary_yaxis('right', functions=(
lambda x: x * ab[0] + ab[1],
lambda y: (y - ab[1]) / ab[0]))
ax1.set_xlabel(axis_labels[0], fontsize=font_size)
ax1.set_ylabel(axis_labels[1], fontsize=font_size)
sec_ax.set_ylabel(axis_labels[2], fontsize=font_size)
ax1.tick_params(axis='x', labelrotation = 90)
ax1.tick_params(axis='x', labelsize = label_size)
ax1.tick_params(axis='y', labelsize = label_size)
sec_ax.tick_params(axis='y', labelsize = label_size)
# SAGITTAL
# implementing tilted slice here to modify its cmap
# get tilted slice of the green and red channel brain atlases
# using the .image data as this contains the signal
gr_tslice, width, height, depth = ba_gr.tilted_slice(ins.xyz, 0, volume = ba_gr.image)
rd_tslice, width, height, depth = ba_rd.tilted_slice(ins.xyz, 0, volume = ba_rd.image)
width = width * 1e6
height = height * 1e6
depth = depth * 1e6
cmap = plt.get_cmap('bone')
# get the transfer function from y-axis to squeezed axis for second axe
ab = np.linalg.solve(np.c_[height, height * 0 + 1], depth)
height * ab[0] + ab[1]
# linearly scale the values in 2d numpy arrays to between 0-255 (8bit)
# Using gr_tslice min and max to scale the image
# weirdly rd_in has very large min and max (problem with the original data acquisition?) so best to scale whole RGB with gr_in/1.5!
gr_in = np.interp(gr_tslice, (gr_tslice.min(), gr_tslice.max()), (0, 255))
rd_in = np.interp(rd_tslice, (gr_tslice.min(), gr_tslice.max()/1.5), (0, 255))
# join together red, green, blue numpy arrays to form a RGB image ALONG A NEW DIMENSION
# NOTE need a blue component, have added a set of zeros as blue channel should be BLANK
# NOTE2: converted to unit8 bit, as pyplot imshow() method only reads this format
Z = np.stack([ rd_in.astype(dtype=np.uint8),
gr_in.astype(dtype=np.uint8),
np.zeros(np.shape(gr_tslice)).astype(dtype=np.uint8) ])
    # transpose the axes so the FIRST one becomes the LAST
    # i.e. the NEW DIMENSION [3] ends up as the LAST DIMENSION
Zt = np.transpose(Z, axes=[1,2,0])
# can now add the RGB array to ax2 via imshow()
ax2.imshow(Zt, interpolation='none', aspect='auto', extent=np.r_[width, height], cmap=cmap, vmin=np.min(gr_in), vmax=np.max(gr_in) )
#start = ins.xyz[:, 1] * 1e6
#end = ins.xyz[:, 2] * 1e6
#xCoords = np.array([start[0], end[0]])
sec_ax = ax2.secondary_yaxis('right', functions=(
lambda x: x * ab[0] + ab[1],
lambda y: (y - ab[1]) / ab[0]))
ax2.set_xlabel(axis_labels[2], fontsize=font_size)
ax2.set_ylabel(axis_labels[1], fontsize=font_size)
sec_ax.set_ylabel(axis_labels[0], fontsize=font_size)
ax2.tick_params(axis='x', labelrotation = 90)
ax2.tick_params(axis='x', labelsize = label_size)
ax2.tick_params(axis='y', labelsize = label_size)
sec_ax.tick_params(axis='y', labelsize = label_size)
plt.tight_layout() # tighten layout around xlabel & ylabel
# add a line of the Insertion object onto ax1 (cax - coronal)
# plotting PLANNED insertion
#ax1.plot(ins.xyz[:, 0] * 1e6, ins.xyz[:, 2] * 1e6, colour, linewidth=linewidth)
#ax2.plot(ins.xyz[:, 1] * 1e6, ins.xyz[:, 2] * 1e6, colour, linewidth=linewidth)
return {'coronal-slice': ax1, 'sagittal-slice': ax2, 'x': x, 'y': y,
'provenance': provenance, 'subject_id': subject_ID } | 5,357,884 |
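A minimal call sketch; it assumes ONE/ibllib access to the IBL database, and the coordinates and subject name below are placeholders rather than values taken from the function above.
import matplotlib.pyplot as plt

fig, (axc, axs) = plt.subplots(1, 2, figsize=(12, 6))
out = plot_probe_trajectory_histology(
    x=-2243, y=-2000, subject_ID="SWC_043",
    axc=axc, axs=axs, provenance="Planned")
fig.savefig("trajectory_histology.png")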
def create_constrained_mechanical_system_from_component(structural_component, constant_mass=False,
constant_damping=False, constraint_formulation='boolean',
**formulation_options):
"""
Create a mechanical system from a component where the constraints are applied by a constraint formulation
Parameters
----------
structural_component : amfe.component.StructuralComponent
Structural component describing the mechanical system
constant_mass : bool
Flag indicating if mass matrix is constant
constant_damping : bool
Flag indicating if damping matrix is constant
constraint_formulation : str {'boolean', 'lagrange', 'nullspace_elimination'}
String describing the constraint formulation that shall be used
formulation_options : dict
options passed to the set_options method of the constraint formulation
Returns
-------
system : amfe.solver.translators.MechanicalSystem
formulation : amfe.constraint.ConstraintFormulation
"""
system_unconstrained = create_mechanical_system_from_structural_component(structural_component)
constraint_formulation = _create_constraint_formulation(system_unconstrained, structural_component,
constraint_formulation, **formulation_options)
if constant_mass:
M = MemoizeConstant(constraint_formulation.M)
else:
M = constraint_formulation.M
if constant_damping:
D = MemoizeConstant(constraint_formulation.D)
else:
D = constraint_formulation.D
f_int = constraint_formulation.f_int
K = constraint_formulation.K
f_ext = constraint_formulation.f_ext
dimension = constraint_formulation.dimension
system = MechanicalSystem(dimension, M, D, K, f_ext, f_int)
return system, constraint_formulation | 5,357,885 |
def parse_certificate_issuer_id(id):
"""
:param id: The resource collection type.
:type id: str
:rtype: KeyVaultId
"""
return parse_object_id('certificates/issuers', id) | 5,357,886 |
def create_schema(
url: str,
schema: str,
dbname: str = None
):
"""
Create a schema in the database.
:param url: the database URL
:param schema: the name of the schema
:param dbname: the name of the database
"""
# Figure out what database we're looking for.
_dbname = dbname if dbname else parse_dbname(url)
# Construct the query.
query = SQL(_PHRASEBOOK.gets('create_schema')).format(
schema=SQL(schema)
)
# Create the schema.
with connect(url=url, dbname=dbname) as cnx:
execute(cnx=cnx, query=query) | 5,357,887 |
def normalize_whitespace(
text, no_line_breaks=False, strip_lines=True, keep_two_line_breaks=False
):
"""
Given ``text`` str, replace one or more spacings with a single space, and one
or more line breaks with a single newline. Also strip leading/trailing whitespace.
"""
if strip_lines:
text = "\n".join([x.strip() for x in text.splitlines()])
if no_line_breaks:
text = constants.MULTI_WHITESPACE_TO_ONE_REGEX.sub(" ", text)
else:
if keep_two_line_breaks:
text = constants.NONBREAKING_SPACE_REGEX.sub(
" ", constants.TWO_LINEBREAK_REGEX.sub(r"\n\n", text)
)
else:
text = constants.NONBREAKING_SPACE_REGEX.sub(
" ", constants.LINEBREAK_REGEX.sub(r"\n", text)
)
return text.strip() | 5,357,888 |
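A short usage sketch; the constants regexes used above come from the surrounding package, so only the call is shown and the expected output assumes whitespace collapses to single spaces.
messy = "  first line \n\n\n second   line  "
print(normalize_whitespace(messy, no_line_breaks=True))
# expected: "first line second line"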
def nancumprod(x1, **kwargs):
"""
Return the cumulative product of array elements over a given axis treating Not a Numbers (NaNs) as one.
For full documentation refer to :obj:`numpy.nancumprod`.
Limitations
-----------
Parameter ``x`` is supported as :obj:`dpnp.ndarray`.
Keyword arguments ``kwargs`` are currently unsupported.
Otherwise the functions will be executed sequentially on CPU.
Input array data types are limited by supported DPNP :ref:`Data types`.
.. seealso:: :obj:`dpnp.cumprod` : Return the cumulative product of elements along a given axis.
Examples
--------
>>> import dpnp as np
>>> a = np.array([1., np.nan])
>>> result = np.nancumprod(a)
>>> [x for x in result]
[1.0, 1.0]
>>> b = np.array([[1., 2., np.nan], [4., np.nan, 6.]])
>>> result = np.nancumprod(b)
>>> [x for x in result]
[1.0, 2.0, 2.0, 8.0, 8.0, 48.0]
"""
if not use_origin_backend(x1) and not kwargs:
if not isinstance(x1, dparray):
pass
else:
return dpnp_nancumprod(x1)
return call_origin(numpy.nancumprod, x1, **kwargs) | 5,357,889 |
def _attr_manually_specified_tests_get_errors(
yaml_path: str,
yaml_entry: Dict[str, Any],
tag: str,
attr: str,
grep_tags: List[str]
) -> List[str]:
"""Report incorrect manually-specified test attributes
This function ensures that manually-specified
tests refer to files that actually exist.
Arguments:
yaml_path: A path to a .drift-data.yml file
yaml_entry: The YAML entry to validate
tag: The region tag corresponding to the specified YAML entry
attr: The attribute of the YAML entry to validate
grep_tags: A list of tags existing (not necessarily parsed out of)
the source code
    Returns:
        A list of violations for manually-specified test files that do not
        exist (empty if all referenced files are present)
"""
errors = []
yaml_dirname = os.path.dirname(yaml_path)
for test_path in yaml_entry.keys():
if test_path in constants.RESERVED_YAML_KEYS:
continue # Skip non-filepaths
if not os.path.isabs(test_path):
test_path = os.path.join(yaml_dirname, test_path)
if not os.path.exists(test_path):
errors.append(
cli_yaml_errors.MissingTestFileViolation(
test_path, yaml_path))
return errors | 5,357,890 |
def error(data, mn, mx, confidence):
"""
Compute the error components.
:param data: the collected data.
:param mn: the critical value (minimum).
:param mx: the critical value (maximum).
:param confidence: the confidence level.
:return: (Dict) the dictionary of errors.
"""
return errutils.error_two_tails(data, mn, mx, confidence) | 5,357,891 |
def retry_connection(f):
"""Decorator. Recconect on failure.
"""
def retry(*args, **kwargs):
seconds_to_retry = 5
success = False
while (not success):
try:
result = f(*args, **kwargs)
success = True
return result
except:
print "{0}: {1} --> connection problems . retry in {2} seconds.".format(curr_date(), f.__name__, seconds_to_retry)
time.sleep(seconds_to_retry)
# return None
return retry | 5,357,892 |
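A usage sketch of the decorator, assuming the module-level curr_date helper used inside it is available; the URL is a placeholder.
import urllib.request

@retry_connection
def fetch_page(url):
    # any exception raised here is treated as a connection problem and retried after 5 seconds
    with urllib.request.urlopen(url) as resp:
        return resp.read()

html = fetch_page("https://example.com")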
def shift_mean(x_mod, x_org):
"""
Shift the mean value of `x_mod` such that it equals the mean of `x_org`.
Parameters
----------
x_org : ndarray
The array which hold the "true" mean value.
x_mod : ndarray
The modified copy of `x_org` which must have its mean value shifted.
Returns
-------
shifted_x_mod : ndarray
A copy of `x_mod` with the same mean value as `x_org`.
Examples
--------
For example,
>>> import numpy as np
>>> from magni.imaging.visualisation import shift_mean
>>> x_org = np.arange(4).reshape(2, 2)
>>> x_mod = np.ones((2, 2))
>>> print('{:.1f}'.format(x_org.mean()))
1.5
>>> print('{:.1f}'.format(x_mod.mean()))
1.0
>>> shifted_x_mod = shift_mean(x_mod, x_org)
>>> print('{:.1f}'.format(shifted_x_mod.mean()))
1.5
>>> np.set_printoptions(suppress=True)
>>> shifted_x_mod
array([[ 1.5, 1.5],
[ 1.5, 1.5]])
"""
@_decorate_validation
def validate_input():
_numeric('x_mod', ('integer', 'floating', 'complex'), shape=(-1, -1))
_numeric('x_org', ('integer', 'floating', 'complex'),
shape=x_mod.shape)
validate_input()
return x_mod + (x_org.mean() - x_mod.mean()) | 5,357,893 |
def conversation_detail(request, pk):
"""
Retrieve, update or delete a conversation.
"""
try:
conversation = Conversation.objects.get(pk=pk)
except Conversation.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
if request.method == 'GET':
serializer = Conv_Serializer(conversation)
return Response("serializer.data")
elif request.method == 'PUT':
serializer = Conv_Serializer(conversation, data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
elif request.method == 'DELETE':
conversation.delete()
return Response(status=status.HTTP_204_NO_CONTENT) | 5,357,894 |
def PromptToEnableApi(project, service_token, exception,
is_batch_request=False):
"""Prompts to enable the API and throws if the answer is no.
Args:
project (str): The project that the API is not enabled on.
service_token (str): The service token of the API to prompt for.
    exception (api_exceptions.HttpException): Exception to throw if the prompt
is denied.
is_batch_request: If the request is a batch request. This determines how to
get apitools to retry the request.
Raises:
api_exceptions.HttpException: API not enabled error if the user chooses to
not enable the API.
"""
if console_io.PromptContinue(
default=False,
prompt_string=('API [{}] not enabled on project [{}]. '
'Would you like to enable and retry? ')
.format(service_token, project)):
enable_api.EnableServiceIfDisabled(project, service_token)
# In the case of a batch request, as long as the error's retryable code
# (in this case 403) was set, after this runs it should retry. This
# error code should be consistent with apis.GetApiEnablementInfo
if not is_batch_request:
raise apitools_exceptions.RequestError('Retry')
else:
raise exception | 5,357,895 |
def purchase_products(product_id):
"""Purchase a product"""
app.logger.info("Request to purchase product with id %s", product_id)
check_content_type("application/json")
product = Product.find(product_id)
if not product:
abort(
status.HTTP_404_NOT_FOUND, "product with id '{}' was not found.".format(product_id)
)
return make_response(jsonify(product.serialize()), status.HTTP_200_OK) | 5,357,896 |
def process_alerts(data):
"""
Returns a Pandas DataFrame from the API call.
:return: A pandas DataFrame.
"""
data_dicts = data.get("data", [])
lines = []
for data_dict in data_dicts:
data_dict["alertDescription"] = helper.extract_json_field(
data_dict.get("alertProps", {}), "description.descriptionId")
description_dict = helper.extract_json_field(
data_dict.get("alertProps", {}), "description.descriptionObj")
data_dict.update(description_dict)
alert_context = helper.extract_json_field(
data_dict.get("keys", {}), "src.keys.alert")
if alert_context:
data_dict.update(alert_context)
lines.append(data_dict)
return pd.DataFrame(lines) | 5,357,897 |
def validate(df):
"""Validate the timeseries dataframe
"""
err_msgs = []
warn_msgs = []
# check column names
for col in EXP_COLS:
if col not in df:
err_msgs.append(f"**{col}** column missing")
msgs = {
"errors": err_msgs,
"warnings": warn_msgs
}
is_valid_file = len(err_msgs) == 0
return msgs, is_valid_file | 5,357,898 |
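A minimal check of the validator; EXP_COLS lives at module level and is not shown above, so a placeholder value defined in the same module is assumed here for illustration.
import pandas as pd

EXP_COLS = ["date", "value"]  # assumed required columns, for illustration only
df = pd.DataFrame({"date": ["2021-01-01"], "val": [1.0]})  # "value" column missing
msgs, is_valid = validate(df)
print(is_valid)        # False
print(msgs["errors"])  # ['**value** column missing']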
def _gcs_uri_rewriter(raw_uri):
"""Rewrite GCS file paths as required by the rewrite_uris method.
The GCS rewriter performs no operations on the raw_path and simply returns
it as the normalized URI. The docker path has the gs:// prefix replaced
with gs/ so that it can be mounted inside a docker image.
Args:
raw_uri: (str) the raw GCS URI, prefix, or pattern.
Returns:
normalized: a cleaned version of the uri provided by command line.
docker_path: the uri rewritten in the format required for mounting inside
a docker worker.
"""
docker_path = raw_uri.replace('gs://', 'gs/', 1)
return raw_uri, docker_path | 5,357,899 |
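A quick illustration of the rewrite performed above.
raw, docker = _gcs_uri_rewriter("gs://my-bucket/inputs/sample.bam")
print(raw)     # gs://my-bucket/inputs/sample.bam
print(docker)  # gs/my-bucket/inputs/sample.bam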