content (string, lengths 22 to 815k) | id (int64, 0 to 4.91M)
---|---|
def test_dims(a):
""" update dimensions
"""
assert a.dims == ('d0','d1')
a.dims = ('newa','newb')
assert a.dims == ('newa','newb')
assert a.axes[0].name == 'newa'
assert a.axes[1].name == 'newb' | 5,358,500 |
async def statuslist(ctx, *, statuses: str):
"""Manually make a changing status with each entry being in the list."""
bot.x = 0
statuses = statuses.replace("\n", bot.split)
status_list = statuses.split(bot.split)
if len(status_list) <= 1:
return await bot.send_embed(ctx, f"You cannot have a list with only {len(status_list)} entry.", negative=True)
bot.statuses = status_list
bot.autostatus = True
await bot.send_embed(ctx, "Changed statuslist.") | 5,358,501 |
def edgeplot(LA, Kbar=Kbar, Lbar=Lbar, alpha=alpha, beta=beta):
"""Draw an edgeworth box
arguments:
    LA -- labor allocated to agriculture, from which KA and QA(KA(LA), LA) are calculated
"""
KA = edgeworth(LA, Kbar, Lbar, alpha, beta)
RTS = (alpha/(1-alpha))*(KA/LA)
QA = F(KA, LA, alpha)
QM = G(Kbar-KA, Lbar-LA, beta)
print("(LA,KA)=({:4.1f}, {:4.1f}) (QA, QM)=({:4.1f}, {:4.1f}) RTS={:4.1f}"
.format(LA,KA,QA,QM,RTS))
La = np.arange(1,Lbar)
fig, ax = plt.subplots(figsize=(7,6))
ax.set_xlim(0, Lbar)
ax.set_ylim(0, Kbar)
ax.plot(La, edgeworth(La, Kbar, Lbar, alpha, beta),'k-')
#ax.plot(La, La,'k--')
ax.plot(La, isoq(La, alpha, QA))
ax.plot(La, Kbar - isoq(Lbar-La, beta, QM),'g-')
ax.plot(LA, KA, 'ob')
ax.vlines(LA, 0, KA, linestyles="dashed")
ax.hlines(KA, 0, LA, linestyles="dashed")
ax.text(-6,-6,r'$O_A$',fontsize=16)
ax.text(Lbar,Kbar,r'$O_M$',fontsize=16)
ax.set_xlabel(r'$L_A - Labor$', fontsize=16)
ax.set_ylabel(r'$K_A - Capital$', fontsize=16)
#plt.show() | 5,358,502 |
def kebab(string):
"""kebab-case"""
return "-".join(string.split()) | 5,358,503 |
def getUserID(person):
"""
Gets Humhub User ID using name information
:param person: Name of the person to get the Humhub User ID for
:type person: str.
"""
# search for person string in humhub db
    # switch case for only one name (probably lastname) or
# two separate strings (firstname + lastname)
firstname = ''
lastname = ''
if len(person.split()) == 1:
# only lastname
lastname = person
else:
firstname = person.split()[0]
lastname = person.split()[1]
global offlinemode
if offlinemode:
return 8
# search in humhub db
cnx = establishDBConnection(dbconfig)
cursor = cnx.cursor()
    # use a parameterized query so string values are quoted safely (avoids SQL injection)
    if firstname == '':
        query = "SELECT user_id FROM profile WHERE lastname = %s"
        params = (lastname,)
    else:
        query = ("SELECT user_id FROM profile WHERE firstname = %s"
                 " AND lastname = %s")
        params = (firstname, lastname)
    cursor.execute(query, params)
    userid = None
    for user_id in cursor:
        userid = user_id
cnx.close()
return userid | 5,358,504 |
def analyze_dir(data_dir,px_size,category,ch_actin,sigma_actin,version):
""" Analyzes all linescan pairs in a directory full of linescans
Args:
data_dir (str): the directory containing the linescans
px_size (float): the pixel size for the linescans (for the whole directory)
category (str): the category for the experiment
        ch_actin (int): the channel of the actin linescan (1 or 2)
        sigma_actin (float): sigma parameter for the actin channel (passed to analyze_cortex)
        version (str): version number (for output filenames)
"""
#makes necessary directories in data_dir for saving
save_dir = data_dir + '/ls_data'
uf.make_dir(save_dir)
#makes a list of parameters to extract from cortex data
data_to_write = [['basename','category',
'delta', 'h', 'i_c', 'density', 'X_c', 'solution',
'ch1.i_tot','ch1.H','ch1.x_peak','ch1.i_peak','ch1.i_in','ch1.i_out','ch1.fwhm',
'ch2.i_tot','ch2.H','ch2.x_peak','ch2.i_peak','ch2.i_in','ch2.i_out','ch2.fwhm'
]]
#gets and sorts list of average linescans
linescan_list = [x for x in os.listdir(data_dir) if 'average.dat' in x]
for _ in linescan_list:
print(_)
print(re.search('frame' + '_([0-9]+)_', _).group(1))
linescan_list = sort_ls_list(linescan_list)
#extracts linescan parameters and thickness/density
for i in range(int(len(linescan_list)/2)):
file_ch1 = data_dir + '/' + linescan_list[2*i]
file_ch2 = data_dir + '/' + linescan_list[2*i + 1]
basename = file_ch1.split('/')[-1][:-4]
cortex = analyze_cortex(file_ch1,file_ch2,px_size,ch_actin,sigma_actin)
# plots raw linescans
cortex.plot_lss()
pylab.savefig(save_dir + "/" + basename + ".png")
pylab.close()
# plots linescans with h fits
        if cortex.h is not None:
cortex.plot_fits()
pylab.savefig(save_dir + "/" + basename + "_fit.png")
pylab.close()
# gets extracted linescan data
data_temp = [basename,category]
for param in data_to_write[0][2:]:
data_temp.append(eval("cortex.%s"%param))
data_to_write.append(data_temp)
# print data_to_write
uf.save_data_array(data_to_write,save_dir + "/ls_data.dat") | 5,358,505 |
def test_frequency_encoder_strings():
"""Test the FrequencyEncoder on string data.
Ensure that the FrequencyEncoder can fit, transform, and reverse
transform on string data. Expect that the reverse transformed data
is the same as the input.
Input:
- 4 rows of string data
Output:
- The reverse transformed data
"""
# setup
data = pd.DataFrame(['a', 'b', 'a', 'c'], columns=['column_name'])
column = 'column_name'
transformer = FrequencyEncoder()
# run
transformer.fit(data, column)
reverse = transformer.reverse_transform(transformer.transform(data))
# assert
pd.testing.assert_frame_equal(data, reverse) | 5,358,506 |
def assert_equal(actual: Literal["lstsq"], desired: Literal["lstsq"]):
"""
usage.statsmodels: 1
"""
... | 5,358,507 |
def parse_atom(s, atom_index=-1, debug=False):
""" Parses an atom in a string s
:param s: The string to parse
:type s: str
    :param atom_index: the atom_index counter for continuous parsing. Default is -1.
:type atom_index: int
:return: a list of atoms, a list of bonds and an updated atom_index for the next atom
:rtype: list
"""
if len(s) == 0:
raise ValueError("parse_atom: argument 's' cannot have length 0.")
if debug:
print(" Smiles.parse_atom: '{}'".format(s))
Z = 0
if len(s) == 1:
try:
Z = LABEL2Z[s]
except KeyError:
raise IllegalAtomError("The atom '{}' is invalid.".format(s))
else:
# just return the atom
return [Atom(Z, idx=atom_index)], [], atom_index +1
idx_atom_end = -1 # atomic label from 0:idx_atom_end
# find indices for hydrogens + counts
n_hydrogens = 0
idx_hydrogen = s.find("H")
if idx_hydrogen > 0: # ignore atomic hydrogen (or proton)
idx_atom_end = idx_hydrogen
n_hydrogens = 1
idx_hydrogen_count = idx_hydrogen + 1
try:
n_hydrogens = int(s[idx_hydrogen_count])
except IndexError: # ran past the end of string
pass
except ValueError: # hit something other than a number
pass
idx_cat = s.find("+")
idx_ani = s.find("-")
idx_charge = max(idx_cat, idx_ani)
charge = 0
if idx_cat > 0:
charge = 1
elif idx_ani > 0:
charge = -1
if idx_charge > 0:
if idx_hydrogen > 0:
idx_atom_end = min(idx_charge, idx_hydrogen)
else:
idx_atom_end = idx_charge
try:
charge = int(s[idx_charge+1])
except IndexError: # ran past the end of string
pass
except ValueError: # hit another + or -
charge = charge * sum(count_items_exclusive(s, ["+", "-"]))
if idx_atom_end == -1:
idx_atom_end = len(s)
if debug:
print(" n_hydrogens :", n_hydrogens)
print(" n_charge :", charge)
print(" base atom : s[0:{}] = {}".format(idx_atom_end, s[0:idx_atom_end]))
try:
Z = LABEL2Z[s[0:idx_atom_end]]
except KeyError:
raise IllegalAtomError("The atom '{}' is invalid.".format(s[0:idx_atom_end]))
atoms = [Atom(Z, idx=atom_index, fcharge=charge)]
bonds = []
for i in range(n_hydrogens):
atoms.append(Atom(1, idx=atom_index+1+i))
bonds.append(Bond(atom_index, atom_index+1+i))
return atoms, bonds, atom_index+1+n_hydrogens | 5,358,508 |
def b58decode(v, length):
""" decode v into a string of len bytes
"""
long_value = 0L
for (i, c) in enumerate(v[::-1]):
long_value += __b58chars.find(c) * (__b58base**i)
result = ''
while long_value >= 256:
div, mod = divmod(long_value, 256)
result = chr(mod) + result
long_value = div
result = chr(long_value) + result
nPad = 0
for c in v:
if c == __b58chars[0]: nPad += 1
else: break
result = chr(0)*nPad + result
if length is not None and len(result) != length:
return None
return result | 5,358,509 |
def get_submission_by_id(request, submission_id):
"""
Returns a list of test results assigned to the submission with the given id
"""
submission = get_object_or_404(Submission, pk=submission_id)
data = submission.tests.all()
serializer = TestResultSerializer(data, many=True)
return Response(serializer.data, status.HTTP_200_OK) | 5,358,510 |
def get_resource_path(resource_name):
"""Get the resource path.
Args:
resource_name (str): The resource name relative to the project root
directory.
Returns:
str: The true resource path on the system.
"""
package = pkg_resources.Requirement.parse(PACKAGE_NAME)
return pkg_resources.resource_filename(package, resource_name) | 5,358,511 |
def die_info_rec(die, indent_level=' '):
""" A recursive function for showing information about a DIE and its
children.
"""
print(indent_level + 'DIE tag=%s, attrs=' % die.tag)
for name, val in die.attributes.items():
print(indent_level + ' %s = %s' % (name, val))
child_indent = indent_level + ' '
for child in die.iter_children():
die_info_rec(child, child_indent) | 5,358,512 |
def do_cluster(items, mergefun, distfun, distlim):
"""Pairwise nearest merging clusterer.
items -- list of dicts
mergefun -- merge two items
distfun -- distance function
distlim -- stop merging when distance above this limit
"""
def heapitem(d0, dests):
"""Find nearest neighbor for d0 as sortable [distance, nearest, d0]"""
dists = (
Sort0List([distfun(d0, d1), d1, d0])
for d1 in dests if d1 is not d0
)
return min(dists)
heap = [Sort0List([None, None, d]) for d in items]
d0 = d1 = merged = None
while len(heap) > 1:
for item in heap:
# rescan nearest where nearest was merged away, or not yet set
if item[1] in (None, d0, d1):
item[:] = heapitem(item[2], (x[2] for x in heap))
continue
# update others where merged now nearest
if item[2] is not merged:
distance = distfun(item[2], merged)
if item[0] > distance:
item[0:2] = distance, merged
# arrange heap, pop out one end of shortest edge
heapify(heap)
distance, d1, d0 = item = heappop(heap)
# if shortest edge is long enough, unpop and stop
if distance is None or distance > distlim:
heappush(heap, item) # unspill the milk
break
# replace other end with merged destination
merged = mergefun(d0, d1)
for i in range(len(heap)):
if heap[i][2] is d1:
heap[i] = Sort0List([None, None, merged])
break
return [x[2] for x in heap] | 5,358,513 |
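A minimal usage sketch for do_cluster (hedged: it assumes do_cluster and its Sort0List/heap helpers are importable from this module). One-dimensional points are merged by weighted averaging until the nearest remaining pair is farther apart than distlim.
points = [{"value": v, "weight": 1} for v in (0.0, 0.1, 0.2, 5.0, 5.1, 9.0)]

def merge_points(a, b):
    # merge two clusters into their weighted average
    w = a["weight"] + b["weight"]
    v = (a["value"] * a["weight"] + b["value"] * b["weight"]) / w
    return {"value": v, "weight": w}

def point_distance(a, b):
    return abs(a["value"] - b["value"])

clusters = do_cluster(points, merge_points, point_distance, distlim=1.5)
# expected: three clusters centred near 0.1, 5.05 and 9.0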
def write_features(features, path):
"""
Write a list of features to a file at `path`. The repr of each
feature is written on a new line.
@param features list of features to write
@param path path to write to
"""
with open(path,'w') as f:
for feat in features:
print >>f, repr(feat) | 5,358,514 |
def read_csv(input_file, quotechar='"'):
"""Reads a tab separated value file."""
with open(input_file, "r") as f:
reader = csv.reader(f,quotechar=quotechar)
lines = []
for line in reader:
lines.append(line)
return lines | 5,358,515 |
def test_get_mode(ecomax: EcoMAX) -> None:
"""Test getting mode."""
data = _test_data
ecomax.set_data(data)
assert ecomax.mode == MODES[MODE_HEATING]
# Test with unknown mode.
data[DATA_MODE] = 69
ecomax.set_data(data)
assert ecomax.mode == DATA_UNKNOWN | 5,358,516 |
def rotate_coordinates(local3d, angles):
"""
Rotate xyz coordinates from given view_angles.
local3d: numpy array. Unit LOCAL xyz vectors
angles: tuple of length 3. Rotation angles around each GLOBAL axis.
"""
cx, cy, cz = np.cos(angles)
sx, sy, sz = np.sin(angles)
mat33_x = np.array([
[1, 0, 0],
[0, cx, sx],
[0, -sx, cx]
], dtype='float')
mat33_y = np.array([
[cy, 0, sy],
[0, 1, 0],
[-sy, 0, cy]
], dtype='float')
mat33_z = np.array([
[cz, sz, 0],
[-sz, cz, 0],
[0, 0, 1]
], dtype='float')
local3d = local3d @ mat33_x @ mat33_y @ mat33_z
return local3d | 5,358,517 |
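A small check of the rotation convention (the matrices act on row vectors): rotating the identity basis by 90 degrees about the global z axis sends the local x axis onto global y.
import numpy as np

basis = np.eye(3)  # rows are the local x, y, z unit vectors
rotated = rotate_coordinates(basis, (0.0, 0.0, np.pi / 2))
# rotated[0] is approximately [0, 1, 0]: local x now points along global y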
def verify_selected_option_by_text(element, text):
"""Verify an element has a selected option by the option text
Parameters:
element : element
text : value
"""
element = get_browser().find(element)
with _verify_step('Verify selected option text of element {} is {}'
.format(element.name, text)) as s:
selected_option_text = element.select.first_selected_option.text
s.error = ('Expected selected option in element {} to be {} but was {}'
.format(element.name, text, selected_option_text))
s.condition = selected_option_text == text | 5,358,518 |
def quantize_enumerate(x_real, min, max):
"""
Randomly quantize in a way that preserves probability mass.
We use a piecewise polynomial spline of order 3.
"""
assert min < max
lb = x_real.detach().floor()
# This cubic spline interpolates over the nearest four integers, ensuring
# piecewise quadratic gradients.
s = x_real - lb
ss = s * s
t = 1 - s
tt = t * t
probs = torch.stack([
t * tt,
4 + ss * (3 * s - 6),
4 + tt * (3 * t - 6),
s * ss,
], dim=-1) * (1/6)
logits = safe_log(probs)
q = torch.arange(-1., 3.)
x = lb.unsqueeze(-1) + q
x = torch.max(x, 2 * min - 1 - x)
x = torch.min(x, 2 * max + 1 - x)
return x, logits | 5,358,519 |
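A quick sanity check, assuming torch and the module's safe_log helper are available: for any real input the four spline weights sum to one, so exponentiating the logits and summing over the last axis recovers unit probability mass.
import torch

x = torch.tensor([3.7])
values, logits = quantize_enumerate(x, min=0.0, max=10.0)
print(values)                      # tensor([[2., 3., 4., 5.]])
print(torch.exp(logits).sum(-1))   # tensor([1.0000]): the mass sums to one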
def plot_speed(dataframe1, colour, legend_label):
"""Plot the speed of the vessel throughout the cruise to identify outlying speeds."""
# Plot speed data
plt.scatter(dataframe1.iloc[::60].longitude, dataframe1.iloc[::60].speed, c=colour, label=legend_label)
plt.title("Speed of vessel along track")
plt.xlabel("Longitude")
plt.ylabel("Speed of vessel, knots")
plt.grid(True)
plt.legend()
plt.show()
# Plot of frequency distribution of speed of vessel.
plt.subplot(211)
dataframe1['speed'].hist()
plt.title("Frequency distribution of speed of vessel")
plt.xlabel("Speed of vessel, knots")
plt.ylabel("Count")
plt.grid(True)
plt.subplot(212)
dataframe1['speed'].hist(bins=80,range=[0,20])
plt.title("Frequency distribution of speed of vessel")
plt.xlabel("Speed of vessel, knots")
plt.ylabel("Count")
plt.grid(True)
plt.tight_layout()
plt.show() | 5,358,520 |
def counter_format(counter):
"""Pretty print a counter so that it appears as: "2:200,3:100,4:20" """
if not counter:
return "na"
return ",".join("{}:{}".format(*z) for z in sorted(counter.items())) | 5,358,521 |
def drot(x, y, c, s):
"""
Apply the Givens rotation {(c,s)} to {x} and {y}
"""
# compute
gsl.blas_drot(x.data, y.data, c, s)
# and return
return x, y | 5,358,522 |
def _update_dict_within_dict(items, config):
""" recursively update dict within dict, if any """
for key, value in items:
if isinstance(value, dict):
config[key] = _update_dict_within_dict(
value.items(), config.get(key, {})
)
else:
config[key] = value
return config | 5,358,523 |
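For example, merging a nested override into an existing config leaves untouched keys in place:
config = {"db": {"host": "localhost", "port": 5432}, "debug": False}
overrides = {"db": {"port": 5433}, "debug": True}
merged = _update_dict_within_dict(overrides.items(), config)
# merged == {"db": {"host": "localhost", "port": 5433}, "debug": True}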
def masterProductFieldUpdate(objectId: str):
"""
Submit handler for updating & removing field overrides.
:param objectId: The mongodb master product id.
"""
key = request.form.get("field-key")
value = request.form.get("field-value")
# Clean up and trim tags if being set.
if key == MASTER_PRODUCT_FIELD__TAGS:
tags = value.strip().split(",")
if len(tags):
tags = ", ".join([tag.strip() for tag in set(tags) if tag.strip()])
value = tags
if thk.products.overrideProductField(objectId, key, value):
# If the product is active, mark it for upserting.
product = thk.products.getOneProduct(objectId)
if product and THK_ACTIVE in product and product[THK_ACTIVE]:
thk.products.rebuildActiveProduct(objectId)
flash("Field modified successfully.", "success")
else:
flash("Field could not be modified.", "danger")
return redirect(url_for("products.masterProductEdit", objectId=objectId)) | 5,358,524 |
def recalc_Th(Pb, age):
"""Calculates the equivalent amount of ThO_2 that would be required to produce the
measured amount of PbO if there was no UO_2 in the monazite.
INPUTS:
Pb: the concentration of Pb in parts per million
age: the age in million years
"""
return (232. / 208.) * Pb / (np.exp(4.95e-11 * (age * 1e6)) - 1) | 5,358,525 |
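A worked example with illustrative numbers: for 1000 ppm Pb in a 500 Ma monazite, exp(4.95e-11 * 5e8) - 1 ≈ 0.0251, so the function returns roughly (232/208) * 1000 / 0.0251 ≈ 4.45e4.
print(recalc_Th(1000.0, 500.0))  # ≈ 44500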
def relative_periodic_trajectory_wrap(
reference_point: ParameterVector,
trajectory: ArrayOfParameterVectors,
period: float = 2 * np.pi,
) -> ArrayOfParameterVectors:
"""Function that returns a wrapped 'copy' of a parameter trajectory such that
the distance between the final point of the trajectory and the reference point
is minimal inside the specified period.
The rest of the trajectory is being transformed in the same manner.
NOTE:
It only works as intended if the period is larger than the distance
between the consecutive points in the trajectory.
Args:
reference_point: Reference point for periodic wrapping of the trajectory.
trajectory: Trajectory that is wrapped to a copy of itself such that
the distance between the final point in the trajectory
and the reference point is minimal.
period: Periodicity of each parameter in each point of the trajectory.
Defaults to 2*np.pi.
"""
if not np.all(np.linalg.norm(np.diff(trajectory, axis=0), axis=1) < period):
raise ValueError(
"Distances between consecutive points must be smaller than period."
)
wrapped_trajectory = np.copy(trajectory).astype(float)
wrapped_trajectory[-1] = relative_periodic_wrap(
reference_point, trajectory[-1], period=period
)
for ii in range(2, len(wrapped_trajectory) + 1):
wrapped_trajectory[-ii] = relative_periodic_wrap(
wrapped_trajectory[-ii + 1], trajectory[-ii], period=period
)
return wrapped_trajectory | 5,358,526 |
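A minimal usage sketch (hedged: it assumes numpy and the relative_periodic_wrap helper from the same module): a trajectory creeping toward +pi is shifted by one period so that its final point lands next to a reference just past -pi.
import numpy as np

trajectory = np.array([[3.0], [3.1], [3.2]])
reference = np.array([-3.1])
wrapped = relative_periodic_trajectory_wrap(reference, trajectory)
# wrapped[-1] ≈ [3.2 - 2*pi] ≈ [-3.08], with the earlier points shifted consistently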
def planToSet(world,robot,target,
edgeCheckResolution=1e-2,
extraConstraints=[],
equalityConstraints=[],
equalityTolerance=1e-3,
ignoreCollisions=[],
movingSubset=None,
**planOptions):
"""
Creates a MotionPlan object that can be called to solve a standard motion
planning problem for a robot in a world. The plan starts from the robot's
current configuration and ends in a target set.
Args:
world (WorldModel): the world in which the robot lives, including
obstacles
robot (RobotModel): the moving robot. The plan starts from
robot.getConfig()
target (function or CSpace): a function f(q) returning a bool which is
True if the configuration q is a goal, OR an instance of a CSpace
subclass where sample() generates a sample in the target set and
feasible(x) tests whether a sample is in the target set.
.. note::
The function should accept vectors of the same dimensionality
as the robot, not the moving subset. Similarly, the CSpace
should have the same dimensionality as the robot.
edgeCheckResolution (float, optional): the resolution at which edges in the path are
checked for feasibility
extraConstraints (list, optional): possible extra constraint functions, each
of which needs to return True if satisfied.
.. note::
Don't put cartesian constraints here! Instead place your function in equalityConstraints.
equalityConstraints (list, optional): a list of IKObjectives or equality
constraints f(x)=0 that must be satisfied during the motion. Equality
constraints may return a float or a list of floats. In the latter case, this
is interpreted as a vector function, in which all entries of the vector must be 0.
equalityTolerance (float, optional): a tolerance to which all the equality constraints
must be satisfied.
ignoreCollisions (list): a list of ignored collisions. Each element may be
a body in the world, or a pair (a,b) where a, b are bodies in the world.
movingSubset (optional): if 'auto', 'all', or None (default), all joints
will be allowed to move. If this is a list, then only these joint
indices will be allowed to move.
planOptions (keywords): keyword options that will be sent to the planner. See
the documentation for MotionPlan.setOptions for more details.
Returns:
MotionPlan: a planner instance that can be called to get a
kinematically-feasible plan. (see :meth:`MotionPlan.planMore` )
The underlying configuration space (a RobotCSpace, ClosedLoopRobotCSpace, or
EmbeddedRobotCSpace) can be retrieved using the "space" attribute of the
resulting MotionPlan object.
"""
q0 = robot.getConfig()
subset = []
    if movingSubset == 'auto' or movingSubset == 'all' or movingSubset is None:
subset = list(range(len(q0)))
else:
subset = movingSubset
space = makeSpace(world=world,robot=robot,
edgeCheckResolution=edgeCheckResolution,
extraConstraints=extraConstraints,
equalityConstraints=equalityConstraints,
equalityTolerance=equalityTolerance,
ignoreCollisions=ignoreCollisions,
movingSubset=subset)
if hasattr(space,'lift'): #the planning takes place in a space of lower dimension than #links
plan = EmbeddedMotionPlan(space,q0,**planOptions)
else:
plan = MotionPlan(space,**planOptions)
#convert target to a (test,sample) pair if it's a cspace
if isinstance(target,CSpace):
goal = [(lambda x:target.feasible(x)),(lambda : target.sample())]
else:
if not callable(target):
if not isinstance(target,(tuple,list)) or len(target)!=2 or not callable(target[0]) or not callable(target[1]):
raise TypeError("target must be a predicate function or CSpace object")
goal = target
try:
plan.setEndpoints(q0,goal)
except RuntimeError:
#the start configuration is infeasible, print it out
        if space.cspace is None: space.setup()
sfailures = space.cspace.feasibilityFailures(plan.space.project(q0))
warnings.warn("Start configuration fails {}".format(sfailures))
raise
return plan | 5,358,527 |
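A hedged usage sketch (the world file name and goal predicate are illustrative; WorldModel/RobotModel are the Klampt classes referenced in the docstring): plan from the robot's current configuration to any configuration satisfying a predicate, then run the planner for a fixed number of iterations.
world = WorldModel()
world.readFile("my_world.xml")     # hypothetical world file with robot and obstacles
robot = world.robot(0)

def in_goal_region(q):
    # goal set: first joint within 0.01 rad of 1.0
    return abs(q[0] - 1.0) < 1e-2

plan = planToSet(world, robot, in_goal_region, edgeCheckResolution=1e-2)
plan.planMore(500)                 # plan for 500 iterations
path = plan.getPath()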
def itkimage_to_json(itkimage, manager=None):
"""Serialize a Python itk.Image object.
Attributes of this dictionary are to be passed to the JavaScript itkimage
constructor.
"""
if itkimage is None:
return None
else:
direction = itkimage.GetDirection()
directionMatrix = direction.GetVnlMatrix()
directionList = []
dimension = itkimage.GetImageDimension()
pixelArr = itk.array_view_from_image(itkimage)
compressor = zstd.ZstdCompressor(level=3)
compressed = compressor.compress(pixelArr.data)
pixelArrCompressed = memoryview(compressed)
for col in range(dimension):
for row in range(dimension):
directionList.append(directionMatrix.get(row, col))
componentType, pixelType = _image_to_type(itkimage)
imageType = dict(
dimension=dimension,
componentType=componentType,
pixelType=pixelType,
components=itkimage.GetNumberOfComponentsPerPixel()
)
return dict(
imageType=imageType,
origin=tuple(itkimage.GetOrigin()),
spacing=tuple(itkimage.GetSpacing()),
size=tuple(itkimage.GetBufferedRegion().GetSize()),
direction={'data': directionList,
'rows': dimension,
'columns': dimension},
compressedData=pixelArrCompressed
) | 5,358,528 |
def test_two_agents(tmp_path, empty_ensemble):
"""
:tmp_path: https://docs.pytest.org/en/stable/tmpdir.html
"""
@fdb.transactional
def get_started(tr):
return joshua_model._get_snap_counter(tr, ensemble_id, "started")
assert len(joshua_model.list_active_ensembles()) == 0
ensemble_id = joshua_model.create_ensemble(
"joshua", {"max_runs": 1, "timeout": 1}, open(empty_ensemble, "rb")
)
agents = []
for rank in range(2):
agent = threading.Thread(
target=joshua_agent.agent,
args=(),
kwargs={
"work_dir": os.path.join(tmp_path, str(rank)),
"agent_idle_timeout": 1,
},
)
        agent.daemon = True
agent.start()
agents.append(agent)
# before starting agent two, wait until agent one has started on this ensemble
while get_started(joshua_model.db) != 1:
time.sleep(0.001)
joshua.tail_ensemble(ensemble_id, username="joshua")
@fdb.transactional
def get_started(tr):
return joshua_model._get_snap_counter(tr, ensemble_id, "started")
# The second agent won't have started this ensemble (unless somehow > 10
# seconds passed without the first agent completing the ensemble)
assert get_started(joshua_model.db) == 1
for agent in agents:
agent.join() | 5,358,529 |
def project(pnt, norm):
"""Projects a point following a norm."""
t = -np.sum(pnt*norm)/np.sum(norm*norm)
ret = pnt+norm*t
return ret/np.linalg.norm(ret) | 5,358,530 |
def RunExtraTreesClassifier(trainDf, testDf):
"""RunExtraTreesClassifier
    Runs an Extra Trees Classifier on training and testing dataframes.
Input:
trainDf -- the training DataFrame (pandas)
testDf -- the testing DataFrame (pandas)
"""
train_X, train_y, test_X = createArrays(trainDf, testDf)
# Split the training set into training and validation sets
X, X_, y, y_ = train_test_split(train_X, train_y, test_size=0.2)
etc = ExtraTreesClassifier(n_estimators=10, max_depth=None, random_state=0, verbose=True)
y_test_etc = trainAndRetrainClassifier(etc, X, X_, y, y_, train_X, train_y, test_X)
savePredictions(y_test_etc, 'etc.csv') | 5,358,531 |
def fix_trajectory(traj):
"""Remove duplicate waypoints that are introduced during smoothing.
"""
cspec = openravepy.ConfigurationSpecification()
cspec.AddDeltaTimeGroup()
iwaypoint = 1
num_removed = 0
while iwaypoint < traj.GetNumWaypoints():
waypoint = traj.GetWaypoint(iwaypoint, cspec)
delta_time = cspec.ExtractDeltaTime(waypoint)
if delta_time == 0.0:
traj.Remove(iwaypoint, iwaypoint + 1)
num_removed += 1
else:
iwaypoint += 1
return num_removed | 5,358,532 |
def calculate_edt(im, outpath=''):
"""Calculate distance from mask."""
mask = im.ds[:].astype('bool')
abs_es = np.absolute(im.elsize)
dt = distance_transform_edt(~mask, sampling=abs_es)
# mask = im.ds[:].astype('uint32')
# dt = edt.edt(mask, anisotropy=im.elsize, black_border=True, order='F', parallel=1)
# TODO?: leverage parallel
mo = write_output(outpath, dt, im.get_props())
return mo, mask | 5,358,533 |
def select_report_data(conn):
""" select report data to DB """
cur = conn.cursor()
cur.execute("SELECT * FROM report_analyze")
report = cur.fetchall()
cur.close()
return report | 5,358,534 |
def test__get_obj__nonentity(obj, get_func):
"""Test getting of entities"""
with patch.object(SYN, get_func, return_value=obj) as patch_get:
return_obj = GET_CLS._get_obj(obj)
if isinstance(obj, (synapseclient.Team, synapseclient.Evaluation)):
patch_get.assert_called_once_with(obj.name)
elif isinstance(obj, synapseclient.Wiki):
patch_get.assert_called_once_with(obj.ownerId)
assert return_obj == obj | 5,358,535 |
def func_lorentz_by_h_pv(z, h_pv, flag_z: bool = False, flag_h_pv: bool = False):
"""Gauss function as function of h_pv
"""
inv_h_pv = 1./h_pv
inv_h_pv_sq = numpy.square(inv_h_pv)
z_deg = z * 180./numpy.pi
c_a = 2./numpy.pi
a_l = c_a * inv_h_pv
b_l = 4.*inv_h_pv_sq
z_deg_sq = numpy.square(z_deg)
res = numpy.expand_dims(a_l, axis=-1) /(1+ numpy.expand_dims(b_l, axis=-1) * z_deg_sq)
dder = {}
if flag_z:
dder["z"] = -2.*z_deg*numpy.expand_dims(b_l,axis=-1)*res/(1.+numpy.expand_dims(b_l, axis=-1)*z_deg_sq) * 180./numpy.pi
if flag_h_pv:
dder["h_pv"] = (c_a * (numpy.expand_dims(h_pv, axis=-1) + 4*z_deg_sq) - \
c_a * numpy.expand_dims(h_pv, axis=-1))/numpy.square(numpy.expand_dims(h_pv, axis=-1) + 4*z_deg_sq)
return res, dder | 5,358,536 |
def test_train_rl_main(tmpdir):
"""Smoke test for imitation.scripts.train_rl.rollouts_and_policy."""
run = train_rl.train_rl_ex.run(
named_configs=["cartpole"] + ALGO_FAST_CONFIGS["rl"],
config_updates=dict(
common=dict(log_root=tmpdir),
),
)
assert run.status == "COMPLETED"
assert isinstance(run.result, dict) | 5,358,537 |
async def test_update_not_playing(hass, lastfm_network):
"""Test update when no playing song."""
lastfm_network.return_value.get_user.return_value = MockUser(None)
assert await async_setup_component(
hass,
sensor.DOMAIN,
{"sensor": {"platform": "lastfm", "api_key": "secret-key", "users": ["test"]}},
)
await hass.async_block_till_done()
entity_id = "sensor.test"
state = hass.states.get(entity_id)
assert state.state == STATE_NOT_SCROBBLING | 5,358,538 |
def backup_postgres_db() -> Tuple[Optional[str], bytes]:
"""Backup postgres db to a file."""
try:
time_str = datetime.now().strftime("%d-%m-%YT%H:%M:%S")
filename = f"backup_restore/backups/{time_str}-{settings.POSTGRES_CUSTOM_DB}.dump"
backup_name = f"{time_str}-{settings.POSTGRES_CUSTOM_DB}.dump.gz"
# create the dump with pg dump and terminal command
pass
# check if command worked
pass
# compress the dump
compress_file(filename)
# upload the zipped dump
error = upload_backup(backup_name)
if error is not None:
return error, bytes()
# remove created files
# with temporary file it would be the cleaner way
try:
for file in listdir("backup_restore/backups"):
if file != "__init__.py":
os.remove(f"backup_restore/backups/{file}")
logger.info("Removed backup files")
except Exception:
logger.error("Could not remove backup files")
return None, bytes()
except Exception as e:
logger.error(e)
return "Backup failed", bytes() | 5,358,539 |
def get_profile(aid):
"""
get profile image of author with the aid
"""
if 'logged_in' in session and aid ==session['logged_id']:
try:
re_aid = request.args.get("aid")
re = aController.getAuthorByAid(re_aid)
if re != None:
return re
return redirect(url_for('/'))
except KeyError:
return redirect(url_for('/'))
return redirect(url_for('/')) | 5,358,540 |
def SystemSettings_GetMetric(*args, **kwargs):
"""SystemSettings_GetMetric(int index, Window win=None) -> int"""
return _misc_.SystemSettings_GetMetric(*args, **kwargs) | 5,358,541 |
def all_files(dir, pattern):
"""Recursively finds every file in 'dir' whose name matches 'pattern'."""
    return [f.as_posix() for f in Path(dir).rglob(pattern)]
def get_identity_groups(ctx):
"""Load identity groups definitions."""
return render_template('identity-groups', ctx) | 5,358,543 |
def main():
"""
Entry point
"""
# Prepare factorials for O(1) lookup
factorials = [1]
for i in range(1, 101):
factorials.append(factorials[-1] * i)
# Now start counting
ncr_count = 0
for n in range(1, 101):
for r in range(1, n+1):
ncr = factorials[n] / (factorials[r] * factorials[n - r])
if ncr > 1000000:
ncr_count += 1
print(f"Count: {ncr_count}") | 5,358,544 |
def get_video_stream(consumer):
"""
    Here is where we receive streamed images from the Kafka Server and convert
them to a Flask-readable format.
"""
meta = False
while True:
if not meta:
yield(b'<meta http-equiv="refresh" content="300">\n')
meta = True
else:
msg = consumer.poll()
if msg is not None:
yield (b'--frame\r\n' + b'Content-Type: image/jpg\r\n\r\n' + msg.value() + b'\r\n\r\n') | 5,358,545 |
def test_api_challenges_get_ctftime_public():
"""Can a public user get /api/v1/challenges if ctftime is over"""
app = create_ctfd()
with app.app_context(), freeze_time("2017-10-7"):
set_config("challenge_visibility", "public")
with app.test_client() as client:
r = client.get("/api/v1/challenges")
assert r.status_code == 200
set_config(
"start", "1507089600"
) # Wednesday, October 4, 2017 12:00:00 AM GMT-04:00 DST
set_config(
"end", "1507262400"
) # Friday, October 6, 2017 12:00:00 AM GMT-04:00 DST
r = client.get("/api/v1/challenges")
assert r.status_code == 403
destroy_ctfd(app) | 5,358,546 |
def write_buffer_values(output_dir, buffer_filename, buffer_quant, i_bits=12, q_bits=None):
"""
    :param output_dir: directory in which to write the buffer file
    :param buffer_filename: name of the output file
    :param buffer_quant: iterable of quantized complex samples to write
    :param i_bits: bit width of the real (I) component (default 12)
    :param q_bits: bit width of the imaginary (Q) component (defaults to i_bits)
    :return: None
"""
q_bits = i_bits if not q_bits else q_bits
with open(os.path.join(output_dir, buffer_filename), 'w+') as rbf:
for val in buffer_quant:
i_bin = bin_num(val.real, i_bits)
q_bin = bin_num(val.imag, q_bits)
rbf.write(i_bin + q_bin + "\n") | 5,358,547 |
def fix_behaviour_contrib_auth_user_is_anonymous_is_authenticated_callability(utils):
"""
Make user.is_anonymous and user.is_authenticated behave both as properties and methods,
by preserving their callability like in earlier Django version.
"""
utils.skip_if_app_not_installed("django.contrib.contenttypes") # BEFORE IMPORTS!
from django.contrib.auth.base_user import AbstractBaseUser
from django.contrib.auth.models import AnonymousUser
from ..django_legacy.django2_0.utils.deprecation import CallableFalse, CallableTrue
@property
def is_anonymous_for_AbstractBaseUser(self):
"""
Always return False. This is a way of comparing User objects to
anonymous users.
"""
return CallableFalse
@property
def is_authenticated_for_AbstractBaseUser(self):
"""
Always return True. This is a way to tell if the user has been
authenticated in templates.
"""
return CallableTrue
utils.inject_attribute(AbstractBaseUser, "is_anonymous", is_anonymous_for_AbstractBaseUser)
utils.inject_attribute(AbstractBaseUser, "is_authenticated", is_authenticated_for_AbstractBaseUser)
@property
def is_anonymous_for_AnonymousUser(self):
return CallableTrue
@property
def is_authenticated_for_AnonymousUser(self):
return CallableFalse
utils.inject_attribute(AnonymousUser, "is_anonymous", is_anonymous_for_AnonymousUser)
utils.inject_attribute(AnonymousUser, "is_authenticated", is_authenticated_for_AnonymousUser) | 5,358,548 |
def BZPoly(pnts, poly, mag, openPoly=False):
"""TODO WRITEME.
Parameters
----------
pnts : list
Measurement points [[p1x, p1z], [p2x, p2z],...]
poly : list
Polygon [[p1x, p1z], [p2x, p2z],...]
mag : [M_x, M_y, M_z]
Magnetization = [M_x, M_y, M_z]
"""
dgz = calcPolyGz(pnts, poly, density=1.0, openPoly=openPoly)[1]
dgz[:,2] *= -1
return poissonEoetvoes(adot(mag, -dgz)) | 5,358,549 |
def conv(structure_file, file_format):
"""
Convert a structure into the conventional unit cell.
"""
from pybat.cli.commands.util import conventional_structure
conventional_structure(structure_file=structure_file,
fmt=file_format) | 5,358,550 |
def verify_rotation_speeds_have_units(step, human_readable):
"""Verify all rotation speeds have units."""
for prop, val in step.properties.items():
if step.PROP_LIMITS.get(prop, None) is ROTATION_SPEED_PROP_LIMIT:
if step.properties.get('stir', None):
assert f'{format_number(val)} RPM' in human_readable | 5,358,551 |
def matches(spc, shape_):
"""
Return True if the shape adheres to the spc (spc has optional color/shape
restrictions)
"""
(c, s) = spc
matches_color = c is None or (shape_.color == c)
matches_shape = s is None or (shape_.name == s)
return matches_color and matches_shape | 5,358,552 |
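For example, with a simple shape record the optional restrictions behave as documented:
from collections import namedtuple

Shape = namedtuple("Shape", ["color", "name"])
print(matches(("red", None), Shape("red", "circle")))       # True: color matches, shape unrestricted
print(matches(("blue", "circle"), Shape("red", "circle")))  # False: color differs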
def jsonsafe(obj: Any) -> ResponseVal:
"""
Catch the TypeError which results from encoding non-encodable types
This uses the serialize function from my.core.serialize, which handles
serializing most types in HPI
"""
try:
return Response(dumps(obj), status=200, headers={"Content-Type": "application/json"})
except TypeError as encode_err:
return {
"error": "Could not encode response from HPI function as JSON",
"exception": str(encode_err),
}, 400 | 5,358,553 |
def get_appliances(self) -> list:
"""Get all appliances from Orchestrator
.. list-table::
:header-rows: 1
* - Swagger Section
- Method
- Endpoint
* - appliance
- GET
- /appliance
:return: Returns list of dictionaries of each appliance
:rtype: list
"""
return self._get("/appliance") | 5,358,554 |
def delete_result_files(path, name):
""" Deletes opensees result files.
Parameters:
path (str): Path to the opensees input file.
name (str): Name of the structure.
Returns:
None
"""
out_path = os.path.join(path, name + '_output')
shutil.rmtree(out_path) | 5,358,555 |
def save_from_form(event, POST):
"""Save an event from form data."""
# save items
names = POST.getlist('item_name')
quantities = POST.getlist('item_quantity')
prices_per_unit = POST.getlist('item_price_per_unit')
funding_already_received =\
POST.getlist('item_funding_already_received')
categories = POST.getlist('item_category')
revenues = POST.getlist('item_revenue')
for item in event.item_set.all():
if not Grant.objects.filter(item=item):
item.delete()
zipped_items = zip(names, quantities, prices_per_unit,
funding_already_received, categories, revenues)
for name, quantity, price, funding, cat, rev in zipped_items:
if Item.objects.filter(event=event, name=name):
continue
funding = funding or 0
# set correct category letter
cat = cat.strip().upper()
for tup in CATEGORIES:
if tup[1].strip().upper() == cat:
cat = tup[0]
break
# Remove unwanted commas for int parsing
rev = rev.replace(",", "")
if name:
event.item_set.create(name=name,
quantity=quantity,
price_per_unit=price,
funding_already_received=funding,
category=cat,
revenue=int(rev))
# save questions
# delete existing answers
event.commonfollowupanswer_set.all().delete()
event.followupanswer_set.all().delete()
event.eligibilityanswer_set.all().delete()
event.commonfreeresponseanswer_set.all().delete()
event.freeresponseanswer_set.all().delete()
# clear existing funders to re-add new ones
event.applied_funders.clear()
# create new answers and save funders
# unchecked checkboxes will have neither answers nor funders
# associated with them
for k, v in POST.items():
if k.startswith('eligibility'):
q_id = re.search("[0-9]+", k).group(0)
question = EligibilityQuestion.objects.get(id=q_id)
event.eligibilityanswer_set.create(question=question,
event=event, answer='Y')
elif k.startswith('commonfollowup'):
q_id = re.search("[0-9]+", k).group(0)
question = CommonFollowupQuestion.objects.get(id=q_id)
event.commonfollowupanswer_set.create(question=question,
event=event, answer=v)
elif k.startswith('followup'):
q_id = re.search("[0-9]+", k).group(0)
question = FollowupQuestion.objects.get(id=q_id)
event.followupanswer_set.create(question=question,
event=event, answer=v)
elif k.startswith('commonfreeresponse'):
q_id = re.search("[0-9]+", k).group(0)
question = CommonFreeResponseQuestion.objects.get(id=q_id)
event.commonfreeresponseanswer_set.create(question=question,
event=event,
answer=v)
elif k.startswith('freeresponse'):
q_id = re.search("[0-9]+", k).group(0)
question = FreeResponseQuestion.objects.get(id=q_id)
event.freeresponseanswer_set.create(question=question,
event=event, answer=v)
elif k.startswith('funder'):
funder_id = re.search("[0-9]+", k).group(0)
funder = CFAUser.objects.get(id=funder_id)
event.applied_funders.add(funder) | 5,358,556 |
def is_str(element):
"""True if string else False"""
check = isinstance(element, str)
return check | 5,358,557 |
def worker(repo_url: str, project_path: str, docker_port: int, host_port: int):
"""Main worker that will:
- create required directories
- clone repository
- build Dockerfile
- start container
- remove container and temporary directory
"""
if not project_path:
project_path = mkdtemp()
else:
os.makedirs(project_path, exist_ok=True)
clone_git_repo(repo_url=repo_url, project_path=project_path)
docker_client = get_docker_client()
build_container(docker_client, project_path)
container = run_container(docker_client, docker_port, host_port)
with suppress(KeyboardInterrupt):
LOGGER.info("To stop container press `Ctrl+C`")
while True:
pass
clean_after_running(container, docker_client, project_path) | 5,358,558 |
def anonymize_and_streamline(old_file, target_folder):
"""
This function loads the edfs of a folder and
1. removes their birthdate and patient name
2. renames the channels to standardized channel names
    3. saves the files in another folder under a non-identifiable name
4. verifies that the new files have the same content as the old
"""
    # load the two csvs with the edfs that we don't process and those where the ECG is upside down
pre_coding_discard = [line[0] for line in misc.read_csv(cfg.edfs_discard) if line[2]=='1']
to_invert = [line[0] for line in misc.read_csv(cfg.edfs_invert)]
# Here we read the list of controls and patients with their age and gender
mappings = misc.read_csv(cfg.controls_csv)
mappings.extend(misc.read_csv(cfg.patients_csv))
mappings = dict([[name, {'gender':gender, 'age':age}] for name, gender, age,*_ in mappings])
# old name is the personalized file without file extension, e.g. thomas_smith(1)
old_name = ospath.splitext(ospath.basename(old_file))[0]
# new name is the codified version without extension e.g '123_45678'
new_name = codify(old_name)
# use a temporary file to write and then move it,
# this avoids half-written files that cannot be read later
tmp_name = tempfile.TemporaryFile(prefix='anonymize').name
if old_name in pre_coding_discard:
print('EDF is marked as corrupt and will be discarded')
return
# this is where the anonymized file will be stored
new_file = ospath.join(target_folder, new_name + '.edf')
if ospath.exists(new_file):
        print ('New file exists already {}'.format(new_file))
else:
# anonymize
print ('Writing {} from {}'.format(new_file, old_name))
assert ospath.isfile(old_file), f'{old_file} does not exist'
signals, signal_headers, header = sleep_utils.read_edf(old_file,
digital=True,
verbose=False)
# remove patient info
header['birthdate'] = ''
header['patientname'] = new_name
header['patientcode'] = new_name
header['gender'] = mappings[old_name]['gender']
header['age'] = mappings[old_name]['age']
# rename channels to a unified notation, e.g. EKG becomes ECG I
for shead in signal_headers:
ch = shead['label']
if ch in ch_mapping:
ch = ch_mapping[ch]
shead['label'] = ch
# Invert the ECG channel if necessary
if old_name in to_invert:
for i,sig in enumerate(signals):
label = signal_headers[i]['label'].lower()
if label == cfg.ecg_channel.lower():
signals[i] = -sig
        # we write to tmp to prevent corrupted files from being left behind
print ('Writing tmp for {}'.format(new_file))
sleep_utils.write_edf(tmp_name, signals, signal_headers, header,
digital=True, correct=True)
# verify that contents for both files match exactly
print ('Verifying tmp for {}'.format(new_file))
        # embarrassing hack, as dmin/dmax don't match in these files after inverting
if not old_name=='B0036':
sleep_utils.compare_edf(old_file, tmp_name, verbose=False)
# now we move the tmp file to its new location.
shutil.move(tmp_name, new_file)
# also copy additional file information ie hypnograms and kubios files
old_dir = ospath.dirname(old_file)
    pattern = old_name.replace('_m', '').replace('_w', '')  # remove gender marker from additional NT1 patients
add_files = ospath.list_files(old_dir, patterns=[f'{pattern}*txt', f'{pattern}*dat', f'{pattern}*mat'])
for add_file in add_files:
# e.g. .mat or .npy etc etc
new_add_file = ospath.join(target_folder,
ospath.basename(add_file.replace(pattern, new_name)))
if ospath.exists(new_add_file):continue
# hypnograms will be copied to .hypno
try:
new_add_file = new_add_file.replace('-Schlafprofil', '')
new_add_file = new_add_file.replace('_sl','')
new_add_file = new_add_file.replace('.txt', '.hypno').replace('.dat', '.hypno')
shutil.copy(add_file, new_add_file)
except Exception as e:
print(e)
return old_name, new_name | 5,358,559 |
def test_normal():
"""Test multi_map with no default_dict."""
# first char is key
data = iter(SEQ)
res = multi_map(lambda x: x[0], data)
assert res == {'A': ['APPLE', 'AARDVARK'],
'B': ['BANANA']}
assert list(data) == []
with pytest.raises(KeyError):
tmp = res['C'] # pylint: disable=unused-variable
# second char is key
data = iter(SEQ)
res = multi_map(lambda x: x[1], data)
assert res == {'P': ['APPLE'],
'A': ['AARDVARK', 'BANANA']}
assert list(data) == []
with pytest.raises(KeyError):
tmp = res['C'] # pylint: disable=unused-variable
# third char is key
data = iter(SEQ)
res = multi_map(lambda x: x[2], data)
assert res == {'P': ['APPLE'],
'R': ['AARDVARK'],
'N': ['BANANA']}
assert list(data) == []
with pytest.raises(KeyError):
tmp = res['C'] # pylint: disable=unused-variable
# identity is key
data = iter(SEQ)
res = multi_map(lambda x: x, data)
assert res == {'APPLE': ['APPLE'],
'AARDVARK': ['AARDVARK'],
'BANANA': ['BANANA']}
assert list(data) == []
with pytest.raises(KeyError):
tmp = res['C'] # pylint: disable=unused-variable
# fixed key
data = iter(SEQ)
res = multi_map(lambda x: 'B', data)
assert res == {'B': ['APPLE', 'AARDVARK', 'BANANA']}
assert list(data) == []
with pytest.raises(KeyError):
tmp = res['C'] # pylint: disable=unused-variable
# empty
data = ()
res = multi_map(lambda x: x[0], data)
assert res == {}
with pytest.raises(KeyError):
tmp = res['C'] # pylint: disable=unused-variable | 5,358,560 |
def test_registry_delete_key_async(cbcsdk_mock):
"""Test the response to the 'reg delete key' command."""
cbcsdk_mock.mock_request('POST', '/appservices/v6/orgs/test/liveresponse/sessions', SESSION_INIT_RESP)
cbcsdk_mock.mock_request('GET', '/appservices/v6/orgs/test/liveresponse/sessions/1:2468', SESSION_POLL_RESP)
cbcsdk_mock.mock_request('GET', '/appservices/v6/orgs/test/devices/2468', DEVICE_RESPONSE)
cbcsdk_mock.mock_request('POST', '/appservices/v6/orgs/test/liveresponse/sessions/1:2468/commands',
REG_DELETE_KEY_START_RESP)
cbcsdk_mock.mock_request('GET', '/appservices/v6/orgs/test/liveresponse/sessions/1:2468/commands/64',
REG_DELETE_KEY_END_RESP)
cbcsdk_mock.mock_request('DELETE', '/appservices/v6/orgs/test/liveresponse/sessions/1:2468', None)
manager = LiveResponseSessionManager(cbcsdk_mock.api)
with manager.request_session(2468) as session:
c_id, _ = session.delete_registry_key('HKLM\\SYSTEM\\CurrentControlSet\\services\\ACPI\\Nonsense',
async_mode=True)
assert c_id == 64 | 5,358,561 |
def main():
"""
    This program asks the user for a secret number giving the shift of the alphabet, then for a
    ciphered word or sentence based on that shift.
    The function 'deciphered(x, y)' then transforms the ciphered characters back into the original word or sentence.
"""
secret = int(input('Secret number: '))
string = input('What\'s the ciphered string?')
string = string.upper()
print('The deciphered string is: ' + deciphered(after_move(secret), string)) | 5,358,562 |
def readiter(inputFile, *args):
"""Returns an iterator that calls read(*args) on the inputFile."""
while True:
ch = inputFile.read(*args)
if ch:
yield ch
        else:
            return  # PEP 479: raising StopIteration inside a generator is an error in Python 3.7+
def main() -> None:
"""main function"""
# Look for default ini file in "/etc/actworkers.ini" and ~/config/actworkers/actworkers.ini
# (or replace .config with $XDG_CONFIG_DIR if set)
args = worker.handle_args(parseargs())
actapi = worker.init_act(args)
if not args.apikey:
worker.fatal("You must specify --apikey on command line or in config file")
in_data = sys.stdin.read().strip()
proxies = {
'http': args.proxy_string,
'https': args.proxy_string
} if args.proxy_string else None
vtapi = VirusTotalApi(args.apikey, proxies=proxies)
if args.hexdigest:
handle_hexdigest(actapi, vtapi, in_data, output_format=args.output_format)
if args.ip:
handle_ip(actapi, vtapi, in_data, output_format=args.output_format)
if args.domain:
handle_domain(actapi, vtapi, in_data, output_format=args.output_format) | 5,358,564 |
def deal_text(text: str) -> str:
"""deal the text
Args:
text (str): text need to be deal
Returns:
str: dealed text
"""
text = " "+text
text = text.replace("。","。\n ")
text = text.replace("?","?\n ")
text = text.replace("!","!\n ")
text = text.replace(";",";\n ")
return text | 5,358,565 |
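For example, each full-width sentence terminator starts a new line, with every output line prefixed by a space:
sample = "今天天气很好。你吃饭了吗?我们走吧!"
print(deal_text(sample))
#  今天天气很好。
#  你吃饭了吗?
#  我们走吧!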
def extract_zip(src, dest):
"""extract a zip file"""
bundle = zipfile.ZipFile(src)
namelist = bundle.namelist()
for name in namelist:
filename = os.path.realpath(os.path.join(dest, name))
if name.endswith('/'):
os.makedirs(filename)
else:
path = os.path.dirname(filename)
if not os.path.isdir(path):
os.makedirs(path)
_dest = open(filename, 'wb')
_dest.write(bundle.read(name))
_dest.close()
bundle.close()
return namelist | 5,358,566 |
def refresh_cuda_memory():
"""
Re-allocate all cuda memory to help alleviate fragmentation
"""
# Run a full garbage collect first so any dangling tensors are released
gc.collect()
# Then move all tensors to the CPU
locations = {}
for obj in gc.get_objects():
if not isinstance(obj, torch.Tensor):
continue
locations[obj] = obj.device
obj.data = obj.data.cpu()
if isinstance(obj, torch.nn.Parameter) and obj.grad is not None:
obj.grad.data = obj.grad.cpu()
# Now empty the cache to flush the allocator
torch.cuda.empty_cache()
# Finally move the tensors back to their associated GPUs
for tensor, device in locations.items():
tensor.data = tensor.to(device)
if isinstance(tensor, torch.nn.Parameter) and tensor.grad is not None:
tensor.grad.data = tensor.grad.to(device) | 5,358,567 |
def _find_timepoints_1D(single_stimulus_code):
"""
Find the indexes where the value of single_stimulus_code turn from zero to non_zero
single_stimulus_code : 1-D array
>>> _find_timepoints_1D([5,5,0,0,4,4,4,0,0,1,0,2,0])
array([ 0, 4, 9, 11])
>>> _find_timepoints_1D([0,0,1,2,3,0,1,0,0])
array([2, 6])
>>> _find_timepoints_1D([0,0,1,2,0,1])
array([2, 5])
>>> _find_timepoints_1D([5,0,0,1,2,5])
array([0, 3])
"""
flag = True # whether have seen 0 so far
timepoints = []
for index, timepoint in enumerate(single_stimulus_code):
if timepoint != 0 and flag:
timepoints.append(index)
flag = False
if timepoint == 0 and not flag:
flag = True
return np.array(timepoints) | 5,358,568 |
def stiffness_tric(
components: np.ndarray = None,
components_d: dict = None
) -> np.ndarray:
"""Generate triclinic fourth-order stiffness tensor.
Parameters
----------
components : np.ndarray
21 components of triclinic tensor, see
stiffness_component_dict
components_d : dictionary
dictionary with 21 components
of triclinic tensor, see
stiffness_component_dict
Returns
-------
np.ndarray
Fourth-order triclinic tensor with minor
and major symmetries
"""
out = np.zeros(shape=[3, 3, 3, 3])
if not isinstance(components, type(None)):
components_d = stiffness_component_dict(components)
for k, v in components_d.items():
i = [int(s)-1 for s in k]
out[i[0], i[1], i[2], i[3]] = v
# tt_l
out[i[1], i[0], i[2], i[3]] = v
# tt_r
out[i[0], i[1], i[3], i[2]] = v
out[i[1], i[0], i[3], i[2]] = v # + tt_l
# tt_m
out[i[2], i[3], i[0], i[1]] = v
out[i[3], i[2], i[0], i[1]] = v # + tt_l
out[i[2], i[3], i[1], i[0]] = v # + tt_r
out[i[3], i[2], i[1], i[0]] = v # + tt_l + tt_r
return out | 5,358,569 |
def get_dataset(
dataset_name: str,
path: Optional[Path] = None,
regenerate: bool = False,
) -> TrainDatasets:
"""
Get the repository dataset.
Currently only [Retail Dataset](https://archive.ics.uci.edu/ml/datasets/online+retail) is available
Parameters:
dataset_name:
name of the dataset, for instance "retail"
regenerate:
whether to regenerate the dataset even if a local file is present.
If this flag is False and the file is present, the dataset will not
be downloaded again.
path:
where the dataset should be saved
Returns:
dataset obtained by either downloading or reloading from local file.
"""
if path is None:
path = default_dataset_path
dataset_path = materialize_dataset(dataset_name, path, regenerate)
return load_datasets(
metadata=dataset_path,
train=dataset_path / "train",
test=dataset_path / "test",
) | 5,358,570 |
def get_info_safe(obj, attr, default=None):
"""safely retrieve @attr from @obj"""
    try:
        oval = getattr(obj, attr)
    except AttributeError:
        logthis("Attribute does not exist, using default", prefix=attr, suffix=default, loglevel=LL.WARNING)
        oval = default
return oval | 5,358,571 |
def mkviewcolbg(view=None, header=u'', colno=None, cb=None,
width=None, halign=None, calign=None,
expand=False, editcb=None, maxwidth=None):
"""Return a text view column."""
i = gtk.CellRendererText()
if cb is not None:
i.set_property(u'editable', True)
i.connect(u'edited', cb, colno)
if calign is not None:
i.set_property(u'xalign', calign)
j = gtk.TreeViewColumn(header, i, background=colno)
if halign is not None:
j.set_alignment(halign)
if expand:
if width is not None:
j.set_min_width(width)
j.set_expand(True)
else:
if width is not None:
j.set_min_width(width)
if maxwidth is not None:
j.set_max_width(maxwidth)
view.append_column(j)
if editcb is not None:
i.connect(u'editing-started', editcb)
return i | 5,358,572 |
def list2str(lst, indent=0, brackets=True, quotes=True):
"""
    Generate a Python syntax list string with an indentation
    :param lst: list
    :param indent: indentation as integer
:param brackets: surround the list expression by brackets as boolean
:param quotes: surround each item with quotes
:return: string
"""
if quotes:
lst_str = str(lst)
if not brackets:
lst_str = lst_str[1:-1]
else:
lst_str = ', '.join(lst)
if brackets:
lst_str = '[' + lst_str + ']'
lb = ',\n' + indent*' '
return lst_str.replace(', ', lb) | 5,358,573 |
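For example, generating an indented list literal:
names = ["alpha", "beta", "gamma"]
print(list2str(names, indent=4))
# ['alpha',
#     'beta',
#     'gamma']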
def full_path(path):
"""
Get an absolute path.
"""
if path[0] == "/":
return path
return os.path.realpath(
os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", "..", "..", path)
) | 5,358,574 |
def _roi_pool_shape(op):
"""Shape function for the RoiPool op.
"""
dims_data = op.inputs[0].get_shape().as_list()
channels = dims_data[3]
dims_rois = op.inputs[1].get_shape().as_list()
num_rois = dims_rois[0]
pooled_height = op.get_attr('pooled_height')
pooled_width = op.get_attr('pooled_width')
output_shape = tf.TensorShape([num_rois, pooled_height, pooled_width, channels])
return [output_shape, output_shape] | 5,358,575 |
def get_users(*, limit: int, order_by: str = "id", offset: Optional[str] = None) -> APIResponse:
"""Get users"""
appbuilder = current_app.appbuilder
session = appbuilder.get_session
total_entries = session.query(func.count(User.id)).scalar()
to_replace = {"user_id": "id"}
allowed_filter_attrs = [
"user_id",
'id',
"first_name",
"last_name",
"user_name",
"email",
"is_active",
"role",
]
query = session.query(User)
query = apply_sorting(query, order_by, to_replace, allowed_filter_attrs)
users = query.offset(offset).limit(limit).all()
return user_collection_schema.dump(UserCollection(users=users, total_entries=total_entries)) | 5,358,576 |
def extract_sicd(
img_header: Union[ImageSegmentHeader, ImageSegmentHeader0],
        transpose: bool,
nitf_header: Optional[Union[NITFHeader, NITFHeader0]] = None) -> SICDType:
"""
Extract the best available SICD structure from relevant nitf header structures.
Parameters
----------
img_header : ImageSegmentHeader|ImageSegmentHeader0
transpose : bool
nitf_header : None|NITFHeader|NITFHeader0
Returns
-------
SICDType
"""
def get_collection_info() -> CollectionInfoType:
isorce = img_header.ISORCE.strip()
collector_name = None if len(isorce) < 1 else isorce
iid2 = img_header.IID2.strip()
core_name = img_header.IID1.strip() if len(iid2) < 1 else iid2
class_str = img_header.Security.CLAS
if class_str == 'T':
classification = 'TOPSECRET'
elif class_str == 'S':
classification = 'SECRET'
elif class_str == 'C':
classification = 'CONFIDENTIAL'
elif class_str == 'U':
classification = 'UNCLASSIFIED'
else:
classification = ''
ctlh = img_header.Security.CTLH.strip()
        if len(ctlh) > 0:
            classification += '//' + ctlh
        code = img_header.Security.CODE.strip()
        if len(code) > 0:
            classification += '//' + code
return CollectionInfoType(
CollectorName=collector_name,
CoreName=core_name,
Classification=classification)
def get_image_data() -> ImageDataType:
pvtype = img_header.PVTYPE
if pvtype == 'C':
if img_header.NBPP != 64:
logger.warning(
'This NITF has complex bands that are not 64-bit.\n\t'
'This is not currently supported.')
pixel_type = 'RE32F_IM32F'
elif pvtype == 'R':
if img_header.NBPP == 64:
logger.warning(
'The real/imaginary data in the NITF are stored as 64-bit floating point.\n\t'
'The closest Pixel Type, RE32F_IM32F, will be used,\n\t'
'but there may be overflow issues if converting this file.')
pixel_type = 'RE32F_IM32F'
elif pvtype == 'SI':
pixel_type = 'RE16I_IM16I'
else:
raise ValueError('Got unhandled PVTYPE {}'.format(pvtype))
if transpose:
rows = img_header.NCOLS
cols = img_header.NROWS
else:
rows = img_header.NROWS
cols = img_header.NCOLS
return ImageDataType(
PixelType=pixel_type,
NumRows=rows,
NumCols=cols,
FirstRow=0,
FirstCol=0,
FullImage=(rows, cols),
SCPPixel=(0.5 * rows, 0.5 * cols))
def append_country_code(cc) -> None:
if len(cc) > 0:
if the_sicd.CollectionInfo is None:
the_sicd.CollectionInfo = CollectionInfoType(CountryCodes=[cc, ])
elif the_sicd.CollectionInfo.CountryCodes is None:
the_sicd.CollectionInfo.CountryCodes = [cc, ]
elif cc not in the_sicd.CollectionInfo.CountryCodes:
the_sicd.CollectionInfo.CountryCodes.append(cc)
def set_image_corners(icps: numpy.ndarray, override: bool = False) -> None:
if the_sicd.GeoData is None:
the_sicd.GeoData = GeoDataType(ImageCorners=icps)
elif the_sicd.GeoData.ImageCorners is None or override:
the_sicd.GeoData.ImageCorners = icps
def set_arp_position(arp_ecf: numpy.ndarray, override: bool = False) -> None:
if the_sicd.SCPCOA is None:
the_sicd.SCPCOA = SCPCOAType(ARPPos=arp_ecf)
elif override:
# prioritize this information first - it should be more reliable than other sources
the_sicd.SCPCOA.ARPPos = arp_ecf
def set_scp(scp_ecf: numpy.ndarray, scp_pixel: Union[numpy.ndarray, list, tuple], override: bool = False) -> None:
def set_scppixel():
if the_sicd.ImageData is None:
the_sicd.ImageData = ImageDataType(SCPPixel=scp_pixel)
else:
the_sicd.ImageData.SCPPixel = scp_pixel
if the_sicd.GeoData is None:
the_sicd.GeoData = GeoDataType(SCP=SCPType(ECF=scp_ecf))
set_scppixel()
elif the_sicd.GeoData.SCP is None or override:
the_sicd.GeoData.SCP = SCPType(ECF=scp_ecf)
set_scppixel()
def set_collect_start(
collect_start: Union[str, datetime, numpy.datetime64], override: bool = False) -> None:
if the_sicd.Timeline is None:
the_sicd.Timeline = TimelineType(CollectStart=collect_start)
elif the_sicd.Timeline.CollectStart is None or override:
the_sicd.Timeline.CollectStart = collect_start
def set_uvects(row_unit: numpy.ndarray, col_unit: numpy.ndarray) -> None:
if the_sicd.Grid is None:
the_sicd.Grid = GridType(
Row=DirParamType(UVectECF=row_unit),
Col=DirParamType(UVectECF=col_unit))
return
if the_sicd.Grid.Row is None:
the_sicd.Grid.Row = DirParamType(UVectECF=row_unit)
elif the_sicd.Grid.Row.UVectECF is None:
the_sicd.Grid.Row.UVectECF = row_unit
if the_sicd.Grid.Col is None:
the_sicd.Grid.Col = DirParamType(UVectECF=col_unit)
elif the_sicd.Grid.Col.UVectECF is None:
the_sicd.Grid.Col.UVectECF = col_unit
def try_CMETAA() -> None:
# noinspection PyTypeChecker
tre = None if tres is None else tres['CMETAA'] # type: CMETAA
if tre is None:
return
cmetaa = tre.DATA
if the_sicd.GeoData is None:
the_sicd.GeoData = GeoDataType()
if the_sicd.SCPCOA is None:
the_sicd.SCPCOA = SCPCOAType()
if the_sicd.Grid is None:
the_sicd.Grid = GridType()
if the_sicd.Timeline is None:
the_sicd.Timeline = TimelineType()
if the_sicd.RadarCollection is None:
the_sicd.RadarCollection = RadarCollectionType()
if the_sicd.ImageFormation is None:
the_sicd.ImageFormation = ImageFormationType()
the_sicd.SCPCOA.SCPTime = 0.5*float(cmetaa.WF_CDP)
the_sicd.GeoData.SCP = SCPType(ECF=tre.get_scp())
the_sicd.SCPCOA.ARPPos = tre.get_arp()
the_sicd.SCPCOA.SideOfTrack = cmetaa.CG_LD.strip().upper()
the_sicd.SCPCOA.SlantRange = float(cmetaa.CG_SRAC)
the_sicd.SCPCOA.DopplerConeAng = float(cmetaa.CG_CAAC)
the_sicd.SCPCOA.GrazeAng = float(cmetaa.CG_GAAC)
the_sicd.SCPCOA.IncidenceAng = 90 - float(cmetaa.CG_GAAC)
if hasattr(cmetaa, 'CG_TILT'):
the_sicd.SCPCOA.TwistAng = float(cmetaa.CG_TILT)
if hasattr(cmetaa, 'CG_SLOPE'):
the_sicd.SCPCOA.SlopeAng = float(cmetaa.CG_SLOPE)
the_sicd.ImageData.SCPPixel = [int(cmetaa.IF_DC_IS_COL), int(cmetaa.IF_DC_IS_ROW)]
img_corners = tre.get_image_corners()
if img_corners is not None:
the_sicd.GeoData.ImageCorners = img_corners
if cmetaa.CMPLX_SIGNAL_PLANE.upper() == 'S':
the_sicd.Grid.ImagePlane = 'SLANT'
elif cmetaa.CMPLX_SIGNAL_PLANE.upper() == 'G':
the_sicd.Grid.ImagePlane = 'GROUND'
else:
logger.warning(
'Got unexpected CMPLX_SIGNAL_PLANE value {},\n\t'
'setting ImagePlane to SLANT'.format(cmetaa.CMPLX_SIGNAL_PLANE))
the_sicd.Grid.Row = DirParamType(
SS=float(cmetaa.IF_RSS),
ImpRespWid=float(cmetaa.IF_RGRES),
Sgn=1 if cmetaa.IF_RFFTS.strip() == '-' else -1, # opposite sign convention
ImpRespBW=float(cmetaa.IF_RFFT_SAMP)/(float(cmetaa.IF_RSS)*float(cmetaa.IF_RFFT_TOT)))
the_sicd.Grid.Col = DirParamType(
SS=float(cmetaa.IF_AZSS),
ImpRespWid=float(cmetaa.IF_AZRES),
Sgn=1 if cmetaa.IF_AFFTS.strip() == '-' else -1, # opposite sign convention
ImpRespBW=float(cmetaa.IF_AZFFT_SAMP)/(float(cmetaa.IF_AZSS)*float(cmetaa.IF_AZFFT_TOT)))
cmplx_weight = cmetaa.CMPLX_WEIGHT.strip().upper()
if cmplx_weight == 'UWT':
the_sicd.Grid.Row.WgtType = WgtTypeType(WindowName='UNIFORM')
the_sicd.Grid.Col.WgtType = WgtTypeType(WindowName='UNIFORM')
elif cmplx_weight == 'HMW':
the_sicd.Grid.Row.WgtType = WgtTypeType(WindowName='HAMMING')
the_sicd.Grid.Col.WgtType = WgtTypeType(WindowName='HAMMING')
elif cmplx_weight == 'HNW':
the_sicd.Grid.Row.WgtType = WgtTypeType(WindowName='HANNING')
the_sicd.Grid.Col.WgtType = WgtTypeType(WindowName='HANNING')
elif cmplx_weight == 'TAY':
the_sicd.Grid.Row.WgtType = WgtTypeType(
WindowName='TAYLOR',
Parameters={
'SLL': '-{0:d}'.format(int(cmetaa.CMPLX_RNG_SLL)),
'NBAR': '{0:d}'.format(int(cmetaa.CMPLX_RNG_TAY_NBAR))})
the_sicd.Grid.Col.WgtType = WgtTypeType(
WindowName='TAYLOR',
Parameters={
'SLL': '-{0:d}'.format(int(cmetaa.CMPLX_AZ_SLL)),
'NBAR': '{0:d}'.format(int(cmetaa.CMPLX_AZ_TAY_NBAR))})
else:
logger.warning(
'Got unsupported CMPLX_WEIGHT value {}.\n\tThe resulting SICD will '
'not have valid weight array populated'.format(cmplx_weight))
the_sicd.Grid.Row.define_weight_function()
the_sicd.Grid.Col.define_weight_function()
# noinspection PyBroadException
try:
date_str = cmetaa.T_UTC_YYYYMMMDD
time_str = cmetaa.T_HHMMSSUTC
date_time = _iso_date_format.format(
date_str[:4], date_str[4:6], date_str[6:8],
time_str[:2], time_str[2:4], time_str[4:6])
the_sicd.Timeline.CollectStart = numpy.datetime64(date_time, 'us')
except Exception:
logger.info('Failed extracting start time from CMETAA')
pass
the_sicd.Timeline.CollectDuration = float(cmetaa.WF_CDP)
the_sicd.Timeline.IPP = [
IPPSetType(TStart=0,
TEnd=float(cmetaa.WF_CDP),
IPPStart=0,
IPPEnd=numpy.floor(float(cmetaa.WF_CDP)*float(cmetaa.WF_PRF)),
IPPPoly=[0, float(cmetaa.WF_PRF)])]
the_sicd.RadarCollection.TxFrequency = TxFrequencyType(
Min=float(cmetaa.WF_SRTFR),
Max=float(cmetaa.WF_ENDFR))
the_sicd.RadarCollection.TxPolarization = cmetaa.POL_TR.upper()
the_sicd.RadarCollection.Waveform = [WaveformParametersType(
TxPulseLength=float(cmetaa.WF_WIDTH),
TxRFBandwidth=float(cmetaa.WF_BW),
TxFreqStart=float(cmetaa.WF_SRTFR),
TxFMRate=float(cmetaa.WF_CHRPRT)*1e12)]
tx_rcv_pol = '{}:{}'.format(cmetaa.POL_TR.upper(), cmetaa.POL_RE.upper())
the_sicd.RadarCollection.RcvChannels = [
ChanParametersType(TxRcvPolarization=tx_rcv_pol)]
the_sicd.ImageFormation.TxRcvPolarizationProc = tx_rcv_pol
if_process = cmetaa.IF_PROCESS.strip().upper()
if if_process == 'PF':
the_sicd.ImageFormation.ImageFormAlgo = 'PFA'
scp_ecf = tre.get_scp()
fpn_ned = numpy.array(
[float(cmetaa.CG_FPNUV_X), float(cmetaa.CG_FPNUV_Y), float(cmetaa.CG_FPNUV_Z)], dtype='float64')
ipn_ned = numpy.array(
[float(cmetaa.CG_IDPNUVX), float(cmetaa.CG_IDPNUVY), float(cmetaa.CG_IDPNUVZ)], dtype='float64')
fpn_ecf = ned_to_ecf(fpn_ned, scp_ecf, absolute_coords=False)
ipn_ecf = ned_to_ecf(ipn_ned, scp_ecf, absolute_coords=False)
the_sicd.PFA = PFAType(FPN=fpn_ecf, IPN=ipn_ecf)
elif if_process in ['RM', 'CD']:
the_sicd.ImageFormation.ImageFormAlgo = 'RMA'
# the remainder of this is guesswork to define required fields
the_sicd.ImageFormation.TStartProc = 0 # guess work
the_sicd.ImageFormation.TEndProc = float(cmetaa.WF_CDP)
the_sicd.ImageFormation.TxFrequencyProc = TxFrequencyProcType(
MinProc=float(cmetaa.WF_SRTFR), MaxProc=float(cmetaa.WF_ENDFR))
# all remaining guess work
the_sicd.ImageFormation.STBeamComp = 'NO'
the_sicd.ImageFormation.ImageBeamComp = 'SV' if cmetaa.IF_BEAM_COMP[0] == 'Y' else 'NO'
the_sicd.ImageFormation.AzAutofocus = 'NO' if cmetaa.AF_TYPE[0] == 'N' else 'SV'
the_sicd.ImageFormation.RgAutofocus = 'NO'
def try_AIMIDA() -> None:
tre = None if tres is None else tres['AIMIDA']
if tre is None:
return
aimida = tre.DATA
append_country_code(aimida.COUNTRY.strip())
create_time = datetime.strptime(aimida.CREATION_DATE, '%d%b%y')
if the_sicd.ImageCreation is None:
the_sicd.ImageCreation = ImageCreationType(DateTime=create_time)
elif the_sicd.ImageCreation.DateTime is None:
the_sicd.ImageCreation.DateTime = create_time
collect_start = datetime.strptime(aimida.MISSION_DATE+aimida.TIME, '%d%b%y%H%M')
set_collect_start(collect_start, override=False)
def try_AIMIDB() -> None:
tre = None if tres is None else tres['AIMIDB']
if tre is None:
return
aimidb = tre.DATA
append_country_code(aimidb.COUNTRY.strip())
if the_sicd.ImageFormation is not None and the_sicd.ImageFormation.SegmentIdentifier is None:
the_sicd.ImageFormation.SegmentIdentifier = aimidb.CURRENT_SEGMENT.strip()
date_str = aimidb.ACQUISITION_DATE
collect_start = numpy.datetime64(_iso_date_format.format(
date_str[:4], date_str[4:6], date_str[6:8],
date_str[8:10], date_str[10:12], date_str[12:14]), 'us')
set_collect_start(collect_start, override=False)
def try_ACFT() -> None:
if tres is None:
return
tre = tres['ACFTA']
if tre is None:
tre = tres['ACFTB']
if tre is None:
return
acft = tre.DATA
sensor_id = acft.SENSOR_ID.strip()
if len(sensor_id) > 1:
if the_sicd.CollectionInfo is None:
the_sicd.CollectionInfo = CollectionInfoType(CollectorName=sensor_id)
elif the_sicd.CollectionInfo.CollectorName is None:
the_sicd.CollectionInfo.CollectorName = sensor_id
row_ss = float(acft.ROW_SPACING)
col_ss = float(acft.COL_SPACING)
if hasattr(acft, 'ROW_SPACING_UNITS') and acft.ROW_SPACING_UNITS.strip().lower() == 'f':
row_ss *= foot
if hasattr(acft, 'COL_SPACING_UNITS') and acft.COL_SPACING_UNITS.strip().lower() == 'f':
col_ss *= foot
# NB: these values are actually ground plane values, and should be
# corrected to slant plane if possible
if the_sicd.SCPCOA is not None:
if the_sicd.SCPCOA.GrazeAng is not None:
col_ss *= numpy.cos(numpy.deg2rad(the_sicd.SCPCOA.GrazeAng))
if the_sicd.SCPCOA.TwistAng is not None:
row_ss *= numpy.cos(numpy.deg2rad(the_sicd.SCPCOA.TwistAng))
if the_sicd.Grid is None:
the_sicd.Grid = GridType(Row=DirParamType(SS=row_ss), Col=DirParamType(SS=col_ss))
return
if the_sicd.Grid.Row is None:
the_sicd.Grid.Row = DirParamType(SS=row_ss)
elif the_sicd.Grid.Row.SS is None:
the_sicd.Grid.Row.SS = row_ss
if the_sicd.Grid.Col is None:
the_sicd.Grid.Col = DirParamType(SS=col_ss)
elif the_sicd.Grid.Col.SS is None:
the_sicd.Grid.Col.SS = col_ss
def try_BLOCKA() -> None:
tre = None if tres is None else tres['BLOCKA']
if tre is None:
return
blocka = tre.DATA
icps = []
for fld_name in ['FRFC_LOC', 'FRLC_LOC', 'LRLC_LOC', 'LRFC_LOC']:
value = getattr(blocka, fld_name)
# noinspection PyBroadException
try:
lat_val = float(value[:10])
lon_val = float(value[10:21])
except ValueError:
lat_val = lat_lon_parser(value[:10])
lon_val = lat_lon_parser(value[10:21])
icps.append([lat_val, lon_val])
set_image_corners(icps, override=False)
def try_MPDSRA() -> None:
def valid_array(arr):
return numpy.all(numpy.isfinite(arr)) and numpy.any(arr != 0)
tre = None if tres is None else tres['MPDSRA']
if tre is None:
return
mpdsra = tre.DATA
scp_ecf = foot*numpy.array(
[float(mpdsra.ORO_X), float(mpdsra.ORO_Y), float(mpdsra.ORO_Z)], dtype='float64')
if valid_array(scp_ecf):
set_scp(scp_ecf, (int(mpdsra.ORP_COLUMN) - 1, int(mpdsra.ORP_ROW) - 1), override=False)
arp_pos_ned = foot*numpy.array(
[float(mpdsra.ARP_POS_N), float(mpdsra.ARP_POS_E), float(mpdsra.ARP_POS_D)], dtype='float64')
arp_vel_ned = foot*numpy.array(
[float(mpdsra.ARP_VEL_N), float(mpdsra.ARP_VEL_E), float(mpdsra.ARP_VEL_D)], dtype='float64')
arp_acc_ned = foot*numpy.array(
[float(mpdsra.ARP_ACC_N), float(mpdsra.ARP_ACC_E), float(mpdsra.ARP_ACC_D)], dtype='float64')
arp_pos = ned_to_ecf(arp_pos_ned, scp_ecf, absolute_coords=True) if valid_array(arp_pos_ned) else None
set_arp_position(arp_pos, override=False)
arp_vel = ned_to_ecf(arp_vel_ned, scp_ecf, absolute_coords=False) if valid_array(arp_vel_ned) else None
if the_sicd.SCPCOA.ARPVel is None:
the_sicd.SCPCOA.ARPVel = arp_vel
arp_acc = ned_to_ecf(arp_acc_ned, scp_ecf, absolute_coords=False) if valid_array(arp_acc_ned) else None
if the_sicd.SCPCOA.ARPAcc is None:
the_sicd.SCPCOA.ARPAcc = arp_acc
if the_sicd.PFA is not None and the_sicd.PFA.FPN is None:
# TODO: is this already in meters?
fpn_ecf = numpy.array(
[float(mpdsra.FOC_X), float(mpdsra.FOC_Y), float(mpdsra.FOC_Z)], dtype='float64') # *foot
if valid_array(fpn_ecf):
the_sicd.PFA.FPN = fpn_ecf
def try_MENSRB() -> None:
tre = None if tres is None else tres['MENSRB']
if tre is None:
return
mensrb = tre.DATA
arp_llh = numpy.array(
[lat_lon_parser(mensrb.ACFT_LOC[:12]),
lat_lon_parser(mensrb.ACFT_LOC[12:25]),
foot*float(mensrb.ACFT_ALT)], dtype='float64')
scp_llh = numpy.array(
[lat_lon_parser(mensrb.RP_LOC[:12]),
lat_lon_parser(mensrb.RP_LOC[12:25]),
foot*float(mensrb.RP_ELV)], dtype='float64')
# TODO: handle the conversion from msl to hae
arp_ecf = geodetic_to_ecf(arp_llh)
scp_ecf = geodetic_to_ecf(scp_llh)
set_arp_position(arp_ecf, override=True)
set_scp(scp_ecf, (int(mensrb.RP_COL)-1, int(mensrb.RP_ROW)-1), override=False)
row_unit_ned = numpy.array(
[float(mensrb.C_R_NC), float(mensrb.C_R_EC), float(mensrb.C_R_DC)], dtype='float64')
col_unit_ned = numpy.array(
[float(mensrb.C_AZ_NC), float(mensrb.C_AZ_EC), float(mensrb.C_AZ_DC)], dtype='float64')
set_uvects(ned_to_ecf(row_unit_ned, scp_ecf, absolute_coords=False),
ned_to_ecf(col_unit_ned, scp_ecf, absolute_coords=False))
def try_MENSRA() -> None:
tre = None if tres is None else tres['MENSRA']
if tre is None:
return
mensra = tre.DATA
arp_llh = numpy.array(
[lat_lon_parser(mensra.ACFT_LOC[:10]),
lat_lon_parser(mensra.ACFT_LOC[10:21]),
foot*float(mensra.ACFT_ALT)], dtype='float64')
scp_llh = numpy.array(
[lat_lon_parser(mensra.CP_LOC[:10]),
lat_lon_parser(mensra.CP_LOC[10:21]),
foot*float(mensra.CP_ALT)], dtype='float64')
# TODO: handle the conversion from msl to hae
arp_ecf = geodetic_to_ecf(arp_llh)
scp_ecf = geodetic_to_ecf(scp_llh)
set_arp_position(arp_ecf, override=True)
# TODO: is this already zero based?
set_scp(geodetic_to_ecf(scp_llh), (int(mensra.CCRP_COL), int(mensra.CCRP_ROW)), override=False)
row_unit_ned = numpy.array(
[float(mensra.C_R_NC), float(mensra.C_R_EC), float(mensra.C_R_DC)], dtype='float64')
col_unit_ned = numpy.array(
[float(mensra.C_AZ_NC), float(mensra.C_AZ_EC), float(mensra.C_AZ_DC)], dtype='float64')
set_uvects(ned_to_ecf(row_unit_ned, scp_ecf, absolute_coords=False),
ned_to_ecf(col_unit_ned, scp_ecf, absolute_coords=False))
def extract_corners() -> None:
icps = extract_image_corners(img_header)
if icps is None:
return
# TODO: include symmetry transform issue
set_image_corners(icps, override=False)
def extract_start() -> None:
# noinspection PyBroadException
try:
date_str = img_header.IDATIM
collect_start = numpy.datetime64(
_iso_date_format.format(
date_str[:4], date_str[4:6], date_str[6:8],
date_str[8:10], date_str[10:12], date_str[12:14]), 'us')
except Exception:
logger.info('failed extracting start time from IDATIM tre')
return
set_collect_start(collect_start, override=False)
# noinspection PyUnresolvedReferences
tres = None if img_header.ExtendedHeader.data is None \
else img_header.ExtendedHeader.data # type: Union[None, TREList]
collection_info = get_collection_info()
image_data = get_image_data()
the_sicd = SICDType(
CollectionInfo=collection_info,
ImageData=image_data)
# apply the various tres and associated logic
# NB: this should generally be in order of preference
try_CMETAA()
try_AIMIDB()
try_AIMIDA()
try_ACFT()
try_BLOCKA()
try_MPDSRA()
try_MENSRA()
try_MENSRB()
extract_corners()
extract_start()
return the_sicd | 5,358,577 |
def ppo_clip_policy_loss(
logps: torch.Tensor,
logps_old: torch.Tensor,
advs: torch.Tensor,
clipratio: Optional[float] = 0.2
) -> Tuple[torch.Tensor, float]:
"""
Loss function for a PPO-clip policy.
See paper for full loss function math: https://arxiv.org/abs/1707.06347
Args:
- logps (torch.Tensor): Action log-probabilities under the current policy.
- logps_old (torch.Tensor): Action log-probabilities under the old (pre-update) policy.
- advs (torch.Tensor): Advantage estimates for the actions taken.
- clipratio (float): Clipping parameter for PPO-clip loss. In general, is fine with being left as default.
Returns:
- ppo_loss (torch.Tensor): Loss term for PPO agent.
    - kl (float): KL-divergence estimate between new and old policies.
"""
policy_ratio = torch.exp(logps - logps_old)
clipped_adv = torch.clamp(policy_ratio, 1 - clipratio, 1 + clipratio) * advs
ppo_loss = -(torch.min(policy_ratio * advs, clipped_adv)).mean()
kl = (logps_old - logps).mean().item()
return ppo_loss, kl | 5,358,578 |
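A minimal usage sketch for ppo_clip_policy_loss; the batch size, probabilities, and advantage values below are invented purely for illustration.

import torch

# Hypothetical batch of 4 actions: log-probabilities under the new and old
# policies, plus advantage estimates (all values invented for illustration).
logps = torch.log(torch.tensor([0.30, 0.25, 0.10, 0.40]))
logps_old = torch.log(torch.tensor([0.25, 0.25, 0.20, 0.30]))
advs = torch.tensor([1.0, -0.5, 0.3, 2.0])

loss, kl = ppo_clip_policy_loss(logps, logps_old, advs, clipratio=0.2)
print(loss.item(), kl)  # scalar surrogate loss to minimize, and a KL estimate to monitor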
def reconstruct_modelseed_model(genome_id, model_id, template_reference=None):
""" Reconstruct a draft ModelSEED model for an organism.
Parameters
----------
genome_id : str
Genome ID or workspace reference to genome
model_id : str
ID of output model
template_reference : str, optional
Workspace reference to template model
Returns
-------
dict
Dictionary of current model statistics
"""
# Confirm genome ID is available in PATRIC.
get_genome_summary(genome_id)
# Set input parameters for method.
params = dict()
params['genome'] = 'PATRICSOLR:' + genome_id
# params['fulldb'] = 0
params['output_file'] = model_id
if template_reference is not None:
params['template_model'] = template_reference
params['gapfill'] = 0
params['predict_essentiality'] = 0
# Workaround for ModelSEED workspace bug. The user's modelseed folder must exist before saving
# the model. Otherwise the type of the folder created for the model is not "modelfolder" and
# subsequent operations on the model will fail.
if ms_client.username is None:
ms_client.set_authentication_token()
folder_reference = '/{0}/{1}'.format(ms_client.username, model_folder)
try:
get_workspace_object_meta(folder_reference)
except ObjectNotFoundError:
put_workspace_object(folder_reference, 'folder')
LOGGER.info('Created modelseed folder in workspace for "%s"', ms_client.username)
# Run the server method.
try:
job_id = ms_client.call('ModelReconstruction', params)
LOGGER.info('Started job %s to run model reconstruction for "%s"', job_id, params['genome'])
_wait_for_job(job_id)
except ServerError as e:
references = None
if template_reference is not None:
references = [template_reference]
raise handle_server_error(e, references)
# Get the model statistics for the model.
stats = get_modelseed_model_stats(model_id)
if stats['num_genes'] == 0: # ModelSEED does not return an error if the genome ID is invalid
warn('Model for genome ID {0} has no genes, verify genome ID is valid'.format(genome_id))
return stats | 5,358,579 |
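A hedged usage sketch of the call above. The genome ID and model name are placeholders, and the call assumes valid PATRIC/ModelSEED credentials have already been configured.

# '226186.12' stands in for a PATRIC genome ID; 'my_draft_model' is an arbitrary name.
stats = reconstruct_modelseed_model('226186.12', 'my_draft_model')
print(stats['num_genes'])  # quick sanity check that the draft model is not empty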
def tide_pred_correc(modfile,lon,lat,time,dbfile,ID,z=None,conlist=None):
"""
Performs a tidal prediction at all points in [lon,lat] at times in vector [time]
Applies an amplitude and phase correction based on a time series
"""
from timeseries import timeseries, loadDBstation
print('Calculating tidal correction factors from time series...')
# Load using the timeseries module
t0 = datetime.strftime(time[0],'%Y%m%d.%H%M%S')
t1 = datetime.strftime(time[-1],'%Y%m%d.%H%M%S')
dt = time[1]-time[0]
print(t0, t1, dt.total_seconds())
timeinfo = (t0,t1,dt.total_seconds())
TS,meta = loadDBstation(dbfile,ID,'waterlevel',timeinfo=timeinfo,filttype='low',cutoff=2*3600,output_meta=True)
lonpt=meta['longitude']
latpt=meta['latitude']
print(lonpt,latpt)
# Extract the OTIS tide prediction
u_re, u_im, v_re, v_im, h_re, h_im, omega, conlist = extract_HC(modfile,lonpt,latpt)
h_amp = np.abs(h_re+1j*h_im)[:,0]
h_phs = np.angle(h_re+1j*h_im)[:,0]
# Harmonic analysis of observation time series
amp, phs, frq, frqnames, htide = TS.tidefit(frqnames=conlist)
TS_harm = timeseries(time,htide)
residual = TS.y - htide
# Calculate the amp and phase corrections
dphs = phs - h_phs + np.pi
damp = amp/h_amp
# Extract the data along the specified points
u_re, u_im, v_re, v_im, h_re, h_im, omega, conlist = extract_HC(modfile,lon,lat,z=z,conlist=conlist)
h_amp = np.abs(h_re+1j*h_im)
h_phs = np.angle(h_re+1j*h_im)
u_amp = np.abs(u_re+1j*u_im)
u_phs = np.angle(u_re+1j*u_im)
v_amp = np.abs(v_re+1j*v_im)
v_phs = np.angle(v_re+1j*v_im)
# Initialise the output arrays
sz = lon.shape
nx = np.prod(sz)
nt = time.shape[0]
h=np.zeros((nt,nx))
u=np.zeros((nt,nx))
v=np.zeros((nt,nx))
# Rebuild the time series
#tsec=TS_harm.tsec - TS_harm.tsec[0]
tsec = othertime.SecondsSince(time,basetime=time[0])
print(tsec[0])
for nn,om in enumerate(omega):
for ii in range(0,nx):
h[:,ii] += damp[nn]*h_amp[nn,ii] * np.cos(om*tsec - (h_phs[nn,ii] + dphs[nn]))
u[:,ii] += damp[nn]*u_amp[nn,ii] * np.cos(om*tsec - (u_phs[nn,ii] + dphs[nn]))
v[:,ii] += damp[nn]*v_amp[nn,ii] * np.cos(om*tsec - (v_phs[nn,ii] + dphs[nn]))
szo = (nt,)+sz
return h.reshape(szo), u.reshape(szo), v.reshape(szo), residual | 5,358,580 |
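The inner loop above rebuilds the series one constituent at a time, scaling the amplitude by damp and shifting the phase by dphs. A stripped-down numpy sketch of that correction for a single constituent at a single point follows; every number in it is invented.

import numpy as np

A, phi, om = 0.85, 0.4, 1.405e-4       # invented amplitude (m), phase (rad), frequency (rad/s)
damp, dphs = 1.1, 0.05                 # invented amplitude/phase corrections from the gauge fit
tsec = np.arange(0, 24 * 3600, 600.0)  # 24 hours at 10-minute steps

h = damp * A * np.cos(om * tsec - (phi + dphs))  # corrected contribution to water level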
def generate_new_filename(this_key):
"""Generates filename for processed data from information in this_key."""
[_, _, source_id, experiment_id, _, _] = this_key.split('.')
this_fname = THIS_VARIABLE_ID+'_'+experiment_id+'_'+source_id
return this_fname | 5,358,581 |
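A quick illustration of the key format this helper expects; the key and the module-level THIS_VARIABLE_ID value (assumed here to be 'tas') are hypothetical.

# The key has six dot-separated fields; only source_id (3rd) and experiment_id (4th) are used.
key = 'CMIP6.CMIP.MIROC6.historical.r1i1p1f1.Amon'
print(generate_new_filename(key))  # -> 'tas_historical_MIROC6' when THIS_VARIABLE_ID == 'tas'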
def cls(cpu):
"""Clears the display"""
cpu.display.clear() | 5,358,582 |
def get_connection(user, pwd):
""" Obtiene la conexion a Oracle """
try:
connection = cx_Oracle.connect(user + '/' + pwd + '@' +
config.FISCO_CONNECTION_STRING)
connection.autocommit = False
print('Connection Opened')
return connection
except Exception as e:
print('Exception: ' + str(e)) | 5,358,583 |
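A hedged usage sketch; the credentials are placeholders and config.FISCO_CONNECTION_STRING is assumed to be defined elsewhere in the project.

conn = get_connection('scott', 'tiger')  # placeholder credentials
if conn is not None:                     # the function returns None when the connect fails
    try:
        cursor = conn.cursor()
        cursor.execute('SELECT 1 FROM dual')
        print(cursor.fetchone())
    finally:
        conn.close()
        print('Connection Closed')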
def process_dataset(file_name):
""" Evaluate the tivita index values for each record.
The evaluation is defined in task(args) where args are the entries of the
attached tables.
"""
start = timer()
file_path = os.path.join(data_path, file_name)
with tables.open_file(file_path, "r+") as file:
reader = HSStore(file, path="/records")
writer = HSStore(file, path="/records")
reader.attache_table("patient")
reader.attache_table("hsidata")
reader.attache_table("masks")
tivita_table = writer.create_table(
name="tivita",
dtype=np.dtype([
("oxy", "<f8", (480, 640)),
("nir", "<f8", (480, 640)),
("thi", "<f8", (480, 640)),
("twi", "<f8", (480, 640)),
]),
title="Tivita Index Values",
expectedrows=len(reader),
)
tivita_entry = tivita_table.row
print(f"Tables to read: {reader.get_table_names()}")
print(f"Tables to write: {writer.get_table_names()}")
print(f"Number of entries: {len(reader)}")
# serial evaluation
# for args in iter(reader):
# param = task(args)
# tivita_entry["nir"] = param["nir"]
# tivita_entry["oxy"] = param["oxy"]
# tivita_entry["thi"] = param["thi"]
# tivita_entry["twi"] = param["twi"]
# tivita_entry.append()
#
# tivita_table.flush()
# parallel evaluation
pool = multiprocessing.Pool(processes=7)
for param in pool.imap(task, iter(reader)): # , chunksize=1):
tivita_entry["oxy"] = param["oxy"]
tivita_entry["nir"] = param["nir"]
tivita_entry["thi"] = param["thi"]
tivita_entry["twi"] = param["twi"]
tivita_entry.append()
pool.close()
tivita_table.flush()
print("\nElapsed time for processing dataset: %f sec" %
(timer() - start)) | 5,358,584 |
def _generate_select_expression_for_extended_string_unix_timestamp_ms_to_timestamp(source_column, name):
"""
More robust conversion from StringType to TimestampType. It is assumed that the
timezone is already set to UTC in spark / java to avoid implicit timezone conversions.
Is able to additionally handle (compared to implicit Spark conversion):
* Unix timestamps in milliseconds
* Preceding whitespace
* Trailing whitespace
    * Preceding and trailing whitespace
Hint
----
    Please have a look at the tests to get a better feeling for how it behaves under
tests/unit/transformer/test_mapper_custom_data_types.py::TestExtendedStringConversions and
tests/data/test_fixtures/mapper_custom_data_types_fixtures.py
Example
-------
>>> from spooq.transformer import Mapper
>>>
>>> input_df.head(3)
[Row(input_string="2020-08-12T12:43:14+0000"),
Row(input_string="1597069446000"),
Row(input_string="2020-08-12")]
>>> mapping = [("output_value", "input_string", "extended_string_to_timestamp")]
>>> output_df = Mapper(mapping).transform(input_df)
>>> output_df.head(3)
[Row(input_string=datetime.datetime(2020, 8, 12, 12, 43, 14)),
Row(input_string=datetime.datetime(2020, 8, 10, 14, 24, 6)),
Row(input_string=datetime.datetime(2020, 8, 12, 0, 0, 0))]
"""
return (
F.when(
F.trim(source_column).cast(T.LongType()).isNotNull(), (F.trim(source_column) / 1000).cast(T.TimestampType())
)
.otherwise(F.trim(source_column).cast(T.TimestampType()))
.alias(name)
) | 5,358,585 |
def get_webf_session():
"""
Return an instance of a Webfaction server and a session for authentication
to make further API calls.
"""
    import xmlrpc.client as xmlrpclib  # stdlib module (named xmlrpclib in Python 2)
server = xmlrpclib.ServerProxy("https://api.webfaction.com/")
print("Logging in to Webfaction as %s." % env.user)
if env.password is None:
env.password = getpass(
"Enter Webfaction password for user %s: " % env.user)
session, account = server.login(env.user, env.password)
print("Succesfully logged in as %s." % env.user)
return server, session, account | 5,358,586 |
def _unpack_school_column_aliases() -> Dict[str, List[str]]:
"""
Unpack the known aliases for lookup table of alias_column_name -> schema_column_name.
:return: lookup table.
:raises: ValueError if an alias has more than one mapping to a schema column
"""
result = dict()
# add to the lookup table all the known aliases from School_aliases module
for (schema_column_name, aliases) in School_aliases.items():
for alias_column_name in aliases:
k = alias_column_name.lower()
v = schema_column_name.lower()
if result.get(k) is not None:
raise ValueError(f"duplicate alias {v} for column name: {k}")
result[k] = v
return result | 5,358,587 |
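To make the mapping direction concrete, here is a tiny hypothetical School_aliases table and the lookup the function would build from it; the real module's contents are not shown in the snippet.

# Hypothetical contents of the School_aliases module:
School_aliases = {
    "school_name": ["School", "Name of School"],
    "district_id": ["District", "District Code"],
}

# _unpack_school_column_aliases() would then return (all keys lower-cased):
# {"school": "school_name", "name of school": "school_name",
#  "district": "district_id", "district code": "district_id"}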
def suntimecorr(ra, dec, obst, coordtable, verbose=False):
"""
This function calculates the light-travel time correction from
observer to a standard location. It uses the 2D coordinates (RA
and DEC) of the object being observed and the 3D position of the
observer relative to the standard location. The latter (and the
former, for solar-system objects) may be gotten from JPL's
Horizons system.
Parameters:
-----------
ra : Float
Right ascension of target object in radians.
dec : Float
Declination of target object in radians.
obst : Float or Numpy Float array
Time of observation in Julian Date (may be a vector)
coordtable : String
Filename of output table from JPL HORIZONS specifying
the position of the observatory relative to the
standard position.
verbose : Boolean
If True, print X,Y,Z coordinates.
Returns:
--------
This function returns the time correction in seconds to be ADDED
to the observation time to get the time when the observed photons
would have reached the plane perpendicular to their travel and
containing the reference position.
Notes:
------
The position vectors from coordtable are given in the following
coordinate system:
Reference epoch: J2000.0
xy-plane: plane of the Earth's mean equator at the reference epoch
x-axis : out along ascending node of instantaneous plane of the Earth's
orbit and the Earth's mean equator at the reference epoch
z-axis : along the Earth mean north pole at the reference epoch
Ephemerides are often calculated for BJD, barycentric Julian date.
That is, they are correct for observations taken at the solar
system barycenter's distance from the target. The BJD of our
observation is the time the photons we observe would have crossed
the sphere centered on the object and containing the barycenter.
We must thus add the light-travel time from our observatory to
this sphere. For non-solar-system observations, we approximate
the sphere as a plane, and calculate the dot product of the vector
from the barycenter to the telescope and a unit vector to from the
barycenter to the target, and divide by the speed of light.
Properly, the coordinates should point from the standard location
to the object. Practically, for objects outside the solar system,
the adjustment from, e.g., geocentric (RA-DEC) coordinates to
barycentric coordinates has a negligible effect on the trig
functions used in the routine.
The horizons file in coordtable should be in the form of the
following example, with a subject line of JOB:
!$$SOF
!
! Example e-mail command file. If mailed to "[email protected]"
! with subject "JOB", results will be mailed back.
!
! This example demonstrates a subset of functions. See main doc for
! full explanation. Send blank e-mail with subject "BATCH-LONG" to
! [email protected] for complete example.
!
EMAIL_ADDR = '[email protected]' ! Send output to this address
! (can be blank for auto-reply)
COMMAND = '-79' ! Target body, closest apparition
OBJ_DATA = 'YES' ! No summary of target body data
MAKE_EPHEM = 'YES' ! Make an ephemeris
START_TIME = '2005-Aug-24 06:00' ! Start of table (UTC default)
STOP_TIME = '2005-Aug-25 02:00' ! End of table
STEP_SIZE = '1 hour' ! Table step-size
TABLE_TYPE = 'VECTOR' ! Specify VECTOR ephemeris table type
CENTER = '@10' ! Set observer (coordinate center)
REF_PLANE = 'FRAME' ! J2000 equatorial plane
VECT_TABLE = '3' ! Selects output type (3=all).
OUT_UNITS = 'KM-S' ! Vector units# KM-S, AU-D, KM-D
CSV_FORMAT = 'NO' ! Comma-separated output (YES/NO)
VEC_LABELS = 'YES' ! Label vectors in output (YES/NO)
VECT_CORR = 'NONE' ! Correct for light-time (LT),
! or lt + stellar aberration (LT+S),
! or (NONE) return geometric
! vectors only.
!$$EOF
Example:
---------
>>> # Spitzer is in nearly the Earth's orbital plane. Light coming from
>>> # the north ecliptic pole should hit the observatory and the sun at
>>> # about the same time.
>>> import suntimecorr as sc
>>> ra = 18.0 * np.pi / 12 # ecliptic north pole coordinates in radians
>>> dec = 66.5 * np.pi / 180 # "
>>> obst = np.array([2453607.078]) # Julian date of 2005-08-24 14:00
>>> print( sc.suntimecorr(ra, dec, obst,
'/home/esp01/ancil/horizons/cs41_spitzer.vec') )
1.00810877 # about 1 sec, close to zero
>>> # If the object has the RA and DEC of Spitzer, light time should be
>>> # about 8 minutes to the sun.
>>> obs = np.array([111093592.8346969, -97287023.315796047,
-42212080.826677799])
>>> # vector to the object
>>> obst = np.array([2453602.5])
>>> print( np.sqrt(np.sum(obs**2.0)) )
153585191.481 # about 1 AU, good
>>> raobs = np.arctan(obs[1]/ obs[0])
>>> decobs = np.arctan(obs[2]/ np.sqrt(obs[0]**2 + obs[1]**2))
>>> print(raobs, decobs)
-0.7192383661, -0.2784282118
>>> print( sc.suntimecorr(raobs, decobs, obst,
'/home/esp01/ancil/horizons/cs41_spitzer.vec') / 60.0)
8.5228630 # good, about 8 minutes light time to travel 1 AU
Modification History:
---------------------
2005-12-01 statia Written by Statia Luszcz.
2006-03-09 jh Corrected 90deg error in algorithm, renamed,
updated header, made Coordtable a positional
arg since it's required, switched to radians.
2007-06-28 jh Renamed to suntimecorr since we now use
barycentric Julian date.
2009-01-28 jh Change variables to long, use spline instead
of linfit so we can use one HORIZONS file for
the whole mission.
2009-02-22 jh Reshape spline results to shape of obst. Make
                        it handle unsorted input data properly.
Header update.
2010-07-10 patricio Converted to python. ([email protected])
2010-11-01 patricio Docstring updated.
"""
start_data = '$$SOE'
end_data = '$$EOE'
# Read in whole table as an list of strings, one string per line
ctable = open(coordtable, 'r')
wholetable = ctable.readlines()
ctable.close()
# Find start and end line
i = 0
# while end has not been found:
while wholetable[i].find(end_data) == -1:
# if start is found get the index of next line:
if wholetable[i].find(start_data) != -1:
start = i + 1
i += 1
# Chop table
data = wholetable[start:i-2]
# Extract values:
x, y, z, time = getcoords(data)
# Interpolate to observing times:
# We must preserve the shape and order of obst. Spline takes
# monotonic input and produces linear output. x, y, z, time are
# sorted as HORIZONS produces them.
# Save shape of obst
tshape = np.shape(obst)
# Reshape to 1D and sort
obstime = obst.flatten()
ti = np.argsort(obstime) # indexes of sorted array by time
tsize = np.size(obstime)
# Allocate output arrays
obsx = np.zeros(tsize)
obsy = np.zeros(tsize)
obsz = np.zeros(tsize)
# Interpolate sorted arrays
obsx[ti] = splinterp(obstime[ti], time, x)
obsy[ti] = splinterp(obstime[ti], time, y)
obsz[ti] = splinterp(obstime[ti], time, z)
if verbose:
print( 'X, Y, Z = ', obsx, obsy, obsz)
# Change ra and dec into unit vector n_hat
object_unit_x = np.cos(dec) * np.cos(ra)
object_unit_y = np.cos(dec) * np.sin(ra)
object_unit_z = np.sin(dec)
# Dot product the vectors with n_hat
rdotnhat = ( obsx * object_unit_x +
obsy * object_unit_y +
obsz * object_unit_z )
# Reshape back to the original shape
rdotnhat = rdotnhat.reshape(tshape)
# Time correction is: dt = length/velocity
# Divide by the speed of light and return
return rdotnhat / ( c / 1000.0 ) | 5,358,588 |
def validate_numeric(array, name="array", caller=None):
""" Ensure that the array has some numeric dtype
If the shapes are not equal, then raise a ValueError.
Parameters
----------
array : np.array
A numpy array
name : string
A name for the variable in the error message
caller : string
A name for the caller in the error message
"""
if not np.issubdtype(array.dtype, np.number):
msg = ("{caller}{name} invalid dtype for numeric sequences. found: " +
array.dtype)
_raise_value_error(msg, name, caller) | 5,358,589 |
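A small usage sketch; exactly how the error reads depends on the module-private _raise_value_error helper, which is not shown.

import numpy as np

validate_numeric(np.array([1.0, 2.0, 3.0]), name="weights")  # numeric dtype: passes silently
validate_numeric(np.array(["a", "b"]), name="weights")       # string dtype: should raise ValueError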
def normalize_address(address: str, asHex: bool=False) -> Union[Tuple[str, str], Tuple[str, bytes]]:
"""Takes an address as raw byte or id__ and provides both formats back"""
try:
# convert recipient to raw if provided as id__
if address.startswith("id__"):
address_raw = NyzoStringEncoder.decode(address).get_bytes().hex()
if VERBOSE:
print(f"Raw address is {address_raw}")
else:
raise RuntimeWarning("Not an id__")
except:
if VERBOSE:
print(f"address was not a proper id_ nyzostring")
address_raw = re.sub(r"[^0-9a-f]", "", address.lower())
# print(address_raw)
if len(address_raw) != 64:
raise ValueError("Wrong address format. 64 bytes as hex or id_ nyzostring required")
if VERBOSE:
print(f"Trying with {address_raw}")
address = NyzoStringEncoder.encode(NyzoStringPublicIdentifier.from_hex(address_raw))
# Here we should have both recipient and recipient_raw in all cases.
if asHex:
return address, address_raw
else:
return address, bytes.fromhex(address_raw) | 5,358,590 |
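A hedged usage sketch; the 64-hex-character identifier below is a made-up placeholder rather than a real Nyzo account, and the round-trip relies on the NyzoString helpers behaving as their names suggest.

fake_hex = "ab" * 32  # placeholder 64-character hex identifier
id_string, raw_hex = normalize_address(fake_hex, asHex=True)
print(id_string.startswith("id__"), len(raw_hex))  # expected: True 64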
async def test_update_missing_mac_unique_id_added_from_dhcp(hass, remotews: Mock):
"""Test missing mac and unique id added."""
entry = MockConfigEntry(domain=DOMAIN, data=MOCK_OLD_ENTRY, unique_id=None)
entry.add_to_hass(hass)
with patch(
"homeassistant.components.samsungtv.async_setup",
return_value=True,
) as mock_setup, patch(
"homeassistant.components.samsungtv.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_DHCP},
data=MOCK_DHCP_DATA,
)
await hass.async_block_till_done()
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
assert result["type"] == "abort"
assert result["reason"] == "already_configured"
assert entry.data[CONF_MAC] == "aa:bb:cc:dd:ee:ff"
assert entry.unique_id == "be9554b9-c9fb-41f4-8920-22da015376a4" | 5,358,591 |
def do_gen_binaryimage(inName, outName):
"""Generate binary image for testing"""
f_in = open(inName, "r")
contour = pickle.load(f_in)
f_in.close()
imageBinary, width, height = gen_binaryimage_from_contour( contour )
f_out = open( outName, "w" )
f_out.write( str(width) + " " + str(height) + "\n")
for i in range(0, height):
st = "";
for j in range(0, width):
st = st + str(imageBinary[i][j] )
f_out.write(st)
f_out.write("\n")
f_out.close() | 5,358,592 |
def get_welcome_response():
""" Prompt the user for the prayer
"""
session_attributes = {}
card_title = "Welcome"
speech_output = "What would you like me to pray with you? I can pray the Rosary and the Divine Mercy Chaplet."
reprompt_text = "What would you like me to pray with you?"
should_end_session = False
return build_response(session_attributes, build_speechlet_response(
card_title, speech_output, reprompt_text, speech_output,
should_end_session, [])) | 5,358,593 |
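The handler relies on two helpers that are not shown. The sketch below reflects the standard Alexa custom-skill response shape they are assumed to produce; none of it is code from the original project.

def build_speechlet_response(title, output, reprompt_text, card_content,
                             should_end_session, directives):
    # Assumed shape: one speechlet response in the Alexa custom-skill JSON format.
    return {
        'outputSpeech': {'type': 'PlainText', 'text': output},
        'card': {'type': 'Simple', 'title': title, 'content': card_content},
        'reprompt': {'outputSpeech': {'type': 'PlainText', 'text': reprompt_text}},
        'directives': directives,
        'shouldEndSession': should_end_session,
    }

def build_response(session_attributes, speechlet_response):
    return {
        'version': '1.0',
        'sessionAttributes': session_attributes,
        'response': speechlet_response,
    }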
async def test_bad_trigger_platform(hass):
"""Test bad trigger platform."""
with pytest.raises(vol.Invalid) as ex:
await async_validate_trigger_config(hass, [{"platform": "not_a_platform"}])
assert "Invalid platform 'not_a_platform' specified" in str(ex) | 5,358,594 |
def get_news(
limit: int = 60,
post_kind: str = "news",
filter_: Optional[str] = None,
region: str = "en",
) -> pd.DataFrame:
"""Get recent posts from CryptoPanic news aggregator platform. [Source: https://cryptopanic.com/]
Parameters
----------
limit: int
number of news to fetch
post_kind: str
Filter by category of news. Available values: news or media.
filter_: Optional[str]
Filter by kind of news. One from list: rising|hot|bullish|bearish|important|saved|lol
region: str
Filter news by regions. Available regions are: en (English), de (Deutsch), nl (Dutch), es (Español),
fr (Français), it (Italiano), pt (Português), ru (Русский)
Returns
-------
pd.DataFrame
DataFrame with recent news from different sources filtered by provided parameters.
"""
if post_kind not in CATEGORIES:
post_kind = "news"
results = []
response = make_request(post_kind=post_kind, filter_=filter_, region=region)
if response:
data, next_page, _ = (
response["results"],
response.get("next"),
response.get("count"),
)
for post in data:
results.append(_parse_post(post))
        number_of_pages = math.ceil(limit / 20)
counter = 0
while counter < number_of_pages and next_page:
counter += 1
try:
time.sleep(0.2)
res = requests.get(next_page).json()
for post in res["results"]:
results.append(_parse_post(post))
next_page = res.get("next")
except Exception as e: # noqa: F841
logger.exception(str(e))
console.print(
"[red]Something went wrong while fetching news from API[/red]\n"
)
return pd.DataFrame()
try:
df = pd.DataFrame(results)
df["title"] = df["title"].apply(
lambda x: "\n".join(textwrap.wrap(x, width=66))
if isinstance(x, str)
else x
)
return df
except Exception as e: # noqa: F841
logger.exception(str(e))
console.print("[red]Something went wrong with DataFrame creation[/red]\n")
return pd.DataFrame()
return pd.DataFrame() | 5,358,595 |
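A hedged usage sketch; it assumes make_request is already configured with a CryptoPanic API token and that the network call succeeds.

df = get_news(limit=40, post_kind="news", filter_="hot", region="en")
if not df.empty:
    print(df["title"].head())  # most recent "hot" items, titles wrapped to 66 characters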
def login():
""" Display log in form and handle user login."""
    email = request.args.get('emailLogin')
    password = request.args.get('passwordLogin')
    if email is None:
        # No credentials submitted yet: show the login form.
        return render_template('login.html')
    user = User.query.filter(User.email == email).first()
    if user is None or not check_password_hash(user.password, password):
        flash('Invalid email or password')
        return redirect('/')
    login_user(user)
    flash('Welcome back!')
    return redirect('/') | 5,358,596 |
def build():
"""Build benchmark."""
# Backup the environment.
new_env = os.environ.copy()
# Build afl with qemu (shared build code afl/afl++)
afl_fuzzer_qemu.build()
# Next, build a binary for Eclipser.
src = os.getenv('SRC')
work = os.getenv('WORK')
eclipser_outdir = get_eclipser_outdir(os.environ['OUT'])
os.mkdir(eclipser_outdir)
new_env['CC'] = 'clang'
new_env['CXX'] = 'clang++'
new_env['CFLAGS'] = ' '.join(utils.NO_SANITIZER_COMPAT_CFLAGS)
cxxflags = [utils.LIBCPLUSPLUS_FLAG] + utils.NO_SANITIZER_COMPAT_CFLAGS
new_env['CXXFLAGS'] = ' '.join(cxxflags)
new_env['OUT'] = eclipser_outdir
new_env['FUZZER_LIB'] = '/libStandaloneFuzzTarget.a'
new_env['FUZZ_TARGET'] = os.path.join(
eclipser_outdir, os.path.basename(os.getenv('FUZZ_TARGET')))
print('[build] Re-building benchmark for eclipser fuzzing target.')
with utils.restore_directory(src), utils.restore_directory(work):
utils.build_benchmark(env=new_env) | 5,358,597 |
def create_heart_rate(df):
"""Create heart rate based on provided."""
min = 50
max = 110
increments = 1
amount = 1000
integer_list = randomize_int(min, max, increments, amount)
heart_rate_array = np.array(integer_list)
df["HR"] = heart_rate_array | 5,358,598 |
def model_from_queue(model):
""" Returns the model dict if model is enqueued, else None."""
return _queue.get(model, None) | 5,358,599 |