content (string, 22 to 815k chars) | id (int64, 0 to 4.91M)
---|---|
def fits_downloaded_correctly(fits_loc):
"""
Is there a readable fits image at fits_loc?
Does NOT check for bad pixels
Args:
fits_loc (str): location of fits file to open
Returns:
(bool) True if file at fits_loc is readable, else False
"""
try:
img, _ = fits.getdata(fits_loc, 0, header=True)
return True
except Exception: # image fails to open
return False | 5,358,200 |
def union_of_rects(rects):
"""
Calculates the union (bounding box) of N rectangular boxes
Assumes rects is of form N x [xmin, ymin, xmax, ymax]
"""
xA = np.min(rects[:, 0])
yA = np.min(rects[:, 1])
xB = np.max(rects[:, 2])
yB = np.max(rects[:, 3])
return np.array([xA, yA, xB, yB], dtype=np.int32) | 5,358,201 |
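# A minimal usage sketch for union_of_rects (hypothetical values), assuming numpy is imported as np.
# rects holds two boxes as rows of [xmin, ymin, xmax, ymax]:
rects = np.array([[10, 10, 50, 40],
                  [30, 20, 80, 90]])
print(union_of_rects(rects))  # expected bounding box: [10 10 80 90]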
def configure_services(config: List[Dict]) -> Dict[str, GcpServiceQuery]:
"""
Generate GcpServiceQuery list from config
:param config: list with GcpServiceQuery's configuration
:return: mapping of service name to GcpServiceQuery objects
"""
if not isinstance(config, list):
raise GcpServiceQueryConfigError(f"Invalid GcpServiceQuery config {config}")
result = {}
for entry in config:
if not isinstance(entry, dict):
raise GcpServiceQueryConfigError(f"Invalid GcpServiceQuery entry type: '{entry}'. "
f"Should be dict, is {type(entry)}")
serviceName = entry.get(SERVICE_NAME, None)
version = entry.get(VERSION, None)
queries = entry.get(QUERIES, None)
if not serviceName or not version or not queries:
raise GcpServiceQueryConfigError(f"Missing required key for entry {entry}")
gcp_service_query = GcpServiceQuery(serviceName, version)
# Check multiple entries with same name
if serviceName in result:
raise GcpServiceQueryConfigError(f"Multiple GCP service with same name: {serviceName}")
result[serviceName] = gcp_service_query
return result | 5,358,202 |
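# A sketch of a config that configure_services could accept; the literal keys below are an
# assumption, since SERVICE_NAME, VERSION and QUERIES are constants defined elsewhere:
config = [
    {"serviceName": "compute", "version": "v1", "queries": ["instances"]},
    {"serviceName": "storage", "version": "v1", "queries": ["buckets"]},
]
services = configure_services(config)  # {"compute": GcpServiceQuery(...), "storage": ...}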
def tags_get():
"""
Get endpoint /api/tag
args:
optional company_filter(int) - id of a company; will only return tags related to said company
optional crowd(int) - 0 - 2 specifying the crowd sourcing option. Key:
0 - all tags
1 - Only crowd sourced tags
2 - Only non crowd sourced tags
optional only_ids - if set, only returns the ids of the tags
return:
List Tags - A json list of all tags that match the optional args.
"""
request_data = request.get_json()
company_filter = get_if_exist(request_data, "company_filter")
only_ids = get_if_exist(request_data,"only_ids")
crowd = get_if_exist(request_data, "crowd")
if crowd:
if crowd > 2:
return status.HTTP_400_BAD_REQUEST
crowd = 0
if company_filter:
t = db.session.query(
Tag_company.tag,
).filter(Tag_company.company == int(company_filter)).group_by(Tag_company.tag).subquery('t')
Tag_query = Tag.query.filter(
Tag.id == t.c.tag
)
else:
Tag_query = Tag.query
if crowd != 0:
crowd = (1==crowd)
Tag_query = Tag_query.filter_by(crowd_soured = crowd)
tags = Tag_query.all()
if only_ids:
return jsonify([tag.id for tag in tags]), status.HTTP_200_OK
else:
return jsonify([tag.serialize for tag in tags]), status.HTTP_200_OK | 5,358,203 |
def gunzip(filename, targetdir):
"""Decompress a gzip-compressed file into a target directory.
Args:
filename: Full path to gzip file.
targetdir: Directory to decompress file into.
Returns:
The output file name.
Raises:
FileNotFoundError: `filename` does not exist.
"""
# We delete the .gz suffix and put the decompressed file into `targetdir`.
if not os.path.isfile(filename):
raise FileNotFoundError(f"File '{filename}' does not exist.")
targetfile = os.path.join(
targetdir, re.sub('\\.gz$', '', os.path.basename(filename))
)
cprint(f"Decompressing '{filename}'...", 'yellow')
try:
with open(targetfile, 'xb') as o, gzip.open(filename, 'rb') as i:
shutil.copyfileobj(i, o)
except Exception:
# Clean up target file.
if os.path.isfile(targetfile):
cprint(f"Removing file '{targetfile}'...", 'red')
os.remove(targetfile)
raise
assert targetfile
assert os.path.isfile(targetfile)
cprint(f"Successfully created file '{targetfile}'.", 'green')
return targetfile | 5,358,204 |
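# A minimal usage sketch for gunzip with hypothetical paths; the target directory must already exist:
out_path = gunzip("/tmp/data.csv.gz", "/tmp/extracted")
print(out_path)  # /tmp/extracted/data.csv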
def async_sendmail(subject, message, to):
"""异步邮件发送,可用于多线程及非Web上下文环境"""
def send_async_email(app):
with app.test_request_context():
app.preprocess_request()
sendmail(subject, message, to)
app = current_app._get_current_object()
t = Thread(target=send_async_email, args=[app])
t.start() | 5,358,205 |
def get_accept_languages(accept):
"""Returns a list of languages, by order of preference, based on an
HTTP Accept-Language string. See RFC 2616
(http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html) for the specification.
"""
langs = parse_http_accept_header(accept)
for index, lang in enumerate(langs):
langs[index] = lang_in_gettext_format(lang)
return langs | 5,358,206 |
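# A usage sketch, assuming parse_http_accept_header and lang_in_gettext_format behave as their
# names suggest (both are defined elsewhere in this module):
langs = get_accept_languages("en-US,en;q=0.9,fr;q=0.8")
# langs would be something like ['en_US', 'en', 'fr'], ordered by preference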
def get_princ_axes_xyz(tensor):
"""
Gets the principal stress axes from a stress tensor.
Modified from beachball.py from ObsPy, written by Robert Barsch.
That code is modified from Generic Mapping Tools (gmt.soest.hawaii.edu)
Returns 'PrincipalAxis' classes, which have attributes val, trend, plunge
Returns T, N, P
"""
tensor = np.array(tensor)
(D, V) = sorted_eigens(tensor)
pl = np.arcsin( -V[2] ) # 2
az = np.arctan2( V[0], -V[1] ) # 0 # 1
for i in range(0, 3):
if pl[i] <= 0:
pl[i] = -pl[i]
az[i] += np.pi
if az[i] < 0:
az[i] += 2 * np.pi
if az[i] > 2 * np.pi:
az[i] -= 2 * np.pi
pl *= 180 / np.pi
az *= 180 / np.pi
T = PrincipalAxis( D[0], az[0], pl[0] ) # 0 0 0
N = PrincipalAxis( D[1], az[1], pl[1] )
P = PrincipalAxis( D[2], az[2], pl[2] ) # 2 2 2
return(T, N, P) | 5,358,207 |
def extractYoushoku(item):
"""
"""
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if 'The Other World Dining Hall' in item['tags'] and (chp or vol):
return buildReleaseMessageWithType(item, 'The Other World Dining Hall', vol, chp, frag=frag, postfix=postfix)
return False | 5,358,208 |
def _perform_Miecalculations(diam, wavelength, n, noOfAngles=100.):
"""
Performs Mie calculations
Parameters
----------
diam: NumPy array of floats
Array of diameters over which to perform Mie calculations; units are um
wavelength: float
Wavelength of light in um for which to perform calculations
n: complex
Ensemble complex index of refraction
Returns
pandas DataFrame with the diameters as the index and the Mie results in the different columns
total_extinction_coefficient: this takes the sum of all particle cross-sections of the particular diameter in a cubic
meter. This is in principle the AOD of an L
"""
diam = np.asarray(diam)
extinction_efficiency = np.zeros(diam.shape)
scattering_efficiency = np.zeros(diam.shape)
absorption_efficiency = np.zeros(diam.shape)
extinction_crossection = np.zeros(diam.shape)
scattering_crossection = np.zeros(diam.shape)
absorption_crossection = np.zeros(diam.shape)
# phase_function_natural = pd.DataFrame()
angular_scattering_natural = pd.DataFrame()
# extinction_coefficient = np.zeros(diam.shape)
# scattering_coefficient = np.zeros(diam.shape)
# absorption_coefficient = np.zeros(diam.shape)
# Function for calculating the size parameter for wavelength l and radius r
sp = lambda r, l: 2. * np.pi * r / l
for e, d in enumerate(diam):
radius = d / 2.
# print('sp(radius, wavelength)', sp(radius, wavelength))
# print('n', n)
# print('d', d)
mie = bhmie.bhmie_hagen(sp(radius, wavelength), n, noOfAngles, diameter=d)
values = mie.return_Values_as_dict()
extinction_efficiency[e] = values['extinction_efficiency']
# print("values['extinction_crosssection']",values['extinction_crosssection'])
scattering_efficiency[e] = values['scattering_efficiency']
absorption_efficiency[e] = values['extinction_efficiency'] - values['scattering_efficiency']
extinction_crossection[e] = values['extinction_crosssection']
scattering_crossection[e] = values['scattering_crosssection']
absorption_crossection[e] = values['extinction_crosssection'] - values['scattering_crosssection']
# phase_function_natural[d] = values['phaseFct_natural']['Phase_function_natural'].values
angular_scattering_natural[d] = mie.get_angular_scatt_func().natural.values
# print('\n')
# phase_function_natural.index = values['phaseFct_natural'].index
angular_scattering_natural.index = mie.get_angular_scatt_func().index
out = pd.DataFrame(index=diam)
out['extinction_efficiency'] = pd.Series(extinction_efficiency, index=diam)
out['scattering_efficiency'] = pd.Series(scattering_efficiency, index=diam)
out['absorption_efficiency'] = pd.Series(absorption_efficiency, index=diam)
out['extinction_crossection'] = pd.Series(extinction_crossection, index=diam)
out['scattering_crossection'] = pd.Series(scattering_crossection, index=diam)
out['absorption_crossection'] = pd.Series(absorption_crossection, index=diam)
return out, angular_scattering_natural | 5,358,209 |
def load_config_dict(pipette_id: str) -> Tuple[
'PipetteFusedSpec', 'PipetteModel']:
""" Give updated config with overrides for a pipette. This will add
the default value for a mutable config before returning the modified
config value.
"""
override = load_overrides(pipette_id)
model = override['model']
config = fuse_specs(model)
if 'quirks' not in override.keys():
override['quirks'] = {key: True for key in config['quirks']}
for top_level_key in config.keys():
if top_level_key != 'quirks':
add_default(config[top_level_key]) # type: ignore
config.update(override) # type: ignore
return config, model | 5,358,210 |
def ErrorAddEncKey(builder, encKey):
"""This method is deprecated. Please switch to AddEncKey."""
return AddEncKey(builder, encKey) | 5,358,211 |
def test_run_without_exception():
"""Not a good test at all. :("""
try:
settings = {
"LINKEDIN_USER": os.getenv("LINKEDIN_USER"),
"LINKEDIN_PASSWORD": os.getenv("LINKEDIN_PASSWORD"),
"LINKEDIN_BROWSER": "Chrome",
"LINKEDIN_BROWSER_DRIVER": "/Users/dayhatt/workspace/drivers/chromedriver",
"LINKEDIN_BROWSER_HEADLESS": 0,
"LINKEDIN_BROWSER_CRON": 0,
"LINKEDIN_CRON_USER": "dayhatt",
"LINKEDIN_PREFERRED_USER": "./data/user_preferred.txt",
"LINKEDIN_NOT_PREFERRED_USER": "./data/user_not_preferred.txt",
}
run_script(settings=settings)
except Exception as e: # noqa
pytest.fail(f"Test failed: Scripts failed to run: {e}") | 5,358,212 |
def _read_txs_from_file(f):
"""
Validate headers and read buy/sell transactions from the open file-like object 'f'.
Note: we use the seek method on f.
"""
ans = []
f.seek(0)
workbook = openpyxl.load_workbook(f)
sheet = workbook.active
all_contents = list(sheet.rows)
_validate_header(all_contents[0])
contents = all_contents[1:]
for row in contents:
item = _tx_from_gemini_row(row)
if item is not None:
ans.append(item)
return ans | 5,358,213 |
def get_generator_regulation_lower_term_4(data, trader_id, intervention) -> Union[float, None]:
"""Get L5RE term 4 in FCAS availability calculation"""
# Term parameters
enablement_min = get_effective_enablement_min(data, trader_id, 'L5RE')
energy_target = lookup.get_trader_solution_attribute(data, trader_id, '@EnergyTarget', float, intervention)
lower_slope_coefficient = get_lower_slope_coefficient(data, trader_id, 'L5RE')
# Ignore limit if slope coefficient = 0
if lower_slope_coefficient == 0:
return None
return 0 if (lower_slope_coefficient is None) else (energy_target - enablement_min) / lower_slope_coefficient | 5,358,214 |
def count_parameters(model):
"""count model parameters"""
return sum(p.numel() for p in model.parameters() if p.requires_grad) | 5,358,215 |
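# A quick sketch with a toy PyTorch model:
import torch.nn as nn
model = nn.Linear(10, 2)        # 10*2 weights + 2 biases
print(count_parameters(model))  # 22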
def scenario(l_staticObject, l_cross, l_road):
"""
Coordinates of objects in a scenario. Include:
l_staticObject: list of static objects
l_cross: list of pedestrian crosses
l_road: list of road layouts
"""
# road layout
road = Road(
left=np.array([[-100, 2], [100, 2]]),
right=np.array([[-100, -4], [100, -4]]),
lane=np.array([[-100, -1], [100, -1]])
)
l_road.append(road)
sx = [-20, 180]
sy = [-10, -4]
sz = 5
step = 7
idx = 1
for i in range(sx[0], sx[1], step):
if i > 10 and i < 40:
continue
idx += 1
obs = StaticObject(
idx=idx,
poly=np.array([[i, sy[0]], [i+sz, sy[0]],
[i+sz, sy[1]], [i, sy[1]]]))
l_staticObject.append(obs)
obs2 = StaticObject(
idx=21,
poly=np.array([[-60, -20], [-25, -20], [-25, -5],
[-60, -5]]))
l_staticObject.append(obs2)
obs3 = StaticObject(
idx=31,
poly=np.array([[-40, 20], [-2, 20], [-2, 8],
[-40, 8]]))
l_staticObject.append(obs3)
obs4 = StaticObject(
idx=41,
poly=np.array([[5, 20], [60, 20], [60, 5],
[5, 5]]))
l_staticObject.append(obs4)
# # static object
# obs11 = StaticObject(
# idx=11,
# poly=np.array([[-40, -20], [-15, -20], [-14, -10],
# [-20, -6], [-40, -6]]))
# l_staticObject.append(obs11)
# obs12 = StaticObject(
# idx=12,
# poly=np.array([[-80, -20], [-43, -20], [-43, -8],
# [-50, -6], [-70, -6]]))
# l_staticObject.append(obs12)
# obs13 = StaticObject(
# idx=13,
# poly=np.array([[-25, -70], [-16, -23], [-40, -23],
# [-49, -70]]))
# l_staticObject.append(obs13)
# obs21 = StaticObject(
# idx=21,
# poly=np.array([[5, -20], [30, -20], [30, -5], [8, -5]]))
# l_staticObject.append(obs21)
# obs22 = StaticObject(
# idx=22,
# poly=np.array([[4, -23], [10, -23], [8, -50],
# [-2, -50]]))
# l_staticObject.append(obs22)
# obs23 = StaticObject(
# idx=23,
# poly=np.array([[33, -20], [50, -20], [50, -5], [33, -5]]))
# l_staticObject.append(obs23)
# obs31 = StaticObject(
# idx=31,
# poly=np.array([[-80, 5], [-70, 5], [-63, 8],
# [-63, 20], [-80, 20]]))
# l_staticObject.append(obs31)
# obs32 = StaticObject(
# idx=32,
# poly=np.array([[-60, 5], [-50, 5], [-43, 8],
# [-43, 20], [-60, 20]]))
# l_staticObject.append(obs32)
# obs33 = StaticObject(
# idx=33,
# poly=np.array([[-40, 5], [-30, 5], [-23, 8],
# [-23, 20], [-40, 20]]))
# l_staticObject.append(obs33)
# obs34 = StaticObject(
# idx=34,
# poly=np.array([[-20, 5], [-15, 5], [-10, 8],
# [-6, 20], [-20, 20]]))
# l_staticObject.append(obs34)
# obs35 = StaticObject(
# idx=35,
# poly=np.array([[-20, 23], [-6, 23], [-6, 35],
# [-8, 35], [-20, 35]]))
# l_staticObject.append(obs35)
# obs41 = StaticObject(
# idx=41,
# poly=np.array([[10, 7], [20, 7], [20, 20],
# [11, 20]]))
# l_staticObject.append(obs41)
# obs42 = StaticObject(
# idx=42,
# poly=np.array([[12, 23], [20, 23], [20, 40],
# [14, 40]]))
# l_staticObject.append(obs42)
# obs43 = StaticObject(
# idx=43,
# poly=np.array([[23, 7], [40, 7], [40, 30],
# [23, 30]]))
# l_staticObject.append(obs43)
# # pedestrian cross
# cross1 = PedestrianCross(
# left=np.array([[-10, -6], [-6, 6]]),
# right=np.array([[-8, -6], [-4, 6]]),
# density=0.8
# )
# cross2 = PedestrianCross(
# left=np.array([[2, -6], [6, 6]]),
# right=np.array([[4, -6], [8, 6]]),
# density=0.8
# )
# l_cross.append(cross1)
# l_cross.append(cross2)
# # road layout
# road1 = Road(
# left=np.array([[-100, 4], [-4, 4]]),
# right=np.array([[-100, -4], [-6, -4]]),
# lane=np.array([[-100, 0], [-4, 0]])
# )
# road2 = Road(
# left=np.array([[-4, 4], [16, 100]]),
# right=np.array([[4, 4], [24, 100]]),
# lane=np.array([[0, 4], [20, 100]])
# )
# road3 = Road(
# left=np.array([[4, 4], [100, 4]]),
# right=np.array([[2, -4], [100, -4]]),
# lane=np.array([[4, 0], [100, 0]])
# )
# road4 = Road(
# left=np.array([[-26, -100], [-6, -4]]),
# right=np.array([[-18, -100], [2, -4]]),
# lane=np.array([[-22, -100], [-2, -4]])
# )
# l_road.extend([road1, road2, road3, road4]) | 5,358,216 |
def string_rule_variable(label=None, params=None, options=None, public=True):
"""
Decorator to make a function into a string rule variable.
NOTE: add **kwargs argument to receive Rule as parameters
:param label: Label for Variable
:param params: Parameters expected by the Variable function
:param options: Options parameter to specify expected options for the variable.
The value used in the Condition IS NOT checked against this list.
:param public: Flag to identify if a variable is public or not
:return: Decorator function wrapper
"""
return _rule_variable_wrapper(StringType, label, params=params, options=options, public=public) | 5,358,217 |
def detect_horizon_lines(image_thre, row, busbar, cell_size, thre=0.6, split=50, peak_interval=None, margin=None):
""" Detect horizontal edges by segmenting image into vertical splits
Parameters
---------
image_thre: array
Adaptive threshold of raw images
row: int
Number of rows of solar module
busbar: int
Number of busbars of a solar cell
cell_size: int
Output cell size in pixel
thre: float
Peak intensity above THRE will be set as 1.
Note that the edge's peak intensity should be lowest because edges are black
split: int
Number of splits
peak_interval: int
Distance between each peak.
Returns
-------
hline_abs_couple: array
Suppose a line is y=a*x+b.
Return 'a' and 'b' of a couple edges (top and bottom of a cell).
"""
#width = image_thre.shape[1]
#end = int(width / split)
#image_vsplits = np.hsplit(image_thre[:, :end * split], split) # vertical splits
#image_vsplits.append(image_thre[:, end * split:])
image_vsplits = split_img(image_thre, split=split, direction=1)
edge_y = []
inx_x = []
for inx, im_split in enumerate(image_vsplits):
#sum_split = np.sum(im_split, axis=1)
#sum_split = sum_split / np.max(sum_split)
#sum_split[sum_split > thre] = 1
#if peak_interval is None:
# peak_interval = int(cell_size / (busbar + 1) * 0.5)
#peak, _ = find_peaks(-1 * sum_split, distance=peak_interval)
peak = detect_peaks(im_split, 1, cell_size, busbar, thre, peak_interval, margin=margin)
if len(peak) >= row * (busbar + 1) - 1:
peak_new = [peak[0]]
for i in range(1, len(peak) - 1):
if np.abs(peak[i] - peak[i + 1]) < 15:
peak_mean = (peak[i] + peak[i + 1]) / 2
peak_new.append(peak_mean)
elif np.abs(peak[i] - peak[i - 1]) > 15:
peak_new.append(peak[i])
peak_new.append(peak[-1])
peak_new = np.array(peak_new)
peak_new_a = np.delete(peak_new, 0)
peak_new_b = np.delete(peak_new, -1)
peak_new_detect = peak_new[detectoutliers(np.abs(peak_new_a - peak_new_b), rate=0.5, option=1)]
if len(peak_new_detect) == (busbar + 1) * row + 1:
edge_y.append(peak_new_detect)
inx_mean = ((2 * inx + 1) * (image_thre.shape[1] / split) - 1) / 2
inx_x.append(inx_mean)
edge_y = np.array(edge_y)
hlines = list(zip(*edge_y))
hlines = np.array(hlines)
inx_x = np.array(inx_x)
# for lines in hlines:
# lines_new = self.detectoutliers(lines, option=0)
# while np.std(lines_new) > 10:
# lines_new = self.detectoutliers(lines, rate=1, option=0)
# hb_abs = [] # all lines including busbar
hb_abs = linear_regression(inx_x, hlines, outlier_filter=True)
hline_abs_couple = [] # all lines excluding busbar
# for horizonline in hlines:
# ab, _ = curve_fit(self.linear, inx_x, horizonline) # y = ax + b
# hb_abs.append(ab)
hline_abs_couple = [(hb_abs[(busbar + 1) * i], hb_abs[(busbar + 1) * (i + 1)]) for i in range(row)]
# hline_abs = [(hb_abs[(4+1)*i],hb_abs[(4+1)*(i+1)]) for i in range(6)]
# hline_abs = [(hb_abs[(self.busbar+2)*i],hb_abs[(self.busbar+2)*(i+1)-1]) for i in range(self.row)]
return hline_abs_couple | 5,358,218 |
def countRoem(cards, trumpSuit=None):
"""Counts the amount of roem (additional points) in a list of cards
Args:
cards: list of cards to inspect
trumpSuit: the trump suit, or None if there is no trump
Returns:
Integer value of how many points of roem are in the cards in total
"""
roem = 0
# Stuk
# Without a trumpSuit, stuk is impossible
if trumpSuit is not None:
#trumpKing = list(filter(lambda c: c.suit == trumpSuit and c.rank == 4, cards))
#trumpQueen = list(filter(lambda c: c.suit == trumpSuit and c.rank == 5, cards))
trumpKing = [card for card in cards if card.suit == trumpSuit and card.rank == 4]
trumpQueen = [card for card in cards if card.suit == trumpSuit and card.rank == 5]
if trumpKing and trumpQueen:
roem += 20
# Normal roem
# For each suit we check whether there are 3 cards in that suit, if so there is chance for roem
for i in range(4):
#cardsInSuit = list(filter(lambda c: c.suit == i, cards))
cardsInSuit = [card for card in cards if card.suit == i]
if len(cardsInSuit) >= 3:
cards = cardsInSuit
# We sort the list and check the difference between consecutive cards
cards.sort(key=lambda c: c.rank)
subtractList = []
for i in range(len(cards) - 1):
#subtract = abs(cards[i].roemRank - cards[i+1].roemRank)
subtract = abs(ROEMRANKS[cards[i].rank] - ROEMRANKS[cards[i + 1].rank])
subtractList.append(subtract)
# If more than 1 difference equals 1, we know at least 3 cards have consecutive ranks
#lenOfOnes = len(list(filter(lambda x: x == 1, subtractList)))
lenOfOnes = len([x for x in subtractList if x == 1])
if lenOfOnes == 2:
roem += 20
elif lenOfOnes == 3:
roem += 50
return roem | 5,358,219 |
def batch_to_space(
data: NodeInput,
block_shape: NodeInput,
crops_begin: NodeInput,
crops_end: NodeInput,
name: Optional[str] = None,
) -> Node:
"""Perform BatchToSpace operation on the input tensor.
BatchToSpace permutes data from the batch dimension of the data tensor into spatial dimensions.
:param data: Node producing the data tensor.
:param block_shape: The sizes of the block of values to be moved.
:param crops_begin: Specifies the amount to crop from the beginning along each axis of `data`.
:param crops_end: Specifies the amount to crop from the end along each axis of `data`.
:param name: Optional output node name.
:return: The new node performing a BatchToSpace operation.
"""
return _get_node_factory_opset2().create(
"BatchToSpace", as_nodes(data, block_shape, crops_begin, crops_end),
) | 5,358,220 |
def test_DetectionMAP():
"""
test DetectionMAP
:return:
"""
train_program = fluid.Program()
startup_program = fluid.Program()
with fluid.unique_name.guard():
with fluid.program_guard(train_program, startup_program):
detect_res = fluid.layers.data(
name='detect_res',
shape=[10, 6],
append_batch_size=False,
dtype='float32')
label = fluid.layers.data(
name='label',
shape=[10, 1],
append_batch_size=False,
dtype='float32')
box = fluid.layers.data(
name='bbox',
shape=[10, 4],
append_batch_size=False,
dtype='float32')
map_eval = fluid.metrics.DetectionMAP(
detect_res, label, box, class_num=21)
cur_map, accm_map = map_eval.get_map_var()
assert cur_map is not None
assert accm_map is not None | 5,358,221 |
def url_in(url):
""" Send a URL and I'll post it to Hive """
custom_json = {'url': url}
trx_id, success = send_notification(custom_json)
return trx_id, success | 5,358,222 |
def login():
"""
Display a basic login form in order to log in a user
"""
if request.method == 'GET':
return render_template('login.html')
else:
try:
usr = User.query.get(request.form['user_id'])
if bcrypt.checkpw(request.form['user_password'].encode('utf-8'),usr.password):
login_user(usr, remember=True)
flash('Logged in successfully')
return redirect(session['next_url'])
except Exception as e:
print("Sorry this user don't exist")
print(e)
return render_template('login.html') | 5,358,223 |
def hflip(stream):
"""Flip the input video horizontally.
Official documentation: `hflip <https://ffmpeg.org/ffmpeg-filters.html#hflip>`__
"""
return FilterNode(stream, hflip.__name__).stream() | 5,358,224 |
def get_diagonal_ripple_rainbows_2():
"""
Returns 15 diagonal ripple rainbows
Programs that use this function:
- Diagonal Ripple 3
- Diagonal Ripple 4
"""
rainbow01 = [
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1H, C2, C3, C4, C5, C6, C7, C8]
]
rainbow02 = [
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1H, C2, C3, C4, C5, C6, C7, C8],
[C1, C2H, C3, C4, C5, C6, C7, C8]
]
rainbow03 = [
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1H, C2, C3, C4, C5, C6, C7, C8],
[C1, C2H, C3, C4, C5, C6, C7, C8],
[C1, C2, C3H, C4, C5, C6, C7, C8]
]
rainbow04 = [
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1H, C2, C3, C4, C5, C6, C7, C8],
[C1, C2H, C3, C4, C5, C6, C7, C8],
[C1, C2, C3H, C4, C5, C6, C7, C8],
[C1, C2, C3, C4H, C5, C6, C7, C8]
]
rainbow05 = [
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1H, C2, C3, C4, C5, C6, C7, C8],
[C1, C2H, C3, C4, C5, C6, C7, C8],
[C1, C2, C3H, C4, C5, C6, C7, C8],
[C1, C2, C3, C4H, C5, C6, C7, C8],
[C1, C2, C3, C4, C5H, C6, C7, C8]
]
rainbow06 = [
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1H, C2, C3, C4, C5, C6, C7, C8],
[C1, C2H, C3, C4, C5, C6, C7, C8],
[C1, C2, C3H, C4, C5, C6, C7, C8],
[C1, C2, C3, C4H, C5, C6, C7, C8],
[C1, C2, C3, C4, C5H, C6, C7, C8],
[C1, C2, C3, C4, C5, C6H, C7, C8]
]
rainbow07 = [
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1H, C2, C3, C4, C5, C6, C7, C8],
[C1, C2H, C3, C4, C5, C6, C7, C8],
[C1, C2, C3H, C4, C5, C6, C7, C8],
[C1, C2, C3, C4H, C5, C6, C7, C8],
[C1, C2, C3, C4, C5H, C6, C7, C8],
[C1, C2, C3, C4, C5, C6H, C7, C8],
[C1, C2, C3, C4, C5, C6, C7H, C8]
]
rainbow08 = [
[C1H, C2, C3, C4, C5, C6, C7, C8],
[C1, C2H, C3, C4, C5, C6, C7, C8],
[C1, C2, C3H, C4, C5, C6, C7, C8],
[C1, C2, C3, C4H, C5, C6, C7, C8],
[C1, C2, C3, C4, C5H, C6, C7, C8],
[C1, C2, C3, C4, C5, C6H, C7, C8],
[C1, C2, C3, C4, C5, C6, C7H, C8],
[C1, C2, C3, C4, C5, C6, C7, C8H]
]
rainbow09 = [
[C1, C2H, C3, C4, C5, C6, C7, C8],
[C1, C2, C3H, C4, C5, C6, C7, C8],
[C1, C2, C3, C4H, C5, C6, C7, C8],
[C1, C2, C3, C4, C5H, C6, C7, C8],
[C1, C2, C3, C4, C5, C6H, C7, C8],
[C1, C2, C3, C4, C5, C6, C7H, C8],
[C1, C2, C3, C4, C5, C6, C7, C8H],
[C1, C2, C3, C4, C5, C6, C7, C8]
]
rainbow10 = [
[C1, C2, C3H, C4, C5, C6, C7, C8],
[C1, C2, C3, C4H, C5, C6, C7, C8],
[C1, C2, C3, C4, C5H, C6, C7, C8],
[C1, C2, C3, C4, C5, C6H, C7, C8],
[C1, C2, C3, C4, C5, C6, C7H, C8],
[C1, C2, C3, C4, C5, C6, C7, C8H],
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1, C2, C3, C4, C5, C6, C7, C8]
]
rainbow11 = [
[C1, C2, C3, C4H, C5, C6, C7, C8],
[C1, C2, C3, C4, C5H, C6, C7, C8],
[C1, C2, C3, C4, C5, C6H, C7, C8],
[C1, C2, C3, C4, C5, C6, C7H, C8],
[C1, C2, C3, C4, C5, C6, C7, C8H],
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1, C2, C3, C4, C5, C6, C7, C8]
]
rainbow12 = [
[C1, C2, C3, C4, C5H, C6, C7, C8],
[C1, C2, C3, C4, C5, C6H, C7, C8],
[C1, C2, C3, C4, C5, C6, C7H, C8],
[C1, C2, C3, C4, C5, C6, C7, C8H],
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1, C2, C3, C4, C5, C6, C7, C8]
]
rainbow13 = [
[C1, C2, C3, C4, C5, C6H, C7, C8],
[C1, C2, C3, C4, C5, C6, C7H, C8],
[C1, C2, C3, C4, C5, C6, C7, C8H],
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1, C2, C3, C4, C5, C6, C7, C8]
]
rainbow14 = [
[C1, C2, C3, C4, C5, C6, C7H, C8],
[C1, C2, C3, C4, C5, C6, C7, C8H],
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1, C2, C3, C4, C5, C6, C7, C8]
]
rainbow15 = [
[C1, C2, C3, C4, C5, C6, C7, C8H],
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1, C2, C3, C4, C5, C6, C7, C8],
[C1, C2, C3, C4, C5, C6, C7, C8]
]
return rainbow01, rainbow02, rainbow03, rainbow04, rainbow05, \
rainbow06, rainbow07, rainbow08, rainbow09, rainbow10, \
rainbow11, rainbow12, rainbow13, rainbow14, rainbow15 | 5,358,225 |
def add_file(original_filepath: Path, path_from_repo: Path) -> None:
"""Copies the file from the repository to staging area.
All parent folders will be created, albeit empty.
"""
hierarchy = paths.staging_area / path_from_repo
if not hierarchy.exists():
hierarchy.mkdir(parents=True)
shutil.copy2(original_filepath, hierarchy) | 5,358,226 |
def matrix_prod(A, B, display = False):
"""
Computes the matrix product of two matrices using array slicing and vector operations.
"""
if A.shape[1] != B.shape[0]:
raise ValueError("Dimensions not compatible.")
# Not allowed!?
#matrix = A.dot(B)
# Dotproduct of each A.row*B.clm
matrix = np.array([[np.sum(A[i,:]*B[:,j]) for j in range(B.shape[1])]
for i in range(A.shape[0])])
if display:
print(matrix)
return matrix | 5,358,227 |
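# A minimal check of matrix_prod against numpy's own matmul, assuming numpy is imported as np:
A = np.array([[1, 2], [3, 4]])
B = np.array([[5, 6], [7, 8]])
print(matrix_prod(A, B))                                  # [[19 22] [43 50]]
np.testing.assert_array_equal(matrix_prod(A, B), A @ B)   # agrees with the built-in product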
def remove_quat_discontinuities(rotations):
"""
Removing quat discontinuities on the time dimension (removing flips)
:param rotations: Array of quaternions of shape (T, J, 4)
:return: The processed array without quaternion inversion.
"""
rots_inv = -rotations
for i in range(1, rotations.shape[0]):
# Compare dot products
replace_mask = np.sum(rotations[i - 1: i] * rotations[i: i + 1], axis=-1) < np.sum(
rotations[i - 1: i] * rots_inv[i: i + 1], axis=-1)
replace_mask = replace_mask[..., np.newaxis]
rotations[i] = replace_mask * rots_inv[i] + (1.0 - replace_mask) * rotations[i]
return rotations | 5,358,228 |
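# A small sketch showing a flipped quaternion being restored, assuming numpy is imported as np.
# Shape is (T, J, 4) = (2, 1, 4); the second frame is the negated, i.e. flipped, quaternion:
q = np.array([[[1.0, 0.0, 0.0, 0.0]],
              [[-1.0, 0.0, 0.0, 0.0]]])
q_fixed = remove_quat_discontinuities(q.copy())  # the function modifies its input in place
# q_fixed[1] is [[1, 0, 0, 0]] again, since -q represents the same rotation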
def compute_profile_from_frames(frames_str, ax, bt, box, N_bins=100, \
shift=None, verbose=False):
"""
Compute a density profile from a batch of xyz frames.
Input
=====
- frames_str: a regex containing frames in xyz format
- ax: axis along which to compute the profile
- bt: bead type
- box: box size, a (3, 3) matrix
- N_bins: number of bins
Output
======
- r: position vector
- pr: density profile vector
"""
frames = glob.glob(frames_str)
assert len(frames) != 0, "No xyz frames captured."
Nf = len(frames)
N = int(open(frames[0], "r").readline())
if verbose:
print(frames)
L = np.diag(box)
bins = np.linspace(0, L[ax], N_bins + 1)
dr = bins[1] - bins[0]
r = dr / 2.0 + bins[:-1]
Lsurf = L[list(set(range(3)).difference([ax]))] # cross-sectional surface
pr = np.zeros_like(r)
for frame in frames:
bl, X0 = read_xyz(frame)
if shift is not None:
assert len(shift) == 3, "Vector of shifting must be of size 3."
shift = np.array(shift)
X0 = X0 + shift
X0 = X0 % L
if bt == -1:
X = X0
else:
X = X0[bl == bt]
pr += np.histogram(X[:, ax], bins=bins)[0]
pr = pr / (dr * np.prod(Lsurf)) / Nf
return r, pr | 5,358,229 |
def wikipedia_search(query, lang="en", max_result=1):
"""
https://www.mediawiki.org/wiki/API:Opensearch
"""
query = any2unicode(query)
params = {
"action":"opensearch",
"search": query,
"format":"json",
#"formatversion":2,
#"namespace":0,
"suggest":"true",
"limit": 10
}
urlBase = "https://{}.wikipedia.org/w/api.php?".format(lang)
url = urlBase + urllib.urlencode(any2utf8(params))
#logging.info(url)
r = requests.get(url)
jsonData = json.loads(r.content)
#logging.info(jsonData)
items = []
ret = {"query":query, "itemList":items}
for idx, label in enumerate(jsonData[1][0:max_result]):
description = jsonData[2][idx]
url = jsonData[3][idx]
item = {
"name": label,
"description":description,
"url": url,
}
items.append(item)
return ret | 5,358,230 |
def group_result(result, func):
"""
:param result: A list of rows from the database: e.g. [(key, data1), (key, data2)]
:param func: the function to reduce the data e.g. func=median
:return: the data that is reduced. e.g. [(key, (data1+data2)/2)]
"""
data = {}
for key, value in result:
if key in data.keys():
data[key].append(value)
else:
data[key] = [value]
for key in data:
data[key] = func(data[key])
return data.items() | 5,358,231 |
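# A minimal usage sketch, reducing duplicate keys with statistics.median:
from statistics import median
rows = [("a", 1), ("a", 3), ("b", 10)]
print(dict(group_result(rows, median)))  # {'a': 2.0, 'b': 10}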
def prep_image(img, inp_dim):
"""
Prepare image for inputting to the neural network.
Returns a Variable
"""
orig_im = img
dim = orig_im.shape[1], orig_im.shape[0]
img = cv2.resize(orig_im, (inp_dim, inp_dim))
# img_ = img[:,:,::-1].transpose((2,0,1)).copy()
img_ = img.transpose((2,0,1)).copy()
img_ = torch.from_numpy(img_).float().div(255.0).unsqueeze(0)
return img_, orig_im, dim | 5,358,232 |
def add_periodic_callback(doc, component, interval):
""" Add periodic callback to doc in a way that avoids reference cycles
If we instead use ``doc.add_periodic_callback(component.update, 100)`` then
the component stays in memory as a reference cycle because its method is
still around. This way we avoid that and let things clean up a bit more
nicely.
TODO: we still have reference cycles. Docs seem to be referred to by their
add_periodic_callback methods.
"""
ref = weakref.ref(component)
doc.add_periodic_callback(lambda: update(ref), interval)
_attach(doc, component) | 5,358,233 |
def s11_warmup_plot(
freq: np.ndarray,
s11_re: Dict[str, List[np.ndarray]],
s11_im: Dict[str, List[np.ndarray]],
temperatures: np.ndarray,
filename=None,
):
"""Plot all the S11 measurements taken in warmup, to illustrate convergence."""
nfreq = len(freq)
assert len(s11_re) == len(s11_im)
freq0 = freq[0]
freq1 = freq[nfreq // 2]
freq2 = freq[-1]
fig, ax = plt.subplots(5, 1, sharex=True, figsize=(12, 12))
for i, load in enumerate(s11_re.keys()):
re = np.atleast_2d(np.array(s11_re[load]))
im = np.atleast_2d(np.array(s11_im[load]))
ax[0].plot(re[:, 0], ls="-", color=f"C{i}", label=f"{load} (Re)")
ax[0].plot(im[:, 0], ls="--", color=f"C{i}", label=f"{load} (Im)")
ax[0].set_title(f"{freq0:.2f} MHz")
ax[1].plot(re[:, nfreq // 2])
ax[1].plot(im[:, nfreq // 2])
ax[1].set_title(f"{freq1:.2f} MHz")
ax[2].plot(re[:, -1])
ax[2].plot(im[:, -1])
ax[2].set_title(f"{freq2:.2f} MHz")
ax[3].plot(np.sqrt(np.mean(np.square(re[1:] - re[:-1]), axis=1)))
ax[3].plot(np.sqrt(np.mean(np.square(im[1:] - im[:-1]), axis=1)))
ax[3].set_title("RMS of difference between measurements")
ax[4].plot(temperatures)
ax[4].set_title(f"{freq0:.2f} MHz")
ax[4].set_title("Thermistor Temp.")
ax[0].legend()
if filename:
plt.savefig(filename) | 5,358,234 |
def extractLogData(context):
"""
helper function to extract all important data from the web context.
:param context: the web.py context object
:return: a dictionary with all information for the logging.
"""
logData = {}
logData['ip'] = context.ip
logData['account'] = context.env.get('HTTP_RUCIO_ACCOUNT')
logData['appid'] = 'clients' # has to be changed, but atm no appid is sent by the clients
logData['clientref'] = context.env.get('HTTP_RUCIO_CLIENTREF')
logData['uri'] = context.method + ' ' + context.protocol + "://" + context.host + context.homepath + context.fullpath
logData['requestid'] = uuid()
logData['requestHeader'] = context.env
logData['responseHeader'] = ''
logData['httpCode'] = ''
logData['duration'] = ''
return logData | 5,358,235 |
def slithering_snake_32():
"""
Lights up then turns off the LEDs on arms 3 and 2
"""
LOGGER.debug("SLITHERING SNAKE 32")
sleep_speed = 0.10
# Light up Snake 32
PYGLOW.led(13, 100)
sleep(sleep_speed)
PYGLOW.led(14, 100)
sleep(sleep_speed)
PYGLOW.led(15, 100)
sleep(sleep_speed)
PYGLOW.led(16, 100)
sleep(sleep_speed)
PYGLOW.led(17, 100)
sleep(sleep_speed)
PYGLOW.led(6, 100)
sleep(sleep_speed)
PYGLOW.led(12, 100)
sleep(sleep_speed)
PYGLOW.led(11, 100)
sleep(sleep_speed)
PYGLOW.led(10, 100)
sleep(sleep_speed)
PYGLOW.led(9, 100)
sleep(sleep_speed)
PYGLOW.led(8, 100)
sleep(sleep_speed)
PYGLOW.led(7, 100)
sleep(sleep_speed)
# Turn off Snake 32
PYGLOW.led(13, 0)
sleep(sleep_speed)
PYGLOW.led(14, 0)
sleep(sleep_speed)
PYGLOW.led(15, 0)
sleep(sleep_speed)
PYGLOW.led(16, 0)
sleep(sleep_speed)
PYGLOW.led(17, 0)
sleep(sleep_speed)
PYGLOW.led(6, 0)
sleep(sleep_speed)
PYGLOW.led(12, 0)
sleep(sleep_speed)
PYGLOW.led(11, 0)
sleep(sleep_speed)
PYGLOW.led(10, 0)
sleep(sleep_speed)
PYGLOW.led(9, 0)
sleep(sleep_speed)
PYGLOW.led(8, 0)
sleep(sleep_speed)
PYGLOW.led(7, 0) | 5,358,236 |
def balance_test():
"""Balance the site frequency spectrum"""
pos = [1, 10, 20, 30]
stop = [2, 11, 21, 31]
ref = ['A', 'C', 'T', 'G']
alt = ['T', 'G', 'G', 'A']
p = [0.1, 0.9, 0.3, 0.2]
l = vr.VariantList(pos, stop, ref, alt, p)
# The core things here are to make sure the sfs is balanced AND the rank ordering is maintained as far as possible
p_sfs = [0.1, 0.2, 0.3]
f_sfs = [0.5, 0.25, 0.25]
l.balance_probabilities(p_sfs, f_sfs)
assert_array_almost_equal(l.variants['p'], [0.1, 0.3, 0.2, 0.1], decimal=3) | 5,358,237 |
def Backbone(backbone_type='ResNet50', use_pretrain=True):
"""Backbone Model"""
weights = None
if use_pretrain:
weights = 'imagenet'
def backbone(x_in):
if backbone_type == 'ResNet50':
return ResNet50(input_shape=x_in.shape[1:], include_top=False,
weights=weights)(x_in)
elif backbone_type == 'MobileNetV2':
return MobileNetV2(input_shape=x_in.shape[1:], include_top=False,
weights=weights)(x_in)
else:
raise TypeError('backbone_type error!')
return backbone | 5,358,238 |
def start(ctx, vca_client, **kwargs):
"""
Power on the server and wait for the host's network connection to become available.
"""
# combine properties
obj = combine_properties(
ctx, kwargs=kwargs, names=['server'],
properties=[VCLOUD_VAPP_NAME, 'management_network'])
# get external
if obj.get('use_external_resource'):
ctx.logger.info('not starting server since an external server is '
'being used')
else:
vapp_name = get_vapp_name(ctx.instance.runtime_properties)
config = get_vcloud_config()
vdc = vca_client.get_vdc(config['vdc'])
vapp = vca_client.get_vapp(vdc, vapp_name)
_power_on_vm(ctx, vca_client, vapp, vapp_name)
if not _get_state(ctx=ctx, vca_client=vca_client):
return ctx.operation.retry(
message="Waiting for VM's configuration to complete",
retry_after=5) | 5,358,239 |
def softplus(z):
"""Numerically stable version of log(1 + exp(z))."""
# see stabilizing softplus: http://sachinashanbhag.blogspot.com/2014/05/numerically-approximation-of-log-1-expy.html # noqa
mu = z.copy()
mu[z > 35] = z[z > 35]
mu[z < -10] = np.exp(z[z < -10])
mu[(z >= -10) & (z <= 35)] = np.log1p(np.exp(z[(z >= -10) & (z <= 35)]))
return mu | 5,358,240 |
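# A quick sketch contrasting the stable version with the naive formula, assuming numpy is imported as np:
z = np.array([-800.0, 0.0, 800.0])
print(softplus(z))  # [0.0, 0.6931..., 800.0] with no overflow
# the naive np.log(1 + np.exp(z)) would overflow to inf for z = 800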
def merge_indexes(
indexes: Mapping[Hashable, Union[Hashable, Sequence[Hashable]]],
variables: Mapping[Hashable, Variable],
coord_names: Set[Hashable],
append: bool = False,
) -> "Tuple[OrderedDict[Any, Variable], Set[Hashable]]":
"""Merge variables into multi-indexes.
Not public API. Used in Dataset and DataArray set_index
methods.
"""
vars_to_replace = {} # Dict[Any, Variable]
vars_to_remove = [] # type: list
error_msg = "{} is not the name of an existing variable."
for dim, var_names in indexes.items():
if isinstance(var_names, str) or not isinstance(var_names, Sequence):
var_names = [var_names]
names, codes, levels = [], [], [] # type: (list, list, list)
current_index_variable = variables.get(dim)
for n in var_names:
try:
var = variables[n]
except KeyError:
raise ValueError(error_msg.format(n))
if (
current_index_variable is not None
and var.dims != current_index_variable.dims
):
raise ValueError(
"dimension mismatch between %r %s and %r %s"
% (dim, current_index_variable.dims, n, var.dims)
)
if current_index_variable is not None and append:
current_index = current_index_variable.to_index()
if isinstance(current_index, pd.MultiIndex):
try:
current_codes = current_index.codes
except AttributeError:
# for pandas<0.24
current_codes = current_index.labels
names.extend(current_index.names)
codes.extend(current_codes)
levels.extend(current_index.levels)
else:
names.append("%s_level_0" % dim)
cat = pd.Categorical(current_index.values, ordered=True)
codes.append(cat.codes)
levels.append(cat.categories)
if not len(names) and len(var_names) == 1:
idx = pd.Index(variables[var_names[0]].values)
else:
for n in var_names:
try:
var = variables[n]
except KeyError:
raise ValueError(error_msg.format(n))
names.append(n)
cat = pd.Categorical(var.values, ordered=True)
codes.append(cat.codes)
levels.append(cat.categories)
idx = pd.MultiIndex(levels, codes, names=names)
vars_to_replace[dim] = IndexVariable(dim, idx)
vars_to_remove.extend(var_names)
new_variables = OrderedDict(
[(k, v) for k, v in variables.items() if k not in vars_to_remove]
)
new_variables.update(vars_to_replace)
new_coord_names = coord_names | set(vars_to_replace)
new_coord_names -= set(vars_to_remove)
return new_variables, new_coord_names | 5,358,241 |
def least_squares(m, n):
""" Create a least squares problem with m datapoints and n dimensions """
A = np.random.randn(m, n)
_x = np.random.randn(n)
b = A.dot(_x)
x = cp.Variable(n)
return (x, cp.Problem(cp.Minimize(cp.sum_squares(A * x - b) + cp.norm(x, 2)))) | 5,358,242 |
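# A usage sketch, assuming an older cvxpy release where A * x denotes matrix multiplication:
x, prob = least_squares(m=20, n=5)
prob.solve()
print(prob.status, x.value)  # 'optimal' and the fitted coefficients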
def main():
"""
Implements the first step of the experiment pipeline. Creates feature \
sets for each one of the folds.
Returns:
None
"""
# Construct argument parser and parse arguments
ap = argparse.ArgumentParser()
ap.add_argument('-fpath', required=True)
args = vars(ap.parse_args())
t1 = time.time()
# Create folder to store experiment
date_time = datetime.datetime.now().strftime("%d-%m-%Y_%H-%M-%S")
exp_path = os.path.join(Config.base_dir, 'experiments', f'exp_{date_time}')
os.makedirs(exp_path)
# Create folder to store feature extraction results
results_path = os.path.join(exp_path, 'features_extraction_results')
os.makedirs(results_path)
copyfile('./geocoding/config.py', os.path.join(exp_path, 'config.py'))
# Load dataset into dataframe
df = feat_ut.load_points_df(os.path.join(Config.base_dir, args['fpath']))
# Shuffle dataframe
df = df.sample(frac=1).reset_index(drop=True)
# Encode labels
df, encoder = feat_ut.encode_labels(df)
df.to_csv(os.path.join(results_path, 'train_df.csv'), index=False)
pickle.dump(encoder, open(os.path.join(results_path, 'encoder.pkl'), 'wb'))
feat_ut.get_required_external_files(df, results_path)
addresses, targets = list(df['address']), list(df['target'])
skf = StratifiedKFold(n_splits=Config.n_folds)
fold = 1
for train_idxs, test_idxs in skf.split(addresses, targets):
fold_path = os.path.join(results_path, 'fold_' + str(fold))
os.makedirs(fold_path)
os.makedirs(os.path.join(fold_path, 'features'))
os.makedirs(os.path.join(fold_path, 'pickled_objects'))
X_train = feat_ut.create_train_features(df.iloc[train_idxs].reset_index(), results_path, fold_path)
X_test = feat_ut.create_test_features(
df.iloc[test_idxs].reset_index(), results_path, os.path.join(fold_path, 'pickled_objects'), fold_path
)
y_train, y_test = df['target'][train_idxs], df['target'][test_idxs]
np.save(os.path.join(fold_path, f'X_train.npy'), X_train)
np.save(os.path.join(fold_path, f'X_test.npy'), X_test)
np.save(os.path.join(fold_path, f'y_train.npy'), y_train)
np.save(os.path.join(fold_path, f'y_test.npy'), y_test)
fold += 1
wrtrs.write_feats_space(os.path.join(results_path, 'included_features.csv'))
print(f'Feature extraction done in {time.time() - t1:.3f} sec.')
return | 5,358,243 |
def getRef(refFile):
"""Returns a genome reference."""
refDict={}
hdList=[]
ref=''
num=0
try:
f=open(refFile)
except IOError:
errlog.error('Cannot find reference file ' +refFile+'. Please check pathname.')
sys.exit('Cannot find reference file '+refFile+'. Please check pathname.')
i=f.readline()
head=i[1:51].rstrip()
i=f.readline().rstrip()
while i:
if i[0]!='>':
ref+=i.rstrip()
i=f.readline()
else:
if head in hdList:
num+=1
head=head+str(num)
ref=ref.upper()
for l in 'RYMKSWHBVD':
ref=ref.replace(l,'N')
refDict[head]=ref
hdList.append(head)
head=i[1:51].rstrip()
i=f.readline()
ref=''
ref=ref.upper()
for l in 'RYMKSWHBVD':
ref=ref.replace(l,'N')
refDict[head]=ref
errlog.debug('Reference file successfully parsed.')
return refDict | 5,358,244 |
def cleanup_path(paths, removedir=True):
""" remove unreable files and directories from the input path collection,
skipped include two type of elements: unwanted directories if removedir is True
or unaccessible files/directories
"""
checked = []
skipped = []
for ele in paths:
ele = os.path.abspath(ele)
if os.path.exists(ele) and os.access(ele, os.R_OK):
if os.path.isdir(ele) and removedir:
skipped.append(ele)
else:
checked.append(ele)
else:
skipped.append(ele)
return checked, skipped | 5,358,245 |
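# A small usage sketch with hypothetical paths:
paths = ["/etc/hosts", "/etc", "/no/such/file"]
checked, skipped = cleanup_path(paths)
# checked  -> readable files, e.g. ['/etc/hosts']
# skipped  -> directories (because removedir=True) plus anything unreadable or missing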
def expand_amn(a, kpoints, idx, Rvectors, nproj_atom=None):
"""
Expand the projections matrix by translations of the orbitals
Parameters
----------
a : ndarray, shape (nkpts, nbnds, nproj)
kpoints : ndarray, shape (nkpts, 3)
idx : ndarray
indices of translated orbitals
Rvectors: ndarray
translation vectors for the orbitals
nproj_atom: ndarray, optional
number of projections on each atom, with idx and Rvectors now describing
atoms instead of orbitals
"""
assert len(Rvectors) == len(idx)
if nproj_atom is not None:
assert len(nproj_atom) == len(idx)
idx_new = []
Rvectors_new = []
for iatom, i in enumerate(idx):
offset = np.sum(nproj_atom[:i])
for j in range(nproj_atom[i]):
idx_new.append(offset+j)
Rvectors_new.append(Rvectors[iatom])
idx = idx_new
Rvectors = Rvectors_new
nkpts, nbnds, nproj = a.shape
a1 = np.zeros((nkpts, nbnds, len(idx)), dtype=complex)
k_dot_R = np.einsum('ki,ri->kr', kpoints, Rvectors)
exp_factors = np.exp(-1j * 2*np.pi * k_dot_R)
a1 = a[:, :, idx] * exp_factors[:, np.newaxis, :]
return a1 | 5,358,246 |
def combine_basis_vectors(weights, vectors, default_value=None, node_num=None):
"""
Combine basis vectors using ``weights`` as the Manning's n value for each
basis vector. If a ``default_value`` is set then all nodes with out data
are set to the ``default_value``.
:type weights: :class:`numpy.ndarray`
:param weights: array of size (num_of_basis_vec, 1)
:type vectors: list of dicts OR :class:`numpy.ndarray` of size (node_num,
num_of_basis_vec)
:param vectors: basis vectors
:returns: an array of size (node_num, 1) containing the manningsn value at
all nodes in numerical order or a dictionary
"""
if len(weights) != len(vectors):
raise LenError('weights, vectors', 'dimensions do not match')
if isinstance(vectors[0], np.ndarray):
return combine_bv_array(weights, vectors)
elif default_value and node_num:
return dict_to_array(add_dict(vectors, weights)[0], default_value,
node_num)
else:
return add_dict(vectors, weights)[0] | 5,358,247 |
def demo1():
"""
Suppose we have three washing machines, and three batches of laundry that need to be washed, one batch per machine.
"""
def washing1():
sleep(3) # the first washer takes 3 seconds to finish a load (just as an illustration)
print('washer1 finished') # when it is done, the washer beeps to tell us it has finished
def washing2():
sleep(2)
print('washer2 finished')
def washing3():
sleep(5)
print('washer3 finished')
washing1()
washing2()
washing3()
"""
This is easy to follow: running demo1() takes 10 seconds to get all the laundry washed.
Indeed, most of that time is spent waiting for the washers one after another.
""" | 5,358,248 |
def test_managed_static_method_handler():
"""Test managed static method handlers."""
ob = EventTest()
EventTest.s_value = 0
ob.PublicEvent += ob.StaticHandler
assert EventTest.s_value == 0
ob.OnPublicEvent(EventArgsTest(10))
assert EventTest.s_value == 10
ob.PublicEvent -= ob.StaticHandler
assert EventTest.s_value == 10
ob.OnPublicEvent(EventArgsTest(20))
assert EventTest.s_value == 10 | 5,358,249 |
def _prepare_data_for_node_classification(
graph: nx.Graph, seed_node: int
) -> List[Tuple[Any, Any]]:
"""
Position seed node as the first node in the data.
TensorFlow GNN has a convention whereby the node to be classified, the "seed node",
is positioned first in the component. This is for use with layers such as
`tfgnn.keras.layers.ReadoutFirstNode` which extracts the first node from a component.
"""
seed_data = graph.nodes(data=True)[seed_node]
data = [(seed_data["features"], seed_data["label"])]
data += [
(data["features"], data["label"])
for node, data in graph.nodes(data=True)
if node != seed_node
]
return data | 5,358,250 |
def init():
""" Init the application and add routes """
logging.basicConfig(format='%(asctime)s: [%(levelname)s]: %(message)s',
level=logging.DEBUG)
global theconfig
theconfig = get_config()
global rc
rc = init_redis(theconfig)
app = default_app()
return app | 5,358,251 |
def norm_potential(latitude, longitude, h, refell, lmax):
"""
Calculates the normal potential at a given latitude and height
Arguments
---------
latitude: latitude in degrees
longitude: longitude in degrees
h: height above reference ellipsoid in meters
refell: reference ellipsoid name
lmax: maximum spherical harmonic degree
Returns
-------
U: normal potential at height h
dU_dr: derivative of normal potential with respect to radius
dU_dtheta: derivative of normal potential with respect to theta
"""
#-- get ellipsoid parameters for refell
ellip = ref_ellipsoid(refell)
a = np.float128(ellip['a'])
ecc1 = np.float128(ellip['ecc1'])
GM = np.float128(ellip['GM'])
J2 = np.float128(ellip['J2'])
#-- convert from geodetic latitude to geocentric latitude
latitude_geodetic_rad = (np.pi*latitude/180.0).astype(np.float128)
longitude_rad = (np.pi*longitude/180.0).astype(np.float128)
N = a/np.sqrt(1.0 - ecc1**2.0*np.sin(latitude_geodetic_rad)**2.0)
X = (N + h) * np.cos(latitude_geodetic_rad) * np.cos(longitude_rad)
Y = (N + h) * np.cos(latitude_geodetic_rad) * np.sin(longitude_rad)
Z = (N * (1.0 - ecc1**2.0) + h) * np.sin(latitude_geodetic_rad)
rr = np.sqrt(X**2.0 + Y**2.0 + Z**2.0)
latitude_geocentric = np.arctan(Z / np.sqrt(X**2.0 + Y**2.0))
#-- calculate even zonal harmonics
n = np.arange(2, 12+2, 2, dtype=np.float128)
J2n = cosine_even_zonals(J2, ecc1, n/2.0)
#-- normalized cosine harmonics: Cn = -Jn/np.sqrt(2.0*n+1.0)
#-- J2 = 0.108262982131e-2
C_2 = -J2n[0]/np.sqrt(5.0)
#-- J4 = -0.237091120053e-5
C_4 = -J2n[1]/np.sqrt(9.0)
#-- J6 = 0.608346498882e-8
C_6 = -J2n[2]/np.sqrt(13.0)
#-- J8 = -0.142681087920e-10
C_8 = -J2n[3]/np.sqrt(17.0)
#-- J10 = 0.121439275882e-13
C_10 = -J2n[4]/np.sqrt(21.0)
#-- J12 = 0.205395070709e-15
C_12 = -J2n[5]/np.sqrt(25.0)
#-- calculate legendre polynomials at latitude and their first derivative
Pl,dPl = legendre_polynomials(lmax, np.sin(latitude_geocentric),
ASTYPE=np.float128)
#-- normal potentials and derivatives
U = (GM/rr) * (1.0 + (a/rr)**2.*C_2*Pl[2,:] + (a/rr)**4.*C_4*Pl[4,:] + \
(a/rr)**6.*C_6*Pl[6,:] + (a/rr)**8.*C_8*Pl[8,:] + \
(a/rr)**10.*C_10*Pl[10,:] + (a/rr)**12.*C_12*Pl[12,:])
dU_dr = GM * (-1.0 / rr**2.0 - 3.0*(a**2.0/rr**4.0)*C_2*Pl[2,:] - \
5.0*(a**4.0/rr**6.0)*C_4*Pl[4,:] -7.0*(a**6.0/rr**8.0)*C_6*Pl[6,:] - \
9.0*(a**8.0/rr**10.)*C_8*Pl[8,:] -11.*(a**10./rr**12.)*C_10*Pl[10,:] - \
13.*(a**12./rr**14.)*C_12*Pl[12,:])
dU_dtheta = (GM/rr) * (1.0 + (a/rr)**2.0*C_2*dPl[2,:] + \
(a/rr)**4.0*C_4*dPl[4,:] + (a/rr)**6.0*C_6*dPl[6,:] + \
(a/rr)**8.0*C_8*dPl[8,:] + (a/rr)**10.0*C_10*dPl[10,:] + \
(a/rr)**12.0*C_12*dPl[12,:])
#-- return the potentials
return (U, dU_dr, dU_dtheta) | 5,358,252 |
def _findPlugInfo(rootDir):
""" Find every pluginInfo.json files below the root directory.
:param str rootDir: the search start from here
:return: a list of files path
:rtype: [str]
"""
files = []
for root, dirnames, filenames in os.walk(rootDir):
files.extend(glob.glob(root + '/plugInfo.json'))
return files | 5,358,253 |
def get_pp_gene_chains(chain_class_file, v=False):
"""Get gene: pp chains dict."""
gene_to_pp_chains = defaultdict(list) # init the dict
f = open(chain_class_file, "r") # open file with classifications
f.__next__() # skip header
for line in f:
line_data = line.rstrip().split("\t")
# line contains the following fields:
# gene orthologs paralogs trans p_pseudogenes
trans = line_data[0]
# proc_pseudogene chains are in the 4th field
pp_genes_field = line_data[4]
if pp_genes_field == "0":
# if 0 -> no ppgene chains -> skip
continue
# parse comma-separated string and save to dict
pp_genes = [int(x) for x in pp_genes_field.split(",") if x != ""]
gene_to_pp_chains[trans] = pp_genes
f.close()
if v:
verbose(f"Extracted {len(gene_to_pp_chains)} genes with proc pseudogenes")
return gene_to_pp_chains | 5,358,254 |
def test_load_accessor(test_df):
"""Function_docstring."""
check.is_not_none(getattr(test_df, "mp_pivot"))
check.is_not_none(getattr(test_df.mp_pivot, "run"))
check.is_not_none(getattr(test_df.mp_pivot, "display"))
check.is_not_none(getattr(test_df.mp_pivot, "tee"))
check.is_not_none(getattr(test_df.mp_pivot, "tee_exec"))
check.is_not_none(getattr(test_df.mp_pivot, "filter"))
check.is_not_none(getattr(test_df.mp_pivot, "filter_cols"))
check.is_not_none(getattr(test_df.mp_pivot, "sort")) | 5,358,255 |
def rec_search(wildcard):
"""
Traverse all subfolders and match files against the wildcard.
Returns:
A list of all matching files absolute paths.
"""
matched = []
for dirpath, _, files in os.walk(os.getcwd()):
fn_files = [os.path.join(dirpath, fn_file) for fn_file
in fnmatch.filter(files, wildcard)]
matched.extend(fn_files)
return matched | 5,358,256 |
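# A minimal usage sketch; matching is recursive and relative to the current working directory:
for path in rec_search("*.csv"):
    print(path)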
def skip_object(change_mode, change):
"""
If `Mode` is `change`: we do not care about the `Conditions`
Else:
If `cfn` objects:
- We can omit the `Conditions`, objects will be involved when `Mode` is `provision` or `destroy`. (Original design. Backward compatibility.)
- In case `Conditions` is declared, objects will be involved when `Mode` matches with `Conditions`.
If `aws` objects: we must declare `Conditions` and match with `Mode`, or else the engine will skip that Object/Block.
OR
If `Mode` is `change`: we do not care about the `Conditions`
Else:
If we omit the `Conditions`:
- Only `cfn` objects are involved when `Mode` is `provision` or `destroy`. (Original design. Backward compatibility.)
- Others will be skipped.
Else:
Objects will be involved when `Mode` matches with `Conditions`.
Return:
- `True` means skipped
- `False` means involved
"""
if (change_mode!=CHANGE_MODE_CHANGE):
if ('Conditions' not in change):
if (change['Object']==STR_CFN) and (change_mode in [CHANGE_MODE_PROVISION,CHANGE_MODE_DESTROY]):
return False
return True
elif (change_mode not in change['Conditions']):
return True
return False | 5,358,257 |
def rootfinder(*args):
"""
rootfinder(str name, str solver, dict:SX rfp, dict opts) -> Function
Create a solver for rootfinding problems Takes a function where one of the
rootfinder(str name, str solver, dict:MX rfp, dict opts) -> Function
rootfinder(str name, str solver, Function f, dict opts) -> Function
> rootfinder(str name, str solver, dict:SX rfp, dict opts)
------------------------------------------------------------------------
Create a solver for rootfinding problems Takes a function where one of the
inputs is unknown and one of the outputs is a residual function that is
always zero, defines a new function where the unknown input has been
replaced by a guess for the unknown and the residual output has been
replaced by the calculated value for the input.
For a function [y0, y1, ...,yi, .., yn] = F(x0, x1, ..., xj, ..., xm), where
xj is unknown and yi=0, defines a new function [y0, y1, ...,xj, .., yn] =
G(x0, x1, ..., xj_guess, ..., xm),
xj and yi must have the same dimension and d(yi)/d(xj) must be invertible.
By default, the first input is unknown and the first output is the residual.
General information
===================
>List of available options
+------------------+-----------------+------------------+------------------+
| Id | Type | Description | Used in |
+==================+=================+==================+==================+
| common_options | OT_DICT | Options for | casadi::OracleFu |
| | | auto-generated | nction |
| | | functions | |
+------------------+-----------------+------------------+------------------+
| constraints | OT_INTVECTOR | Constrain the | casadi::Rootfind |
| | | unknowns. 0 | er |
| | | (default): no | |
| | | constraint on | |
| | | ui, 1: ui >= | |
| | | 0.0, -1: ui <= | |
| | | 0.0, 2: ui > | |
| | | 0.0, -2: ui < | |
| | | 0.0. | |
+------------------+-----------------+------------------+------------------+
| error_on_fail | OT_BOOL | When the | casadi::Rootfind |
| | | numerical | er |
| | | process returns | |
| | | unsuccessfully, | |
| | | raise an error | |
| | | (default false). | |
+------------------+-----------------+------------------+------------------+
| implicit_input | OT_INT | Index of the | casadi::Rootfind |
| | | input that | er |
| | | corresponds to | |
| | | the actual root- | |
| | | finding | |
+------------------+-----------------+------------------+------------------+
| implicit_output | OT_INT | Index of the | casadi::Rootfind |
| | | output that | er |
| | | corresponds to | |
| | | the actual root- | |
| | | finding | |
+------------------+-----------------+------------------+------------------+
| jacobian_functio | OT_FUNCTION | Function object | casadi::Rootfind |
| n | | for calculating | er |
| | | the Jacobian | |
| | | (autogenerated | |
| | | by default) | |
+------------------+-----------------+------------------+------------------+
| linear_solver | OT_STRING | User-defined | casadi::Rootfind |
| | | linear solver | er |
| | | class. Needed | |
| | | for | |
| | | sensitivities. | |
+------------------+-----------------+------------------+------------------+
| linear_solver_op | OT_DICT | Options to be | casadi::Rootfind |
| tions | | passed to the | er |
| | | linear solver. | |
+------------------+-----------------+------------------+------------------+
| monitor | OT_STRINGVECTOR | Set of user | casadi::OracleFu |
| | | problem | nction |
| | | functions to be | |
| | | monitored | |
+------------------+-----------------+------------------+------------------+
| specific_options | OT_DICT | Options for | casadi::OracleFu |
| | | specific auto- | nction |
| | | generated | |
| | | functions, | |
| | | overwriting the | |
| | | defaults from | |
| | | common_options. | |
| | | Nested | |
| | | dictionary. | |
+------------------+-----------------+------------------+------------------+
>Input scheme: casadi::RootfinderInput (ROOTFINDER_NUM_IN = 2)
+---------------+-------+---------------------------------+
| Full name | Short | Description |
+===============+=======+=================================+
| ROOTFINDER_X0 | x0 | Initial guess for the solution. |
+---------------+-------+---------------------------------+
| ROOTFINDER_P | p | Parameters. |
+---------------+-------+---------------------------------+
>Output scheme: casadi::RootfinderOutput (ROOTFINDER_NUM_OUT = 1)
+--------------+-------+--------------------------------------+
| Full name | Short | Description |
+==============+=======+======================================+
| ROOTFINDER_X | x | Solution to the system of equations. |
+--------------+-------+--------------------------------------+
List of plugins
===============
- kinsol
- fast_newton
- nlpsol
- newton
Note: some of the plugins in this list might not be available on your
system. Also, there might be extra plugins available to you that are not
listed here. You can obtain their documentation with
Rootfinder.doc("myextraplugin")
--------------------------------------------------------------------------------
kinsol
------
KINSOL interface from the Sundials suite
>List of available options
+---------------------------+-----------------+----------------------------+
| Id | Type | Description |
+===========================+=================+============================+
| abstol | OT_DOUBLE | Stopping criterion |
| | | tolerance |
+---------------------------+-----------------+----------------------------+
| disable_internal_warnings | OT_BOOL | Disable KINSOL internal |
| | | warning messages |
+---------------------------+-----------------+----------------------------+
| exact_jacobian | OT_BOOL | Use exact Jacobian |
| | | information |
+---------------------------+-----------------+----------------------------+
| f_scale | OT_DOUBLEVECTOR | Equation scaling factors |
+---------------------------+-----------------+----------------------------+
| iterative_solver | OT_STRING | gmres|bcgstab|tfqmr |
+---------------------------+-----------------+----------------------------+
| linear_solver_type | OT_STRING | dense|banded|iterative|use |
| | | r_defined |
+---------------------------+-----------------+----------------------------+
| lower_bandwidth | OT_INT | Lower bandwidth for banded |
| | | linear solvers |
+---------------------------+-----------------+----------------------------+
| max_iter | OT_INT | Maximum number of Newton |
| | | iterations. Putting 0 sets |
| | | the default value of |
| | | KinSol. |
+---------------------------+-----------------+----------------------------+
| max_krylov | OT_INT | Maximum Krylov space |
| | | dimension |
+---------------------------+-----------------+----------------------------+
| pretype | OT_STRING | Type of preconditioner |
+---------------------------+-----------------+----------------------------+
| strategy | OT_STRING | Globalization strategy |
+---------------------------+-----------------+----------------------------+
| u_scale | OT_DOUBLEVECTOR | Variable scaling factors |
+---------------------------+-----------------+----------------------------+
| upper_bandwidth | OT_INT | Upper bandwidth for banded |
| | | linear solvers |
+---------------------------+-----------------+----------------------------+
| use_preconditioner | OT_BOOL | Precondition an iterative |
| | | solver |
+---------------------------+-----------------+----------------------------+
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
fast_newton
-----------
Implements simple newton iterations to solve an implicit function.
>List of available options
+------------+-----------+-------------------------------------------------+
| Id | Type | Description |
+============+===========+=================================================+
| abstol | OT_DOUBLE | Stopping criterion tolerance on ||g||__inf) |
+------------+-----------+-------------------------------------------------+
| abstolStep | OT_DOUBLE | Stopping criterion tolerance on step size |
+------------+-----------+-------------------------------------------------+
| max_iter | OT_INT | Maximum number of Newton iterations to perform |
| | | before returning. |
+------------+-----------+-------------------------------------------------+
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
nlpsol
------
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
newton
------
Implements simple newton iterations to solve an implicit function.
>List of available options
+-----------------+-----------+--------------------------------------------+
| Id | Type | Description |
+=================+===========+============================================+
| abstol | OT_DOUBLE | Stopping criterion tolerance on max(|F|) |
+-----------------+-----------+--------------------------------------------+
| abstolStep | OT_DOUBLE | Stopping criterion tolerance on step size |
+-----------------+-----------+--------------------------------------------+
| max_iter | OT_INT | Maximum number of Newton iterations to |
| | | perform before returning. |
+-----------------+-----------+--------------------------------------------+
| print_iteration | OT_BOOL | Print information about each iteration |
+-----------------+-----------+--------------------------------------------+
--------------------------------------------------------------------------------
Joel Andersson
> rootfinder(str name, str solver, dict:MX rfp, dict opts)
> rootfinder(str name, str solver, Function f, dict opts)
------------------------------------------------------------------------
"""
return _casadi.rootfinder(*args) | 5,358,258 |
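# A hedged usage sketch (not part of the original snippet): solving x**2 - p = 0
# for x with rootfinder. Assumes the casadi package is available; the variable
# names and the choice of the 'newton' plugin are illustrative only.
import casadi as ca

x = ca.SX.sym('x')                          # unknown: first input, root of the residual
p = ca.SX.sym('p')                          # parameter
g = ca.Function('g', [x, p], [x**2 - p])    # residual F(x, p)
solver = ca.rootfinder('solver', 'newton', g)
print(solver(1.0, 4.0))                     # initial guess 1.0, p = 4 -> approximately 2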
def f18(x, rotation=None, shift=None, shuffle=None):
"""
Hybrid Function 8 (N=5)
Args:
x (array): Input vector of dimension 2, 10, 20, 30, 50 or 100.
rotation (matrix): Optional rotation matrix. If None (default), the
official matrix from the benchmark suite will be used.
shift (array): Optional shift vector. If None (default), the official
vector from the benchmark suite will be used.
shuffle (array): Optional shuffle vector. If None (default), the
official permutation vector from the benchmark suite will be used.
"""
nx = len(x)
if rotation is None:
rotation = transforms.rotations[nx][17]
if shift is None:
shift = transforms.shifts[17][:nx]
if shuffle is None:
shuffle = transforms.shuffles[nx][7]
x_transformed = np.matmul(rotation, x - shift)
x_parts = _shuffle_and_partition(x_transformed, shuffle, [0.2, 0.2, 0.2, 0.2, 0.2])
y = basic.high_conditioned_elliptic(x_parts[0])
y += basic.ackley(x_parts[1])
y += basic.rastrigin(x_parts[2])
y += basic.h_g_bat(x_parts[3])
y += basic.discus(x_parts[4])
return y + 1800.0 | 5,358,259 |
def shake_256_len(data: bytes, length: int) -> hashes.MessageDigest:
"""
Convenience function to hash a message.
"""
return HashlibHash.hash(hashes.shake_256_len(length), data) | 5,358,260 |
def rgb_to_cmyk(color_values: tp.List[float]) -> tp.List[float]:
"""Converts list of RGB values to CMYK.
:param color_values: (list) 3-member RGB color value list
:return: (list) 4-member CMYK color value list
"""
if color_values == [0.0, 0.0, 0.0]:
return [0.0, 0.0, 0.0, 1.0]
r, g, b = color_values
c = 1.0 - r
m = 1.0 - g
y = 1.0 - b
min_cmy = min(c, m, y)
c = (c - min_cmy) / (1 - min_cmy)
m = (m - min_cmy) / (1 - min_cmy)
y = (y - min_cmy) / (1 - min_cmy)
k = min_cmy
return [c, m, y, k] | 5,358,261 |
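# A small hedged usage sketch for rgb_to_cmyk; the colour values are illustrative.
print(rgb_to_cmyk([1.0, 0.0, 0.0]))   # pure red -> [0.0, 1.0, 1.0, 0.0]
print(rgb_to_cmyk([0.0, 0.0, 0.0]))   # black shortcut -> [0.0, 0.0, 0.0, 1.0]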
def check_family(matrix):
"""Check the validity of a family matrix for the vine copula.
Parameters:
----------
matrix : array
The pair-copula families.
Returns
-------
matrix : array
The corrected matrix.
"""
# TODO: check if the families are in the list of copulas
matrix = check_matrix(matrix)
matrix = check_triangular(matrix, k=1)
dim = matrix.shape[0]
for i in range(dim):
for j in range(i):
if isinstance(matrix[i, j], str):
matrix[i, j] = int(
R_VINECOPULA.BiCopName(matrix[i, j], False)[0])
elif isinstance(matrix[i, j], np.integer):
pass
matrix = matrix.astype(int)
return matrix | 5,358,262 |
def parse_pgt_programmearray(url):
"""
Parse filter.js programmearray for pgt information
:param url: base url for timetabling system
:return: pgt programme name to id dict
"""
# get filter.js file
source = get_filterjs(url)
name_to_id = {}
# e.g. programmearray[340] [0] = "MSc Finance and Investment (Business Analytics)/F/02 - MSc Finance and
# Investment (Business Analytics)";
matches = re.findall(r'programmearray\[(\d{1,3})\] \[0\] = "(.*)";\s+'
r'programmearray\[\1\] \[1\] = ".*";\s+'
r'programmearray\[\1\] \[2\] = "(PGT/.*)"', source)
for match in matches:
# match e.g. ('0', 'MA Applied Linguistics/F/01 - EG04 Applied Linguistics', 'PGT/C1014/C7PAPLST/F/01')
name_to_id[match[1]] = match[2]
return name_to_id | 5,358,263 |
def distance_calc(x1, y1, x2, y2):
"""Calculates distance between two points"""
return ((x2 - x1) ** 2 + (y2 - y1) ** 2) ** 0.5 | 5,358,264 |
def LoadNasaData(lat, lon, show= False, selectparms= None):
""" Execute a request from NASA API for 10 years of atmospheric data
required to prepare daily statistical data used in Solar Insolation
calculations """
cmd = formulateRequest(lat, lon, selectparms)
jdi = requests.get(cmd[0]).json()
cols = cmd[1]
df = pd.json_normalize(jdi['features'][0]['properties']['parameter'][cols[0]]).T
df.index = pd.to_datetime(df.index)
df.rename(columns={0: cols[0]}, inplace= True)
for c in cols[1:]:
dfc = pd.json_normalize(jdi['features'][0]['properties']['parameter'][c]).T
dfc.index = pd.to_datetime(dfc.index)
dfc.rename(columns={0: c}, inplace= True)
df = df.join(dfc)
df['DayofYear'] = df.index.dayofyear
df = df[df['DayofYear'] != 366] #drop a day for leap years
atmo_dict = dict()
dg = df.groupby('DayofYear')
for col in cols:
dp = pd.DataFrame(dg[col].min())
dp.rename(columns={col: 'Min'}, inplace= True)
atmo_dict[col] = dp
dp = pd.DataFrame(dg[col].max())
dp.rename(columns={col: 'Max'}, inplace= True)
atmo_dict[col] = atmo_dict[col].join(dp)
dp = pd.DataFrame(dg[col].mean())
dp.rename(columns={col: 'S-Mean'}, inplace= True)
atmo_dict[col] = atmo_dict[col].join(dp)
dp = pd.DataFrame(dg[col].std())
dp.rename(columns={col: 'STDV'}, inplace= True)
atmo_dict[col] = atmo_dict[col].join(dp)
return atmo_dict | 5,358,265 |
def mmyy_date_slicer(date_str):
"""Return start and end point for given date in mm-yy format.
:param date_str: date in mmyy format, i.e. "1222" or "0108".
:return: start and end date string for a given mmyy formatted date string
"""
# Initialize output
start = ""
end = ""
if mmyy_valid_date(date_str):
today = date.today()
# Check if date is in the future
dt_check = datetime.strptime(date_str, "%m%y")
if dt_check.date() <= today:
# Determine the start date string
datetime_object = datetime.strptime(date_str[0:2], "%m")
mo = datetime_object.strftime("%b")
yyyy = f"20{date_str[2:]}"
start = f'1 {mo}, {yyyy}'
# Determine the end date string.
mm = int(date_str[0:2])
if mm == today.month:
pass
elif mm == 12:
end = f"1 Jan, {int(yyyy)+1}"
else:
mm1 = int(date_str[0:2]) + 1
datetime_object = datetime.strptime(f"{mm1}", "%m")
mo1 = datetime_object.strftime("%b")
end = f'1 {mo1}, {yyyy}'
else:
# print(f'date in the future! > {date_str}')
return "", ""
else:
# print(f'date malformed! > {date_str}')
return "", ""
return start, end | 5,358,266 |
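# Hedged usage sketch: assumes the mmyy_valid_date() helper referenced above exists
# and that January 2021 lies in the past at run time (when mm equals the current
# month the function intentionally leaves `end` empty).
start, end = mmyy_date_slicer("0121")
print(start)   # "1 Jan, 2021"
print(end)     # "1 Feb, 2021" (empty if run during January)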
def constant_arg(name: str):
"""
Promises that the given arg will not be modified
Only affects mutable data types
Removes the need to copy the data during inlining
"""
def annotation(target: typing.Callable):
optimiser = _schedule_optimisation(target)
optimiser.constant_args.add(name)
return target
return annotation | 5,358,267 |
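# Hedged usage sketch: _schedule_optimisation is defined elsewhere in this module,
# so this only illustrates how the decorator is meant to be applied; the decorated
# function below is illustrative.
@constant_arg("weights")
def apply_weights(values, weights):
    # `weights` is promised not to be mutated, so the optimiser may skip copying it
    return [v * w for v, w in zip(values, weights)]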
def place_connection(body): # noqa: E501
Place a connection request from the SDX-Controller
# noqa: E501
:param body: order placed for creating a connection
:type body: dict | bytes
:rtype: Connection
"""
if connexion.request.is_json:
body = Connection.from_dict(connexion.request.get_json()) # noqa: E501
return 'do some magic!' | 5,358,268 |
def tag_list(request):
"""Display all tags."""
return render(request, 'admin/tags_list.html')
def scan_url(urls):
"""
Scan the url using the API
Args:
urls:
the list of urls
Returns:
A tuple of a bool indicating if all the urls are safe and a list indicating
the safeness of individual urls
"""
is_safe = True
safe_list = [True] * len(urls)
safe_browsing_url = "https://safebrowsing.googleapis.com/v4/threatMatches:find"
params = {"key": GOOGLE_TOKEN}
json = {
"threatInfo": {
"threatTypes": [
"THREAT_TYPE_UNSPECIFIED",
"MALWARE",
"SOCIAL_ENGINEERING",
"UNWANTED_SOFTWARE",
"POTENTIALLY_HARMFUL_APPLICATION",
],
"platformTypes": ["ANY_PLATFORM"],
"threatEntryTypes": ["URL"],
"threatEntries": [{"url": url} for url in urls],
}
}
r = requests.post(safe_browsing_url, params=params, json=json)
if r.status_code == 200:
results = r.json()
if "matches" in results and results["matches"]:
is_safe = False
matches = results["matches"]
urls_dict = {k: v for v, k in enumerate(urls)}
for match in matches:
safe_list[urls_dict[match["threat"]["url"]]] = False
return is_safe, safe_list | 5,358,270 |
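# Hedged usage sketch: requires a valid GOOGLE_TOKEN for the Safe Browsing API;
# the URLs below are illustrative only.
all_safe, per_url = scan_url(["https://example.com", "https://example.org"])
print(all_safe)   # True if no URL matched a known threat
print(per_url)    # e.g. [True, True]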
def test_chain_rewrite_save_one_before_last():
"""Take chain of length 5, checkpoint a middle node (a2)."""
tf.reset_default_graph()
tf_dev = tf.device('/cpu:0')
tf_dev.__enter__()
n = 5
a0, a1, a2, a3, a4 = make_chain_tanh_constant(n)
grad = memory_saving_gradients.gradients([a4], [a0], checkpoints=[a2])[0]
expected_peak = (n+1-2)*10**6
sess = create_session()
sessrun(tf.global_variables_initializer())
sessrun(grad.op)
peak_memory = cpu_peak()
util.report_memory(peak_memory, expected_peak)
if not REMOVE_ASSERTS:
assert (peak_memory - expected_peak) < 1.1e6, "Difference too large." | 5,358,271 |
def head_to_tree(head, len_, prune, subj_pos, obj_pos):
"""
Convert a sequence of head indexes into a tree object.
"""
head = head[:len_].tolist()
root = None
if prune < 0:
nodes = [Tree() for _ in head]
for i in range(len(nodes)):
h = head[i]
nodes[i].idx = i
nodes[i].dist = -1 # just a filler
if h == 0:
root = nodes[i]
else:
nodes[h-1].add_child(nodes[i])
else:
# find dependency path
subj_pos = [i for i in range(len_) if subj_pos[i] == 0]
obj_pos = [i for i in range(len_) if obj_pos[i] == 0]
cas = None
subj_ancestors = set(subj_pos)
for s in subj_pos:
h = head[s]
tmp = [s]
while h > 0:
tmp += [h-1]
subj_ancestors.add(h-1)
h = head[h-1]
if cas is None:
cas = set(tmp)
else:
cas.intersection_update(tmp)
obj_ancestors = set(obj_pos)
for o in obj_pos:
h = head[o]
tmp = [o]
while h > 0:
tmp += [h-1]
obj_ancestors.add(h-1)
h = head[h-1]
cas.intersection_update(tmp)
# find lowest common ancestor
if len(cas) == 1:
lca = list(cas)[0]
else:
child_count = {k: 0 for k in cas}
for ca in cas:
if head[ca] > 0 and head[ca] - 1 in cas:
child_count[head[ca] - 1] += 1
# the LCA has no child in the CA set
for ca in cas:
if child_count[ca] == 0:
lca = ca
break
path_nodes = subj_ancestors.union(obj_ancestors).difference(cas)
path_nodes.add(lca)
# compute distance to path_nodes
dist = [-1 if i not in path_nodes else 0 for i in range(len_)]
for i in range(len_):
if dist[i] < 0:
stack = [i]
while stack[-1] >= 0 and stack[-1] not in path_nodes:
stack.append(head[stack[-1]] - 1)
if stack[-1] in path_nodes:
for d, j in enumerate(reversed(stack)):
dist[j] = d
else:
for j in stack:
if j >= 0 and dist[j] < 0:
dist[j] = int(1e4) # aka infinity
highest_node = lca
nodes = [Tree() if dist[i] <= prune else None for i in range(len_)]
for i in range(len(nodes)):
if nodes[i] is None:
continue
h = head[i]
nodes[i].idx = i
nodes[i].dist = dist[i]
if h > 0 and i != highest_node:
assert nodes[h-1] is not None
nodes[h-1].add_child(nodes[i])
root = nodes[highest_node]
assert root is not None
return root | 5,358,272 |
def capped_subtraction(x, y):
"""Saturated arithmetics. Returns x - y truncated to the int64_t range."""
assert_is_int64(x)
assert_is_int64(y)
if y == 0:
return x
if x == y:
if x == INT_MAX or x == INT_MIN:
raise OverflowError(
'Integer NaN: subtracting INT_MAX or INT_MIN to itself')
return 0
if x == INT_MAX or x == INT_MIN:
return x
if y == INT_MAX:
return INT_MIN
if y == INT_MIN:
return INT_MAX
return to_capped_int64(x - y) | 5,358,273 |
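# Hedged usage sketch: assumes INT_MAX, INT_MIN and the helpers referenced above
# (assert_is_int64, to_capped_int64) are defined in the same module.
print(capped_subtraction(10, 3))        # 7
print(capped_subtraction(INT_MAX, -5))  # INT_MAX (saturates instead of overflowing)
print(capped_subtraction(0, INT_MIN))   # INT_MAX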
def plotInterestPoint(ax):
"""Plot named points of interest from an Excel sheet onto a cartopy map axis."""
from pandas import read_excel
filepath="../data/points.xls"
points=read_excel(filepath, sheet_name="Map3" )
for lon, lat, cname, h_alig in zip(points.lon, points.lat, points.name, points.h_alig): #range(len(cities_name)):
ax.plot(lon, lat, label=cname, marker='o', transform=ccrs.Geodetic()) #, c='b'
ax.text(lon, lat, cname, fontsize=8, horizontalalignment=h_alig, transform=ccrs.Geodetic())
return | 5,358,274 |
def evaluate_points(func, begin, total_samps, var_list, attr):
"""
Inputs: func- the lambda function used to generate the data from the
evaluation vector
begin- the index to start at in the `attr` array
total_samps- the total number of samples to generate
var_list- list of the variables
attr- the attribute that holds the values to be used in the
evaluation vector
Identical to evaluate_points_verbose, but doesn't check for a verbose
option every iteration. This version also deals with indexing only part of
eval_vect.
"""
var_count = len(var_list)
term_count = func(np.zeros(var_count)).shape
if len(term_count) > 0:
term_count = term_count[1] # len(func(np.zeros(var_count)))
else:
term_count = 1
eval_vect = np.zeros([total_samps, var_count])
matrix = np.zeros([total_samps, term_count])
end = begin + total_samps
for j in range(var_count):
attr_arr = getattr(var_list[j], attr)
eval_vect[:, j] = attr_arr[begin:end].T
for i in range(total_samps):
matrix[i, :] = func(eval_vect[i, :])
return matrix | 5,358,275 |
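# Hedged usage sketch: builds two dummy "variables" whose sampled values live in a
# `samples` attribute (the attribute name is illustrative) and evaluates a scalar
# function over the first five samples.
from types import SimpleNamespace
import numpy as np

rng = np.random.default_rng(0)
var_list_demo = [SimpleNamespace(samples=rng.random(10)) for _ in range(2)]
func_demo = lambda v: np.asarray(v[0] ** 2 + v[1])
matrix_demo = evaluate_points(func_demo, 0, 5, var_list_demo, 'samples')
print(matrix_demo.shape)   # (5, 1)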
def get_username():
"""Return username
Return a useful username even if we are running under HT-Condor.
Returns
-------
str : username
"""
batch_system = os.environ.get('BATCH_SYSTEM')
if batch_system == 'HTCondor':
return os.environ.get('USER', '*Unknown user*')
return os.getlogin() | 5,358,276 |
def corresponding_chromaticities_prediction_CIE1994(experiment=1):
"""
Returns the corresponding chromaticities prediction for *CIE 1994*
chromatic adaptation model.
Parameters
----------
experiment : integer or CorrespondingColourDataset, optional
{1, 2, 3, 4, 6, 8, 9, 11, 12}
*Breneman (1987)* experiment number or
:class:`colour.CorrespondingColourDataset` class instance.
Returns
-------
tuple
Corresponding chromaticities prediction.
References
----------
:cite:`Breneman1987b`, :cite:`CIETC1-321994b`
Examples
--------
>>> from pprint import pprint
>>> pr = corresponding_chromaticities_prediction_CIE1994(2)
>>> pr = [(p.uv_m, p.uv_p) for p in pr]
>>> pprint(pr) # doctest: +ELLIPSIS
[(array([ 0.207, 0.486]), array([ 0.2273130..., 0.5267609...])),
(array([ 0.449, 0.511]), array([ 0.4612181..., 0.5191849...])),
(array([ 0.263, 0.505]), array([ 0.2872404..., 0.5306938...])),
(array([ 0.322, 0.545]), array([ 0.3489822..., 0.5454398...])),
(array([ 0.316, 0.537]), array([ 0.3371612..., 0.5421567...])),
(array([ 0.265, 0.553]), array([ 0.2889416..., 0.5534074...])),
(array([ 0.221, 0.538]), array([ 0.2412195..., 0.5464301...])),
(array([ 0.135, 0.532]), array([ 0.1530344..., 0.5488239...])),
(array([ 0.145, 0.472]), array([ 0.1568709..., 0.5258835...])),
(array([ 0.163, 0.331]), array([ 0.1499762..., 0.4401747...])),
(array([ 0.176, 0.431]), array([ 0.1876711..., 0.5039627...])),
(array([ 0.244, 0.349]), array([ 0.2560012..., 0.4546263...]))]
"""
experiment_results = (convert_experiment_results_Breneman1987(experiment)
if is_numeric(experiment) else experiment)
with domain_range_scale(1):
XYZ_t, XYZ_r = experiment_results.XYZ_t, experiment_results.XYZ_r
xy_o1, xy_o2 = XYZ_to_xy([XYZ_t, XYZ_r])
uv_t = Luv_to_uv(XYZ_to_Luv(experiment_results.XYZ_ct, xy_o1), xy_o1)
uv_m = Luv_to_uv(XYZ_to_Luv(experiment_results.XYZ_cr, xy_o2), xy_o2)
Y_r = experiment_results.B_r
E_o1, E_o2 = experiment_results.Y_t, experiment_results.Y_r
XYZ_1 = experiment_results.XYZ_ct
XYZ_2 = chromatic_adaptation_CIE1994(XYZ_1, xy_o1, xy_o2, Y_r, E_o1,
E_o2)
uv_p = Luv_to_uv(XYZ_to_Luv(XYZ_2, xy_o2), xy_o2)
return tuple([
CorrespondingChromaticitiesPrediction(experiment_results.name,
uv_t[i], uv_m[i], uv_p[i])
for i in range(len(uv_t))
]) | 5,358,277 |
def interpExtrap(x, xp, yp):
"""numpy.interp interpolation function extended by linear extrapolation."""
y = np.interp(x, xp, yp)
y = np.where(x < xp[0], yp[0]+(x-xp[0])*(yp[0]-yp[1])/(xp[0]-xp[1]), y)
return np.where(x > xp[-1], yp[-1]+(x-xp[-1])*(yp[-1]-yp[-2]) /
(xp[-1]-xp[-2]), y) | 5,358,278 |
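# Hedged usage sketch: np.interp clamps outside the data range, while interpExtrap
# extrapolates linearly beyond both ends.
import numpy as np
xp = np.array([0.0, 1.0, 2.0])
yp = np.array([0.0, 2.0, 4.0])
x = np.array([-1.0, 0.5, 3.0])
print(np.interp(x, xp, yp))      # [0.  1.  4.]
print(interpExtrap(x, xp, yp))   # [-2.  1.  6.]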
def get_operating():
"""Get latest operating budgets from shared drive."""
logging.info('Retrieving latest operating budget')
command = "smbclient //ad.sannet.gov/dfs " \
+ "--user={adname}%{adpass} -W ad -c " \
+ "'prompt OFF;"\
+ " cd \"FMGT-Shared/Shared/BUDGET/" \
+ "Open Data/Open Data Portal/" \
+ "Shared with Performance and Analytics/" \
+ "Budget/Operating/\";" \
+ " lcd \"/data/temp/\";" \
+ " mget FY*BUDGET.xlsx;'"
command = command.format(adname=conf['alb_sannet_user'],
adpass=conf['alb_sannet_pass'],
temp_dir=conf['temp_data_dir'])
logging.info(command)
try:
p = subprocess.check_output(command, shell=True, stderr=subprocess.STDOUT)
return p
except subprocess.CalledProcessError as e:
return e.output | 5,358,279 |
def test_parameter_creation(mock_settings_env_vars) -> None: # type: ignore
"""Test the creation of parameters to be used in settings."""
aws_parameter = AwsParameterStore(name="Setting", check_settings=True)
assert aws_parameter.name == "Setting"
with mock.patch.dict(os.environ, {"SETTINGS_NAME": "", "SETTINGS_ENVIRONMENT": ""}):
with pytest.raises(ValueError):
AwsParameterStore(name="Setting", check_settings=True)
aws_parameter_2 = AwsParameterStore(name="Setting")
assert aws_parameter_2.name == "Setting"
assert aws_parameter_2.location == "/environments/Setting"
aws_parameter_3 = AwsParameterStore(name="Setting", location="/def")
assert aws_parameter_3.location == "/def"
aws_parameter_4 = AwsParameterStore(name="Setting", location_path="/def")
assert aws_parameter_4.location_path == "/def"
assert aws_parameter_4.location == "/def/Setting" | 5,358,280 |
def removeNodesFromMRMLScene(nodesToRemove):
"""Removes the input nodes from the scene. Nodes will no longer be accessible from the mrmlScene or from the UI.
Parameters
----------
nodesToRemove: List[vtkMRMLNode] or vtkMRMLNode
Objects to remove from the scene
"""
for node in nodesToRemove:
removeNodeFromMRMLScene(node) | 5,358,281 |
def get_nome_socio(id):
"""Get the name of a member (socio) by id."""
if request.method == 'GET':
try:
socio = db.query_bd('select * from socio where id = "%s"' % id)
if socio:
print(socio)
socio = socio[0]
print(socio)
content = {
'nome': socio['nome'],
'status': socio['status']
}
return json.dumps(content)
except Exception as e:
print(e)
return render_template('404.html') | 5,358,282 |
def processDeps(element: etree.Element, params: dict = {}) -> None:
"""Function to NAF deps layer to RDF
Args:
element: element containing the deps layer
params: dict of params to store results
Returns:
None
"""
output = params["out"]
for dep in element:
if dep.tag == "dep":
# depname = genDepName(params)
# output.write(" xl:type naf-base:dep ;\n")
rfunc = dep.attrib["rfunc"]
to_term = dep.attrib["to_term"]
from_term = dep.attrib["from_term"]
output.write(
"_:" + from_term + " " + "naf-rfunc:" + rfunc + " _:" + to_term + "\n"
)
# for key in dep.attrib.keys():
# if (key != "id"):
# if key == "rfunc":
# output.write(" naf-base:"+attrib2pred(key)+' naf-base:'+dep.attrib[key]+' ;\n')
# else:
# output.write(" naf-base:"+attrib2pred(key)+' _:'+dep.attrib[key]+' ;\n')
output.write(" .\n")
return None | 5,358,283 |
def fit_solution_matrix(weights, design_matrix, cache=None, hash_decimal=10, fit_mat_key=None):
"""
Calculate the linear least squares solution matrix
from a design matrix, A and a weights matrix W
S = [A^T W A]^{-1} A^T W
Parameters
----------
weights: array-like
ndata x ndata matrix of data weights
design_matrix: array-like
ndata x n_fit_params matrix transforming fit_parameters to data
cache: optional dictionary
optional dictionary storing pre-computed fitting matrix.
hash_decimal: int optional
the number of decimals to use in hash for caching. default is 10
fit_mat_key: optional hashable variable
optional key. If none is used, hash fit matrix against design and
weighting matrix.
Returns
-----------
array-like
n_fit_params x n_fit_params matrix
S = [A^T W A]^{-1} A ^T W
"""
if cache is None:
cache = {}
ndata = weights.shape[0]
if not weights.shape[0] == weights.shape[1]:
raise ValueError("weights must be a square matrix")
if not design_matrix.shape[0] == ndata:
raise ValueError("weights matrix incompatible with design_matrix!")
if fit_mat_key is None:
opkey = ('fitting_matrix',) + tuple(np.round(weights.flatten(), hash_decimal))\
+tuple(np.round(design_matrix.flatten(), hash_decimal))
else:
opkey = fit_mat_key
if not opkey in cache:
#check condition number
cmat = np.conj(design_matrix.T) @ weights @ design_matrix
#should there be a conjugation!?!
if np.linalg.cond(cmat)>=1e9:
warn('Warning!!!!: Poorly conditioned matrix! Your linear inpainting IS WRONG!')
cache[opkey] = np.linalg.pinv(cmat) @ np.conj(design_matrix.T) @ weights
else:
try:
cache[opkey] = np.linalg.inv(cmat) @ np.conj(design_matrix.T) @ weights
except np.linalg.LinAlgError as error:
print(error)
cache[opkey] = None
return cache[opkey] | 5,358,284 |
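# Hedged usage sketch: a weighted least-squares line fit. The design matrix columns
# are [1, x]; applying the solution matrix to the data recovers intercept and slope.
import numpy as np
x = np.linspace(0.0, 1.0, 20)
design = np.vstack([np.ones_like(x), x]).T   # 20 x 2 design matrix
weights = np.eye(20)                         # uniform data weights
S = fit_solution_matrix(weights, design)     # 2 x 20 solution matrix
data = 3.0 + 2.0 * x                         # noiseless line
print(S @ data)                              # approximately [3., 2.]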
def version_from(schema_path, document_path):
"""HACK A DID ACK derives non-default 1.1 version from path."""
LOG.debug("xml version derivation flat inspection schema_path=%s", schema_path)
if CRVF_PRE_OASIS_SEMANTIC_VERSION in str(schema_path):
return CRVF_PRE_OASIS_SEMANTIC_VERSION
if CRVF_DEFAULT_SEMANTIC_VERSION in str(schema_path):
return CRVF_DEFAULT_SEMANTIC_VERSION
LOG.debug("xml version derivation deep call document_path=%s", document_path)
return version_peek(document_path) | 5,358,285 |
def nessus_vuln_check(request):
"""
Get the detailed vulnerability information.
:param request:
:return:
"""
if request.method == 'GET':
id_vul = request.GET['vuln_id']
else:
id_vul = ''
vul_dat = nessus_report_db.objects.filter(vul_id=id_vul)
return render(request, 'nessus_vuln_data.html', {'vul_dat': vul_dat}) | 5,358,286 |
def wait_for_job_completion(namespace, timeout, error_msg):
"""
This is a WORKAROUND of particular ocsci design choices: I just wait
for one pod in the namespace, and then ask for the pod again to get
it's name (but it would be much better to just wait for the job to
finish instead, then ask for a name of the successful pod and use it
to get logs ...)
Returns:
str: name of Pod resource of the finished job
"""
ocp_pod = ocp.OCP(kind="Pod", namespace=namespace)
try:
ocp_pod.wait_for_resource(
resource_count=1,
condition=constants.STATUS_COMPLETED,
timeout=timeout,
sleep=30)
except TimeoutExpiredError as ex:
# report some high level error as well
logger.error(error_msg)
# TODO: log both describe and the output from the fio pods, as DEBUG
ex.message = error_msg
raise(ex)
# identify pod of the completed job
pod_data = ocp_pod.get()
# explicit list of assumptions, if these assumptions are not met, the
# code won't work and it either means that something went terribly
# wrong or that the code needs to be changed
assert pod_data['kind'] == "List"
pod_dict = pod_data['items'][0]
assert pod_dict['kind'] == "Pod"
pod_name = pod_dict['metadata']['name']
logger.info(f"Identified pod name of the finished Job: {pod_name}")
return pod_name | 5,358,287 |
def delete_data(data, object_name, **kwargs):
"""
Delete data
"""
data.delete()
is_queryset = isinstance(data, QuerySet)
return {
"is_queryset": is_queryset,
"data": data,
"object_name": object_name,
} | 5,358,288 |
def test_create_box(client, default_qr_code):
"""Verify base GraphQL query endpoint"""
box_creation_input_string = f"""{{
product_id: 1,
items: 9999,
location_id: 100000005,
comments: "",
size_id: 1,
qr_barcode: "{default_qr_code["code"]}",
created_by: "1"
}}"""
# TODO: add location, product and qr to the responses for this
gql_mutation_string = f"""mutation {{
createBox(
box_creation_input : {box_creation_input_string}
) {{
id
items
}}
}}"""
data = {"query": gql_mutation_string}
response_data = client.post("/graphql", json=data)
# TODO: fix this test
created_box = response_data.json["data"]["createBox"]
assert response_data.status_code == 200
assert created_box["items"] == 9999 | 5,358,289 |
def get_g(source):
""" Read the graph and its reverse from a text file (N is assumed to be a module-level node count). """
G = {}
Grev = {}
for i in range(1, N+1):
G[i] = []
Grev[i] = []
fin = open(source)
for line in fin:
v1 = int(line.split()[0])
v2 = int(line.split()[1])
G[v1].append(v2)
Grev[v2].append(v1)
fin.close()
return G, Grev | 5,358,290 |
def _mercator(lat_long):
"""
Calculate the 2D X and Y coordinates from a set of coordinates based on radius, latitude and longitude using the
Mercator projection.
:param lat_long: The coordinates of the points to be projected expressed as radius, latitude and longitude.
:type lat_long: list[tuple]
:return: The projected coordinates in the XY-plane.
:rtype: ndarray
"""
x = np.array([coord[0] * coord[2] for coord in lat_long])
y = np.array([coord[0] * np.log(np.tan(np.pi / 4 + coord[1] / 2))
for coord in lat_long])
return np.vstack((x, y)).T | 5,358,291 |
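# Hedged usage sketch: each tuple is (radius, latitude, longitude) in radians,
# matching the docstring ordering; the sample points are illustrative only.
import numpy as np
points = [(1.0, 0.0, 0.0), (1.0, np.pi / 4, np.pi / 2)]
print(_mercator(points))
# [[0.         0.        ]
#  [1.57079633 0.88137359]]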
def subjectForm(request, experiment_id):
"""
Generates the fourth page, the demographic/participant data form of an experiment.
"""
experiment = get_object_or_404(Experiment, pk=experiment_id)
form = SubjectDataForm(experiment=experiment)
t = Template(experiment.demographic_data_page_tpl)
c = RequestContext(request, {'subject_data_form': form, 'experiment': experiment, 'recaptcha_site_key': settings.GOOGLE_RECAPTCHA_SITE_KEY})
return HttpResponse(t.render(c)) | 5,358,292 |
def sigma_0(x):
"""First rotational + shifting mixing function
σ_256_0(x) = ROTR_7(x) ⊕ ROTR_18(x) ⊕ SHR_3(x)
"""
return ROTR(x, 7) ^ ROTR(x, 18) ^ SHR(x, 3) | 5,358,293 |
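# Hedged usage sketch: ROTR and SHR are assumed to be the usual 32-bit SHA-256
# helpers defined elsewhere in this module; minimal stand-ins are given here only
# so the call can be exercised.
def ROTR(x, n, width=32):
    return ((x >> n) | (x << (width - n))) & ((1 << width) - 1)

def SHR(x, n):
    return x >> n

print(hex(sigma_0(0x12345678)))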
def mat_mult(a=[1.0, 0.0], b=[1.0, 0.0], n=3, result=[0.0, 0.0]):
"""Multiply two square matrices (not element-wise).
Stores the result in `result`.
Parameters
----------
a: list
b: list
n: int : number of rows/columns
result: list
"""
i, j, k = declare('int', 3)
for i in range(n):
for k in range(n):
s = 0.0
for j in range(n):
s += a[n*i + j] * b[n*j + k]
result[n*i + k] = s | 5,358,294 |
def edit_entry(edit_result):
"""Edit entry"""
new_entry = edit_result.copy()
edit_key = None
edit_value = None
date_keys = ["Date"]
int_keys = ["Time Spent"]
while edit_key not in edit_result:
reset_screen("key", "Please type the key you want to edit.")
for key, value in edit_result.items():
print(f"{key}: {value}")
edit_key = get_input(str)
if edit_key not in edit_result:
reset_screen(error=True, sub_title="Input is not a valid key.")
if edit_key in date_keys:
input_type = datetime
elif edit_key in int_keys:
input_type = int
else:
input_type = str
while not edit_value:
reset_screen("new value", ENTRY_QUESTIONS[edit_key])
edit_value = get_input(input_type, newline=False)
new_entry[edit_key] = edit_value
entries = get_entries()
entries[entries.index(edit_result)] = new_entry
csvfile = open("entries.csv", "w")
csvfile.close()
for entry in entries:
write_to_csv(entry)
return new_entry | 5,358,295 |
def build_input_files(filename, base_path = 'input_files', out = sys.stdout):
"""
build_input_files(filename, base_path = 'input_files')
takes a 'well-formatted' input file and outputs a
directory structure with the properly formatted input files
created in them.
"""
calling_dir = os.getcwd()
# I'm doing this because I need it later
file_path, file_name = os.path.split(filename)
with open(filename, 'r') as f:
txt = f.read()
## First Parse the FDS file
param_dict, IOoutput = FDSa_parser(txt, file_name, out)
# param_dict, sweep_param_dict, prms_in_axis = calculate_params(param_dict, axes)
for key_ in param_dict.keys():
txt = txt.replace(param_dict[key_][0], key_)
formatted_trials, logfile, IOoutput = eval_parsed_FDS(param_dict, out)
print("formatted_trials", formatted_trials[0])
## Make input files and directories
for i, value_set in enumerate(formatted_trials):
print(i,value_set)
tmp_txt = txt
# make a directory
case_name = 'case_'+int2base(i, 26)
# FDS uses uppercase reserved keywords, and so will we
value_set['TITLE'] = case_name
input_directory_builder(case_name, base_path)
# populate the input file
print(tmp_txt.count(list(value_set.keys())[1]))
print(value_set)
with open('tmp_txt', 'w') as f:
f.write(str(tmp_txt))
tmp_txt = tmp_txt.format(**value_set) ## The format command doesn't like : or . because it thinks it's a float format
# create the file name
fname = os.path.join(calling_dir, base_path,
case_name, case_name + '.fds')
# write the input file to the directory
with open(fname, 'w') as f:
f.write(str(tmp_txt))
log_path_name = os.path.join(calling_dir, base_path, file_name[:-4] + '.log')
# write the augmented fds log file
with open(log_path_name, 'a') as f:
f.write(logfile)
return IOoutput | 5,358,296 |
def srpd(mvec, k, ra, Nmax, w, V):
"""
Calculate the Steered Response Power Density (SRPD)
:param mvec: SHD coefficients for the TF bin to be analysed
:param k: Wave number (2*pi*f/c)
:param ra: Radius of the microphone array
:param Nmax: Maximum SHD order to be used
:param w: Diagonal eigenvalue matrix
:param V: Reduced eigenvector matrix
:return: SRPD for the given pixel
"""
assert np.size(mvec) == (Nmax + 1) ** 2
V = V[0:(Nmax + 1) ** 2, 0:(Nmax + 1) ** 2]
w = w[0:(Nmax + 1) ** 2]
kra = k * ra
jn, jnp, yn, ynp = sph_jnyn(Nmax, kra)
# jn, jnp, yn, ynp = spec.sph_jnyn(Nmax, kra)
hn = jn - 1j * yn
hnp = jnp - 1j * ynp
bnkra = jn - (jnp / hnp) * hn
b = []
for n in range(Nmax + 1):
for count in range(-n, n + 1):
b.append(1 / (4 * np.pi * (1j) ** n * bnkra[n]))
b = np.array(b)
p = b * mvec
B0 = np.conj(np.matrix(np.conj(p)) * V).T
B0s = np.diag(w) * np.multiply(B0, np.conj(B0))
srpval = B0s.sum()
return srpval | 5,358,297 |
def batch_inverse(tensor):
"""
Compute the matrix inverse of a batch of square matrices. This routine is used for removing rotational motion
during the molecular dynamics simulation. Taken from https://stackoverflow.com/questions/46595157
Args:
tensor (torch.Tensor): Tensor of square matrices with the shape n_batch x dim1 x dim1
Returns:
torch.Tensor: Tensor of the inverted square matrices with the same shape as the input tensor.
"""
eye = tensor.new_ones(tensor.size(-1), device=tensor.device).diag().expand_as(tensor)
tensor_inv, _ = torch.gesv(eye, tensor)
return tensor_inv | 5,358,298 |
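# Hedged usage sketch: torch.gesv was removed in recent PyTorch releases, so this
# assumes a version where it still exists; the batch of matrices is illustrative.
import torch
batch = torch.eye(3).unsqueeze(0).repeat(4, 1, 1) * 2.0   # four copies of 2*I
inv = batch_inverse(batch)
print(torch.allclose(inv, batch / 4.0))   # inverse of 2*I is 0.5*I -> True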
def create(**kwds):
"""
Add data.
"""
status_code = 200
message = "Successfully added data."
articles = []
for a in kwds.get("articles", []):
a = Article.query.filter_by(id=a).first()
if a:
articles.append(a)
cols = {"user_id": current_user.id, "name": kwds["name"]}
model = Bookmark.query.filter_by(**cols).first()
if model:
for a in articles:
exist = model.articles.filter_by(id=a.id).first()
if not exist:
model.articles.append(a)
db_commit()
else:
cols["articles"] = articles
model = Bookmark(**cols)
db_add(model)
return {"code": status_code, "message": message} | 5,358,299 |