def promote(lhs, rhs, promote_option=True):
"""Promote two scalar dshapes to a possibly larger, but compatible type.
Examples
--------
>>> from datashape import int32, int64, Option
>>> x = Option(int32)
>>> y = int64
>>> promote(x, y)
Option(ty=ctype("int64"))
>>> promote(int64, int64)
ctype("int64")
Don't promote to option types.
>>> promote(x, y, promote_option=False)
ctype("int64")
Notes
-----
This uses ``numpy.result_type`` for type promotion logic. See the numpy
documentation at
http://docs.scipy.org/doc/numpy/reference/generated/numpy.result_type.html
"""
if lhs == rhs:
return lhs
else:
left, right = getattr(lhs, 'ty', lhs), getattr(rhs, 'ty', rhs)
dtype = datashape.CType.from_numpy_dtype(
np.result_type(
datashape.to_numpy_dtype(left),
datashape.to_numpy_dtype(right),
),
)
if promote_option:
dtype = optionify(lhs, rhs, dtype)
return dtype | 300 |
def calc_derFreq(in_tped, out_derFreq_tsv):
"""Calculate the derived allele frequency for each SNP in one population"""
with open(in_tped) as tped, open(out_derFreq_tsv, 'w') as out:
out.write('\t'.join(['chrom', 'snpId', 'pos', 'derFreq']) + '\n')
for line in tped:
chrom, snpId, genPos_cm, physPos_bp, alleles = line.strip().split(maxsplit=4)
n = [alleles.count(i) for i in ('0', '1')]
derFreq = n[0] / (n[0] + n[1])
out.write('\t'.join([chrom, snpId, physPos_bp, f'{derFreq:.2f}']) + '\n') | 301 |
def validate_esc(esc):
"""Validate esc options.
Raise an error if any character is not one of '*?[]'
"""
esc = esc.replace("]", "[")
argset = set(esc)
charset = {"*", "?", "["}
if argset.difference(charset):
err = "input character is not '*?[]'"
raise argparse.ArgumentTypeError(err)
return "".join(argset) | 302 |
def reset():
"""Reset the radio device"""
#extern void radio_reset(void);
radio_reset_fn() | 303 |
def test_profile_valid(resource_type):
"""Resource types are valid."""
assert resource_type == mapbox.Analytics(
access_token='pk.test')._validate_resource_type(resource_type) | 304 |
def calc_amp_pop(eigenvecs, wave_func, nstates):
"""Calculates amplitudes and population from wave function, eigenvectors"""
pop = np.zeros(nstates)
amp = np.zeros((nstates), dtype=np.complex128)
for j in range(nstates):
amp[j] = np.dot(eigenvecs[:, j], wave_func)
pop[j] = np.real(bra_ket(amp[j], amp[j]))
return amp, pop | 305 |
def reflect(cls, *args, **kwargs):
"""
Construct a funsor, populate ``._ast_values``, and cons hash.
This is the only interpretation allowed to construct funsors.
"""
if len(args) > len(cls._ast_fields):
# handle varargs
new_args = tuple(args[:len(cls._ast_fields) - 1]) + (args[len(cls._ast_fields) - 1 - len(args):],)
assert len(new_args) == len(cls._ast_fields)
_, args = args, new_args
# JAX DeviceArray has a .__hash__ method but raises an unhashable-type error when it is called.
cache_key = tuple(id(arg) if type(arg).__name__ == "DeviceArray" or not isinstance(arg, Hashable)
else arg for arg in args)
if cache_key in cls._cons_cache:
return cls._cons_cache[cache_key]
arg_types = tuple(typing.Tuple[tuple(map(type, arg))]
if (type(arg) is tuple and all(isinstance(a, Funsor) for a in arg))
else typing.Tuple if (type(arg) is tuple and not arg)
else type(arg) for arg in args)
cls_specific = (cls.__origin__ if cls.__args__ else cls)[arg_types]
result = super(FunsorMeta, cls_specific).__call__(*args)
result._ast_values = args
# alpha-convert eagerly upon binding any variable
result = _alpha_mangle(result)
cls._cons_cache[cache_key] = result
return result | 306 |
def get_arguments():
"""Parse all the arguments provided from the CLI.
Returns:
A list of parsed arguments.
"""
parser = argparse.ArgumentParser(description='Semantic Segmentation')
# Data parameters.
parser.add_argument('--batch_size', type=int, default=1,
help='Number of images in one step.')
parser.add_argument('--use_lemniscate', type=str, default='',
help='Path to lemniscate embeddings.')
parser.add_argument('--data_dir', type=str, default='',
help='/path/to/dataset/.')
parser.add_argument('--input_size', type=str, default='336,336',
help='Comma-separated string with H and W of image.')
parser.add_argument('--random_seed', type=int, default=1234,
help='Random seed to have reproducible results.')
parser.add_argument('--num_gpu', type=int, default=2,
help='Number of gpus for training.')
# Training parameters.
parser.add_argument('--is_training', action='store_true',
help='Whether to update weights.')
parser.add_argument('--use_global_status', action='store_true',
help='Whether to update moving mean and variance.')
parser.add_argument('--learning_rate', type=float, default=2.5e-4,
help='Base learning rate.')
parser.add_argument('--momentum', type=float, default=0.9,
help='Momentum component of the optimiser.')
parser.add_argument('--weight_decay', type=float, default=5e-4,
help='Regularisation parameter for L2-loss.')
parser.add_argument('--num_classes', type=int, default=1000,
help='Number of classes to predict.')
parser.add_argument('--num_epochs', type=int, default=300,
help='Number of training epochs.')
# parser.add_argument('--iter_size', type=int, default=10,
# help='Number of iteration to update weights')
parser.add_argument('--random_mirror', action='store_true',
help='Whether to randomly mirror the inputs.')
parser.add_argument('--random_crop', action='store_true',
help='Whether to randomly crop the inputs.')
parser.add_argument('--random_scale', action='store_true',
help='Whether to randomly scale the inputs.')
parser.add_argument('--num_loading_workers', type=int, default=10,
help='Number of workers to load imagenet.')
parser.add_argument('--schedule', type=int, nargs='+', default=[40],
help='Decrease learning rate at these epochs.')
parser.add_argument('--gamma', type=float, default=0.1, help='LR is multiplied by gamma on schedule.')
parser.add_argument('--power', type=float, default=0.6,
help='Decay for poly learning rate policy.')
parser.add_argument('--decay', type=float, default=0.4,
help='Decay for exponential learning rate policy.')
parser.add_argument('--use_embed_preloaded', type=str, default="",
help='Path to preloaded numpy embeddings as torch tensor.')
# SegSort parameters.
parser.add_argument('--embedding_dim', type=int, default=32,
help='Dimension of the feature embeddings.')
# Misc parameters.
parser.add_argument('--restore_from', type=str, default='',
help='Where to restore checkpoint/model parameters from.')
parser.add_argument('--save_pred_every', type=int, default=10000,
help='How often (in steps) to save summaries and checkpoints.')
parser.add_argument('--update_tb_every', type=int, default=20,
help='How often (in steps) to update summaries.')
parser.add_argument('--snapshot_dir', type=str, default='',
help='Where to save snapshots of the model.')
parser.add_argument('--not_restore_classifier', action='store_true',
help='Whether to not restore classifier layers.')
return parser.parse_args() | 307 |
def make_strictly_feasible(x, lb, ub, rstep=1e-10):
"""Shift a point to the interior of a feasible region.
Each element of the returned vector is at least at a relative distance
`rstep` from the closest bound. If ``rstep=0`` then `np.nextafter` is used.
"""
x_new = x.clone()
active = find_active_constraints(x, lb, ub, rstep)
lower_mask = torch.eq(active, -1)
upper_mask = torch.eq(active, 1)
if rstep == 0:
# Boolean indexing returns a copy, so assign the result rather than writing via out=.
x_new[lower_mask] = torch.nextafter(lb[lower_mask], ub[lower_mask])
x_new[upper_mask] = torch.nextafter(ub[upper_mask], lb[upper_mask])
else:
x_new[lower_mask] = lb[lower_mask].add(lb[lower_mask].abs().clamp(1,None), alpha=rstep)
x_new[upper_mask] = ub[upper_mask].sub(ub[upper_mask].abs().clamp(1,None), alpha=rstep)
tight_bounds = (x_new < lb) | (x_new > ub)
x_new[tight_bounds] = 0.5 * (lb[tight_bounds] + ub[tight_bounds])
return x_new | 308 |
def main():
""" General test method """
from . import spectra as sp
p_dict = {'Bfield':700,'rb85frac':1,'Btheta':88*np.pi/180,'Bphi':0*np.pi/180,'lcell':75e-3,'T':84,'Dline':'D2','Elem':'Cs'}
chiL,chiR,chiZ = sp.calc_chi(np.linspace(-3500,3500,10),p_dict)
#print 'ez: ',chiZ + 1 # ez / e0
#print 'ex: ',0.5*(2+chiL+chiR) # ex / e0
#print 'exy: ',0.5j*(chiR-chiL) # exy / e0
RotMat, n1, n2 = solve_diel(chiL,chiR,chiZ,88*np.pi/180)
print(RotMat.shape)
def SearchPageGenerator(query, step=None, total=None, namespaces=None, site=None):
"""
Yield pages matching the query using the internal MediaWiki search engine.
"""
if site is None:
site = pywikibot.Site()
for page in site.search(query, step=step, total=total, namespaces=namespaces):
yield page | 310 |
def add_emails(request):
"""
Args:
request: Http Request (ignored in this function)
Returns: Add operation status wrapped in a response object
"""
error_messages = []
success_messages = []
status = HTTP_200_OK
success, message = queries.add_emails(request.data)
if success:
success_messages.append(message)
else:
error_messages.append(message)
status = HTTP_403_FORBIDDEN
return create_response(error_messages=error_messages, success_messages=success_messages, status=status) | 311 |
def deposit(amount, account):
"""Deposit STEEM to market in exchange for STEEMP."""
stm = shared_blockchain_instance()
if stm.rpc is not None:
stm.rpc.rpcconnect()
if not stm.is_steem:
print("Please set a Steem node")
return
if not account:
account = stm.config["default_account"]
if not unlock_wallet(stm):
return
market = Market(steem_instance=stm)
tx = market.deposit(account, amount)
tx = json.dumps(tx, indent=4)
print(tx) | 312 |
def tanD(angle):
"""
angle is the measure of an angle in degrees
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Returns the tangent of angle.
"""
return math.tan(math.radians(angle)) | 313 |
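# Quick sanity check (illustrative only): tan(45 deg) is 1 and tan(60 deg) is sqrt(3).
import math
print(round(tanD(45), 6))                    # 1.0
print(math.isclose(tanD(60), math.sqrt(3)))  # True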
def test_complex_df(complex_dataframe):
"""
Get a dataframe from a complex mapped dataframe
"""
df = complex_dataframe
mapper = DataFrameMapper(
[('target', None), ('feat1', None), ('feat2', None)],
df_out=True)
transformed = mapper.fit_transform(df)
assert len(transformed) == len(complex_dataframe)
for c in df.columns:
assert len(transformed[c]) == len(df[c]) | 314 |
def main(verbose: bool = False, log_path: Optional[str] = None):
"""Launches Noteserver.
Noteserver is a LSP server that works with most editors in order to help make
taking notes easier! This program expects to receive LSP RPCs from stdin and
will produce LSP RPCs to stdout.
Args:
verbose: Include for additional logging.
log_path: Set to write debug logs to a file.
"""
logging.basicConfig(filename=log_path,
filemode="w",
level=logging.DEBUG if verbose else logging.WARNING)
# Start server!
while True:
try:
logging.info("Starting server!")
server.Server(reader=sys.stdin.buffer, writer=sys.stdout.buffer).run()
except ValueError as error:
logging.error("Encountered server error and restarting: %s", error) | 315 |
def step_i_get_a_task_from_the_entity_using_the_service_api(
context, service_name, tasks_service_name, filename
):
"""
:type context: behave.runner.Context
:type service_name: str
:type tasks_service_name: str
:type filename: str
"""
location = context.json_location
headers = read_json_from_file(filename, location)
service_client = context.services[service_name]["client"]
tasks_service_client = context.services[tasks_service_name]["client"]
tasks_resp = context.services[tasks_service_name]["resp"]
first_task = tasks_resp.json()["tasks"][0]
# TODO config value?
task_id = first_task["task_id"]
context.services[tasks_service_name]["id"] = task_id
context.services[tasks_service_name]["resp"] = tasks_service_client.get(
resource_id=task_id, headers=headers
) | 316 |
def parse_all_headers():
"""
Call parse_header() on all of Allegro's public include files.
"""
p = options.source
includes = " -I " + p + "/include -I " + os.path.join(options.build,
"include")
includes += " -I " + p + "/addons/acodec"
headers = [p + "/include/allegro5/allegro.h",
p + "/addons/acodec/allegro5/allegro_acodec.h",
p + "/include/allegro5/allegro_opengl.h"]
if options.windows:
headers += [p + "/include/allegro5/allegro_windows.h"]
for addon in glob.glob(p + "/addons/*"):
name = addon[len(p + "/addons/"):]
header = os.path.join(p, "addons", name, "allegro5",
"allegro_" + name + ".h")
if os.path.exists(header):
headers.append(header)
includes += " -I " + os.path.join(p, "addons", name)
for header in headers:
p = subprocess.Popen(options.compiler + " -E -dD - " + includes,
stdout=subprocess.PIPE, stdin=subprocess.PIPE, shell=True)
filename = "#include <allegro5/allegro.h>\n" + open(header).read()
p.stdin.write(filename.encode('utf-8'))
p.stdin.close()
text = p.stdout.read().decode("utf-8")
parse_header(text.splitlines(), header)
#print("%d definitions in %s" % (n, header)) | 317 |
def offset_zero_by_one(feature):
"""Sets the start coordinate to 1 if it is actually 0.
Required for the flanking to work properly in those cases.
"""
if feature.start == 0:
feature.start += 1
return feature | 318 |
def smilesToMolecule(smiles):
"""
Convert a SMILES string to a CDK Molecule object.
Returns: the Molecule object
"""
mol = None
try:
smilesParser = cdk.smiles.SmilesParser(silentChemObjectBuilder)
mol = smilesParser.parseSmiles(smiles)
except cdk.exception.InvalidSmilesException as e:
System.err.println('An error occurred while parsing the SMILES')
e.printStackTrace()
return mol | 319 |
def build_pathmatcher(name, defaultServiceUrl):
"""
This builds and returns a full pathMatcher entry, for appending to an existing URL map.
Parameters:
name: The name of the pathMatcher.
defaultServiceUrl: Denotes the URL requests should go to if none of the path patterns match.
"""
matcher = OrderedDict()
matcher['defaultService'] = defaultServiceUrl
matcher['name'] = name
return matcher | 320 |
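# Illustrative call (the names and URL below are made up): the returned OrderedDict is
# meant to be appended to the pathMatchers list of an existing URL map.
matcher = build_pathmatcher("example-matcher", "global/backendServices/example-backend")
print(dict(matcher))
# {'defaultService': 'global/backendServices/example-backend', 'name': 'example-matcher'}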
def gaussian1D_smoothing(input_array, sigma, window_size):
"""
Function to smooth input array using 1D gaussian smoothing
Args:
input_array (numpy.array): input array of values
sigma (float): sigma value for gaussian smoothing
window_size (int): window size for gaussian smoothing
Returns:
numpy.array: smoothed output array
"""
# compute truncate value (#standard_deviations)
truncate = (((window_size - 1)/2)-0.5)/sigma
return gaussian_filter1d(input_array, sigma=sigma, truncate=truncate) | 321 |
async def test_enabling_webhook(hass, hass_ws_client, setup_api, mock_cloud_login):
"""Test we call right code to enable webhooks."""
client = await hass_ws_client(hass)
with patch(
"hass_nabucasa.cloudhooks.Cloudhooks.async_create", return_value={}
) as mock_enable:
await client.send_json(
{"id": 5, "type": "cloud/cloudhook/create", "webhook_id": "mock-webhook-id"}
)
response = await client.receive_json()
assert response["success"]
assert len(mock_enable.mock_calls) == 1
assert mock_enable.mock_calls[0][1][0] == "mock-webhook-id" | 322 |
def add_whitespace(c_fn):
""" Separate all tokens of a C function with single spaces
"""
tok = re.compile(r'[a-zA-Z0-9_]+|\*|\(|\)|\,|\[|\]')
return ' ' + ' '.join(tok.findall(c_fn)) + ' ' | 323 |
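# Example (assumed usage): normalising a C prototype so later string matching can rely
# on single spaces around every token.
print(add_whitespace("int foo(int a, char *b)"))
# ' int foo ( int a , char * b ) '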
def readFlow(fn):
""" Read .flo file in Middlebury format"""
with open(fn, 'rb') as f:
magic = np.fromfile(f, np.float32, count=1)
if 202021.25 != magic:
print('Magic number incorrect. Invalid .flo file')
return None
else:
w = np.fromfile(f, np.int32, count=1)
h = np.fromfile(f, np.int32, count=1)
#print('Reading %d x %d flo file\n' % (w, h))
data = np.fromfile(f, np.float32, count=2*int(w)*int(h))
# Reshape data into 3D array (columns, rows, bands)
# The reshape here is for visualization, the original code is (w,h,2)
x=np.resize(data, (int(h), int(w), 2))
return x | 324 |
def test_method_attr():
"""Test the value of the method attribute."""
assert_equal(m.method, "multidim_parameter_study") | 325 |
def plot_umap_list(adata, title, color_groups):
"""
Plots UMAPS based with different coloring groups
:param adata: Adata Object containing a latent space embedding
:param title: Figure title
:param color_groups: Column name in adata.obs used for coloring the UMAP
:return:
"""
try:
if adata.X.shape[1] == 2:
adata.obsm['X_umap'] = adata.X
else:
sc.pp.neighbors(adata, use_rep='X')
sc.tl.umap(adata)
figures = []
for group in color_groups:
fig = sc.pl.umap(adata, color=group, title=title+'_'+group, return_fig=True)
fig.tight_layout()
figures.append(fig)
return figures
except ValueError as e:
print(e)
return [] | 326 |
def page_with_subject_page_generator(
generator: Iterable[pywikibot.Page], return_subject_only=False
) -> Generator[pywikibot.Page, None, None]:
"""
Yield pages and associated subject pages from another generator.
Only yields subject pages if the original generator yields a non-
subject page, and does not check if the subject page in fact exists.
"""
for page in generator:
if not return_subject_only or not page.isTalkPage():
yield page
if page.isTalkPage():
yield page.toggleTalkPage() | 327 |
def _delete_block_structure_on_course_delete(sender, course_key, **kwargs): # pylint: disable=unused-argument
"""
Catches the signal that a course has been deleted from the
module store and invalidates the corresponding cache entry if one
exists.
"""
clear_course_from_cache(course_key) | 328 |
def _filename(url, headers):
"""Given the URL and the HTTP headers received while fetching it,
generate a reasonable name for the file. If no suitable name can be
found, return None. (Either uses the Content-Disposition explicit
filename or a filename from the URL.)
"""
filename = None
# Try to get filename from Content-Disposition header.
heads = re.findall(r'^Content-Disposition:\s*(.*?)\r\n',
headers, re.I | re.M)
if heads:
cdisp = rfc6266.parse_headers(heads[-1], relaxed=True)
filename = cdisp.filename_unsafe
# Get filename from URL.
if not filename:
parts = urlparse.urlparse(url).path.split('/')
if parts:
filename = parts[-1]
# Strip unsafe characters from path.
if filename:
filename = filename.strip()
for sep in (os.sep, os.altsep):
if sep:
filename = filename.replace(sep, '_')
for pat in FILENAME_REPLACE:
filename = pat.sub('_', filename)
if filename:
return filename | 329 |
def MatrixCrossProduct(Mat1, Mat2):
"""
Returns the cross products of Mat1 and Mat2.
:param:
- Mat1 & Mat2 - Required : 5D matrix with shape (3,1,nz,ny,nx).
:return:
- Mat3 : 5D matrix with shape (3,1,nz,ny,nx).
"""
Mat3 = np.zeros_like(Mat1)
Mat3[0] = Mat1[1]*Mat2[2]-Mat1[2]*Mat2[1]
Mat3[1] = Mat1[2]*Mat2[0]-Mat1[0]*Mat2[2]
Mat3[2] = Mat1[0]*Mat2[1]-Mat1[1]*Mat2[0]
return Mat3 | 330 |
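# Sanity check (illustrative): for (3,1,nz,ny,nx)-shaped inputs the result should match
# numpy.cross taken along the leading axis.
import numpy as np

rng = np.random.default_rng(0)
a = rng.random((3, 1, 2, 2, 2))
b = rng.random((3, 1, 2, 2, 2))
print(np.allclose(MatrixCrossProduct(a, b), np.cross(a, b, axis=0)))  # True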
def partition_preds_by_scrape_type(verify_predictions,
evidence_predictions,
val_examples):
"""Partition predictions by which scrape_type they come from.
The validation fold contains four sets of evidence: drqa, lucene, ukp_pred,
and ukp_wiki. The intention is in this function to partition these into
four sets so that they can each be scored separately to measure the
difference between them on models that are trained on one of these
(train_scrape).
Args:
verify_predictions: Claim verification predictions to partition, a 3-dim
tensor of probabilities (one for each class)
evidence_predictions: Evidence predictions to partition, a scalar
probability of matching
val_examples: Validation examples, typically all of
FeverMetricsCallback._validation_flat
Returns:
Predictions and examples partitioned by scrape type
"""
partitioned_verify = collections.defaultdict(list)
partitioned_match = collections.defaultdict(list)
partitioned_example = collections.defaultdict(list)
for verify_probs, match_prob, example in zip(verify_predictions,
evidence_predictions,
val_examples):
struct, _ = example
metadata = json.loads(unwrap_tensor(struct['metadata']))
scrape_type = metadata['scrape_type']
partitioned_verify[scrape_type].append(verify_probs)
partitioned_match[scrape_type].append(match_prob)
partitioned_example[scrape_type].append(example)
return partitioned_verify, partitioned_match, partitioned_example | 331 |
def apply_delay_turbulence(signal, delay, fs):
"""Apply phase delay due to turbulence.
:param signal: Signal
:param delay: Delay
:param fs: Sample frequency
"""
k_r = np.arange(0, len(signal), 1) # Create vector of indices
k = k_r - delay * fs # Create vector of warped indices
kf = np.floor(k).astype(int) # Floor the warped indices. Convert to integers so we can use them as indices.
dk = kf - k
ko = np.copy(kf)
kf[ko<0] = 0
kf[ko+1>=len(ko)] = 0
R = ( (1.0 + dk) * signal[kf] + (-dk) * signal[kf+1] ) * (ko >= 0) * (ko+1 < len(k)) #+ 0.0 * (kf<0)
return R | 332 |
def binaryContext():
"""Return the registered context for the binary functions.
Return Value:
Ctor() for the binary function context
"""
return bin_func_class | 333 |
def validate_vm_file(file_name: Path, nx: int, ny: int, nz: int):
"""
Validates that a velocity model file has the correct size, and no 0 values in a sample of the layers
:param file_name: A Path object representing the file to test
:param nx, ny, nz: The size of the VM in grid spaces (nx*ny*nz)
:return: A possibly empty list of issues with the VM file
"""
errors = []
vm_size = nx * ny * nz
size = file_name.stat().st_size
if size != vm_size * SIZE_FLOAT:
errors.append(
f"VM filesize for {file_name} expected: {vm_size * SIZE_FLOAT} found: {size}"
)
with VelocityModelFile(nx, ny, nz, file_name, writable=False, memmap=True) as vmf:
min_v = vmf.get_values().min()
if min_v <= 0.0:
errors.append(f"File {file_name} has minimum value of {min_v}")
return errors | 334 |
def format_assignment_html(recording, debug=False):
"""Given a single recording, format it into an HTML file.
Each recording will only have one student.
Returns a {content: str, student: str, type: str, assignment: str} dict.
"""
try:
files = format_files_list(recording.get('files', {}))
warnings = format_warnings(recording.get('warnings', {}).items())
header = format_header(recording, warnings)
output = (header + files) + '\n\n'
except Exception as err:
if debug:
raise err
output = format_as_code(traceback.format_exc())
return {
'assignment': recording['spec'],
'content': output,
'student': recording['student'],
'type': 'html',
} | 335 |
def plot_PSD_amps(df, ch_titles, out_dir, channel):
""" Plots PSD using pwelch method. """
# %matplotlib qt  # IPython magic; not valid inside a plain Python module, so left commented out
sr = df["samplerate"].values[0]
df_0 = df.loc[df['amplitude_ma'] == df['amplitude_ma'].unique()[0]]
df_1 = df.loc[df['amplitude_ma'] == df['amplitude_ma'].unique()[1]]
df_2 = df.loc[df['amplitude_ma'] == df['amplitude_ma'].unique()[2]]
df_3 = df.loc[df['amplitude_ma'] == df['amplitude_ma'].unique()[3]]
df_4 = df.loc[df['amplitude_ma'] == df['amplitude_ma'].unique()[4]]
df_5 = df.loc[df['amplitude_ma'] == df['amplitude_ma'].unique()[5]]
f_0, Pxx_den_0 = signal.welch(df_0.loc[:, "ch" + str(channel) + "_mV"].values, sr, average = 'median')
f_1, Pxx_den_1 = signal.welch(df_1.loc[:, "ch" + str(channel) + "_mV"].values, sr, average = 'median')
f_2, Pxx_den_2 = signal.welch(df_2.loc[:, "ch" + str(channel) + "_mV"].values, sr, average = 'median')
f_3, Pxx_den_3 = signal.welch(df_3.loc[:, "ch" + str(channel) + "_mV"].values, sr, average = 'median')
f_4, Pxx_den_4 = signal.welch(df_4.loc[:, "ch" + str(channel) + "_mV"].values, sr, average = 'median')
f_5, Pxx_den_5 = signal.welch(df_5.loc[:, "ch" + str(channel) + "_mV"].values, sr, average = 'median')
fig = plt.figure()
plt.semilogy(f_0, Pxx_den_0, label = '0mA', alpha = 0.6)
plt.semilogy(f_1, Pxx_den_1, label = '1mA', alpha = 0.6)
plt.semilogy(f_2, Pxx_den_2, label = '2mA', alpha = 0.6)
plt.semilogy(f_3, Pxx_den_3, label = '3mA', alpha = 0.6)
plt.semilogy(f_4, Pxx_den_4, label = '4mA', alpha = 0.6)
plt.semilogy(f_5, Pxx_den_5, label = '5mA', alpha = 0.6)
plt.axvline(13, 0, 1, c = 'indianred')
plt.axvline(30, 0, 1, c = 'indianred')
plt.axvline(60, 0, 1, c = 'seagreen')
plt.axvline(90, 0, 1, c = 'seagreen')
#plt.plot(f, Pxx_den)
plt.xlabel('frequency (Hz)')
plt.ylabel('PSD (mV**2/Hz)')
plt.title(ch_titles[0] + "\n" + ch_titles[channel])
plt.legend()
plt.show()
fig.tight_layout()
save_label = concat_label(ch_titles[0])
fig.savefig(out_dir + "/" + "PSDamps_" + save_label + "_ch" + str(channel) + ".svg") | 336 |
def test_createTask1():
"""Checks for newly created task its status and urgency"""
i_task = tq.create_task("immediate")
assert i_task.status == "pending" and i_task.urgency == 3 | 337 |
def adapted_chu_liu_edmonds(length: int,
score_matrix: numpy.ndarray,
coreference: List[int],
current_nodes: List[bool],
final_edges: Dict[int, int],
old_input: numpy.ndarray,
old_output: numpy.ndarray,
representatives: List[Set[int]]):
"""
Applies the chu-liu-edmonds algorithm recursively
to a graph with edge weights defined by score_matrix.
Note that this function operates in place, so variables
will be modified.
Parameters
----------
length : ``int``, required.
The number of nodes.
score_matrix : ``numpy.ndarray``, required.
The score matrix representing the scores for pairs
of nodes.
coreference: ``List[int]``, required.
A list which maps a node to its first precedent.
current_nodes : ``List[bool]``, required.
The nodes which are representatives in the graph.
A representative at its most basic represents a node,
but as the algorithm progresses, individual nodes will
represent collapsed cycles in the graph.
final_edges: ``Dict[int, int]``, required.
An empty dictionary which will be populated with the
nodes which are connected in the maximum spanning tree.
old_input: ``numpy.ndarray``, required.
a map from an edge to its head node.
Key: The edge is a tuple, and elements in a tuple
could be a node or a representative of a cycle.
old_output: ``numpy.ndarray``, required.
a map from an edge to its child node.
representatives : ``List[Set[int]]``, required.
A list containing the nodes that a particular node
is representing at this iteration in the graph.
Returns
-------
Nothing - all variables are modified in place.
"""
# Set the initial graph to be the greedy best one.
# Node '0' is always the root node.
parents = [-1]
for node1 in range(1, length):
# Init the parent of each node to be the root node.
parents.append(0)
if current_nodes[node1]:
# If the node is a representative,
# find the max outgoing edge to other non-root representative,
# and update its parent.
max_score = score_matrix[0, node1]
for node2 in range(1, length):
if node2 == node1 or not current_nodes[node2]:
continue
# Exclude edges formed by two coreferred nodes
_parent = old_input[node1, node2]
_child = old_output[node1, node2]
if coreference[_parent] == coreference[_child]:
continue
new_score = score_matrix[node2, node1]
if new_score > max_score:
max_score = new_score
parents[node1] = node2
# Check if this solution has a cycle.
has_cycle, cycle = _find_cycle(parents, length, current_nodes)
# If there are no cycles, find all edges and return.
if not has_cycle:
final_edges[0] = -1
for node in range(1, length):
if not current_nodes[node]:
continue
parent = old_input[parents[node], node]
child = old_output[parents[node], node]
final_edges[child] = parent
return
# Otherwise, we have a cycle so we need to remove an edge.
# From here until the recursive call is the contraction stage of the algorithm.
cycle_weight = 0.0
# Find the weight of the cycle.
index = 0
for node in cycle:
index += 1
cycle_weight += score_matrix[parents[node], node]
# For each node in the graph, find the maximum weight incoming
# and outgoing edge into the cycle.
cycle_representative = cycle[0]
for node in range(length):
# Nodes not in the cycle.
if not current_nodes[node] or node in cycle:
continue
in_edge_weight = float("-inf")
in_edge = -1
out_edge_weight = float("-inf")
out_edge = -1
for node_in_cycle in cycle:
# Exclude edges formed by two coreferred nodes.
_parent = old_input[node_in_cycle, node]
_child = old_output[node_in_cycle, node]
if coreference[_parent] != coreference[_child]:
if score_matrix[node_in_cycle, node] > in_edge_weight:
in_edge_weight = score_matrix[node_in_cycle, node]
in_edge = node_in_cycle
# Exclude edges formed by two coreferred nodes.
_parent = old_input[node, node_in_cycle]
_child = old_output[node, node_in_cycle]
if coreference[_parent] != coreference[_child]:
# Add the new edge score to the cycle weight
# and subtract the edge we're considering removing.
score = (cycle_weight +
score_matrix[node, node_in_cycle] -
score_matrix[parents[node_in_cycle], node_in_cycle])
if score > out_edge_weight:
out_edge_weight = score
out_edge = node_in_cycle
score_matrix[cycle_representative, node] = in_edge_weight
old_input[cycle_representative, node] = old_input[in_edge, node]
old_output[cycle_representative, node] = old_output[in_edge, node]
score_matrix[node, cycle_representative] = out_edge_weight
old_output[node, cycle_representative] = old_output[node, out_edge]
old_input[node, cycle_representative] = old_input[node, out_edge]
# For the next recursive iteration, we want to consider the cycle as a
# single node. Here we collapse the cycle into the first node in the
# cycle (first node is arbitrary), set all the other nodes not be
# considered in the next iteration. We also keep track of which
# representatives we are considering this iteration because we need
# them below to check if we're done.
considered_representatives: List[Set[int]] = []
for i, node_in_cycle in enumerate(cycle):
considered_representatives.append(set())
if i > 0:
# We need to consider at least one
# node in the cycle, arbitrarily choose
# the first.
current_nodes[node_in_cycle] = False
for node in representatives[node_in_cycle]:
considered_representatives[i].add(node)
if i > 0:
representatives[cycle_representative].add(node)
adapted_chu_liu_edmonds(length, score_matrix, coreference, current_nodes, final_edges, old_input, old_output, representatives)
# Expansion stage.
# check each node in cycle, if one of its representatives
# is a key in the final_edges, it is the one we need.
# The node we are looking for is the node which is the child
# of the incoming edge to the cycle.
found = False
key_node = -1
for i, node in enumerate(cycle):
for cycle_rep in considered_representatives[i]:
if cycle_rep in final_edges:
key_node = node
found = True
break
if found:
break
# break the cycle.
previous = parents[key_node]
while previous != key_node:
child = old_output[parents[previous], previous]
parent = old_input[parents[previous], previous]
final_edges[child] = parent
previous = parents[previous] | 338 |
def compute_xlabel_confusion_matrix(y_true, y_pred, labels_train=None, labels_test=None,
normalize=True, sample_weight=None):
"""Computes confusion matrix when the labels used to train the classifier are
different from those of the test set.
Args:
y_true: Ground truth.
y_pred: Estimated labels.
labels_train: List of labels used to train the classifier. This may be used to reorder
or select a subset of labels. If none is given, those that
appear at least once in y_pred are used in sorted order.
labels_test: List of labels of the test set. This may be used to reorder
or select a subset of labels. If none is given, those that
appear at least once in y_true are used in sorted order.
sample_weight: Sample weights.
Returns:
Confusion matrix (num_classes_test x num_classes_train)
"""
y_true = list2ndarray(y_true)
y_pred = list2ndarray(y_pred)
if labels_train is None:
labels_train = np.unique(y_pred)
else:
labels_train = list2ndarray(labels_train)
if labels_test is None:
labels_test = np.unique(y_true)
else:
labels_test = list2ndarray(labels_test)
assert y_true.dtype == y_pred.dtype, 'y_true and y_pred labels do not have the same type'
assert labels_train.dtype == labels_test.dtype, 'Train and test labels do not have the same type'
assert labels_train.dtype == y_pred.dtype, 'Labels, y_true and y_pred do not have the same type'
num_classes_test = len(labels_test)
if issubclass(y_true.dtype.type, np.integer):
y_pred += num_classes_test
elif issubclass(y_true.dtype.type, np.dtype('U').type) or issubclass(
y_true.dtype.type, np.dtype('S').type):
y_true = np.asarray(['TEST_' + s for s in y_true])
y_pred = np.asarray(['TRAIN_' + s for s in y_pred])
else:
raise Exception()
if issubclass(labels_train.dtype.type, np.integer):
labels_train += num_classes_test
elif issubclass(labels_train.dtype.type, np.dtype('U').type) or issubclass(
labels_train.dtype.type, np.dtype('S').type):
labels_test = np.asarray(['TEST_' + s for s in labels_test])
labels_train = np.asarray(['TRAIN_' + s for s in labels_train])
else:
raise Exception()
labels = np.concatenate((labels_test, labels_train))
C = confusion_matrix(y_true, y_pred, labels, sample_weight)
C = C[:num_classes_test, num_classes_test:]
if normalize:
C = C/np.sum(C, axis=1, keepdims=True)
return C | 339 |
def write_build_file(build_gn_path, package_name, name_with_version, language_version, deps, dart_sources):
""" writes BUILD.gn file for Dart package with dependencies """
with open(build_gn_path, 'w', encoding='utf-8') as build_gn:
build_gn.write('''# This file is generated by importer.py for %s
import("//build/dart/dart_library.gni")
dart_library("%s") {
package_name = "%s"
language_version = "%s"
disable_analysis = true
deps = [
''' % (name_with_version, package_name, package_name, language_version))
for dep in deps:
if dep in LOCAL_PACKAGES:
build_gn.write(' "%s",\n' % LOCAL_PACKAGES[dep])
else:
build_gn.write(' "//third_party/dart-pkg/pub/%s",\n' % dep)
build_gn.write(''' ]
sources = [
''')
for source in sorted(dart_sources):
build_gn.write(' "%s",\n' % source)
build_gn.write(''' ]
}
''') | 340 |
def generate_synthetic_data(n=50):  # n is the number of training points drawn from each normal distribution
"""Create two sets of points from bivariate normal distributions."""
points = np.concatenate((ss.norm(0,1).rvs((n,2)),ss.norm(1,1).rvs((n,2))), axis=0)  # norm(mean, standard deviation)
# '.rvs' draws random variates of the given shape: .rvs((number of rows, number of columns)).
# axis=0 concatenates along the rows, so points has shape (2n, 2):
# n rows from the first distribution followed by n rows from the second.
outcomes = np.concatenate((np.repeat(0,n), np.repeat(1,n)), axis=0)
# outcomes holds the class labels: n zeros followed by n ones (0 and 1 name the two classes).
return (points, outcomes) | 341 |
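# Example (illustrative): 2*n points in two dimensions plus their 0/1 class labels.
points, outcomes = generate_synthetic_data(n=5)
print(points.shape, outcomes.shape)  # (10, 2) (10,)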
def show_qr_detection(img, pts):
"""Draw both the lines and corners based on the array of vertices of the found QR code"""
pts = np.int32(pts).reshape(-1, 2)
for j in range(pts.shape[0]):
cv2.line(img, tuple(pts[j]), tuple(pts[(j + 1) % pts.shape[0]]), (255, 0, 0), 5)
for j in range(pts.shape[0]):
cv2.circle(img, tuple(pts[j]), 10, (255, 0, 255), -1) | 342 |
def upload_download_test(**kwargs):
"""Run upload and/or download test with generated test files"""
cwm_worker_tests.upload_download_test.main(**kwargs) | 343 |
def mask_array(array, idx, n_behind, n_ahead):
"""Return a copy of a window of `array` centred on `idx`.
Args:
array: indexable sequence (e.g. numpy array or list) to slice
idx (int): index around which the window is taken
n_behind (int): number of elements to keep before `idx`
n_ahead (int): number of elements to keep after `idx`
Returns:
a copy of array[idx - n_behind : idx + n_ahead + 1], clipped to the array bounds
"""
first = max(0, idx - n_behind)
last = min(idx + n_ahead + 1, len(array))
array_masked = array[first:last].copy()
return array_masked | 344 |
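# Example (illustrative): keep a window of two elements behind and two ahead of index 5.
import numpy as np

print(mask_array(np.arange(10), idx=5, n_behind=2, n_ahead=2))  # [3 4 5 6 7]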
def test_atomic_language_length_2_nistxml_sv_iv_atomic_language_length_3_5(mode, save_output, output_format):
"""
Type atomic/language is restricted by facet length with value 10.
"""
assert_bindings(
schema="nistData/atomic/language/Schema+Instance/NISTSchema-SV-IV-atomic-language-length-3.xsd",
instance="nistData/atomic/language/Schema+Instance/NISTXML-SV-IV-atomic-language-length-3-5.xml",
class_name="NistschemaSvIvAtomicLanguageLength3",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
) | 345 |
def pip_main():
"""Entry point for pip-packaged binary
Required because the pip-packaged binary calls the entry method
without arguments
"""
main([' '.join(sys.argv[1:])]) | 346 |
def get_nfs_acl(path: str, user: str) -> str:
"""
Retrieve the complete list of access control permissions assigned to a file or directory.
"""
raw = command(["/usr/bin/nfs4_getfacl", path], output=True).stdout.decode("utf-8")
allowed: Set[str] = set()
denied: Set[str] = set()
for line in raw.splitlines():
if line.startswith("#"):
continue
type_, _, principal, perms = line.split(":")
if principal != user:
continue
if type_ == "A":
allowed.update(perms)
elif type_ == "D":
denied.update(perms)
return "".join(sorted(allowed - denied)) | 347 |
def PolyMult(p1, p2, debug=False):
"""
Multiply two numbers in the GF(2^8) finite field defined by the AES
reduction polynomial x^8 + x^4 + x^3 + x + 1 (0b100011011).
See http://stackoverflow.com/questions/13202758/multiplying-two-polynomials
for more info.
"""
binP2 = bin(p2)[2:].zfill(8)
mult = 0
if p1 == 0 or p2 == 0:
return 0
for i in range(8):
bit = binP2[i]
if bit == "1":
mult ^= (p1 << (7 - i))
reducPoly = int("100011011", 2)
while True:
if GetMSBIndex(mult) < GetMSBIndex(reducPoly):
break
elif GetMSBIndex(mult) == GetMSBIndex(reducPoly):
mult ^= reducPoly
else:
degreeDiff = GetMSBIndex(mult) - GetMSBIndex(reducPoly)
mult ^= (reducPoly << degreeDiff)
return mult | 348 |
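# Worked example: FIPS-197 section 4.2 gives {57} . {83} = {c1} in GF(2^8). GetMSBIndex
# is defined elsewhere in the original module; the stand-in below (index of the most
# significant set bit) is only an assumption used to make this sketch runnable on its own.
def GetMSBIndex(n):
    return n.bit_length() - 1

print(hex(PolyMult(0x57, 0x83)))  # 0xc1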
def run_hdbscan(X_df, X_tsne, output_dir, transparent):
"""Cluster using density estimation
Parameters
----------
X_df: DataFrame
X_tsne: array-like, [n_samples, 2]
output_dir: str, path
transparent: bool
Returns
-------
assignments: numpy array of shape [n_samples,]
exemplars: list of exemplar arrays, one per cluster
n_clusters: int, number of clusters found
clusterer: HDBSCAN object
"""
from hdbscan import HDBSCAN
clusterer = HDBSCAN(
core_dist_n_jobs=-1,
cluster_selection_method="eom", # 'leaf',
approx_min_span_tree=False,
min_cluster_size=100,
min_samples=1,
leaf_size=100,
gen_min_span_tree=True,
# alpha=10.,
memory=Memory(cachedir=None, verbose=0),
)
assignments = clusterer.fit_predict(X_df)
centroid_labels, counts = np.unique(assignments, return_counts=True)
n_clusters = len(centroid_labels)
assignments[assignments == -1] = n_clusters - 1
logger.info("[HDBSCAN] Found {} clusters".format(n_clusters))
logger.info("[HDBSCAN] Cluster assignments:\n{}".format(counts))
logger.info(
"[HDBSCAN] Cluster persistence:\n{}".format(clusterer.cluster_persistence_)
)
return assignments, clusterer.exemplars_, n_clusters, clusterer | 349 |
async def test_http_error400(aresponses):
"""Test HTTP 404 response handling."""
aresponses.add(
"pvoutput.org",
"/service/r2/test",
"GET",
aresponses.Response(text="OMG PUPPIES!", status=404),
)
async with aiohttp.ClientSession() as session:
pvoutput = PVOutput(api_key="fake", system_id=12345, session=session)
with pytest.raises(PVOutputError):
assert await pvoutput._request("test") | 350 |
def check_min_sample_periods(X, time_column, min_sample_periods):
"""
Check if all periods contained in a dataframe for a certain time_column
contain at least min_sample_periods examples.
"""
return (X[time_column].value_counts() >= min_sample_periods).prod() | 351 |
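# Illustrative check with made-up data: the product of the per-period comparisons is
# truthy only when every period has at least `min_sample_periods` rows.
import pandas as pd

X = pd.DataFrame({"month": ["2021-01", "2021-01", "2021-02", "2021-02", "2021-02"]})
print(check_min_sample_periods(X, "month", 2))  # truthy: every month has >= 2 rows
print(check_min_sample_periods(X, "month", 3))  # falsy: 2021-01 has only 2 rows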
def _insert(partition, bat):
"""
Insert data into HBase. Each table has different fields, so the put
statement needs to be written separately for each table.
:param partition: partition of rows
:param bat: batch
:return:
"""
for row in partition:
# bat.put(str(row.datasetA.movie_id).encode(),
# {"similar:{}".format(row.datasetB.movie_id).encode(): b"%0.4f" % (row.EucDistance)})
bat.put(str(row.movie_id).encode(),
{"similar:{}".format(row.movie_id2).encode(): b"%0.4f" % (row.cos_sim)}) | 352 |
def main():
"""
Run ftfy as a command-line utility. (Requires Python 2.7 or later, or
the 'argparse' module.)
"""
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('filename', help='file to transcode')
args = parser.parse_args()
file = open(args.filename)
for line in fix_file(file):
if ENCODE_STDOUT:
# stdout expects bytes here, so encode explicitly.
sys.stdout.write(line.encode('utf-8'))
else:
sys.stdout.write(line)
def test_profit(performance):
"""The profit property should return the profit earned on a win bet for the performance"""
expected_value = -1.0
if performance['result'] == 1:
expected_value += performance['starting_price']
assert performance.profit == expected_value | 354 |
def get_quest_stat(cards): # pylint: disable=R0912,R0915
""" Get quest statistics.
"""
res = {}
encounter_sets = set()
keywords = set()
card_types = {}
for card in cards:
if card.get(lotr.CARD_KEYWORDS):
keywords = keywords.union(
lotr.extract_keywords(card[lotr.CARD_KEYWORDS]))
if (card.get(lotr.CARD_TEXT) and
(' Restricted.' in card[lotr.CARD_TEXT] or
'\nRestricted.' in card[lotr.CARD_TEXT])):
keywords.add('Restricted')
if card.get(lotr.CARD_ENCOUNTER_SET):
encounter_sets.add(card[lotr.CARD_ENCOUNTER_SET])
if card.get(lotr.CARD_ADDITIONAL_ENCOUNTER_SETS):
encounter_sets = encounter_sets.union(
[s.strip() for s in
str(card[lotr.CARD_ADDITIONAL_ENCOUNTER_SETS]).split(';')])
card_type = card[lotr.CARD_TYPE]
if card.get(lotr.CARD_SPHERE) in ('Boon', 'Burden'):
card_type = '{} ({})'.format(card_type, card[lotr.CARD_SPHERE])
card_types[card_type] = (
card_types.get(card_type, 0) + card[lotr.CARD_QUANTITY])
if encounter_sets:
res['encounter_sets'] = '*Encounter Sets*: {}\n'.format(
', '.join(sorted(encounter_sets)))
else:
res['encounter_sets'] = ''
if keywords:
res['keywords'] = '*Keywords*: {}\n'.format(
', '.join(sorted(keywords)))
else:
res['keywords'] = ''
card_types = sorted(list(card_types.items()), key=lambda t: t[0])
card_types = sorted(card_types, key=lambda t: t[1], reverse=True)
res['total'] = '*Cards*: {}\n'.format(sum(t[1] for t in card_types))
res['card_types'] = '\n'.join('*{}*: {}'.format(
t[0], t[1]) for t in card_types)
card_types = {}
threat = 0
max_threat = 0
shadow = 0
surge = 0
res['encounter_deck'] = ''
deck = [card for card in cards if card[CARD_DECK_SECTION] == 'Encounter']
for card in deck:
card_type = card[lotr.CARD_TYPE]
if card.get(lotr.CARD_SPHERE) in ('Boon', 'Burden'):
card_type = '{} ({})'.format(card_type, card[lotr.CARD_SPHERE])
card_types[card_type] = (
card_types.get(card_type, 0) + card[lotr.CARD_QUANTITY])
if lotr.is_positive_int(card.get(lotr.CARD_THREAT)):
threat += int(card[lotr.CARD_THREAT]) * card[lotr.CARD_QUANTITY]
max_threat = max(max_threat, int(card[lotr.CARD_THREAT]))
if card.get(lotr.CARD_SHADOW):
shadow += card[lotr.CARD_QUANTITY]
if card.get(lotr.CARD_KEYWORDS):
if 'Surge' in lotr.extract_keywords(card[lotr.CARD_KEYWORDS]):
surge += card[lotr.CARD_QUANTITY]
if not card_types:
return res
card_types = sorted(list(card_types.items()), key=lambda t: t[0])
card_types = sorted(card_types, key=lambda t: t[1], reverse=True)
total = sum(t[1] for t in card_types)
card_types = [(t[0], '{} ({}%)'.format(t[1], round(t[1] * 100 / total)))
for t in card_types]
res['encounter_deck'] = '**Encounter Deck**\n*Cards*: {}\n\n{}\n\n'.format(
total, '\n'.join('*{}*: {}'.format(t[0], t[1]) for t in card_types))
if shadow:
res['encounter_deck'] += '*Shadow*: {} ({}%)\n'.format(
shadow, round(shadow * 100 / total))
if surge:
res['encounter_deck'] += '*Surge*: {} ({}%)\n'.format(
surge, round(surge * 100 / total))
res['encounter_deck'] += '*Threat*: {} (Avg), {} (Max)\n\n'.format(
round(threat / total, 1), max_threat)
return res | 355 |
def build_command_names():
""" Use the list of commands available to build the COMMAND_NAMES dict.
"""
for cmd in COMMANDS:
doc = cmd.__doc__.strip() if cmd.__doc__ is not None else 'Unknown'
doc = doc.split('\n')[0]
COMMAND_NAMES[cmd.__name__] = {'name': doc, 'function': cmd} | 356 |
def bind_type(python_value):
"""Return a Gibica type derived from a Python type."""
binding_table = {'bool': Bool, 'int': Int, 'float': Float}
if python_value is None:
return NoneType()
python_type = type(python_value)
gibica_type = binding_table.get(python_type.__name__)
if gibica_type is None:
raise TypeError('Impossible to recognize underlying type.')
return gibica_type(python_value) | 357 |
def delete_server(hostname, instance_id):
"""
Deletes a server by hostname and instance_id.
"""
host = get_host_by_hostname(hostname)
if not host or not instance_id:
return None
try:
r = requests.delete("%s/servers/%i" % (host['uri'], instance_id),
auth=HTTPDigestAuth(host['username'], host['password']),
timeout=(CONNECT_TIMEOUT, READ_TIMEOUT))
if r.ok:
return r.json()
except requests.exceptions.ConnectionError as e:
import traceback
traceback.print_exc()
return None
return None | 358 |
def quote_ident(val):
"""
This method returns a new string replacing " with "",
and adding a " at the start and end of the string.
"""
return '"' + val.replace('"', '""') + '"' | 359 |
def TFC_TDF(in_channels, num_layers, gr, kt, kf, f, bn_factor=16, bias=False):
"""
Wrapper Function: -> TFC_TIF
in_channels: number of input channels
num_layers: number of densely connected conv layers
gr: growth rate
kt: kernel size of the temporal axis.
kf: kernel size of the freq. axis
f: num of frequency bins
below are params for TDF
bn_factor: bottleneck factor. if None: single layer. else: MLP that maps f => f//bn_factor => f
bias: bias setting of linear layers
"""
return TFC_TIF(in_channels, num_layers, gr, kt, kf, f, bn_factor, bias) | 360 |
def barplot_data(gene_values, gene_names, cluster_name, x_label,
title=None):
"""
Converts data for top genes into a json for building the
bar plot. Output should be formatted in a way that can be plugged into
Plotly.
Args:
gene_values (list): list of tuples (gene_id, gene_value)
gene_names (list): list of gene names corresponding to
the genes in gene_values.
cluster_name: name of the cluster from which the top genes are drawn.
x_label: label for the x-axis.
title: plot title
"""
if gene_values is None:
gene_values = [(1,1), (2,2), (3,3)]
if gene_names is None:
gene_names = ['placeholder 1', 'placeholder 2', 'placeholder 3']
if title is None:
title = 'Top genes for cluster {0}'.format(cluster_name)
return json.dumps({
'data': [{
'x': list(x[1] for x in gene_values),
'y': gene_names,
'orientation': 'h',
'type': 'bar',
}],
'layout': {
'title': title,
'xaxis': {'title': x_label},
'margin': {'t': 40},
},
}, cls=SimpleEncoder) | 361 |
def save_animate(data, data_root, quantity, kwargs_plot={}, kwargs_animate={}):
"""
Save the frames and animate the quantity of interest
Args:
data: the flow field defined by the class SimFramework
data_root: file path to an empty folder to save frames to
quantity: what we are animating
**kwargs_plot: args for plotting the flow
**kwargs_animate: extra args for imageio.get_writer
Returns:
"""
save_sim_frames(data, os.path.join(data_root, 'figures'), quantity, **kwargs_plot)
animate(os.path.join(data_root, 'figures'), quantity, **kwargs_animate) | 362 |
def main():
"""main function"""
field = {
'minLngE6': 116298171,
'minLatE6': 39986831,
'maxLngE6': 116311303,
'maxLatE6': 39990941,
}
with open('cookies') as cookies:
cookies = cookies.read().strip()
intel = ingrex.Intel(cookies, field)
result = intel.fetch_msg(tab='faction')
result = intel.fetch_map(['17_29630_13630_0_8_100'])
result = intel.fetch_portal(guid='ac8348883c8840f6a797bf9f4f22ce39.16')
result = intel.fetch_score()
result = intel.fetch_region()
result = intel.fetch_artifacts()
print(result) | 363 |
def logic_not(operand: ValueOrExpression) -> Expression:
"""
Constructs a logical negation expression.
"""
return Not(operators.NotOperator.NOT, ensure_expr(operand)) | 364 |
def webpage_attribute_getter(attr):
""" Helper function for defining getters for web_page attributes, e.g.
``get_foo_enabled = webpage_attribute_getter("foo")`` returns
a value of ``webpage.foo`` attribute.
"""
def _getter(self):
return getattr(self.web_page, attr)
return _getter | 365 |
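# Minimal sketch (class and attribute names are assumptions, not from the original
# code) showing how the helper builds a getter that forwards to self.web_page.
class FakeWebPage:
    foo = True

class Wrapper:
    get_foo_enabled = webpage_attribute_getter("foo")
    def __init__(self):
        self.web_page = FakeWebPage()

print(Wrapper().get_foo_enabled())  # True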
def diff_with_step(a:np.ndarray, step:int=1, **kwargs) -> np.ndarray:
""" finished, checked,
compute a[n+step] - a[n] for all valid n
Parameters
----------
a: ndarray,
the input data
step: int, default 1,
the step to compute the difference
kwargs: dict,
Returns
-------
d: ndarray:
the difference array
"""
if step >= len(a):
raise ValueError(f"step ({step}) should be less than the length ({len(a)}) of `a`")
d = a[step:] - a[:-step]
return d | 366 |
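# Example (illustrative): a step-2 difference, i.e. a[n+2] - a[n] for every valid n.
import numpy as np

print(diff_with_step(np.array([1, 2, 4, 7, 11]), step=2))  # [3 5 7]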
def main():
"""
Driver to download all of the ancillary data files
"""
# Get data from World Ocean (WOA) 2013 version 2
# get_WOA13_data()
# Get data from NODC (Levitus) World Ocean Atlas 1994
# get_WOA94_data()
# Get data from NODC World Ocean Atlas 2001
# get_WOA01_data()
# GEBCO’s gridded bathymetric data set
# get_GEBCO_data()
# Get data for Dissolved Organic Carbon (DOC)
# get_DOC_data()
# Get data for Shortwave radiation (Large and Yeager, 2009)
# get_SWrad_data()
# Get data for chlorophyll-a from SeaWIFS
# get_SeaWIFS_data()
# Get data for Productivity (Behrenfeld and Falkowski, 1997)
# get_productivity_data()
# Get the data from World Ocean Atlas 2018
get_WOA18_data() | 367 |
def save_temp_data(data, filename, directory='temp'):
"""save temp data to disk"""
if not os.path.exists(directory):
os.makedirs(directory)
with open(directory + '/'+ filename + '.temp', 'wb') as f:
pickle.dump(data, f)
f.close()
print("Data saved to", filename + ".temp in working directory") | 368 |
def rmse(y_true: np.ndarray, y_pred: np.ndarray):
"""
Returns the root mean squared error between y_true and y_pred.
:param y_true: NumPy.ndarray with the ground truth values.
:param y_pred: NumPy.ndarray with the ground predicted values.
:return: root mean squared error (float).
"""
return np.sqrt(mean_squared_error(y_true, y_pred)) | 369 |
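# Quick numeric check (illustrative): predictions that are each off by exactly 1 give
# an RMSE of 1.
import numpy as np

print(rmse(np.array([1.0, 2.0, 3.0]), np.array([2.0, 3.0, 4.0])))  # 1.0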
def party_name_from_key(party_key):
"""returns the relevant party name"""
relevant_parties = {0: 'Alternativet',
1: 'Dansk Folkeparti',
2: 'Det Konservative Folkeparti',
3: 'Enhedslisten - De Rød-Grønne',
4: 'Liberal Alliance',
5: 'Nye Borgerlige',
6: 'Radikale Venstre',
7: 'SF - Socialistisk Folkeparti',
8: 'Socialdemokratiet',
9: 'Venstre, Danmarks Liberale Parti'}
return relevant_parties[party_key] | 370 |
def GetOutDirectory():
"""Returns the Chromium build output directory.
NOTE: This is determined in the following way:
- From a previous call to SetOutputDirectory()
- Otherwise, from the CHROMIUM_OUTPUT_DIR env variable, if it is defined.
- Otherwise, from the current Chromium source directory, and a previous
call to SetBuildType() or the BUILDTYPE env variable, in combination
with the optional CHROMIUM_OUT_DIR env variable.
"""
if 'CHROMIUM_OUTPUT_DIR' in os.environ:
return os.path.abspath(os.path.join(
DIR_SOURCE_ROOT, os.environ.get('CHROMIUM_OUTPUT_DIR')))
build_type = os.environ.get('BUILDTYPE')
if not build_type:
raise EnvironmentError(_MISSING_OUTPUT_DIR_MESSAGE)
return os.path.abspath(os.path.join(
DIR_SOURCE_ROOT, os.environ.get('CHROMIUM_OUT_DIR', 'out'),
build_type)) | 371 |
def __imul__(self,n) :
"""Concatenate the bitstring to itself |n| times, bitreversed if n < 0"""
if not isint(n) :
raise TypeError("Can't multiply bitstring by non int");
if n <= 0 :
if n :
n = -n;
l = self._l;
for i in xrange(l//2) :
self[i],self[l-1-i] = self[l-1-i],self[i];
else :
self._x = 0;
self._l = 0;
if n > 1 :
y = type(self)(self);
for _ in xrange(n-1) :
self.iconcat(y);
return self; | 372 |
def FlushAllCaches():
"""Removes any cached data from datastore/memcache."""
chart_data_keys = ChartData.query().fetch(keys_only=True)
ndb.delete_multi(chart_data_keys)
project_list_keys = Projects.query().fetch(keys_only=True)
ndb.delete_multi(project_list_keys) | 373 |
def test_integration_format_configuring_conf_json_no_interactive_positive(tmp_path: PosixPath,
source_path: str,
destination_path: str,
formatter: BaseUpdateYML,
yml_title: str,
file_type: str):
"""
Given
- A yml file (integration, playbook or script) with no tests playbooks configured that are not configured
in conf.json
When
- using the -y option
Then
- Ensure no exception is raised
- If file_type is playbook or a script: Ensure {"playbookID": <content item ID>} is added to conf.json
for each test playbook configured in the yml under 'tests' key
- If file_type is integration: Ensure {"playbookID": <content item ID>, "integrations": yml_title} is
added to conf.json for each test playbook configured in the yml under 'tests' key
"""
# Setting up conf.json
conf_json_path = str(tmp_path / 'conf.json')
with open(conf_json_path, 'w') as file:
json.dump(CONF_JSON_ORIGINAL_CONTENT, file, indent=4)
BaseUpdateYML.CONF_PATH = conf_json_path
test_playbooks = ['test1', 'test2']
saved_file_path = str(tmp_path / os.path.basename(destination_path))
runner = CliRunner()
# Running format in the first time
result = runner.invoke(main, [FORMAT_CMD, '-i', source_path, '-o', saved_file_path, '-y'])
assert not result.exception
if file_type == 'playbook':
_verify_conf_json_modified(test_playbooks, '', conf_json_path)
else:
_verify_conf_json_modified(test_playbooks, yml_title, conf_json_path) | 374 |
def update_cache(force=False, cache_file=None):
"""
Load a build cache, updating it if necessary.
A cache is considered outdated if any of its inputs have changed.
Arguments
force -- Consider a cache outdated regardless of whether its inputs have
been modified.
"""
if not cache_file:
cache_file = find_config()
cache_config = devpipeline_configure.parser.read_config(cache_file)
cache = devpipeline_configure.cache._CachedConfig(cache_config, cache_file)
if force or _is_outdated(cache_file, cache):
cache = devpipeline_configure.config.process_config(
cache_config.get("DEFAULT", "dp.build_config"),
os.path.dirname(cache_file),
"build.cache",
profiles=cache_config.get("DEFAULT", "dp.profile_name", fallback=None),
overrides=cache_config.get("DEFAULT", "dp.overrides", fallback=None),
src_root=cache_config.get("DEFAULT", "dp.src_root"),
)
devpipeline_core.sanitizer.sanitize(
cache, lambda n, m: print("{} [{}]".format(m, n))
)
return cache | 375 |
def create_uniform_masses_lengths_randomizer_qq(frac_halfspan: float):
"""
Get a uniform randomizer that applies to all masses and lengths of the Quanser Qube according to a fraction of their
nominal parameter values
:param frac_halfspan: fraction of the nominal parameter value
:return: `DomainRandomizer` with uniformly distributed masses and lengths
"""
from pyrado.environments.pysim.quanser_qube import QQubeSim
dp_nom = QQubeSim.get_nominal_domain_param()
return DomainRandomizer(
UniformDomainParam(
name="mass_pend_pole",
mean=dp_nom["mass_pend_pole"],
halfspan=dp_nom["mass_pend_pole"] / frac_halfspan,
clip_lo=1e-3,
),
UniformDomainParam(
name="mass_rot_pole",
mean=dp_nom["mass_rot_pole"],
halfspan=dp_nom["mass_rot_pole"] / frac_halfspan,
clip_lo=1e-3,
),
UniformDomainParam(
name="length_rot_pole",
mean=dp_nom["length_rot_pole"],
halfspan=dp_nom["length_rot_pole"] / frac_halfspan,
clip_lo=1e-2,
),
UniformDomainParam(
name="length_pend_pole",
mean=dp_nom["length_pend_pole"],
halfspan=dp_nom["length_pend_pole"] / frac_halfspan,
clip_lo=1e-2,
),
) | 376 |
def compress_table(tbl, condition, blen=None, storage=None, create='table',
**kwargs):
"""Return selected rows of a table."""
# setup
storage = _util.get_storage(storage)
names, columns = _util.check_table_like(tbl)
blen = _util.get_blen_table(tbl, blen)
_util.check_equal_length(columns[0], condition)
length = len(columns[0])
nnz = count_nonzero(condition)
# block iteration
out = None
for i in range(0, length, blen):
j = min(i+blen, length)
bcond = np.asanyarray(condition[i:j])
# don't access any data unless we have to
if np.any(bcond):
bcolumns = [np.asanyarray(c[i:j]) for c in columns]
res = [np.compress(bcond, c, axis=0) for c in bcolumns]
if out is None:
out = getattr(storage, create)(res, names=names,
expectedlen=nnz, **kwargs)
else:
out.append(res)
return out | 377 |
def validate_schedule():
"""Helper routine to report issues with the schedule"""
all_items = prefetch_schedule_items()
errors = []
for validator, _type, msg in SCHEDULE_ITEM_VALIDATORS:
for item in validator(all_items):
errors.append('%s: %s' % (msg, item))
all_slots = prefetch_slots()
for validator, _type, msg in SLOT_VALIDATORS:
for slot in validator(all_slots):
errors.append('%s: %s' % (msg, slot))
return errors | 378 |
def train_gridsearchcv_model(base_model: Any,
X: np.array,
y: np.array,
cv_splitter,
hyperparameter_grid: Dict[str, Any],
scoring: Union[str, Callable[[Any, np.array, np.array], int]]="f1_weighted",
n_jobs: int=4,
verbose: int=3,
) -> Tuple[Dict[str, Any], pd.DataFrame]:
"""Trains given model using gridsearch crossvalidation.
X - numpy array of input vectors
y - numpy array of input labels
cv_splitter - splitter that splits X and y into train and validation splits
hyperparameter_grid - hyperparameters used for grid search
scoring - scoring function which is used to evaluate
n_jobs - number of cores to use
verbose - level of verboseness used for GridSearchCV, see scikit-learn
returns (best_parameters, scores_df) where
best_parameters are best hyperparameters found
scores_df is dataframe with scores over all hyperparameter combinations
"""
model = GridSearchCV(
base_model, hyperparameter_grid,
scoring=scoring,
n_jobs=n_jobs, cv=cv_splitter,
refit=False, verbose=verbose,
return_train_score=True
)
return train_cv_model(model, X, y) | 379 |
def set_resolmatrix(nspec,nwave):
""" Generate a Resolution Matrix
Args:
nspec: int
nwave: int
Returns:
Rdata: np.array
"""
sigma = np.linspace(2,10,nwave*nspec)
ndiag = 21
xx = np.linspace(-ndiag/2.0, +ndiag/2.0, ndiag)
Rdata = np.zeros( (nspec, len(xx), nwave) )
for i in range(nspec):
for j in range(nwave):
kernel = np.exp(-xx**2/(2*sigma[i*nwave+j]**2))
kernel /= sum(kernel)
Rdata[i,:,j] = kernel
return Rdata | 380 |
def assembleR(X, W, fct):
"""
Assemble M = W * fct(X): evaluate fct element-wise on X and weight the result by W.
"""
M = W * fct(X)
return M | 381 |
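# Example (illustrative, with numpy.sin as the assumed fct): an element-wise weighted
# evaluation of fct on X.
import numpy as np

X = np.linspace(0.0, np.pi, 4)
W = np.ones_like(X)
print(assembleR(X, W, np.sin))  # approximately [0.  0.866  0.866  0.]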
def generate_dictionary_variable_types(
dict_name, key_name, search_dict, indent_level=0
):
"""Generate a dictionary from config with values from either function, variable, or static"""
out_str = []
# Don't escape these:
types_used = ["None", "True", "False", None, True, False]
if len(search_dict) < 1:
logging.warning("Can't search 0 len dict")
return None
if key_exists("function", search_dict):
logging.info("Found funciton in dict")
out_str = f'{dict_name}["{key_name}"] = {search_dict["function"]}'
elif key_exists("variable", search_dict):
logging.info("Found variable in dict")
out_str = f'{dict_name}["{key_name}"] = {search_dict["variable"]}'
elif key_exists("static", search_dict):
if (
isinstance(search_dict["static"], int)
or search_dict["static"] in types_used
):
logging.info("Found static (None / Bool) in dict")
out_str = f'{dict_name}["{key_name}"] = {search_dict["static"]}'
else:
logging.info("Found static (string) in dict")
out_str = f'{dict_name}["{key_name}"] = "{search_dict["static"]}"'
else:
logging.warning("Unable to find function, variable, or static string")
return None
return indent(out_str, indent_level) | 382 |
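# Illustrative calls (an addition): key_exists and indent are assumed to be the
# helpers referenced above (dictionary key lookup and indentation by level), so the
# exact whitespace of the returned string depends on indent().
print(generate_dictionary_variable_types("cfg", "mode", {"static": "fast"}))
# expected: cfg["mode"] = "fast"
print(generate_dictionary_variable_types("cfg", "debug", {"static": True}))
# expected: cfg["debug"] = True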
def make_uuid(value):
"""Converts a value into a python uuid object."""
if isinstance(value, uuid.UUID):
return value
return uuid.UUID(value) | 383 |
def test_twospin_v_coo():
"""Tests to see if sparse.tensordot works with COO arrays instead.
This test passes for sparse <=0.10.0, but fails for >=0.11.0,
and generates the same nmrsim error that was observed when sparse was upgraded.
"""
v, J = spin2()
Lz = np.array(
[[[0.5 + 0.j, 0. + 0.j, 0. + 0.j, 0. + 0.j],
[0. + 0.j, 0.5 + 0.j, 0. + 0.j, 0. + 0.j],
[0. + 0.j, 0. + 0.j, -0.5 + 0.j, -0. + 0.j],
[0. + 0.j, 0. + 0.j, -0. + 0.j, -0.5 + 0.j]],
[[0.5 + 0.j, 0. + 0.j, 0. + 0.j, 0. + 0.j],
[0. + 0.j, -0.5 + 0.j, 0. + 0.j, -0. + 0.j],
[0. + 0.j, 0. + 0.j, 0.5 + 0.j, 0. + 0.j],
[0. + 0.j, -0. + 0.j, 0. + 0.j, -0.5 + 0.j]]]
)
v_coo = sparse.COO(v)
Lz_coo = sparse.COO(Lz)
H = sparse.tensordot(v_coo, Lz_coo, axes=1)
assert np.allclose(
H.todense(),
np.array(
[[15. + 0.j, 0. + 0.j, 0. + 0.j, 0. + 0.j],
[0. + 0.j, -5. + 0.j, 0. + 0.j, 0. + 0.j],
[0. + 0.j, 0. + 0.j, 5. + 0.j, 0. + 0.j],
[0. + 0.j, 0. + 0.j, 0. + 0.j, -15. + 0.j]]
)) | 384 |
def create_config(config_data,
aliases=False,
prefix=False,
multiple_displays=False,
look_info=None,
custom_output_info=None,
custom_lut_dir=None):
"""
Create the *OCIO* config based on the configuration data
Parameters
----------
config_data : dict
Colorspaces and transforms converting between those colorspaces and
the reference colorspace, *ACES*, along with other data needed to
generate a complete *OCIO* configuration
aliases : bool, optional
Whether or not to include Alias colorspaces
prefix : bool, optional
Whether or not to prefix the colorspace names with their Family names
multiple_displays : bool, optional
Whether to create a single display named *ACES* with Views for each
Output Transform or multiple displays, one for each Output Transform
look_info : array of str or unicode, optional
Paths and names for look data
custom_lut_dir : str or unicode, optional
Directory to use for storing custom look files
Returns
-------
*OCIO* config
The constructed OCIO configuration
"""
if look_info is None:
look_info = []
if custom_output_info is None:
custom_output_info = []
prefixed_names = {}
alias_colorspaces = []
config = ocio.Config()
config.setDescription('An ACES config generated from python')
search_path = ['luts']
if custom_lut_dir:
search_path.append('custom')
config.setSearchPath(':'.join(search_path))
reference_data = config_data['referenceColorSpace']
    # Adding the colorspace *Family* into the name which helps with
    # applications that present colorspaces as a flat list.
if prefix:
prefixed_name = colorspace_prefixed_name(reference_data)
prefixed_names[reference_data.name] = prefixed_name
reference_data.name = prefixed_name
print('Adding the reference color space : %s' % reference_data.name)
reference = ocio.ColorSpace(
name=reference_data.name,
bitDepth=reference_data.bit_depth,
description=reference_data.description,
equalityGroup=reference_data.equality_group,
family=reference_data.family,
isData=reference_data.is_data,
allocation=reference_data.allocation_type,
allocationVars=reference_data.allocation_vars)
config.addColorSpace(reference)
if aliases:
if reference_data.aliases:
# Deferring adding alias colorspaces until end, which helps with
# applications listing the colorspaces in the order that they were
# defined in the configuration: alias colorspaces are usually named
# lower case with spaces but normal colorspaces names are longer
            # and more verbose, thus it becomes harder for the user to visually
# parse the list of colorspaces when there are names such as
# "crv_canonlog" interspersed with names like
# "Input - Canon - Curve - Canon-Log".
# Moving the alias colorspace definitions to the end of the
# configuration avoids the above problem.
alias_colorspaces.append(
[reference_data, reference_data, reference_data.aliases])
print('')
if look_info:
print('Adding looks')
config_data['looks'] = []
for look in look_info:
add_look(config,
look,
custom_lut_dir,
reference_data.name,
config_data)
add_looks_to_views(look_info,
reference_data.name,
config_data,
multiple_displays)
print('')
if custom_output_info:
print('Adding custom output transforms')
for custom_output in custom_output_info:
add_custom_output(config,
custom_output,
custom_lut_dir,
reference_data,
config_data,
alias_colorspaces,
prefix)
print('')
print('Adding regular colorspaces')
for colorspace in sorted(config_data['colorSpaces'],
cmp=lambda x,y: cmp(x.family.lower(), y.family.lower())):
        # Adding the colorspace *Family* into the name which helps with
        # applications that present colorspaces as a flat list.
if prefix:
prefixed_name = colorspace_prefixed_name(colorspace)
prefixed_names[colorspace.name] = prefixed_name
colorspace.name = prefixed_name
print('Creating new color space : %s' % colorspace.name)
description = colorspace.description
if colorspace.aces_transform_id:
description += (
'\n\nACES Transform ID : %s' % colorspace.aces_transform_id)
ocio_colorspace = ocio.ColorSpace(
name=colorspace.name,
bitDepth=colorspace.bit_depth,
description=description,
equalityGroup=colorspace.equality_group,
family=colorspace.family,
isData=colorspace.is_data,
allocation=colorspace.allocation_type,
allocationVars=colorspace.allocation_vars)
if colorspace.to_reference_transforms:
print('\tGenerating To-Reference transforms')
ocio_transform = create_ocio_transform(
colorspace.to_reference_transforms)
ocio_colorspace.setTransform(
ocio_transform,
ocio.Constants.COLORSPACE_DIR_TO_REFERENCE)
if colorspace.from_reference_transforms:
print('\tGenerating From-Reference transforms')
ocio_transform = create_ocio_transform(
colorspace.from_reference_transforms)
ocio_colorspace.setTransform(
ocio_transform,
ocio.Constants.COLORSPACE_DIR_FROM_REFERENCE)
config.addColorSpace(ocio_colorspace)
if aliases:
if colorspace.aliases:
# Deferring adding alias colorspaces until end, which helps
# with applications listing the colorspaces in the order that
# they were defined in the configuration.
alias_colorspaces.append(
[reference_data, colorspace, colorspace.aliases])
print('')
print('')
# Adding roles early so that alias colorspaces can be created
# with roles names before remaining colorspace aliases are added
# to the configuration.
print('Setting the roles')
if prefix:
set_config_roles(
config,
color_picking=prefixed_names[
config_data['roles']['color_picking']],
color_timing=prefixed_names[config_data['roles']['color_timing']],
compositing_log=prefixed_names[
config_data['roles']['compositing_log']],
data=prefixed_names[config_data['roles']['data']],
default=prefixed_names[config_data['roles']['default']],
matte_paint=prefixed_names[config_data['roles']['matte_paint']],
reference=prefixed_names[config_data['roles']['reference']],
scene_linear=prefixed_names[config_data['roles']['scene_linear']],
compositing_linear=prefixed_names[config_data['roles']['scene_linear']],
rendering=prefixed_names[config_data['roles']['scene_linear']],
texture_paint=prefixed_names[
config_data['roles']['texture_paint']])
# Add the aliased colorspaces for each role
for role_name, role_colorspace_name in config_data['roles'].iteritems():
role_colorspace_prefixed_name = prefixed_names[role_colorspace_name]
#print( 'Finding colorspace : %s' % role_colorspace_prefixed_name )
# Find the colorspace pointed to by the role
role_colorspaces = [colorspace
for colorspace in config_data['colorSpaces']
if colorspace.name == role_colorspace_prefixed_name]
role_colorspace = None
if len(role_colorspaces) > 0:
role_colorspace = role_colorspaces[0]
else:
if reference_data.name == role_colorspace_prefixed_name:
role_colorspace = reference_data
if role_colorspace:
# The alias colorspace shouldn't match the role name exactly
role_name_alias1 = "role_%s" % role_name
role_name_alias2 = "Role - %s" % role_name
print( 'Adding a role colorspace named %s, pointing to %s' % (
role_name_alias2, role_colorspace.name))
alias_colorspaces.append(
(reference_data, role_colorspace, [role_name_alias1]))
add_colorspace_aliases(
config, reference_data, role_colorspace, [role_name_alias2],
'Utility/Roles')
else:
set_config_roles(
config,
color_picking=config_data['roles']['color_picking'],
color_timing=config_data['roles']['color_timing'],
compositing_log=config_data['roles']['compositing_log'],
data=config_data['roles']['data'],
default=config_data['roles']['default'],
matte_paint=config_data['roles']['matte_paint'],
reference=config_data['roles']['reference'],
scene_linear=config_data['roles']['scene_linear'],
compositing_linear=config_data['roles']['scene_linear'],
rendering=config_data['roles']['scene_linear'],
texture_paint=config_data['roles']['texture_paint'])
# Add the aliased colorspaces for each role
for role_name, role_colorspace_name in config_data['roles'].iteritems():
# Find the colorspace pointed to by the role
role_colorspaces = [colorspace
for colorspace in config_data['colorSpaces']
if colorspace.name == role_colorspace_name]
role_colorspace = None
if len(role_colorspaces) > 0:
role_colorspace = role_colorspaces[0]
else:
if reference_data.name == role_colorspace_name:
role_colorspace = reference_data
if role_colorspace:
# The alias colorspace shouldn't match the role name exactly
role_name_alias1 = "role_%s" % role_name
role_name_alias2 = "Role - %s" % role_name
print('Adding a role colorspace named %s, pointing to %s' % (
role_name_alias2, role_colorspace.name))
alias_colorspaces.append(
(reference_data, role_colorspace, [role_name_alias1]))
add_colorspace_aliases(
config, reference_data, role_colorspace, [role_name_alias2],
'Utility/Roles')
print('')
# Adding alias colorspaces at the end as some applications use
# colorspaces definitions order of the configuration to order
# the colorspaces in their selection lists, some applications
# use alphabetical ordering.
# This should keep the alias colorspaces out of the way for applications
# using the configuration order.
print('Adding the alias colorspaces')
for reference, colorspace, aliases in alias_colorspaces:
add_colorspace_aliases(config, reference, colorspace, aliases,
'Utility/Aliases')
print('')
    print('Adding the displays and views')
# Setting the *color_picking* role to be the first *Display*'s
# *Output Transform* *View*.
default_display_name = config_data['defaultDisplay']
default_display_views = config_data['displays'][default_display_name]
default_display_colorspace = default_display_views['Output Transform']
# Defining *Displays* and *Views*.
displays, views = [], []
# Defining a generic *Display* and *View* setup.
if multiple_displays:
looks = config_data['looks'] if ('looks' in config_data) else []
looks = ', '.join(looks)
print('Creating multiple displays, with looks : %s' % looks)
# *Displays* are not reordered to put the *defaultDisplay* first
# because *OCIO* will order them alphabetically when the configuration
# is written to disk.
for display, view_list in config_data['displays'].iteritems():
for view_name, colorspace in view_list.iteritems():
config.addDisplay(display, view_name, colorspace.name, looks)
if 'Output Transform' in view_name and looks != '':
# *Views* without *Looks*.
config.addDisplay(display, view_name, colorspace.name)
# *Views* with *Looks*.
view_name_with_looks = '%s with %s' % (view_name, looks)
config.addDisplay(display, view_name_with_looks,
colorspace.name, looks)
else:
config.addDisplay(display, view_name, colorspace.name)
if not (view_name in views):
views.append(view_name)
displays.append(display)
# *Displays* and *Views* useful in a *GUI* context.
else:
single_display_name = 'ACES'
displays.append(single_display_name)
# Ensuring the *defaultDisplay* is first.
display_names = sorted(config_data['displays'])
display_names.insert(0, display_names.pop(
display_names.index(default_display_name)))
looks = config_data['looks'] if ('looks' in config_data) else []
look_names = ', '.join(looks)
displays_views_colorspaces = []
for display in display_names:
view_list = config_data['displays'][display]
for view_name, colorspace in view_list.iteritems():
if 'Output Transform' in view_name:
# We use the *Display* names as the *View* names in this
# case as there is a single *Display* containing all the
# *Views*.
                    # This works for more applications than not, as of the time
# of this implementation.
# Autodesk Maya 2016 doesn't support parentheses in
# *View* names.
sanitised_display = replace(display, {')': '', '(': ''})
# *View* with *Looks*.
if 'with' in view_name:
sanitised_display = '%s with %s' % (
sanitised_display, look_names)
views_with_looks_at_end = False
# Storing combo of *Display*, *View* and *Colorspace*
# name so they can be added to the end of the list.
if views_with_looks_at_end:
displays_views_colorspaces.append(
[single_display_name, sanitised_display,
colorspace.name])
else:
config.addDisplay(single_display_name,
sanitised_display,
colorspace.name)
if not (sanitised_display in views):
views.append(sanitised_display)
# *View* without *Looks*.
else:
config.addDisplay(single_display_name,
sanitised_display,
colorspace.name)
if not (sanitised_display in views):
views.append(sanitised_display)
# Adding to the configuration any *Display*, *View* combinations that
# were saved for later.
# This list should be empty unless `views_with_looks_at_end` is
# set `True` above.
for display_view_colorspace in displays_views_colorspaces:
single_display_name, sanitised_display, colorspace_name = (
display_view_colorspace)
config.addDisplay(single_display_name,
sanitised_display,
colorspace_name)
if not (sanitised_display in views):
views.append(sanitised_display)
raw_display_space_name = config_data['roles']['data']
log_display_space_name = config_data['roles']['compositing_log']
if prefix:
raw_display_space_name = prefixed_names[raw_display_space_name]
log_display_space_name = prefixed_names[log_display_space_name]
config.addDisplay(single_display_name, 'Raw', raw_display_space_name)
views.append('Raw')
config.addDisplay(single_display_name, 'Log', log_display_space_name)
views.append('Log')
config.setActiveDisplays(','.join(sorted(displays)))
config.setActiveViews(','.join(views))
print('')
# Ensuring the configuration is valid.
config.sanityCheck()
# Resetting colorspace names to their non-prefixed versions.
if prefix:
prefixed_names_inverse = {}
for original, prefixed in prefixed_names.iteritems():
prefixed_names_inverse[prefixed] = original
reference_data.name = prefixed_names_inverse[reference_data.name]
try:
for colorspace in config_data['colorSpaces']:
colorspace.name = prefixed_names_inverse[colorspace.name]
except:
print('Error with Prefixed names')
for original, prefixed in prefixed_names.iteritems():
print('%s, %s' % (original, prefixed))
print('\n')
print('Inverse Lookup of Prefixed names')
for prefixed, original in prefixed_names_inverse.iteritems():
print('%s, %s' % (prefixed, original))
raise
return config | 385 |
def parse_json_with_comments(pathlike):
"""
Parse a JSON file after removing any comments.
Comments can use either ``//`` for single-line
    comments or ``/* ... */`` for multi-line comments.
The input filepath can be a string or ``pathlib.Path``.
Parameters
----------
filename : str or os.PathLike
Path to the input JSON file either as a string
or as a ``pathlib.Path`` object.
Returns
-------
obj : dict
JSON object representing the input file.
Note
----
This code was adapted from:
https://web.archive.org/web/20150520154859/http://www.lifl.fr/~riquetd/parse-a-json-file-with-comments.html
"""
# Regular expression to identify comments
comment_re = re.compile(r'(^)?[^\S\n]*/(?:\*(.*?)\*/[^\S\n]*|/[^\n]*)($)?',
re.DOTALL | re.MULTILINE)
# if we passed in a string, convert it to a Path
if isinstance(pathlike, str):
pathlike = Path(pathlike)
with open(pathlike, 'r') as file_buff:
content = ''.join(file_buff.readlines())
# Looking for comments
match = comment_re.search(content)
while match:
# single line comment
content = content[:match.start()] + content[match.end():]
match = comment_re.search(content)
# Return JSON object
config = json.loads(content)
return config | 386 |
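# Illustrative usage (an addition): write a commented JSON file to a temporary
# location and parse it back; tempfile is used only to keep the example self-contained.
import os
import tempfile

_raw = """
{
    // epochs controls the number of passes over the data
    "name": "experiment",  /* a block comment */
    "epochs": 10
}
"""
with tempfile.NamedTemporaryFile("w", suffix=".json", delete=False) as _tmp:
    _tmp.write(_raw)
    _path = _tmp.name
print(parse_json_with_comments(_path))   # {'name': 'experiment', 'epochs': 10}
os.remove(_path)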
def main():
""" Main entry point
"""
parser = argparse.ArgumentParser()
parser.add_argument("-b", "--betatest", dest="betatest", action="store_true",
help="If used, then do not update the symlinks with this version's entry points")
parser.add_argument("--python", dest="pythonbin",
help="In case you need a specific python to run the project, specify its path here")
parser.add_argument("-f", "--force", dest="force", action="store_true",
help="Force the reinstallation of a package as by default, if the directory exists, nothing is "
"done.")
options, args = parser.parse_known_args()
if not args:
        print('Specify at least the project name')
parser.print_help()
return
project = args[0]
install_proj = InstallProject(project)
if options.pythonbin:
install_proj.set_python(options.pythonbin)
if options.betatest:
install_proj.set_betatest()
if options.force:
install_proj.set_force()
# mandatory
install_proj.run() | 387 |
def _unpack_available_edges(avail, weight=None, G=None):
"""Helper to separate avail into edges and corresponding weights"""
if weight is None:
weight = "weight"
if isinstance(avail, dict):
avail_uv = list(avail.keys())
avail_w = list(avail.values())
else:
def _try_getitem(d):
try:
return d[weight]
except TypeError:
return d
avail_uv = [tup[0:2] for tup in avail]
avail_w = [1 if len(tup) == 2 else _try_getitem(tup[-1]) for tup in avail]
if G is not None:
# Edges already in the graph are filtered
flags = [not G.has_edge(u, v) for u, v in avail_uv]
avail_uv = list(it.compress(avail_uv, flags))
avail_w = list(it.compress(avail_w, flags))
return avail_uv, avail_w | 388 |
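# Illustrative calls (an addition; assumes itertools is imported as it, which the
# helper above already requires; networkx is imported here only to build a demo graph).
import networkx as nx

print(_unpack_available_edges({(1, 3): 2.0, (2, 4): 5.0}))
# ([(1, 3), (2, 4)], [2.0, 5.0])
G_demo = nx.path_graph(4)   # edges (0, 1), (1, 2), (2, 3)
print(_unpack_available_edges([(0, 1, 7), (0, 3, 2)], G=G_demo))
# ([(0, 3)], [2]) - the edge (0, 1) already present in G_demo is filtered out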
def triple_str_to_dict(clause):
"""
converts a triple (for a where_clause) in the form
<<#subj, pred_text, #obj/obj_text>>
to dictionary form. it assumed that one of the three entries is
replaced by a "?"
if the obj memid is fixed (as opposed to the obj_text),
use a "#" in front of the memid. subj_text is not a valid
possibility for the first entry of the triple; still, if a query uses
a fixed subj, it should be preceded with a "#".
the order is assumed to be subj, pred, obj.
examples:
"find me a record whose name is bob":
<< ?, has_name, bob >> --> {"pred_text": "has_name", "obj_text": "bob"}
"find me a record who is a friend of the entity with memid
dd2ca5a4c5204fc09c71279f8956a2b1":
<< ?, friend_of, #dd2ca5a4c5204fc09c71279f8956a2b1 >> -->
{"pred_text": "friend_of", "obj": "dd2ca5a4c5204fc09c71279f8956a2b1"}
"find me a record x for which the entity with memid
dd2ca5a4c5204fc09c71279f8956a2b1" is a parent_of x:
<< #dd2ca5a4c5204fc09c71279f8956a2b1, parent_of, ? >> -->
{"pred_text": "parent_of", "subj": "dd2ca5a4c5204fc09c71279f8956a2b1"}
    Commas in obj text or subj text need to be escaped with \
"find me a record whose name is bob, the sailor":
<< ?, has_name, bob >> --> {"pred_text": "has_name", "obj_text": "bob\, the sailor"}
TODO:
This does not currently handle nested queries.
This does not currently handle multiple "?"
moar escapes?
"""
comma = uuid.uuid4().hex
clause = clause.replace("\,", comma)
terms = remove_enclosing_symbol(clause, ("<<", ">>")).split(",")
terms = [t.replace(comma, ",") for t in terms]
terms = [t.strip() for t in terms]
assert terms[1] and terms[1] != "?"
out = {"pred_text": terms[1]}
if terms[0] == "?":
if terms[2] == "?":
raise Exception(
"queries with both subj and obj unfixed in a triple are not yet supported"
)
assert terms[2] != "?"
if terms[2][0] == "#":
out["obj"] = terms[2][1:]
else:
out["obj_text"] = terms[2]
else:
if terms[0][0] == "#":
out["subj"] = terms[0][1:]
else:
raise Exception(
'queries with a "subj_text" (as opposed to subj memid) in a triple are not supported'
)
return out | 389 |
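# Illustrative calls mirroring the docstring examples (an addition; assumes
# remove_enclosing_symbol strips the << >> delimiters as described above).
print(triple_str_to_dict("<< ?, has_name, bob >>"))
# {'pred_text': 'has_name', 'obj_text': 'bob'}
print(triple_str_to_dict("<< #dd2ca5a4c5204fc09c71279f8956a2b1, parent_of, ? >>"))
# {'pred_text': 'parent_of', 'subj': 'dd2ca5a4c5204fc09c71279f8956a2b1'}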
def test_check_auth(session): # pylint:disable=unused-argument
"""Assert that check_auth is working as expected."""
user = factory_user_model()
org = factory_org_model()
factory_membership_model(user.id, org.id)
entity = factory_entity_model()
factory_affiliation_model(entity.id, org.id)
# Test for staff role
check_auth({'realm_access': {'roles': ['staff']}, 'sub': str(user.keycloak_guid)}, one_of_roles=STAFF)
# Test for owner role
check_auth({'realm_access': {'roles': ['public']}, 'sub': str(user.keycloak_guid)}, one_of_roles=OWNER,
business_identifier=entity.business_identifier)
# Test for owner role with org id
check_auth({'realm_access': {'roles': ['public']}, 'sub': str(user.keycloak_guid)}, one_of_roles=OWNER,
org_id=org.id)
# Test for exception, check for auth if resource is available for STAFF users
with pytest.raises(HTTPException) as excinfo:
check_auth({'realm_access': {'roles': ['public']}, 'sub': str(user.keycloak_guid)}, one_of_roles=[STAFF],
business_identifier=entity.business_identifier)
assert excinfo.exception.code == 403
# Test auth where STAFF role is in disabled role list
with pytest.raises(HTTPException) as excinfo:
check_auth({'realm_access': {'roles': ['staff']}, 'sub': str(user.keycloak_guid)}, disabled_roles=[STAFF],
business_identifier=entity.business_identifier)
assert excinfo.exception.code == 403
    # Test auth where an exact MEMBER role is required (by business identifier)
with pytest.raises(HTTPException) as excinfo:
check_auth({'realm_access': {'roles': ['public']}, 'sub': str(user.keycloak_guid)}, equals_role=MEMBER,
business_identifier=entity.business_identifier)
assert excinfo.exception.code == 403
    # Test auth where an exact MEMBER role is required (by org id)
with pytest.raises(HTTPException) as excinfo:
check_auth({'realm_access': {'roles': ['public']}, 'sub': str(user.keycloak_guid)}, equals_role=MEMBER,
org_id=org.id)
assert excinfo.exception.code == 403 | 390 |
def test_add_edge_1():
"""
Test normal usage.
"""
gfa_graph = mod.GFAGraph()
gfa_graph.add_node('node1', 4, 'ACTG', tags={}, labels={})
gfa_graph.add_node('node2', 1000, '*', tags={}, labels={})
edge_name = 'edge1'
source, source_orient = 'node1', '+'
sink, sink_orient = 'node2', '+'
source_start, source_end = 4, 4
sink_start, sink_end = 0, 0
cigar = '*'
gfa_graph.add_edge(edge_name, source, source_orient, sink, sink_orient, source_start, source_end, sink_start, sink_end, cigar, tags={}, labels={})
assert(len(gfa_graph.edges.keys()) == 1) | 391 |
def reddit_data(subreddit, time_request = -9999):
"""
    @brief function to retrieve cached posts for a given subreddit
    :param subreddit: the name of the subreddit
    :param time_request: unix timestamp of when the requested subreddit data was generated
:return: a list of reddit objects with the data of the posts
"""
base_url = get_reddit_url()
url = f"{base_url}/cache?subreddit={subreddit}&time_resquest={time_request}"
content = server_request(url)
data = json.loads(content.decode("utf-8"))
reddit_posts = []
for n in data:
post = reddit.reddit
post.id = data[n]["id"]
post.title = data[n]["title"]
post.author = data[n]["author"]
post.score = int(data[n]["score"])
post.vote_ratio = int(data[n]["vote_ratio"])
post.comment_count = int(data[n]["comment_count"])
post.subreddit = data[n]["subreddit"]
post.post_time = int(data[n]["post_time"])
post.url = data[n]["url"]
post.text = data[n]["text"]
reddit_posts.append(post)
return reddit_posts | 392 |
def main():
""" COMMANDS MANAGER / SWITCH PANEL """
args: dict = demisto.args()
params: dict = demisto.params()
self_deployed: bool = params.get('self_deployed', False)
tenant_id: str = params.get('tenant_id', '')
auth_and_token_url: str = params.get('auth_id', '')
enc_key: str = params.get('enc_key', '')
base_url: str = urljoin(params.get('url', ''), '/v1.0')
app_name: str = 'ms-graph-mail'
ok_codes: tuple = (200, 201, 202, 204)
use_ssl: bool = not params.get('insecure', False)
proxy: bool = params.get('proxy', False)
# params related to mailbox to fetch incidents
mailbox_to_fetch = params.get('mailbox_to_fetch', '')
folder_to_fetch = params.get('folder_to_fetch', 'Inbox')
first_fetch_interval = params.get('first_fetch', '15 minutes')
emails_fetch_limit = int(params.get('fetch_limit', '50'))
timeout = arg_to_number(params.get('timeout', '10') or '10')
client: MsGraphClient = MsGraphClient(self_deployed, tenant_id, auth_and_token_url, enc_key, app_name, base_url,
use_ssl, proxy, ok_codes, mailbox_to_fetch, folder_to_fetch,
first_fetch_interval, emails_fetch_limit, timeout)
command = demisto.command()
LOG(f'Command being called is {command}')
try:
if command == 'test-module':
client.ms_client.get_access_token()
demisto.results('ok')
if command == 'fetch-incidents':
next_run, incidents = client.fetch_incidents(demisto.getLastRun())
demisto.setLastRun(next_run)
demisto.incidents(incidents)
elif command in ('msgraph-mail-list-emails', 'msgraph-mail-search-email'):
list_mails_command(client, args)
elif command == 'msgraph-mail-get-email':
get_message_command(client, args)
elif command == 'msgraph-mail-delete-email':
delete_mail_command(client, args)
elif command == 'msgraph-mail-list-attachments':
list_attachments_command(client, args)
elif command == 'msgraph-mail-get-attachment':
get_attachment_command(client, args)
elif command == 'msgraph-mail-list-folders':
list_folders_command(client, args)
elif command == 'msgraph-mail-list-child-folders':
list_child_folders_command(client, args)
elif command == 'msgraph-mail-create-folder':
create_folder_command(client, args)
elif command == 'msgraph-mail-update-folder':
update_folder_command(client, args)
elif command == 'msgraph-mail-delete-folder':
delete_folder_command(client, args)
elif command == 'msgraph-mail-move-email':
move_email_command(client, args)
elif command == 'msgraph-mail-get-email-as-eml':
get_email_as_eml_command(client, args)
elif command == 'msgraph-mail-create-draft':
create_draft_command(client, args)
elif command == 'msgraph-mail-reply-to':
reply_to_command(client, args) # pylint: disable=E1123
elif command == 'msgraph-mail-send-draft':
send_draft_command(client, args) # pylint: disable=E1123
elif command == 'send-mail':
send_email_command(client, args)
elif command == 'reply-mail':
return_results(reply_email_command(client, args))
# Log exceptions
except Exception as e:
return_error(str(e)) | 393 |
def rename_var(fname):
""" Rename defined variables in HDF5 file. """
    with h5py.File(fname, 'r+') as f:  # open read/write so datasets can be renamed in place
f['bs'] = f['bs_ice1']
f['lew'] = f['lew_ice2']
f['tes'] = f['tes_ice2']
del f['bs_ice1']
del f['lew_ice2']
del f['tes_ice2'] | 394 |
def write_velocity_files(U_25_RHS_str, U_50_RHS_str, U_100_RHS_str, U_125_RHS_str, U_150_RHS_str, U_25_LHS_str, U_50_LHS_str, U_100_LHS_str, U_125_LHS_str, U_150_LHS_str, path_0_100, path_0_125, path_0_150, path_0_25, path_0_50):
    """Create the details file for the surrounding cases, and write the velocities in line two"""
    fname = "details"  # Filename
    # Pair each case directory with the velocity string that belongs in its details file
    cases = [
        (path_0_25, U_25_RHS_str),
        (path_0_50, U_50_RHS_str),
        (path_0_100, U_100_RHS_str),
        (path_0_125, U_125_RHS_str),
        (path_0_150, U_150_RHS_str),
    ]
    details_files = []
    for case_path, velocity in cases:
        details_file = case_path + fname
        with open(details_file, 'w+') as f:
            f.write('Velocity' + '\n')
            f.write(velocity)
        details_files.append(details_file)
    details_file_25, details_file_50, details_file_100, details_file_125, details_file_150 = details_files
    return details_file_25, details_file_50, details_file_100, details_file_125, details_file_150
def traceback_to_server(client):
"""
    Send tracebacks for all uncaught exceptions that subclass Exception to Sentry
"""
def excepthook(exctype, value, traceback):
if issubclass(exctype, Exception):
client.captureException(exc_info=(exctype, value, traceback))
sys.__excepthook__(exctype, value, traceback)
sys.excepthook = excepthook | 396 |
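# Minimal wiring sketch (an addition): the DSN is a placeholder and the client type
# is an assumption based on the captureException(exc_info=...) call above, which the
# legacy raven client exposes.
from raven import Client

sentry_client = Client("https://<key>@sentry.example.com/1")   # placeholder DSN
traceback_to_server(sentry_client)
# From here on, any uncaught exception subclassing Exception is reported to Sentry
# before the default traceback is printed by sys.__excepthook__.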
def _verify_input_args(x, y, input_fn, feed_fn, batch_size):
"""Verifies validity of co-existance of input arguments."""
if input_fn is None:
if x is None:
raise ValueError('Either x or input_fn must be provided.')
if contrib_framework.is_tensor(x) or (y is not None and
contrib_framework.is_tensor(y)):
raise ValueError('Inputs cannot be tensors. Please provide input_fn.')
if feed_fn is not None:
raise ValueError('Can not provide both feed_fn and x or y.')
else:
if (x is not None) or (y is not None):
raise ValueError('Can not provide both input_fn and x or y.')
if batch_size is not None:
raise ValueError('Can not provide both input_fn and batch_size.') | 397 |
def datetime_to_bytes(value):
"""Return bytes representing UTC time in microseconds."""
return pack('>Q', int(value.timestamp() * 1e6)) | 398 |
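# Illustrative round trip (an addition): pack comes from the struct module in the
# function above, and unpack is imported here to reverse it.
from datetime import datetime, timezone
from struct import unpack

_dt = datetime(2021, 1, 1, tzinfo=timezone.utc)
_raw_bytes = datetime_to_bytes(_dt)          # 8 bytes, big-endian unsigned 64-bit integer
_micros = unpack('>Q', _raw_bytes)[0]
print(datetime.fromtimestamp(_micros / 1e6, tz=timezone.utc))   # 2021-01-01 00:00:00+00:00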
def test_tfenv_run_no_version_file(
cd_tmp_path: Path, caplog: LogCaptureFixture
) -> None:
"""Test ``runway tfenv run -- --help`` no version file."""
caplog.set_level(logging.ERROR, logger="runway")
runner = CliRunner()
result = runner.invoke(cli, ["tfenv", "run", "--", "--help"])
assert result.exit_code == 1
assert "unable to find a .terraform-version file" in "\n".join(caplog.messages) | 399 |