content (stringlengths 22–815k) | id (int64 0–4.91M)
---|---|
def _center_size_bbox_to_corners_bbox(centers, sizes):
"""Converts bbox center-size representation to corners representation.
Args:
centers: a tensor with shape [N, 2] representing bounding box centers
sizes: a tensor with shape [N, 2] representing bounding boxes
Returns:
corners: tensor with shape [N, 4] representing bounding boxes in corners
representation
"""
return tf.concat([centers - .5 * sizes, centers + .5 * sizes], 1) | 5,358,300 |
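A small usage sketch (assuming TensorFlow 2.x eager execution): a box centered at (2, 3) with size (4, 6) maps to corners (0, 0, 4, 6).

import tensorflow as tf

centers = tf.constant([[2.0, 3.0]])   # one box center
sizes = tf.constant([[4.0, 6.0]])     # its extent along each axis
corners = _center_size_bbox_to_corners_bbox(centers, sizes)
print(corners.numpy())  # [[0. 0. 4. 6.]] -> [min coords, max coords]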
def get_starting_month(number_of_months_to_get,
                       include_actual_month=True,
                       actual_date=None):
    """
    Get starting month based on parameters
    :param number_of_months_to_get: Number of months to get - e.g.: 2
    :param include_actual_month: Include actual month? e.g.: True
    :param actual_date: Actual date - e.g.: now(); defaults to the current date
    :return: Initial month & year - e.g.: (12, 2014)
    :raise Exception: if number_of_months_to_get is less than 1
    """
    if number_of_months_to_get <= 0:
        raise Exception("Number of months to get should be greater than 0")
    if actual_date is None:
        # Default to the current date, evaluated at call time.
        actual_date = datetime.datetime.now()
    initial_year = actual_date.year
    # Go back number_of_months_to_get months, wrapping the year if needed.
    initial_month = actual_date.month - number_of_months_to_get
    if initial_month <= 0:
        initial_month += 12
        initial_year -= 1
    if include_actual_month:
        initial_month += 1
        if initial_month > 12:
            initial_month = 1
            initial_year += 1
    return initial_month, initial_year | 5,358,301 |
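A quick sanity check of the month arithmetic above (hypothetical dates chosen for illustration): asking for 3 months up to December 2014, including the current month, starts the window in October 2014, while a window ending in January wraps back into the previous year.

import datetime

month, year = get_starting_month(3, include_actual_month=True,
                                 actual_date=datetime.datetime(2014, 12, 15))
assert (month, year) == (10, 2014)   # Oct, Nov, Dec 2014

month, year = get_starting_month(3, include_actual_month=True,
                                 actual_date=datetime.datetime(2015, 1, 15))
assert (month, year) == (11, 2014)   # Nov, Dec 2014 and Jan 2015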
def signal_handler(sig, frame):
""" Suppress stack traces when intentionally closed """
print("SIGINT or Control-C detected... exiting...")
sys.exit(0) | 5,358,302 |
def main() -> None:
"""Main function entrypoint."""
parser = argparse.ArgumentParser()
parser.add_argument(
"--load_module",
type=str,
default="./complainers",
help="A local python module or just a folder containing all complainers. "
"The difference is that the module must contain a '__init__.py' file inside it. "
"The module needs to supply all complainers via `from $load_module import *`.",
)
parser.add_argument(
"--analyze_dir",
type=str,
default=".",
help="The directory to run all globs and issue complaints about.",
)
parser.add_argument(
"-n",
type=int,
default=5,
help="The number of lines before and after an error to show in context.",
)
parser.add_argument(
"--inline",
action="store_true",
help="Enable this option with zero chosen lines ('-n=0') to show error inline.",
)
parser.add_argument(
"--staged",
action="store_true",
help="Only glob files that are staged for git commit.",
)
parser.add_argument(
"--include_untracked",
action="store_true",
help="Also include untracked files from git in glob.",
)
args = parser.parse_args()
args.analyze_dir = Path(args.analyze_dir).absolute()
if not args.analyze_dir.is_dir():
raise ValueError(f"{args.analyze_dir} is not a directory.")
load_module_path = Path(args.load_module).relative_to(".")
analyze_dir = Path(args.analyze_dir).absolute()
context_nb_lines = max(int(args.n), 0)
# Handle some basic tests
if load_module_path == Path("."):
        raise ValueError("load_module should be a subdirectory, not the current path.")
if not load_module_path.is_dir():
raise ValueError(f"{load_module_path} is not a directory.")
# Get all complainers
all_complainers: List[Complainer] = []
# Check for an __init__.py
IS_MODULE = (load_module_path / "__init__.py").is_file()
# get complainers by loading a module with an __init__.py
if IS_MODULE:
# Get the relative module name
load_module = str(load_module_path).replace(os.sep, ".")
# Load the complainers within the module
mod = importlib.import_module(load_module)
for _name, obj in inspect.getmembers(mod, inspect.isclass):
if issubclass(obj, Complainer) and obj != Complainer:
# Initialize the item and add it to all complainers
all_complainers.append(obj())
# get complainers by loading a list of files in a directory
else:
# For all files in the target folder.
for file1 in load_module_path.iterdir():
            # If the entry is a regular file with a .py suffix
            if file1.is_file() and file1.suffix == ".py":
                # Import each file as a module from its full path.
spec = importlib.util.spec_from_file_location(
".", load_module_path.absolute() / file1.name
)
mod = importlib.util.module_from_spec(spec)
spec.loader.exec_module(mod) # type: ignore
# For each object definition that is a class.
for _name, obj in inspect.getmembers(mod, inspect.isclass):
if issubclass(obj, Complainer) and obj != Complainer:
all_complainers.append(obj())
if not all_complainers:
raise ValueError(
f"No Complainers found in module from {load_module_path.absolute()}."
)
print(color_txt("Found Complainers:", BColors.OKGREEN))
for c in all_complainers:
print(
color_txt(f" - {type(c).__module__}.{type(c).__name__}", BColors.OKGREEN)
)
print(color_txt(f"Running renag analyzer on '{analyze_dir}'..", BColors.OKGREEN))
# Get all the captures and globs of all complainers
all_captures_files: Dict[Path, Set[ParserElement]] = defaultdict(set)
capture_to_complainer: Dict[ParserElement, List[Complainer]] = defaultdict(list)
complainer_to_files: Dict[Complainer, Set[Path]] = defaultdict(set)
for complainer in all_complainers:
# Make sure that glob is not an empty list
if not complainer.glob:
raise ValueError(f"Empty glob inside {complainer}: {complainer.glob}")
# Avoid later issue with complainer.capture being empty for the 'Regex' from pyparsing.
# Note: Has to do it this early, because below we start mapping it to the complainers by capture.
if isinstance(complainer.capture, str) and not complainer.capture:
complainer.capture = Empty()
elif isinstance(complainer.capture, str):
complainer.capture = Regex(
complainer.capture, flags=complainer.regex_options
)
# Map the capture to all complainers
capture_to_complainer[complainer.capture].append(complainer)
# Get all the files to analyze
all_files: Set[Path] = set()
for g in complainer.glob:
if not g:
raise ValueError(
f"Empty glob value inside {complainer} ({complainer.glob}): {g}"
)
all_files |= set(analyze_dir.rglob(g))
if complainer.exclude_glob:
for g in complainer.exclude_glob:
if not g:
raise ValueError(
f"Empty exclude glob value inside {complainer} ({complainer.exclude_glob}): {g}"
)
all_files -= set(analyze_dir.rglob(g))
# Add all files and captures to the dicts
for file1 in all_files:
all_captures_files[file1].add(complainer.capture)
complainer_to_files[complainer].add(file1)
# Get git repo information
try:
repo = git.Repo()
except: # noqa: E722 I don't know what this might return if there isn't a git repo
staged_files: Set[Path] = set()
untracked_files: Set[Path] = set()
else:
if args.staged:
staged_files_diffs = repo.index.diff("HEAD")
staged_files = {
Path(repo.working_tree_dir) / diff.b_path for diff in staged_files_diffs
}
else:
staged_files = set()
untracked_files = {Path(path).absolute() for path in repo.untracked_files}
# Iterate over all captures and globs
N_WARNINGS, N_CRITICAL = 0, 0
for file2, captures in all_captures_files.items():
# Check if file is staged for git commit if args.git is true
if args.staged and file2 not in staged_files:
continue
# Check if file is untracked if we are in a git repo
if (not args.include_untracked) and (file2.absolute() in untracked_files):
continue
# Open the file
with file2.open("r") as f2:
try:
txt: str = f2.read()
except UnicodeDecodeError:
continue
# Get the or of all captures
# Then Iterate over all captures
for capture in captures:
# Then Get all matches in the file
for match, start, stop in capture.scanString(txt):
# Then iterate over all complainers
for complainer in capture_to_complainer[capture]:
# Skip if this file is not specifically globbed by this complainer
if file2 not in complainer_to_files[complainer]:
continue
complaints = complainer.check(
txt=txt,
capture_span=(start, stop),
path=file2,
capture_data=match,
)
for complaint in complaints:
if complaint.severity is Severity.CRITICAL:
N_CRITICAL += 1
else:
N_WARNINGS += 1
print(
complaint.pformat(
context_nb_lines=context_nb_lines,
inline_mode=args.inline,
),
end="\n\n",
)
# In the end, we try to call .finalize() on each complainer. Its purpose is
# to allow for complainers to have methods that will be called once, in the end.
for complainer in all_complainers:
if not hasattr(complainer, "finalize"):
continue
complaints = complainer.finalize()
for complaint in complaints:
if complaint.severity == Severity.CRITICAL:
N_CRITICAL += 1
else:
N_WARNINGS += 1
print(
complaint.pformat(
context_nb_lines=context_nb_lines, inline_mode=args.inline
),
end="\n\n",
)
# End by exiting the program
N = N_WARNINGS + N_CRITICAL
if not N:
print(color_txt("Renag finished with no complaints.", BColors.OKGREEN))
exit(0)
print(
color_txt(
f"{N} Complaints found: {N_WARNINGS} Warnings, {N_CRITICAL} Critical.",
BColors.WARNING,
)
)
# If has critical errors - exit with non-zero code..
if N_CRITICAL != 0:
exit(1)
# ..else quit early.
exit(0) | 5,358,303 |
def cvt_raise_stmt(node: pytree.Base, ctx: Ctx) -> ast_cooked.Base:
"""raise_stmt: 'raise' [test ['from' test | ',' test [',' test]]]"""
# 0 1 2 3 2 3 4 5
#-# Raise(expr? exc, expr? cause)
assert ctx.is_REF, [node]
if len(node.children) == 1:
return ast_cooked.RaiseStmt(items=[])
exc = cvt(node.children[1], ctx)
if len(node.children) > 2:
# TODO: test case
if xcast(Leaf, node.children[2]).value == 'from':
raise_from = cvt(node.children[3], ctx)
exc2 = ast_cooked.OMITTED_NODE
exc3 = ast_cooked.OMITTED_NODE
else:
raise_from = ast_cooked.OMITTED_NODE
assert node.children[2].type == token.COMMA, [node]
exc2 = cvt(node.children[3], ctx)
# TODO: test case
if len(node.children) > 4:
assert node.children[4].type == token.COMMA, [node]
exc3 = cvt(node.children[5], ctx)
else:
exc3 = ast_cooked.OMITTED_NODE
else:
raise_from = ast_cooked.OMITTED_NODE
exc2 = ast_cooked.OMITTED_NODE
exc3 = ast_cooked.OMITTED_NODE
return ast_cooked.RaiseStmt(items=[exc, exc2, exc3, raise_from]) | 5,358,304 |
def user_in_user_groups(user_id, **options):
"""
Get all user groups a user belongs to
    :param user_id: The id of the user
    :type user_id: str
:param options: Generic advanced options dict, see online documentation
:type options: dict, optional
:return: List of groups user is in
:rtype: dict
"""
uri = [USER_GROUPS_SUB_PATH, user_id]
return _call_account_api("get", uri, {}, **options) | 5,358,305 |
def read_book(title_path):
"""Read a book and return it as a string"""
    with open(title_path, "r", encoding="utf8") as current_file:  # the encoding argument breaks under Python 2.7's built-in open() but works fine on Python 3
text = current_file.read()
text = text.replace("\n","").replace("\r","")
return text | 5,358,306 |
def count_number(file_name, number):
"""
    Count occurrences of the specified byte in a large file; deliberately left unoptimized so it can be run in separate processes.
"""
print('searching %s:(%s)' % (number, getpid()))
with open(file_name, 'rb') as f:
count = 0
while True:
data = f.read(1024)
if not data:
break
for d in data:
if d == number:
count += 1
print('found %s=%s (%s)' % (number, count, getpid())) | 5,358,307 |
def eval_whole_scene_one_epoch(sess, ops, test_writer):
""" ops: dict mapping from string to tf ops """
global EPOCH_CNT
is_training = False
test_idxs = np.arange(0, len(TEST_DATASET_WHOLE_SCENE))
num_batches = len(TEST_DATASET_WHOLE_SCENE)
total_correct = 0
total_seen = 0
loss_sum = 0
total_seen_class = [0 for _ in range(NUM_CLASSES)]
total_correct_class = [0 for _ in range(NUM_CLASSES)]
total_correct_vox = 0
total_seen_vox = 0
total_seen_class_vox = [0 for _ in range(NUM_CLASSES)]
total_correct_class_vox = [0 for _ in range(NUM_CLASSES)]
log_string(str(datetime.now()))
log_string('---- EPOCH %03d EVALUATION WHOLE SCENE----'%(EPOCH_CNT))
labelweights = np.zeros(21)
labelweights_vox = np.zeros(21)
is_continue_batch = False
extra_batch_data = np.zeros((0,NUM_POINT,3))
extra_batch_label = np.zeros((0,NUM_POINT))
extra_batch_smpw = np.zeros((0,NUM_POINT))
for batch_idx in range(num_batches):
if not is_continue_batch:
batch_data, batch_label, batch_smpw = TEST_DATASET_WHOLE_SCENE[batch_idx]
batch_data = np.concatenate((batch_data,extra_batch_data),axis=0)
batch_label = np.concatenate((batch_label,extra_batch_label),axis=0)
batch_smpw = np.concatenate((batch_smpw,extra_batch_smpw),axis=0)
else:
batch_data_tmp, batch_label_tmp, batch_smpw_tmp = TEST_DATASET_WHOLE_SCENE[batch_idx]
batch_data = np.concatenate((batch_data,batch_data_tmp),axis=0)
batch_label = np.concatenate((batch_label,batch_label_tmp),axis=0)
batch_smpw = np.concatenate((batch_smpw,batch_smpw_tmp),axis=0)
if batch_data.shape[0]<BATCH_SIZE:
is_continue_batch = True
continue
elif batch_data.shape[0]==BATCH_SIZE:
is_continue_batch = False
extra_batch_data = np.zeros((0,NUM_POINT,3))
extra_batch_label = np.zeros((0,NUM_POINT))
extra_batch_smpw = np.zeros((0,NUM_POINT))
else:
is_continue_batch = False
extra_batch_data = batch_data[BATCH_SIZE:,:,:]
extra_batch_label = batch_label[BATCH_SIZE:,:]
extra_batch_smpw = batch_smpw[BATCH_SIZE:,:]
batch_data = batch_data[:BATCH_SIZE,:,:]
batch_label = batch_label[:BATCH_SIZE,:]
batch_smpw = batch_smpw[:BATCH_SIZE,:]
aug_data = batch_data
feed_dict = {ops['pointclouds_pl']: aug_data,
ops['labels_pl']: batch_label,
ops['smpws_pl']: batch_smpw,
ops['is_training_pl']: is_training}
summary, step, loss_val, pred_val = sess.run([ops['merged'], ops['step'],
ops['loss'], ops['pred']], feed_dict=feed_dict)
test_writer.add_summary(summary, step)
pred_val = np.argmax(pred_val, 2) # BxN
correct = np.sum((pred_val == batch_label) & (batch_label>0) & (batch_smpw>0)) # evaluate only on 20 categories but not unknown
total_correct += correct
total_seen += np.sum((batch_label>0) & (batch_smpw>0))
loss_sum += loss_val
tmp,_ = np.histogram(batch_label,range(22))
labelweights += tmp
for l in range(NUM_CLASSES):
total_seen_class[l] += np.sum((batch_label==l) & (batch_smpw>0))
total_correct_class[l] += np.sum((pred_val==l) & (batch_label==l) & (batch_smpw>0))
for b in range(batch_label.shape[0]):
_, uvlabel, _ = pc_util.point_cloud_label_to_surface_voxel_label_fast(aug_data[b,batch_smpw[b,:]>0,:], np.concatenate((np.expand_dims(batch_label[b,batch_smpw[b,:]>0],1),np.expand_dims(pred_val[b,batch_smpw[b,:]>0],1)),axis=1), res=0.02)
total_correct_vox += np.sum((uvlabel[:,0]==uvlabel[:,1])&(uvlabel[:,0]>0))
total_seen_vox += np.sum(uvlabel[:,0]>0)
tmp,_ = np.histogram(uvlabel[:,0],range(22))
labelweights_vox += tmp
for l in range(NUM_CLASSES):
total_seen_class_vox[l] += np.sum(uvlabel[:,0]==l)
total_correct_class_vox[l] += np.sum((uvlabel[:,0]==l) & (uvlabel[:,1]==l))
log_string('eval whole scene mean loss: %f' % (loss_sum / float(num_batches)))
log_string('eval whole scene point accuracy vox: %f'% (total_correct_vox / float(total_seen_vox)))
    log_string('eval whole scene point avg class acc vox: %f' % (np.mean(np.array(total_correct_class_vox[1:])/(np.array(total_seen_class_vox[1:],dtype=float)+1e-6))))
    log_string('eval whole scene point accuracy: %f'% (total_correct / float(total_seen)))
    log_string('eval whole scene point avg class acc: %f' % (np.mean(np.array(total_correct_class[1:])/(np.array(total_seen_class[1:],dtype=float)+1e-6))))
labelweights = labelweights[1:].astype(np.float32)/np.sum(labelweights[1:].astype(np.float32))
labelweights_vox = labelweights_vox[1:].astype(np.float32)/np.sum(labelweights_vox[1:].astype(np.float32))
caliweights = np.array([0.388,0.357,0.038,0.033,0.017,0.02,0.016,0.025,0.002,0.002,0.002,0.007,0.006,0.022,0.004,0.0004,0.003,0.002,0.024,0.029])
    caliacc = np.average(np.array(total_correct_class_vox[1:])/(np.array(total_seen_class_vox[1:],dtype=float)+1e-6),weights=caliweights)
log_string('eval whole scene point calibrated average acc vox: %f' % caliacc)
per_class_str = 'vox based --------'
for l in range(1,NUM_CLASSES):
per_class_str += 'class %d weight: %f, acc: %f; ' % (l,labelweights_vox[l-1],total_correct_class_vox[l]/float(total_seen_class_vox[l]))
log_string(per_class_str)
EPOCH_CNT += 1
return caliacc | 5,358,308 |
def _create_save_name(save_path: str, case_date: date, field_names: list, fix: str = "") -> str:
"""Creates file name for saved images."""
date_string = case_date.strftime("%Y%m%d")
return f"{save_path}{date_string}_{'_'.join(field_names)}{fix}.png" | 5,358,309 |
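A worked example with made-up arguments: a save path of "/tmp/", a case date of 14 March 2021, fields ["cloud", "rain"] and fix="_fixed" produce "/tmp/20210314_cloud_rain_fixed.png".

from datetime import date

name = _create_save_name("/tmp/", date(2021, 3, 14), ["cloud", "rain"], fix="_fixed")
assert name == "/tmp/20210314_cloud_rain_fixed.png"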
def print_document(update: Update, context: CallbackContext) -> None:
"""Don't print received document"""
name, permission_to_print = user_info(update.message.from_user)
if not permission_to_print:
update.message.reply_text("You are not allowed to print, request permission with /start")
return
logging.info("Document received from {}".format(name))
update.message.reply_text("How about no. Print your own documents!") | 5,358,310 |
def list_favorite_queries():
"""List of all favorite queries.
Returns (title, rows, headers, status)"""
headers = ["Name", "Query"]
rows = [(r, favoritequeries.get(r)) for r in favoritequeries.list()]
if not rows:
status = '\nNo favorite queries found.' + favoritequeries.usage
else:
status = ''
return [('', rows, headers, status)] | 5,358,311 |
def random_portfolio_weights(weights_count) -> np.ndarray:
""" Random portfolio weights, of length weights_count. """
weights = np.random.random((weights_count, 1))
weights /= np.sum(weights)
return weights.reshape(-1, 1) | 5,358,312 |
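A brief check of the invariants the function is meant to guarantee, namely a column vector of non-negative weights that sum to one (sketch):

import numpy as np

w = random_portfolio_weights(5)
assert w.shape == (5, 1)
assert np.all(w >= 0)
assert np.isclose(w.sum(), 1.0)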
def matrix2list(mat):
"""Create list of lists from blender Matrix type."""
return list(map(list, list(mat))) | 5,358,313 |
def convert_handle(handle):
"""
Takes string handle such as 1: or 10:1 and creates a binary number accepted
by the kernel Traffic Control.
"""
if isinstance(handle, str):
major, minor = handle.split(':') # "major:minor"
minor = minor if minor else '0'
return int(major, 16) << 16 | int(minor, 16)
return handle | 5,358,314 |
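Worked example (sketch): for the handle "10:1" the major part 0x10 is shifted left 16 bits and OR-ed with the minor part 0x1, giving 0x100001; a bare "1:" defaults the minor to 0, and non-string handles pass through unchanged.

assert convert_handle("10:1") == 0x100001   # (0x10 << 16) | 0x1
assert convert_handle("1:") == 0x10000      # minor defaults to '0'
assert convert_handle(0x10000) == 0x10000   # already numeric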
def list_canned_image_help(scripts_path, fullpath):
"""
List the help and params in the specified canned image.
"""
found = False
with open(fullpath) as wks:
for line in wks:
if not found:
idx = line.find("long-description:")
if idx != -1:
print()
print(line[idx + len("long-description:"):].strip())
found = True
continue
if not line.strip():
break
idx = line.find("#")
if idx != -1:
print(line[idx + len("#:"):].rstrip())
else:
break | 5,358,315 |
def index(request):
"""Display start page"""
return HttpResponseRedirect(reverse('admin:index')) | 5,358,316 |
async def check_data(user_input, hass, own_id=None):
    """Check validity of the provided data."""
ret = {}
if(CONF_ICS_URL in user_input):
try:
cal_string = await async_load_data(hass, user_input[CONF_ICS_URL])
try:
Calendar.from_ical(cal_string)
except Exception:
_LOGGER.error(traceback.format_exc())
ret["base"] = ERROR_ICS
return ret
except Exception:
_LOGGER.error(traceback.format_exc())
ret["base"] = ERROR_URL
return ret
if(CONF_TIMEFORMAT in user_input):
try:
datetime.datetime.now(get_localzone()).strftime(user_input[CONF_TIMEFORMAT])
except Exception:
_LOGGER.error(traceback.format_exc())
ret["base"] = ERROR_TIMEFORMAT
return ret
if(CONF_ID in user_input):
if(user_input[CONF_ID] < 0):
_LOGGER.error("ICS: ID below zero")
ret["base"] = ERROR_SMALL_ID
return ret
if(CONF_LOOKAHEAD in user_input):
if(user_input[CONF_LOOKAHEAD] < 1):
_LOGGER.error("ICS: Lookahead < 1")
ret["base"] = ERROR_SMALL_LOOKAHEAD
return ret
if(CONF_ID in user_input):
if((own_id != user_input[CONF_ID]) and (hass is not None)):
if(async_generate_entity_id(ENTITY_ID_FORMAT, "ics_" + str(user_input[CONF_ID]), hass=hass) != PLATFORM + ".ics_" + str(user_input[CONF_ID])):
_LOGGER.error("ICS: ID not unique")
ret["base"] = ERROR_ID_NOT_UNIQUE
return ret
if(CONF_N_SKIP in user_input):
if(user_input[CONF_N_SKIP] < 0):
_LOGGER.error("ICS: Skip below zero")
ret["base"] = ERROR_NEGATIVE_SKIP
return ret
return ret | 5,358,317 |
def write_mllr(fout, Ws, Hs=None):
"""
Write out MLLR transformations of the means in the format that
Sphinx3 understands.
@param Ws: MLLR transformations of means, one per feature stream
@ptype Ws: list(numpy.ndarray)
@param Hs: MLLR transformations of variances, one per feature stream
@ptype Hs: list(numpy.ndarray)
@param fout: Filename or filehandle to write to.
@ptype fout: string or file
"""
    if hasattr(fout, 'write'):          # already a file-like object
        fh = fout
    else:
        fh = open(fout, 'w')            # treat fout as a filename
# One-class MLLR for now
fh.write("%d\n" % 1)
fh.write("%d\n" % len(Ws))
for i,W in enumerate(Ws):
fh.write("%d\n" % W.shape[0])
# Write rotation and bias terms separately
for w in W:
for x in w[1:]:
fh.write("%f " % x)
fh.write("\n")
for x in W[:,0]:
fh.write("%f " % x)
fh.write("\n")
        if Hs is not None:
for x in Hs[i]:
fh.write("%f " % x)
fh.write("\n")
else:
fh.write("1.0 " * W.shape[0])
fh.write("\n") | 5,358,318 |
def run(
uri,
entry_point="main",
version=None,
parameters=None,
docker_args=None,
experiment_name=None,
experiment_id=None,
backend="local",
backend_config=None,
use_conda=None,
storage_dir=None,
synchronous=True,
run_id=None,
run_name=None,
env_manager=None,
):
"""
Run an MLflow project. The project can be local or stored at a Git URI.
MLflow provides built-in support for running projects locally or remotely on a Databricks or
Kubernetes cluster. You can also run projects against other targets by installing an appropriate
third-party plugin. See `Community Plugins <../plugins.html#community-plugins>`_ for more
information.
For information on using this method in chained workflows, see `Building Multistep Workflows
<../projects.html#building-multistep-workflows>`_.
:raises: :py:class:`mlflow.exceptions.ExecutionException` If a run launched in blocking mode
is unsuccessful.
:param uri: URI of project to run. A local filesystem path
or a Git repository URI (e.g. https://github.com/mlflow/mlflow-example)
pointing to a project directory containing an MLproject file.
:param entry_point: Entry point to run within the project. If no entry point with the specified
name is found, runs the project file ``entry_point`` as a script,
using "python" to run ``.py`` files and the default shell (specified by
environment variable ``$SHELL``) to run ``.sh`` files.
:param version: For Git-based projects, either a commit hash or a branch name.
:param parameters: Parameters (dictionary) for the entry point command.
:param docker_args: Arguments (dictionary) for the docker command.
:param experiment_name: Name of experiment under which to launch the run.
:param experiment_id: ID of experiment under which to launch the run.
:param backend: Execution backend for the run: MLflow provides built-in support for "local",
"databricks", and "kubernetes" (experimental) backends. If running against
Databricks, will run against a Databricks workspace determined as follows:
if a Databricks tracking URI of the form ``databricks://profile`` has been set
(e.g. by setting the MLFLOW_TRACKING_URI environment variable), will run
against the workspace specified by <profile>. Otherwise, runs against the
workspace specified by the default Databricks CLI profile.
:param backend_config: A dictionary, or a path to a JSON file (must end in '.json'), which will
be passed as config to the backend. The exact content which should be
provided is different for each execution backend and is documented
at https://www.mlflow.org/docs/latest/projects.html.
:param use_conda: This argument is deprecated. Use `env_manager='local'` instead.
If True (the default), create a new Conda environment for the run and
install project dependencies within that environment. Otherwise, run the
project in the current environment without installing any project
dependencies.
:param storage_dir: Used only if ``backend`` is "local". MLflow downloads artifacts from
distributed URIs passed to parameters of type ``path`` to subdirectories of
``storage_dir``.
:param synchronous: Whether to block while waiting for a run to complete. Defaults to True.
Note that if ``synchronous`` is False and ``backend`` is "local", this
method will return, but the current process will block when exiting until
the local run completes. If the current process is interrupted, any
asynchronous runs launched via this method will be terminated. If
``synchronous`` is True and the run fails, the current process will
error out as well.
:param run_id: Note: this argument is used internally by the MLflow project APIs and should
not be specified. If specified, the run ID will be used instead of
creating a new run.
:param run_name: The name to give the MLflow Run associated with the project execution.
If ``None``, the MLflow Run name is left unset.
:param env_manager: Specify an environment manager to create a new environment for the run and
install project dependencies within that environment. The following values
                        are supported:
- local: use the local environment
- conda: use conda
- virtualenv: use virtualenv (and pyenv for Python version management)
If unspecified, default to conda.
:return: :py:class:`mlflow.projects.SubmittedRun` exposing information (e.g. run ID)
about the launched run.
.. code-block:: python
:caption: Example
import mlflow
project_uri = "https://github.com/mlflow/mlflow-example"
params = {"alpha": 0.5, "l1_ratio": 0.01}
# Run MLflow project and create a reproducible conda environment
# on a local host
mlflow.run(project_uri, parameters=params)
.. code-block:: text
:caption: Output
...
...
Elasticnet model (alpha=0.500000, l1_ratio=0.010000):
RMSE: 0.788347345611717
MAE: 0.6155576449938276
R2: 0.19729662005412607
... mlflow.projects: === Run (ID '6a5109febe5e4a549461e149590d0a7c') succeeded ===
"""
backend_config_dict = backend_config if backend_config is not None else {}
if (
backend_config
        and not isinstance(backend_config, dict)
and os.path.splitext(backend_config)[-1] == ".json"
):
with open(backend_config, "r") as handle:
try:
backend_config_dict = json.load(handle)
except ValueError:
_logger.error(
"Error when attempting to load and parse JSON cluster spec from file %s",
backend_config,
)
raise
if use_conda is not None and env_manager is not None:
raise MlflowException.invalid_parameter_value(
"`use_conda` cannot be used with `env_manager`"
)
elif use_conda is not None:
warnings.warn(
"`use_conda` is deprecated and will be removed in a future release. "
"Use `env_manager=local` instead",
FutureWarning,
stacklevel=2,
)
env_manager = _EnvManager.CONDA if use_conda else _EnvManager.LOCAL
elif env_manager is not None:
_EnvManager.validate(env_manager)
if backend == "databricks":
mlflow.projects.databricks.before_run_validations(mlflow.get_tracking_uri(), backend_config)
elif backend == "local" and run_id is not None:
backend_config_dict[MLFLOW_LOCAL_BACKEND_RUN_ID_CONFIG] = run_id
experiment_id = _resolve_experiment_id(
experiment_name=experiment_name, experiment_id=experiment_id
)
submitted_run_obj = _run(
uri=uri,
experiment_id=experiment_id,
entry_point=entry_point,
version=version,
parameters=parameters,
docker_args=docker_args,
backend_name=backend,
backend_config=backend_config_dict,
env_manager=env_manager,
storage_dir=storage_dir,
synchronous=synchronous,
run_name=run_name,
)
if synchronous:
_wait_for(submitted_run_obj)
return submitted_run_obj | 5,358,319 |
def general_search_v2(params, sed_mod, lnprior, Alambda,
sed_obs, sed_obs_err=0.1,
vpi_obs=None, vpi_obs_err=None,
Lvpi=1.0, Lprior=1.0,
cost_order=2, av_llim=-0.001, debug=False):
"""
when p = [teff, logg, [M/H], Av, DM], theta = [teff, logg, [M/H]],
given a set of SED,
find the best theta and estimate the corresponding Av and DM
"""
n_band = len(sed_obs)
n_mod = sed_mod.shape[0]
# cope with scalar sed_obs_err
    if isinstance(sed_obs_err, float):
        sed_obs_err = np.ones_like(sed_obs, float) * sed_obs_err
# select good bands
ind_good_band = np.isfinite(sed_obs) & (sed_obs_err > 0)
n_good_band = np.sum(ind_good_band)
if n_good_band < 4:
# n_good_band = 3: unique solution
# so n_good_band should be at least 4
return [np.ones((4,), ) * np.nan for i in range(3)]
# use a subset of bands
sed_mod_select = sed_mod[:, ind_good_band]
# observed SED
sed_obs_select = sed_obs[ind_good_band]
sed_obs_err_select = sed_obs_err[ind_good_band]
# extinction coefs
Alambda_select = Alambda[ind_good_band]
# WLS to guess Av and DM
av_est, dm_est = guess_avdm_wls(
sed_mod_select, sed_obs_select, sed_obs_err_select, Alambda_select)
# cost(SED)
res_sed = sed_mod_select + av_est.reshape(-1, 1) * Alambda_select \
+ dm_est.reshape(-1, 1) - sed_obs_select
lnprob_sed = -0.5 * np.nansum(
np.abs(res_sed / sed_obs_err_select) ** cost_order, axis=1)
# cost(VPI)
if vpi_obs is not None and vpi_obs_err is not None and Lvpi > 0:
vpi_mod = 10 ** (2 - 0.2 * dm_est)
lnprob_vpi = -0.5 * ((vpi_mod - vpi_obs) / vpi_obs_err) ** 2.
else:
        lnprob_vpi = np.zeros((n_mod,), float)
lnprob_vpi = np.where(np.isfinite(lnprob_vpi), lnprob_vpi, 0) * Lvpi
# lnprob = cost(SED) + cost(VPI) + prior
if Lprior > 0:
lnprob_prior = lnprior * Lprior
# posterior probability
lnpost = lnprob_sed + lnprob_vpi + lnprob_prior
# eliminate neg Av
lnpost[av_est < av_llim] = -np.inf
lnpost -= np.nanmax(lnpost)
# for debugging the code
if debug:
return dict(params=params,
av_est=av_est,
dm_est=dm_est,
lnprob_sed=lnprob_sed,
lnprob_vpi=lnprob_vpi,
lnprior=lnprior)
# normalization
post = np.exp(lnpost)
L0 = np.sum(post)
# weighted mean
# ind_mle = np.argmax(lnpost)
# av_mle = av_est[ind_mle]
# dm_mle = dm_est[ind_mle]
# p_mle = params[ind_mle]
L1_av = np.sum(av_est * post)
L1_dm = np.sum(dm_est * post)
L1_p = np.sum(params * post.reshape(-1, 1), axis=0)
L2_av = np.sum(av_est ** 2 * post)
L2_dm = np.sum(dm_est ** 2 * post)
L2_p = np.sum(params ** 2 * post.reshape(-1, 1), axis=0)
sigma_av = np.sqrt(L2_av / L0 - L1_av ** 2 / L0 ** 2)
sigma_dm = np.sqrt(L2_dm / L0 - L1_dm ** 2 / L0 ** 2)
sigma_p = np.sqrt(L2_p / L0 - L1_p ** 2 / L0 ** 2)
# MLE model
ind_mle = np.argmax(lnprob_sed + lnprob_vpi)
av_mle = av_est[ind_mle]
dm_mle = dm_est[ind_mle]
p_mle = params[ind_mle]
p_mle = np.hstack([p_mle, av_mle, dm_mle])
p_mean = np.hstack([L1_p/L0, L1_av/L0, L1_dm/L0])
p_err = np.hstack([sigma_p, sigma_av, sigma_dm])
rms_sed_mle = np.sqrt(np.nanmean(res_sed[ind_mle] ** 2.))
rms_sed_min = np.min(np.sqrt(np.nanmean(res_sed ** 2., axis=1)))
return dict(p_mle=p_mle,
p_mean=p_mean,
p_err=p_err,
rmsmle=rms_sed_mle,
rmsmin=rms_sed_min,
ind_mle=ind_mle,
n_good=np.sum(ind_good_band)) | 5,358,320 |
def test_bigquery_value_check_missing_param(kwargs, expected):
    """Assert that an exception is raised if a required param is not passed to the BigQueryValueCheckOperatorAsync operator"""
with pytest.raises(AirflowException) as missing_param:
BigQueryValueCheckOperatorAsync(**kwargs)
assert missing_param.value.args[0] == expected | 5,358,321 |
def demangle_backtrace(backtrace):
"""
Returns a demangled backtrace.
Args:
* backtrace, a backtrace to demangle
"""
new_bt = []
frame_regex = re.compile(FRAME_PATTERN)
lines = backtrace.splitlines()
for line in lines:
frame = frame_regex.match(line)
if frame:
func = frame.group(2)
# A frame with missing symbols is a special case, so skip it
if func == '???':
new_bt.append(line)
continue
# FIXME: this logic will break once the crash probe starts sending
# function argument values; make this more generic!
if func[-2:] == '()':
# The crash probe adds the () to the function name, but c++filt
# cannot demangle a symbol with the () suffix
func_name = func[:-2]
else:
# Assume already demangled, or this is from a kernel crash record
new_bt.append(line)
continue
try:
new_func = cxxfilt.demangle(func_name)
except cxxfilt.InvalidName:
new_bt.append(line)
continue
# c++filt adds a trailing newline to the output
new_func = new_func.rstrip()
# Restore () if this was not a mangled symbol
if new_func == func_name:
new_func = func_name + '()'
repl_str = r'\1{}\3'.format(new_func)
new_line = frame_regex.sub(repl_str, line)
new_bt.append(new_line)
else:
new_bt.append(line)
return '\n'.join(new_bt) | 5,358,322 |
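The demangling itself is delegated to `cxxfilt.demangle`; a minimal sketch of that call in isolation (the exact output depends on the installed demangler, so treat the printed value as illustrative):

import cxxfilt

# A mangled Itanium-ABI symbol for a function foo(int).
print(cxxfilt.demangle('_Z3fooi'))   # typically: foo(int)
# Invalid names raise cxxfilt.InvalidName, which demangle_backtrace() catches per line.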
def _subproc_worker(pipe, parent_pipe, env_fn_wrapper, obs_bufs, ids_avail_vals, obs_shapes, obs_dtypes, keys):
"""
Control a single environment instance using IPC and
shared memory.
"""
env = env_fn_wrapper.x()
agent_ids = env.all_possible_agent_ids
parent_pipe.close()
def _write_obs(dict_obs):
        obs_agent_ids = dict_obs.keys()
for agent_id in agent_ids:
if agent_id in obs_agent_ids:
ids_avail_vals[agent_id].value = True
for k in keys:
dst = obs_bufs[agent_id][k].get_obj()
dst_np = np.frombuffer(dst, dtype=obs_dtypes[k]).reshape(obs_shapes[k]) # pylint: disable=W0212
np.copyto(dst_np, dict_obs[agent_id][k])
else:
ids_avail_vals[agent_id].value = False
try:
while True:
cmd, data = pipe.recv()
if cmd == 'reset':
pipe.send(_write_obs(env.reset()))
elif cmd == 'step':
obs, rewards, dones, infos = env.step(data)
pipe.send((_write_obs(obs), rewards, dones, infos))
elif cmd == 'close':
pipe.send(None)
break
else:
raise RuntimeError('Got unrecognized cmd %s' % cmd)
except KeyboardInterrupt:
print('ShmemVecEnv worker: got KeyboardInterrupt')
finally:
env.close() | 5,358,323 |
def resource_path(base_path, rel_path):
""" Get absolute path to resource, works for dev and for PyInstaller """
import sys
# PyInstaller creates a temp folder and stores path in _MEIPASS
return os.path.join(getattr(sys, '_MEIPASS', base_path), rel_path) | 5,358,324 |
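Usage sketch with made-up paths: in a normal interpreter `sys._MEIPASS` is absent, so the given base path is used; inside a PyInstaller one-file bundle the same call resolves under the temporary extraction directory.

print(resource_path("/opt/myapp", "assets/icon.png"))
# -> "/opt/myapp/assets/icon.png" when not frozen by PyInstaller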
def legendre(N, x):
"""
Returns the value of Legendre Polynomial P_N(x) at position x[-1, 1].
"""
    P = np.zeros(N + 1)  # P[0]..P[N] are filled below, including the N == 0 case
if N == 0:
P[0] = 1
elif N == 1:
P[1] = x
else:
P[0] = 1
P[1] = x
for i in range(2, N + 1):
P[i] = (1.0 / float(i)) * ((2 * i - 1) * x * P[i - 1] - (i - 1) *
P[i - 2])
return(P[N]) | 5,358,325 |
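A quick numerical check against the closed forms P_0(x) = 1, P_1(x) = x and P_2(x) = (3x^2 - 1)/2 (sketch):

assert legendre(0, 0.7) == 1.0
assert legendre(1, 0.7) == 0.7
assert abs(legendre(2, 0.5) - (-0.125)) < 1e-12   # (3*0.25 - 1)/2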
def updateFile(path, value):
"""
Replaces the contents of the file at the given path with the given value.
:param path: The file path of the file to overwrite.
:type path: string_types
:param value: The string to overwrite the file with.
:type value: string_types
"""
serializeDate = lambda dtOrStr: dtOrStr.strftime(dtFormatStr) if isinstance(dtOrStr, datetime) else None
with open(path, mode=u"w", encoding=u"utf-8") as openFile: # To update a file
openFile.write(dumps(value, indent=4, default=serializeDate, ensure_ascii=False))
# The default function allows it to dump datetime objects. | 5,358,326 |
def skip_any_whitespace(doc, idx):
"""Iterate through characters in ``doc`` starting from index ``idx`` until
a non-whitespace character is reached. This iteration will also attempt to
ignore comments.
Args:
doc (str): The JSPEC document.
idx (int): The starting index for the iterator.
Returns:
str: The first non-whitespace character, starting at index ``idx``
int: The index of this character in ``doc``
Raises:
JSPECDecodeError: Raised if an unterminated comment is detected.
"""
nextchar = doc[idx:idx + 1]
if nextchar not in WHITESPACE_CHARACTERS:
return nextchar, idx
while True:
idx = WHITESPACE_MATCH(doc, idx).end()
if doc[idx:idx + 2] == '//':
idx = COMMENT_MATCH(doc, idx).end()
continue
if doc[idx:idx + 2] != '/*':
break
m = MULTILINE_COMMENT_MATCH(doc, idx)
if m is None:
raise JSPECDecodeError("Unterminated comment", doc, idx)
idx = m.end()
nextchar = doc[idx:idx + 1]
return nextchar, idx | 5,358,327 |
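To experiment with the helper outside its module, plausible stand-ins for the regex constants it relies on are sketched below; these definitions are assumptions, not the module's actual values.

import re

WHITESPACE_CHARACTERS = ' \t\n\r'
WHITESPACE_MATCH = re.compile(r'[ \t\n\r]*').match                    # run of whitespace
COMMENT_MATCH = re.compile(r'//[^\n]*\n?').match                      # single-line comment
MULTILINE_COMMENT_MATCH = re.compile(r'/\*.*?\*/', re.DOTALL).match   # /* ... */ block

doc = '  // a comment\n  /* block */  {'
char, idx = skip_any_whitespace(doc, 0)
assert char == '{' and doc[idx] == '{'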
def normalized_cross_correlation(f, g):
""" Normalized cross-correlation of f and g.
Normalize the subimage of f and the template g at each step
before computing the weighted sum of the two.
Hint: you should look up useful numpy functions online for calculating
the mean and standard deviation.
Args:
f: numpy array of shape (Hf, Wf).
g: numpy array of shape (Hg, Wg).
Returns:
out: numpy array of shape (Hf, Wf).
"""
Hf, Wf = f.shape
Hg, Wg = g.shape
if Hg%2 == 0:
Hg = Hg-1
if Wg%2 == 0:
Wg = Wg-1
g = g[:Hg,:Wg]
g_mean = np.mean(g)
g_std = np.std(g)
filter_vector = g.reshape([1,Hg*Wg])
normalized_filter_vec = (g.reshape([1,Hg*Wg]) - g_mean)/g_std
out = np.zeros((Hf, Wf))
### YOUR CODE HERE
pad_height,pad_width = int((Hg-1)/2),int((Wg-1)/2)
im_padded = zero_pad(f, pad_height, pad_width)
for i in range(Hf):
for j in range(Wf):
patch_vector = im_padded[i:i+Hg,j:j+Wg].reshape([Hg*Wg,1])
patch_mean = np.mean(patch_vector)
patch_std = np.std(patch_vector)
normalized_patch_vec = (patch_vector - patch_mean)/patch_std
out[i,j] = np.dot(normalized_filter_vec,normalized_patch_vec)
### END YOUR CODE
return out | 5,358,328 |
def sphere_coordinates(sphere, inversion=False):
"""
Compute spherical coordinates (longitude, latitude) on a sphere.
Parameters
----------
sphere: (AimsTimeSurface_3_VOID)
a sphere mesh: vertices must be on a sphere with center 0.
inversion: bool
if True, the longitude coord is inverted (useful for right hemisphere)
Return
------
(longitude, latitude): tuple, each element being a TimeTexture_FLOAT
"""
# a vector of vertices where each vertex is a 3D point
# with coordinates in millimeters
if isinstance(sphere, (aims.AimsTimeSurface_3_VOID,
aims.AimsTimeSurface_2_VOID,
aims.AimsTimeSurface_4_VOID)):
vert = sphere.vertex()
nvert = numpy.asarray(vert)
else:
nvert = numpy.asarray(sphere)
#########################################################################
# A latitude texture #
#########################################################################
radius = numpy.sqrt(numpy.square(nvert[:, 0]) + numpy.square(nvert[:, 1]))
sphere_lat = numpy.arctan2(radius, nvert[:, 2])
sphere_lat = -sphere_lat * 180. / numpy.pi + 180.
slat_tex = aims.TimeTexture(sphere_lat.astype(numpy.float32))
#########################################################################
# A longitude texture #
#########################################################################
sphere_lon = numpy.arctan2(nvert[:, 1], nvert[:, 0])
sphere_lon *= 180. / numpy.pi
sphere_lon += 180
print('inversion: ', inversion)
    if inversion in (True, "True"):  # accept the documented bool as well as a string flag
print("there is an inversion", inversion)
sphere_lon = 360 - sphere_lon
slon_tex = aims.TimeTexture(sphere_lon.astype(numpy.float32))
return slon_tex, slat_tex | 5,358,329 |
def example_parameter_sets() -> Dict[str, ExampleParameterSet]:
"""Lists the available example parameter sets.
They can be downloaded with :py:func:`~download_example_parameter_sets`."""
# TODO how to add a new model docs should be updated with this part
examples = chain(
_wflow.example_parameter_sets(),
_pcrglobwb.example_parameter_sets(),
_lisflood.example_parameter_sets(),
)
return {e.name: e for e in examples} | 5,358,330 |
def get_height(img):
"""
Returns the number of rows in the image
"""
return len(img) | 5,358,331 |
def save_channels_data(data_frame, prefix, project):
"""Save channels displacement data to local cache
Saves data inside the one_params CACHE DIR.
Parameters
----------
data_frame : pandas DataFrame
DataFrame containing data to save.
prefix : str
Specify the PREFIX for the title to save CSV. CSV will be titled
'<prefix>_probe_channels.csv'. Recommend to use the trajectory
insertion x,y coords in µm as prefix. e.g. '-2243_-2000' for repeated
site.
project : str
Project that trajectories are gathered from.
Returns
-------
None.
"""
from pathlib import Path
from one.api import ONE
one = ONE()
# get alyx parameters from local system
par = one.alyx._par.as_dict()
# define the sub-path within the CACHE DIR
CHANNELS_DATA_REL_PATH = Path('histology',
'probe_data',
prefix+'_'+project+'_channels_data.csv')
# define full path - CACHE_DIR plus sub path
path_channels_data = Path(par['CACHE_DIR']).joinpath(CHANNELS_DATA_REL_PATH)
path_channels_data.parent.mkdir(exist_ok=True, parents=True)
print("Written parent DIR: ", path_channels_data.parent)
data_frame.to_csv( str(path_channels_data) )
print("Written CSV file: ", path_channels_data) | 5,358,332 |
def wpt_ask_for_name_and_coords():
"""asks for name and coordinates of waypoint that should be created"""
    name = input("Enter the name of the waypoint: ")
    print("Enter the coordinates (format: X XX°XX.XXX, X XXX°XX.XXX)")
coordstr = input(">> ")
return name, coordstr | 5,358,333 |
def car_following_with_adp(distance_2_tan, radian_at_tan, distance_integral, K, estimated_dis, rec):
""" Control with `distance_2_tan`, `radian_at_tan` and `distance_integral`
with `K` trained from the ADP algorithm.
While following the car in front of it with a simple P controller and `distance_2_car`.
"""
state = np.array([distance_2_tan, radian_at_tan, distance_integral])
MID_K = 1.5
diff = estimated_dis - 70 # try to stay 70cm away from the previous car
pwm_mid = 60
if diff < -40:
return 0, 0
elif diff >= 60:
pwm_mid = 60
else:
pwm_mid = np.clip(45.0 + MID_K * diff, 30, 60)
print('distance:', estimated_dis, 'diff:', diff, 'mid:', pwm_mid)
rec.append([estimated_dis, pwm_mid, distance_2_tan, radian_at_tan, distance_integral])
differential_drive = np.clip(-np.matmul(K, state), -100.0, 100.0)
pwm_l_new = np.clip(pwm_mid - differential_drive / 2, 0, 100)
pwm_r_new = np.clip(pwm_mid + differential_drive / 2, 0, 100)
return pwm_l_new, pwm_r_new | 5,358,334 |
def registerFactoryAdapter(for_, klass):
"""register the basic FactoryAdapter for a given interface and class"""
name = getIfName(for_)
class temp(FactoryAdapter):
factory = klass
zope.component.provideAdapter(temp, name=name) | 5,358,335 |
def setup_sdk_imports():
"""Sets up appengine SDK third-party imports."""
if six.PY3:
return
sdk_path = os.environ.get('GAE_SDK_PATH')
if not sdk_path:
return
if os.path.exists(os.path.join(sdk_path, 'google_appengine')):
sdk_path = os.path.join(sdk_path, 'google_appengine')
if 'google' in sys.modules:
sys.modules['google'].__path__.append(
os.path.join(sdk_path, 'google'))
# This sets up libraries packaged with the SDK, but puts them last in
# sys.path to prevent clobbering newer versions
sys.path.append(sdk_path)
import dev_appserver
sys.path.extend(dev_appserver.EXTRA_PATHS)
# Fixes timezone and other os-level items.
import google.appengine.tools.os_compat
(google.appengine.tools.os_compat) | 5,358,336 |
def construct_full_available(cards, suits):
"""
Construct suit availability grid - a list of available suits for each
rank slot in each player's deck. Returns grid and array giving the the
total number of available suits for each slot.
"""
num_players, num_in_deck = cards.shape
num_available = np.ones(cards.shape)*np.nan
# will store the number of possible cards that can fill each deck slot
available = []
# will store the suits that can fill each deck slot
for player in range(num_players):
avail_for_player = []
# holds sublists of available suits for this player for each rank
for rank in np.arange(num_in_deck): # iterate over card ranks
a = get_available(cards, suits, player, rank)
# list suits availed to this player at this rank (can be empty)
avail_for_player.append(a)
num_available[player, rank] = len(a)
available.append(avail_for_player)
return num_available, available | 5,358,337 |
def combine_csvs(csv_paths:list, output_path:str):
"""
Function to combine csvs (also remove duplicates) and save as a csv
Args:
csv_paths: list of str
the list of paths to csvs to be combined
output_path: str
Path to save combined csv in
Returns:
None
"""
list_dfs = []
for path in csv_paths:
df = pd.read_csv(path, sep='\t', dtype={'place':str}, error_bad_lines=False)
list_dfs.append(df)
combined_df = pd.concat(list_dfs, ignore_index=True)
combined_df.drop_duplicates(subset=['id', 'tweet'], inplace=True, ignore_index=True)
combined_df.to_csv(output_path, sep='\t', index=False) | 5,358,338 |
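Usage sketch with tiny in-memory frames written to hypothetical file names (and assuming a pandas version that still accepts `error_bad_lines`): the duplicate (id, tweet) row is dropped in the combined output.

import pandas as pd

a = pd.DataFrame({"id": [1, 2], "tweet": ["hi", "yo"], "place": ["US", "DE"]})
b = pd.DataFrame({"id": [2, 3], "tweet": ["yo", "hey"], "place": ["DE", "FR"]})
a.to_csv("part_a.tsv", sep="\t", index=False)
b.to_csv("part_b.tsv", sep="\t", index=False)

combine_csvs(["part_a.tsv", "part_b.tsv"], "combined.tsv")
assert len(pd.read_csv("combined.tsv", sep="\t")) == 3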
def get_commands_blacklist() -> list:
"""
Get commands from `features.yml` to blacklist,
preventing them from being added to the bot
:returns: list
"""
log.info("Getting commands blacklist...")
cmds = []
if osp.isfile(features_path):
with open(features_path, 'r') as file:
data = yaml.full_load(file)
            if "commands" not in data:
                log.warning("Commands blacklist object not found in features.yml file")
return list() # Return empty list
commands = data["commands"]
if not commands or len(commands) == 0:
log.debug("Empty blacklist commands data, returning...")
return list() # Return empty list
for c in commands:
c_name = c["command"]
e_enabled = c["enabled"] if "enabled" in c else True
if not e_enabled:
cmds.append(c_name)
log.debug(f"Command Found | Blacklist | {c_name}")
log.info(f"Found *{len(cmds)}* commands to blacklist.")
return cmds | 5,358,339 |
def fit_gaussians(estimated_hapcov,
chromosomes=None, output_dir=None, cov_max=None, cov_min=None, level=0, cov_sample=None):
"""
Fits a 7-component Gaussian mixture model to the coverage distribution of the sample, using the appropriate attributes of the PloidyEstimation
object. The center of the first Gaussian is initialized from a narrow region around the value of the estimated_hapcov attribute. The centers of
the other Gaussians are initialized in a region around the value of estimated_hapcov multiplied by consecutive whole numbers.
The parameters of the fitted model (center, sigma and weight) for all seven Gaussians are both saved to the GaussDistParams.pkl file (in
output_dir, for later reuse) and set as the value of the distribution_dict attribute.
:param cov_sample: a sample of the coverage distribution of the investigated sample, if None, it is loaded from the temporary files of the output_dir (default: None) (array-like)
:param cov_min: the maximum value of the coverage for a position to be considered in the estimation (default: None) (int)
:param output_dir: the path to the output directory of the PloidyEstimator object, where temporary files are located. If not None, distribution parameters are saved there as GaussDistParams.pkl. (default: None) (str)
:param chromosomes: list of chromosomes for the sample (default: None) (array-like)
:param estimated_hapcov: the estimated value for the haploid coverage, used as prior (float)
:param level: the level of indentation used in verbose output (default: 0) (int)
:returns: dictionary containing the fitted parameters of the 7 Gaussians
"""
def get_samples(coverage_distribution, estimated_haploid_cov, number_of_iterations, burn_period):
K = 7
halfwidth_of_uniform = 0.2
__gc.collect()
model = __pm.Model()
with model:
p = __pm.Dirichlet('p', a=__np.array([1., 1., 1., 1., 1., 1., 1.]), shape=K)
c1 = __pm.Uniform('c1', (1 - halfwidth_of_uniform) * estimated_haploid_cov,
(1 + halfwidth_of_uniform) * estimated_haploid_cov)
means = __tt.stack([c1, c1 * 2, c1 * 3, c1 * 4, c1 * 5, c1 * 6, c1 * 7])
order_means_potential = __pm.Potential('order_means_potential',
__tt.switch(means[1] - means[0] < 0, -__np.inf, 0)
+ __tt.switch(means[2] - means[1] < 0, -__np.inf, 0))
sds = __pm.Uniform('sds', lower=0, upper=estimated_haploid_cov / 2, shape=K)
category = __pm.Categorical('category',
p=p,
shape=len(coverage_distribution))
points = __pm.Normal('obs',
mu=means[category],
sd=sds[category],
observed=coverage_distribution)
with model:
step1 = __pm.Metropolis(vars=[p, sds, means])
step2 = __pm.ElemwiseCategorical(vars=[category], values=[0, 1, 2, 3, 4, 5, 6])
__logging.getLogger("pymc3").setLevel(__logging.WARNING)
tr = __pm.sample(draw=number_of_iterations-burn_period, tune=burn_period,
step=[step1, step2], progressbar=False, verbose=0, compute_convergence_checks=False)
# trace = tr[burn_period:]
# return trace
return tr
if cov_sample is None:
cov_sample = io.get_coverage_distribution(chromosomes=chromosomes,
output_dir=output_dir,
cov_max=cov_max,
cov_min=cov_min)
iterations2 = 15000
burn_beginning2 = 10000
# logger = __logging.getLogger("pymc3")
# logger.propagate = False
trace2 = get_samples(coverage_distribution=cov_sample,
estimated_haploid_cov=estimated_hapcov,
number_of_iterations=iterations2,
burn_period=burn_beginning2)
std_trace = trace2.get_values('sds', chains=[0])
p_trace = trace2.get_values('p', chains=[0])
sigma = std_trace.mean(axis=0)
p = p_trace.mean(axis=0)
mu = __np.array([trace2.get_values('c1', chains=[0]).mean() * (i + 1) for i in range(7)])
prior_dict = {'mu': mu, 'sigma': sigma, 'p': p}
del trace2
if output_dir:
io.save_obj(prior_dict, output_dir + '/GaussDistParams')
return prior_dict | 5,358,340 |
def remove_outliers(cords, eps: int = 1, min_samples: int = 2):
"""
Remove outlying cells based on UMAP embeddings with DBScan (density based clustering)
Call as: sub.obs["d_cluster"] = remove_outliers(sub.obsm["X_umap"], min_samples = 10)
Args:
cords: adata UMAP coordinates, typically adata.obsm["X_umap"]
eps: Maximum distance between two clusters to still be considered neighbors
min_samples: Minimum samples of a cluster
Returns:
Pandas DataFrame of clusters
"""
from natsort import natsorted
from sklearn.cluster import DBSCAN
clustering = DBSCAN(eps=eps, min_samples=min_samples).fit(cords)
cluster = clustering.labels_.astype("U")
return pd.Categorical(cluster, categories=natsorted(np.unique(cluster))) | 5,358,341 |
def sum_squares(n):
"""
Returns: sum of squares from 1 to n-1
Example: sum_squares(5) is 1+4+9+16 = 30
Parameter n: The number of steps
Precondition: n is an int > 0
"""
# Accumulator
total = 0
for x in range(n):
total = total + x*x
return total | 5,358,342 |
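The loop agrees with the closed form 0^2 + 1^2 + ... + (n-1)^2 = (n-1)n(2n-1)/6, which gives a convenient cross-check (sketch):

for n in (1, 2, 5, 10):
    assert sum_squares(n) == (n - 1) * n * (2 * n - 1) // 6   # e.g. n=5 -> 30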
def load_election_dates():
""" This is from before we had direct access to election data and needed it, we are still using the
data from a csv, to populate the ElectionClassDate model.
"""
logger.info('Loading election dates...')
import pandas as pd
frame = pd.read_excel('data/election_dates.xlsx')
frame.columns = [column.lower() for column in frame.columns]
load_table(
frame, 'ofec_election_dates',
indexes=('office', 'state', 'district', 'election_yr', 'senate_class'),
)
logger.info('Finished loading election dates.') | 5,358,343 |
def resolve_attribute(thing, name):
"""
A replacement resolver function for looking up symbols as members of
*thing*. This is effectively the same as ``thing.name``. The *thing* object
can be a :py:func:`~collections.namedtuple`, a custom Python class or any
other object. Each of the members of *thing* must be of a compatible data
type.
.. warning::
This effectively exposes all members of *thing*. If any members are
sensitive, then a custom resolver should be used that checks *name*
against a whitelist of attributes that are allowed to be accessed.
:param thing: The object on which the *name* attribute will be accessed.
:param str name: The symbol name that is being resolved.
:return: The value for the corresponding attribute *name*.
"""
if not hasattr(thing, name):
raise errors.SymbolResolutionError(name, thing=thing)
return getattr(thing, name) | 5,358,344 |
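Usage sketch with a namedtuple (the `errors` module referenced above belongs to the surrounding library and is assumed to be importable alongside the resolver):

from collections import namedtuple

Point = namedtuple('Point', ['x', 'y'])
p = Point(x=3, y=4)
assert resolve_attribute(p, 'x') == 3        # equivalent to p.x
# resolve_attribute(p, 'z') would raise errors.SymbolResolutionError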
async def test_form_user_already_configured(hass):
"""Test we abort if already configured."""
await setup.async_setup_component(hass, "persistent_notification", {})
config_entry = MockConfigEntry(
domain=DOMAIN,
data={CONF_HOST: "1.1.1.1", CONF_PORT: 12, CONF_SYSTEM_ID: 46},
)
config_entry.add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["errors"] == {}
with patch(
"homeassistant.components.somfy_mylink.config_flow.SomfyMyLinkSynergy.status_info",
return_value={"any": "data"},
), patch(
"homeassistant.components.somfy_mylink.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
CONF_HOST: "1.1.1.1",
CONF_PORT: 1234,
CONF_SYSTEM_ID: "456",
},
)
await hass.async_block_till_done()
assert result2["type"] == "abort"
assert len(mock_setup_entry.mock_calls) == 0 | 5,358,345 |
def moment_fluxes(indices, wts_left, wts_right, xi_left, xi_right):
"""
Computes moment fluxes
inputs:
-------
num_nodes: number of quadrature nodes, depends on inversion algorithm
indices: moment indices, size [ num_moments, num_internal_coords ]
wts_left: weights on the left side, size [ num_nodes ]
wts_right: weights on the right side, size [ num_nodes ]
xi_left: abscissas on the left side, size [ num_internal_coords, num_nodes ]
xi_right: abscissas on the right side, size [ num_internal_corods, num_nodes ]
"""
num_moments = len(indices)
num_coords, num_nodes = xi_left.shape
flux = np.zeros(num_moments)
for i_moment in range(num_moments):
for i_node in range(num_nodes):
# compute local fluxes
flux_left = local_flux(
wts_left[i_node], xi_left[:, i_node], indices[i_moment, :]
)
flux_right = local_flux(
wts_right[i_node], xi_right[:, i_node], indices[i_moment, :]
)
# limiter (?)
flux_left = flux_left * max(xi_left[0, i_node], 0.0)
flux_right = flux_right * min(xi_right[0, i_node], 0.0)
# quadrature
flux[i_moment] += flux_left + flux_right
return flux | 5,358,346 |
def main_app():
""" Initializes and handles PySimpleGUI Frames and Windows
"""
sg.SetOptions(element_padding=(0, 0))
progressbar = [[sg.ProgressBar(100, orientation='h', size=(31, 10), key='progressbar')]]
textWaiting = [[sg.Text('STATUS: NONE', font=('Helvetica', 10), size=(20, 1),
justification='center', key='textWaiting')]]
layout = [
[sg.Button("Click here to Type the Clipboard"), sg.Text(' | '), sg.Frame('', layout=textWaiting)],
[sg.Frame('Progress', layout=progressbar), sg.Button('EXIT', size=(3, 1), font=('Helvetica', 8), button_color=('white', 'firebrick3'))]]
# location=[960, 1004],
window = sg.Window(
title="Type Clipboard",
layout=layout,
margins=(25,10),
no_titlebar=True,
keep_on_top=True,
grab_anywhere=True,
finalize=True
)
progress_bar = window['progressbar']
textWaiting = window['textWaiting']
# Create an event loop
while True:
event, values = window.read()
if event == "Click here to Type the Clipboard":
textWaiting.update("STATUS: HOLD")
clipboard_to_keystrokes(progress_bar, textWaiting)
textWaiting.update("STATUS: OK")
elif event == "EXIT" or event == sg.WIN_CLOSED:
break
window.close() | 5,358,347 |
def get_args():
"""Get the command-line arguments"""
parser = argparse.ArgumentParser(
description='Emulate wc (word count)',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('file',
help='Input file(s)',
metavar='FILE',
nargs='*',
type=argparse.FileType('rt'),
default=[sys.stdin])
return parser.parse_args() | 5,358,348 |
def froc_curve_per_side(df_gt, df_pred, thresholds, verbose, cases="all"):
"""
Compute FROC curve per side/breast. All lesions in a breast are considered TP if
any lesion in that breast is detected.
"""
assert cases in ["all", "cancer", "benign"]
if not cases == "all":
df_exclude = df_gt[~(df_gt["Class"] == cases)]
df_gt = df_gt[df_gt["Class"] == cases]
df_pred = df_pred[~(df_pred["StudyUID"].isin(set(df_exclude["StudyUID"])))]
df_gt["Side"] = df_gt["View"].astype(str).str[0]
df_pred["Side"] = df_pred["View"].astype(str).str[0]
total_volumes = len(df_pred.drop_duplicates(subset=["StudyUID", "View"]))
total_tps = len(df_gt.drop_duplicates(subset=["PatientID", "Side"]))
tpr = []
fps = []
if verbose:
print("{} cases FROC:".format(cases.upper()))
for th in sorted(thresholds, reverse=True):
df_th = df_pred[df_pred["Score"] >= th]
df_th_unique_tp = df_th.drop_duplicates(subset=["PatientID", "Side", "TP"])
num_tps_th = float(sum(df_th_unique_tp["TP"]))
tpr_th = num_tps_th / total_tps
num_fps_th = float(len(df_th[df_th["TP"] == 0]))
fps_th = num_fps_th / total_volumes
tpr.append(tpr_th)
fps.append(fps_th)
if verbose:
print(
"Sensitivity {0:.2f} at {1:.2f} FPs/volume (threshold: {2:.4f})".format(
tpr_th * 100, fps_th, th
)
)
return tpr, fps | 5,358,349 |
def make_lists(*args, **kwargs):
"""
The make_lists function attaches auxiliary things to an input key_list
of (normally) AD objects. Each key gets exactly one auxiliary thing from
each other list -- these lists can be as long as the key_list, or have
only one item in (in which case they don't have to be lists at all).
Parameters
----------
args: lists of str/AD (or single str/AD)
key_list and auxiliary things to be matched to each AD
kwargs["force_ad"]: bool
coerce strings into AD objects?
Returns
-------
tuple of lists
the lists made from the keys and values
"""
log = logutils.get_logger(__name__)
force_ad = kwargs.pop("force_ad", False)
if kwargs:
raise TypeError("make_lists() got unexpected keyword arguments "
"{}".format(kwargs.keys()))
ret_value = [arg if isinstance(arg, (list, tuple)) else [arg]
for arg in args]
# We allow only one value that can be assigned to multiple keys
len_list = len(ret_value[0])
if len_list > 1:
for i in range(1, len(ret_value)):
if len(ret_value[i]) == 1:
ret_value[i] *= len_list
if force_ad:
# We only want to open as many AD objects as there are unique entries,
# so collapse all items in lists to a set and multiple keys with the
# same value will be assigned references to the same open AD object
ad_map_dict = {}
for x in set(itertools.chain(*ret_value)):
try:
ad_map_dict.update({x: x if isinstance(x, astrodata.AstroData)
or x is None else astrodata.open(x)})
except:
ad_map_dict.update({x: None})
log.warning(f"Cannot open file {x}")
ret_value = [[ad_map_dict[x] for x in List] for List in ret_value]
return ret_value | 5,358,350 |
def current():
"""Prints the current configuration
"""
yaml.register_class(profile.ProfileData)
#yaml.register_class(dict)
builder_data.register_classes(yaml)
data = {
'profile': profile.get(),
'builder': {
'filepath': builder_data.get_storage_filepath(),
#'data': builder_data.get()
}
}
yaml.dump(data, sys.stdout) | 5,358,351 |
def get_free_comment_url_ajax(content_object, parent=None, ajax_type='json'):
"""
Given an object and an optional parent, this tag gets the URL to POST to for the
creation of new ``FreeThreadedComment`` objects. It returns the latest created object
in the AJAX form of the user's choosing (json or xml).
"""
kwargs = get_contenttype_kwargs(content_object)
kwargs.update({'ajax' : ajax_type})
if parent:
if not isinstance(parent, FreeThreadedComment):
            raise template.TemplateSyntaxError("get_free_comment_url_ajax requires its parent object to be of type FreeThreadedComment")
kwargs.update({'parent_id' : getattr(parent, 'pk', getattr(parent, 'id'))})
return reverse('tc_free_comment_parent_ajax', kwargs=kwargs)
else:
return reverse('tc_free_comment_ajax', kwargs=kwargs) | 5,358,352 |
def getDefensivePacts(playerOrID, askingPlayerOrID):
"""
Returns a list of CyPlayers who have a Defensive Pact with playerOrID.
The askingPlayerOrID is used to limit the list to players they have met.
"""
pacts = []
askedPlayer, askedTeam = getPlayerAndTeam(playerOrID)
askingPlayer, askingTeam = getPlayerAndTeam(askingPlayerOrID)
for player in players(alive=True, barbarian=False, minor=False):
if (askedPlayer.getTeam() != player.getTeam() and
(askingTeam.isHasMet(player.getTeam()) or gc.getGame().isDebugMode())):
if askedTeam.isDefensivePact(player.getTeam()):
pacts.append(player)
return pacts | 5,358,353 |
def test_tensor_has_basic_operations(free_alg):
"""Test some of the basic operations on tensors.
Tested in this module:
1. Addition.
2. Merge.
3. Free variable.
4. Dummy reset.
5. Equality comparison.
6. Expansion
7. Mapping to scalars.
8. Base presence testing.
"""
dr = free_alg
p = dr.names
i, j, k, l, m = p.R_dumms[:5]
x = IndexedBase('x')
r = p.R
v = p.v
tensor = (
dr.sum((l, r), x[i, l] * v[l]) +
dr.sum((m, r), x[j, m] * v[m])
)
# Without dummy resetting, they cannot be merged.
assert tensor.n_terms == 2
assert tensor.merge().n_terms == 2
# Free variables are important for dummy resetting.
free_vars = tensor.free_vars
assert free_vars == {x.label, i, j}
# Reset dummy.
reset = tensor.reset_dumms()
expected = (
dr.sum((k, r), x[i, k] * v[k]) +
dr.sum((k, r), x[j, k] * v[k])
)
assert reset == expected
assert reset.local_terms == expected.local_terms
# Merge the terms.
merged = reset.merge()
assert merged.n_terms == 1
term = merged.local_terms[0]
assert term == Term(((k, r),), x[i, k] + x[j, k], (v[k],))
# Slightly separate test for expansion.
c, d = symbols('c d')
tensor = dr.sum((i, r), x[i] * (c + d) * v[i])
assert tensor.n_terms == 1
expanded = tensor.expand()
assert expanded.n_terms == 2
# Make sure shallow expansion does not delve into the tree.
shallowly_expanded = tensor.shallow_expand()
assert shallowly_expanded.n_terms == 1
# Make sure shallow expansion does the job on the top-level.
y = IndexedBase('y')
tensor = dr.sum((i, r), (x[i] * (c + d) + y[i]) * v[i])
assert tensor.n_terms == 1
expanded = tensor.expand()
assert expanded.n_terms == 3
shallowly_expanded = tensor.shallow_expand()
assert shallowly_expanded.n_terms == 2
# Here we also test concrete summation facility.
expected = dr.sum(
(i, r), (j, [c, d]), x[i] * j * v[i]
)
assert expected == dr.sum(
(i, r), x[i] * c * v[i] + x[i] * d * v[i]
).expand()
# Test mapping to scalars.
tensor = dr.sum((i, r), x[i] * v[i, j])
y = IndexedBase('y')
substs = {x: y, j: c}
res = tensor.map2scalars(lambda x: x.xreplace(substs))
assert res == dr.sum((i, r), y[i] * v[i, c])
res = tensor.map2scalars(lambda x: x.xreplace(substs), skip_vecs=True)
assert res == dr.sum((i, r), y[i] * v[i, j])
assert res == tensor.map2amps(lambda x: x.xreplace(substs))
# Test base presence.
tensor = dr.einst(x[i] * v[i])
assert tensor.has_base(x)
assert tensor.has_base(v)
assert not tensor.has_base(IndexedBase('y'))
assert not tensor.has_base(Vec('w'))
# Test Einstein summation over multiple ranges.
a1, a2 = p.a1, p.a2
summand = x[a1, a2] * v[a1, a2]
res = dr.einst(summand).simplify()
assert res.n_terms == 4
ranges = (p.R, p.S)
assert res == dr.sum((a1, ranges), (a2, ranges), summand).simplify() | 5,358,354 |
def cosine_score(vector1, vector2):
"""Calculate cosine cosine score between two spectral vectors."""
return np.dot(vector1, vector2)/np.sqrt(np.dot(np.dot(vector1, vector1), np.dot(vector2, vector2))) | 5,358,355 |
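# A quick sanity check of cosine_score with NumPy arrays: parallel vectors
# score ~1.0, orthogonal vectors score 0.0.
import numpy as np

a = np.array([1.0, 2.0, 3.0])
print(cosine_score(a, 2 * a))                                     # ~1.0
print(cosine_score(np.array([1.0, 0.0]), np.array([0.0, 5.0])))   # 0.0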
def fixture_times() -> Problem[int]:
"""Generate a problem which tests a times function."""
@test_case(4, 6)
@test_case(-2, 16)
@test_case(2, -3, aga_hidden=True, aga_output=-6)
@problem()
def times(x: int, y: int) -> int:
"""Compute x * y."""
return x * y
return times | 5,358,356 |
def get_axis_bounds(ax=None):
"""Obtain bounds of axis in format compatible with ipyleaflet
Returns:
bounds np.array with lat and lon bounds.
bounds.tolist() gives [[s, w],[n, e]]
"""
if ax is None:
ax = plt.gca()
return np.array([ax.get_ylim(), ax.get_xlim()]).T | 5,358,357 |
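# A minimal sketch: plot lon/lat points with matplotlib, then read back the
# axis bounds in the [[south, west], [north, east]] form ipyleaflet expects
# (assuming the usual lon-on-x, lat-on-y convention).
import matplotlib.pyplot as plt

plt.plot([12.4, 12.6], [55.6, 55.8])
bounds = get_axis_bounds()
print(bounds.tolist())   # [[south, west], [north, east]]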
def test_log_command_error(fixed_time, tmpdir):
"""Test the --log option logs when command fails."""
cmd = FakeCommand(error=True)
log_path = tmpdir.joinpath("log")
cmd.main(["fake", "--log", log_path])
with open(log_path) as f:
assert f.read().startswith("2019-01-17T06:00:37,040 fake") | 5,358,358 |
def get_pymatgen_structure(cell:tuple) -> Structure:
"""
Get pymatgen structure from cell.
Args:
cell: Cell (lattice, scaled_positions, symbols).
"""
return Structure(lattice=cell[0],
coords=cell[1],
species=cell[2]) | 5,358,359 |
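# A hedged example: a bcc Fe cell expressed as the (lattice, scaled_positions,
# symbols) tuple that get_pymatgen_structure expects; the lattice constant is
# just an illustrative value.
cell = (
    [[2.87, 0.0, 0.0], [0.0, 2.87, 0.0], [0.0, 0.0, 2.87]],  # lattice (Angstrom)
    [[0.0, 0.0, 0.0], [0.5, 0.5, 0.5]],                      # fractional coordinates
    ["Fe", "Fe"],                                            # chemical symbols
)
structure = get_pymatgen_structure(cell)
print(structure.composition)  # Fe2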
def test_run_command_no_output():
"""Test run a command without output"""
# GIVEN a command that returns no output
cmd = ["cd", "./"]
# WHEN running it with execute command
res = execute_command(cmd)
# THEN assert that the empty string is returned
assert res == "" | 5,358,360 |
def lambda_handler(event, context):
"""
    Launch Stop_Instances() from the Lambda handler ("lambda_function.lambda_handler").
    The function timeout needs to be more than 1 minute so that it can finish;
    if you have a large number of instances to shut down, increase the timeout.
"""
Stop_Instances() | 5,358,361 |
def get_next_event(event_id: int):
"""Returns the next event from the selected one.
This route may fail if the event is not repeated, or if the event is
too far ahead in time (to avoid over-generation of events).
"""
# TODO(funkysayu): Implement the user visibility limit.
# Check if we already created the event.
maybe_created = Event.query.filter_by(parent_id=event_id).one_or_none()
if maybe_created is not None:
return jsonify(maybe_created.to_dict())
event = Event.query.filter_by(id=event_id).one_or_none()
if event is None:
return jsonify(error='Event %r not found' % event_id), 404
try:
next_event = event.create_next_event()
except ValueError:
return jsonify(
error='Cannot create the next occurrence of a non-repeated event.'), 412
# Ensure we have an event generation limit.
if next_event.date - event.date > MAX_TIMEDELTA_EVENT_GENERATION:
return jsonify(
error='Event is over the maximum generation period',
max_period=MAX_TIMEDELTA_EVENT_GENERATION), 400
db.session.add(next_event)
db.session.commit()
return jsonify(next_event.to_dict()) | 5,358,362 |
def conditional_samples(x_3, x_prime_3, MC_method, M):
"""Generate mixed sample sets of interest distributed accroding to a conditional PDF.
Parameters
----------
x_3 : np.ndarray
Array with shape (n_draws, 3).
x_prime : np.ndarray
Array with shape (n_draws, 3).
MC_method : string
Specify the Monte Carlo estimator. One of ["brute force", "DLR"],
where "DLR" denotes to the double loop reordering approach.
M : int
The number of conditional bins to genetate if `MC_method` is "DLR".
Returns
-------
x_mix : np.ndarray
Mixed sample sets. Shape has the form (n_draws, 3, n_draws, 3).
"""
n_draws, n_params = x_3.shape
if MC_method == "Brute force":
x_3_mix = np.zeros((n_draws, n_params, n_draws, n_params))
for i in range(n_params):
for j in range(n_draws):
x_3_mix[j, i] = x_3
x_3_mix[j, i, :, i] = x_prime_3[j, i]
if MC_method == "DLR":
conditional_bin = x_3[:M]
x_3_mix = np.zeros((M, n_params, n_draws, n_params))
        # subdivide unconditional samples into M equally sized bins,
        # within each bin x_i is held fixed.
for i in range(n_params):
for j in range(M):
x_3_mix[j, i] = x_3
x_3_mix[j, i, :, i] = conditional_bin[j, i]
return x_3_mix | 5,358,363 |
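# A shape check for conditional_samples with random draws of three parameters;
# assumes numpy is imported as np in the module, as the function itself does.
import numpy as np

rng = np.random.default_rng(0)
x_3 = rng.standard_normal((100, 3))
x_prime_3 = rng.standard_normal((100, 3))
print(conditional_samples(x_3, x_prime_3, "Brute force", M=20).shape)  # (100, 3, 100, 3)
print(conditional_samples(x_3, x_prime_3, "DLR", M=20).shape)          # (20, 3, 100, 3)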
def logcmd(message):
"""
Logs command out of message in a file.
"""
if not path.isdir("SAVES"):
os.mkdir("SAVES")
with open("SAVES/cmdlog.txt", "a") as fw:
time = strftime("%d.%m.%Y %H:%M:%S", gmtime())
fw.write("[%s] [%s (%s)] [%s (%s)] '%s'\n" % (time, message.server.name, message.server.id, message.author.name, message.author.id, message.content)) | 5,358,364 |
def recombine(geno_matrix, chr_index, no_loci): #, no_samples):
"""
Recombine at randomly generated breakpoints.
"""
recomb = {0: 0, 1: 2, 2: 1, 3: 3} # '0|1' <-> '1|0'
no_samples = geno_matrix.shape[0]
#print(no_samples)
masked, bp_list = designate_breakpoints(chr_index, no_loci, no_samples)
z = np.copy(geno_matrix)
if np.asarray(bp_list).size > 0:
# this would modify the original geno_matrix too! Work with copy!
try:
z[masked] = np.vectorize(recomb.get)(z[masked])
        except Exception:
return z
return z | 5,358,365 |
def update_record_files_async(object_version):
"""Get the bucket id and spawn a task to update record metadata."""
# convert to string to be able to serialize it when sending to the task
str_uuid = str(object_version.bucket_id)
return update_record_files_by_bucket.delay(bucket_id=str_uuid) | 5,358,366 |
def check_platform():
"""
    Return the name of the underlying platform as a string (e.g. "Linux", "Windows", "Darwin").
"""
import platform
return platform.system() | 5,358,367 |
def partie(nb_joueurs, bot, mode_bot, reprendre_partie=None,automat=False):
"""
    Main function grouping the calls to the key phases
    of an Azul game.
"""
hauteur_partie, largeur_partie = 600,1025
cree_fenetre(largeur_partie,hauteur_partie)
rectangle(0,0,largeur_partie,hauteur_partie,remplissage="#5D2F25")
texte(10,5,"sauvegarde en cours ✔",couleur="green",taille=10)
joueurs,fabriques,sac,defausse,bot,nb_joueurs, mode_bot = init(reprendre_partie, nb_joueurs, bot, mode_bot)
print("Le sac est actuellement de taille :", len(sac))
while not partie_fini(joueurs):
dessin_plateau_joueurs(joueurs)
dessin_fabriques(fabriques)
quitter = offre_de_fabriques(fabriques, joueurs, bot, mode_bot,automat=automat)
if quitter:
ferme_fenetre()
return
decoration_mur(joueurs, defausse)
fabriques = creer_fabrique_tuiles(nb_joueurs,sac, defausse)
creer_sauvegarde(sac.extend(defausse),joueurs,fabriques, defausse, bot, mode_bot)
    #--END OF GAME--
if partie_fini(joueurs):
        score_fin(joueurs) # Recompute the scores at the end of the game
supprimer_sauvegarde()
efface_tout()
vainqueurs = gagnants(joueurs)
dessin_gagnants(vainqueurs)
_,x,y=attente_clic_ou_touche()
ferme_fenetre() | 5,358,368 |
def get_field_keys(table):
""" Field keys for a selected table
:param table:
    :return: list of dictionaries
"""
cql = 'SHOW FIELD KEYS FROM \"{}\"'.format(table)
response = db_man.influx_qry(cql).get_points()
return [x for x in response] | 5,358,369 |
def extract_text_from_spans(spans, join_with_space=True, remove_integer_superscripts=True):
"""
Convert a collection of page tokens/words/spans into a single text string.
"""
if join_with_space:
join_char = " "
else:
join_char = ""
spans_copy = spans[:]
if remove_integer_superscripts:
for span in spans:
flags = span['flags']
if flags & 2**0: # superscript flag
if is_int(span['text']):
spans_copy.remove(span)
else:
span['superscript'] = True
if len(spans_copy) == 0:
return ""
spans_copy.sort(key=lambda span: span['span_num'])
spans_copy.sort(key=lambda span: span['line_num'])
spans_copy.sort(key=lambda span: span['block_num'])
# Force the span at the end of every line within a block to have exactly one space
# unless the line ends with a space or ends with a non-space followed by a hyphen
line_texts = []
line_span_texts = [spans_copy[0]['text']]
for span1, span2 in zip(spans_copy[:-1], spans_copy[1:]):
if not span1['block_num'] == span2['block_num'] or not span1['line_num'] == span2['line_num']:
line_text = join_char.join(line_span_texts).strip()
if (len(line_text) > 0
and not line_text[-1] == ' '
and not (len(line_text) > 1 and line_text[-1] == "-" and not line_text[-2] == ' ')):
if not join_with_space:
line_text += ' '
line_texts.append(line_text)
line_span_texts = [span2['text']]
else:
line_span_texts.append(span2['text'])
line_text = join_char.join(line_span_texts)
line_texts.append(line_text)
return join_char.join(line_texts).strip() | 5,358,370 |
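# A toy example with two PyMuPDF-style span dicts on the same line; flags=0 so
# the superscript handling is not triggered.
spans = [
    {"text": "Hello", "flags": 0, "span_num": 0, "line_num": 0, "block_num": 0},
    {"text": "world", "flags": 0, "span_num": 1, "line_num": 0, "block_num": 0},
]
print(extract_text_from_spans(spans))                         # "Hello world"
print(extract_text_from_spans(spans, join_with_space=False))  # "Helloworld"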
def cf_resource_pool(cli_ctx, *_):
"""
Client factory for resourcepools.
"""
return cf_connectedvmware(cli_ctx).resource_pools | 5,358,371 |
def make_packing_list(doc):
"""make packing list for Product Bundle item"""
if doc.get("_action") and doc._action == "update_after_submit": return
parent_items = []
for d in doc.get("items"):
if frappe.db.get_value("Product Bundle", {"new_item_code": d.item_code}):
for i in get_product_bundle_items(d.item_code):
update_packing_list_item(doc, i.item_code, flt(i.qty)*flt(d.stock_qty), d, i.description)
if [d.item_code, d.name] not in parent_items:
parent_items.append([d.item_code, d.name])
cleanup_packing_list(doc, parent_items) | 5,358,372 |
def init_log():
""" Initialise the logging. """
level = script_args.log_level
log_dir = os.path.abspath(script_args.log_dir)
logger = logging.getLogger(__name__)
log_format = (
'[%(asctime)s] [%(levelname)s] '
'[%(name)s] [%(funcName)s():%(lineno)s] '
'[PID:%(process)d] %(message)s')
if not os.path.isdir(log_dir):
logging.error('Logging directory \'%s\' does not exist', log_dir)
sys.exit(os.EX_IOERR)
dir_re = re.compile(u'/$')
    if not dir_re.search(log_dir):
log_dir += "/"
# Define the logging stream
stream = open(log_dir + LOG_FILE, 'w+')
log_levels = {
'unset': logging.NOTSET,
'debug': logging.DEBUG,
'info': logging.INFO,
'warning': logging.WARNING,
'error': logging.ERROR,
'critical': logging.CRITICAL
}
log_level = log_levels[level]
coloredlogs.install(
level=log_level,
fmt=log_format,
datefmt='%d/%m/%Y %H:%M:%S',
stream=stream)
log('Logging to \'%s\' at level \'%s\'' % (log_dir + LOG_FILE, level))
return logger | 5,358,373 |
def get_bcolz_col_names(cols):
"""整理适应于bcolz表中列名称规范,返回OrderedDict对象"""
trantab = str.maketrans(IN_TABLE, OUT_TABLE) # 制作翻译表
# col_names = OrderedDict(
# {col: get_acronym(col.translate(trantab)) for col in cols})
col_names = OrderedDict()
for col in cols:
if col in (AD_FIELD_NAME, SID_FIELD_NAME, TS_FIELD_NAME):
col_names[col] = col
else:
col_names[col] = regular_name(col, trantab)
if len(col_names.values()) != len(set(col_names.values())):
raise ValueError("整理后得列名称包含重复值")
return col_names | 5,358,374 |
def classify_loss(logits, target, eps):
"""
"""
if eps > 0:
loss = cross_entropy_with_smoothing(logits, target, eps, None)
else:
loss = F.cross_entropy(logits, target.view(-1))
return loss | 5,358,375 |
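# A hedged example for the eps == 0 path (plain cross-entropy); the smoothed
# branch relies on a cross_entropy_with_smoothing helper defined elsewhere in
# the module, and F is assumed to be torch.nn.functional.
import torch

logits = torch.randn(8, 5)             # batch of 8 samples, 5 classes
target = torch.randint(0, 5, (8, 1))   # class indices
print(classify_loss(logits, target, eps=0).item())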
def flip(position, adjacent):
"""finds the furthest position on grid up to which the player has captured enemy pieces"""
interval = (adjacent[0] - position[0], adjacent[1] - position[1])
if adjacent[0] < 0 or adjacent[0] > (8*tile_size):
return False
elif adjacent[1] < 0 or adjacent[1] > (8*tile_size):
return False
check_piece = (adjacent[0] + interval[0], adjacent[1] + interval[1])
if check_piece in current_piece:
flip_back(adjacent, (interval[0] * -1, interval[1] * -1))
else:
return flip(adjacent, check_piece) | 5,358,376 |
def is_android_raw(raw):
"""
Returns a string that describes the type of file, for common Android
specific formats
"""
val = None
# We do not check for META-INF/MANIFEST.MF,
# as you also want to analyze unsigned APKs...
# AndroidManifest.xml should be in every APK.
# classes.dex and resources.arsc are not required!
# if raw[0:2] == b"PK" and b'META-INF/MANIFEST.MF' in raw:
# TODO this check might be still invalid. A ZIP file with stored APK inside would match as well.
# probably it would be better to rewrite this and add more sanity checks.
if raw[0:2] == b"PK" and b'AndroidManifest.xml' in raw:
val = "APK"
elif raw[0:3] == b"dex":
val = "DEX"
elif raw[0:3] == b"dey":
val = "DEY"
elif raw[0:4] == b"\x03\x00\x08\x00" or raw[0:4] == b"\x00\x00\x08\x00":
val = "AXML"
elif raw[0:4] == b"\x02\x00\x0C\x00":
val = "ARSC"
return val | 5,358,377 |
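# A minimal sketch: classify a file from its raw bytes; "sample.apk" is a
# placeholder path, not a file shipped with the original code.
with open("sample.apk", "rb") as f:
    raw = f.read()
print(is_android_raw(raw))  # "APK", "DEX", "DEY", "AXML", "ARSC" or None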
def icmp_worker(shutdown: Event, q: queue.Queue):
"""A worker thread which processes ICMP requests; sending packets and listening for matching responses."""
state = {}
with ICMPv4Socket(None, True) as sock:
while not shutdown.is_set():
# Send one
try:
item = q.get(block=False, timeout=0.001)
request = item._request
state[(request._id, request._sequence)] = item
# log.info(f"Sending request {item._request!r}")
sock.send(item._request)
except (ICMPLibError, ICMPSocketError, queue.Empty):
pass
            # Receive one
try:
if response := sock.receive(None, 0.001):
key = (response.id, response.sequence)
if key in state:
# log.info(f"Got response {response!r}")
state[key].set(response)
del state[key]
else:
# log.warning(f"Recieved non-matching response {response!r}")
pass
except (ICMPLibError, ICMPSocketError):
pass
# GC one
if key := next(iter(state.keys()), None):
if state[key].ready():
del state[key]
# Sleep one
sleep(shutdown, 0.001) | 5,358,378 |
def zeros_from_spec(nested_spec, batch_size):
"""Create nested zero Tensors or Distributions.
A zero tensor with shape[0]=`batch_size is created for each TensorSpec and
A distribution with all the parameters as zero Tensors is created for each
DistributionSpec.
Args:
nested_spec (nested TensorSpec or DistributionSpec):
batch_size (int): batch size added as the first dimension to the shapes
in TensorSpec
Returns:
nested Tensor or Distribution
"""
def _zero_tensor(spec):
if batch_size is None:
shape = spec.shape
else:
spec_shape = tf.convert_to_tensor(value=spec.shape, dtype=tf.int32)
shape = tf.concat(([batch_size], spec_shape), axis=0)
dtype = spec.dtype
return tf.zeros(shape, dtype)
param_spec = nest_utils.to_distribution_param_spec(nested_spec)
params = tf.nest.map_structure(_zero_tensor, param_spec)
return nest_utils.params_to_distributions(params, nested_spec) | 5,358,379 |
def type_to_str(t):
"""Return str of variable type."""
if not hasattr(t, "broadcastable"):
return str(t)
s = broadcastable_to_str(t.broadcastable)
if s == "":
s = str(t.dtype)
else:
s = dtype_to_char(t.dtype) + s
return s | 5,358,380 |
def install_packages():
"""
    Install the Python packages required for accessing Google Cloud.
:return:
"""
print("Installing packeges.")
print_log("Initiated...")
global pip_sources
for pip_pkg in pip_sources:
s = check_call([sys.executable, '-m', 'pip', 'install', pip_pkg])
        print(s)
print("Reloading python packages") | 5,358,381 |
def save(self, fname="", ext="", slab="", **kwargs):
"""Saves all current database information.
APDL Command: SAVE
Parameters
----------
fname
File name and directory path (248 characters maximum,
including the characters needed for the directory path).
An unspecified directory path defaults to the working
directory; in this case, you can use all 248 characters
for the file name.
ext
Filename extension (eight-character maximum).
slab
Mode for saving the database:
ALL - Save the model data, solution data and post data
(element tables, etc.). This value is the default.
MODEL - Save the model data (solid model, finite element
model, loadings, etc.) only.
SOLU - Save the model data and the solution data (nodal
and element results).
Notes
-----
Saves all current database information to a file (File.DB).
In interactive mode, an existing File.DB is first written to a
backup file (File.DBB). In batch mode, an existing File.DB is
replaced by the current database information with no backup.
The command should be issued periodically to ensure a current
file backup in case of a system "crash" or a "line drop." It
may also be issued before a "doubtful" command so that if the
result is not what was intended the database may be easily
restored to the previous state. A save may be time consuming
for large models. Repeated use of this command overwrites the
previous data on the file (but a backup file is first written
during an interactive run). When issued from within POST1,
the nodal boundary conditions in the database (which were read
from the results file) will overwrite the nodal boundary
conditions existing on the database file.
Internal nodes may be created during solution (for example,
via the mixed u-P formulation or generalized plane strain
option for current- technology elements, the Lagrangian
multiplier method for contact elements or the MPC184 elements,
or the quadratic or cubic option of the BEAM188 and PIPE288
elements). It is sometimes necessary to save the internal
nodes in the database for later operations, such as cutting
boundary interpolations (CBDOF) for submodeling. To do so,
issue the SAVE command after the first SOLVE command.
In general, saving after solving is always a good practice.
This command is valid in any processor.
"""
return self.run(f"SAVE,{fname},{ext},,{slab}", **kwargs) | 5,358,382 |
def test_feature_flexiblerollout_stickiness_50_customfield_39(unleash_client):
"""
    Feature.flexible.rollout.custom.stickiness_50 should be enabled with customField=39
"""
# Set up API
responses.add(responses.POST, URL + REGISTER_URL, json={}, status=202)
responses.add(responses.GET, URL + FEATURES_URL, json=json.loads(MOCK_JSON), status=200)
responses.add(responses.POST, URL + METRICS_URL, json={}, status=202)
# Tests
unleash_client.initialize_client()
assert unleash_client.is_enabled("Feature.flexible.rollout.custom.stickiness_50", {'customField': '39'}) | 5,358,383 |
def rollout(
env,
agent,
max_path_length=np.inf,
render=False,
render_kwargs=None,
fast_rgb=True
):
"""
The following value for the following keys will be a 2D array, with the
first dimension corresponding to the time dimension.
- observations
- actions
- rewards
- next_observations
- terminals
The next two elements will be lists of dictionaries, with the index into
the list being the index into the time
- agent_infos
- env_infos
"""
if render_kwargs is None:
render_kwargs = {}
observations = []
actions = []
rewards = []
terminals = []
agent_infos = []
env_infos = []
rgb_array = []
o = env.reset()
agent.reset()
next_o = None
path_length = 0
if hasattr(env, 'sim') and 'fixed' in env.sim.model.camera_names:
camera_name = 'fixed'
else:
camera_name = None
if render:
# import ipdb; ipdb.set_trace(context=10)
if render_kwargs['mode'] == 'rgb_array':
if not fast_rgb:
rgb_array.append(env.sim.render(500, 500, camera_name=camera_name))
else:
rgb_array.append(np.zeros((500, 500, 3), dtype=np.uint8))
else:
env.render(**render_kwargs)
# print("###############################")
while path_length < max_path_length:
a, agent_info = agent.get_action(o)
# print(a)
next_o, r, d, env_info = env.step(a)
observations.append(o)
rewards.append(r)
terminals.append(d)
actions.append(a)
agent_infos.append(agent_info)
env_infos.append(env_info)
path_length += 1
if d:
break
o = next_o
if render:
if render_kwargs['mode'] == 'rgb_array':
if path_length % 3 == 0 or not fast_rgb:
rgb_array.append(env.sim.render(500, 500, camera_name=camera_name))
else:
rgb_array.append(np.zeros((500, 500, 3), dtype=np.uint8))
else:
env.render(**render_kwargs)
actions = np.array(actions)
if len(actions.shape) == 1:
actions = np.expand_dims(actions, 1)
observations = np.array(observations)
if len(observations.shape) == 1:
observations = np.expand_dims(observations, 1)
        next_o = np.array([next_o])
next_observations = np.vstack(
(
observations[1:, :],
np.expand_dims(next_o, 0)
)
)
result = dict(
observations=observations,
actions=actions,
rewards=np.array(rewards).reshape(-1, 1),
next_observations=next_observations,
terminals=np.array(terminals).reshape(-1, 1),
agent_infos=agent_infos,
env_infos=env_infos,
)
if len(rgb_array) > 0 and rgb_array[0] is not None:
result['rgb_array'] = np.array(rgb_array)
return result | 5,358,384 |
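# A hedged sketch with minimal stand-in env/agent objects exposing only the
# methods rollout touches when render=False; observation and action sizes are
# arbitrary.
import numpy as np

class _ToyEnv:
    def reset(self):
        return np.zeros(3)
    def step(self, action):
        return np.random.randn(3), 1.0, False, {}

class _ToyAgent:
    def reset(self):
        pass
    def get_action(self, observation):
        return np.zeros(2), {}

path = rollout(_ToyEnv(), _ToyAgent(), max_path_length=5)
print(path["observations"].shape, path["rewards"].shape)  # (5, 3) (5, 1)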
def _get_colors(data, verbose=False):
"""
Get how often each color is used in data.
Parameters
----------
data : dict
with key 'path' pointing to an image
verbose : bool, optional
Returns
-------
color_count : dict
Maps a grayscale value (0..255) to how often it was in `data`
"""
color_count = {}
for i in range(256):
color_count[i] = 0
for i, data_item in enumerate(data):
if i % 1000 == 0 and i > 0 and verbose:
print("%i of %i done" % (i, len(data)))
fname = os.path.join(".", data_item["path"])
img = scipy.ndimage.imread(fname, flatten=False, mode="L")
for row in img:
for pixel in row:
color_count[pixel] += 1
return color_count | 5,358,385 |
def data_dir():
"""The data directory."""
return DATA | 5,358,386 |
def create_single_test(j):
"""Walk through the json cases and recursively write the test cases"""
si = []
for tnum, c in enumerate(j['cases']):
if 'cases' in c:
si.extend(create_single_test(c))
else:
si.extend(write_testcase(c, tnum))
return si | 5,358,387 |
def _token_text(token):
"""Helper to get the text of a antlr token w/o the <EOF>"""
istream = token.getInputStream()
if istream is None:
return token.text
n = istream.size
if token.start >= n or token.stop >= n:
return []
return token.text | 5,358,388 |
def show_interface(enode, dev, shell=None):
"""
Show the configured parameters and stats of an interface.
:param enode: Engine node to communicate with.
:type enode: topology.platforms.base.BaseNode
:param str dev: Unix network device name. Ex 1, 2, 3..
:rtype: dict
:return: A combined dictionary as returned by both
:func:`topology_lib_ip.parser._parse_ip_addr_show`
:func:`topology_lib_ip.parser._parse_ip_stats_link_show`
"""
assert dev
cmd = 'ip addr list dev {ldev}'.format(ldev=dev)
response = enode(cmd, shell=shell)
first_half_dict = _parse_ip_addr_show(response)
d = None
if (first_half_dict):
cmd = 'ip -s link list dev {ldev}'.format(ldev=dev)
response = enode(cmd, shell=shell)
second_half_dict = _parse_ip_stats_link_show(response)
d = first_half_dict.copy()
d.update(second_half_dict)
return d | 5,358,389 |
def status(task=None, tasktypes=None, nightstr=None, states=None,
expid=None, spec=None, db_postgres_user="desidev_ro"):
"""Check the status of pipeline tasks.
    Args:
        task (str): single task name to inspect, or None for all matching tasks.
        tasktypes (list): task types to include (defaults to the full task chain).
        nightstr (str): night selection string passed to select_nights.
        states (list): restrict output to tasks in these states.
        expid (int): restrict to this exposure ID.
        spec (int): restrict to this spectrograph.
        db_postgres_user (str): read-only postgres user for the pipeline database.
Returns:
None
"""
dbpath = io.get_pipe_database()
db = pipedb.load_db(dbpath, mode="r", user=db_postgres_user)
rundir = io.get_pipe_rundir()
logdir = os.path.join(rundir, io.get_pipe_logdir())
tasks = OrderedDict()
summary = False
if (tasktypes is None) and (nightstr is None):
summary = True
if task is None:
ttypes = None
if tasktypes is not None:
ttypes = list()
for tt in pipetasks.base.default_task_chain:
if tt in tasktypes:
ttypes.append(tt)
else:
ttypes = list(pipetasks.base.default_task_chain)
if states is None:
states = task_states
else:
for s in states:
if s not in task_states:
raise RuntimeError("Task state '{}' is not valid".format(s))
allnights = io.get_nights(strip_path=True)
nights = pipeprod.select_nights(allnights, nightstr)
for tt in ttypes:
tasks[tt] = get_tasks(
db, [tt], nights, states=states, expid=expid, spec=spec
)
else:
ttypes = [pipetasks.base.task_type(task)]
tasks[ttypes[0]] = [task]
tstates = OrderedDict()
for typ, tsks in tasks.items():
tstates[typ] = pipedb.check_tasks(tsks, db=db)
if len(ttypes) == 1 and len(tasks[ttypes[0]]) == 1:
# Print status of this specific task
thistype = ttypes[0]
thistask = tasks[thistype][0]
status_task(thistask, thistype, tstates[thistype][thistask], logdir)
else:
if len(ttypes) > 1 and len(nights) > 1:
# We have multiple nights and multiple task types.
# Just print totals.
if summary:
status_summary(ttypes, nights, tasks, tstates)
else:
status_night_totals(ttypes, nights, tasks, tstates)
status_pixel_totals(ttypes, tasks, tstates)
elif len(ttypes) > 1:
# Multiple task types for one night. Print the totals for each
# task type.
thisnight = nights[0]
status_night_totals(ttypes, nights, tasks, tstates)
elif len(nights) > 1:
# We have just one task type, print the state totals for each night
# OR the full task list for redshift or spectra tasks.
thistype = ttypes[0]
print("Task type {}".format(thistype))
if thistype == "spectra" or thistype == "redshift":
status_pixel_tasks(ttypes, tasks, tstates)
else:
status_night_totals(ttypes, nights, tasks, tstates)
else:
# We have one type and one night, print the full state of every
# task.
thistype = ttypes[0]
thisnight = nights[0]
print("Task type {}".format(thistype))
status_night_tasks(ttypes, nights, tasks, tstates)
status_pixel_tasks(ttypes, tasks, tstates)
return | 5,358,390 |
def event_role_invite(transaction):
"""
GET /role-invites/1/event
:param transaction:
:return:
"""
with stash['app'].app_context():
event = EventFactoryBasic()
db.session.add(event)
role_invite = RoleInviteFactory()
db.session.add(role_invite)
db.session.commit() | 5,358,391 |
def get_params_for_category_api(category):
"""Method to get `GET` parameters for querying MediaWiki for category details.
:param category: category name to be passed in params.
:return: GET parameters `params`
"""
params = CATEGORY_API_PARAMS.copy()
params['cmtitle'] = 'Category:' + category
return params | 5,358,392 |
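# A minimal call, assuming CATEGORY_API_PARAMS holds the base MediaWiki
# 'categorymembers' query parameters for the module.
params = get_params_for_category_api("Physics")
print(params["cmtitle"])  # "Category:Physics"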
def get_dict_or_generate(dictionary, key, generator):
"""Get value from dict or generate one using a function on the key"""
if key in dictionary:
return dictionary[key]
value = generator(key)
dictionary[key] = value
return value | 5,358,393 |
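# A small memoization example: the generator runs only for keys that are not
# yet cached.
cache = {}
print(get_dict_or_generate(cache, "abc", len))  # 3, computed and stored
print(get_dict_or_generate(cache, "abc", len))  # 3, returned from the dict
print(cache)                                    # {'abc': 3}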
def test_number_columns_defaults_to_3(entries_27, settings_number_of_columns_null):
"""The number of columns defaults to 3 if not provided"""
instance = EvenVMCView()
rows = instance.process_entries(entries_27)
num_cols = len(rows[0])
assert num_cols == 3 | 5,358,394 |
def createNotInConfSubGraph(graphSet, possibleSet):
"""
Return a subgraph by removing all incoming
edges to nodes in the possible set.
"""
subGraph = {}
for i in graphSet:
subGraph[i] = graphSet[i] - possibleSet
return subGraph | 5,358,395 |
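# A toy adjacency-set example: remove every edge that points into the
# "possible" node set {"b"}.
graph = {"a": {"b", "c"}, "b": {"c"}, "c": set()}
print(createNotInConfSubGraph(graph, {"b"}))
# {'a': {'c'}, 'b': {'c'}, 'c': set()}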
def _backprop_gradient_pure(dL, L):
"""
    Given the derivative of an objective function with respect to the Cholesky
    factor L, compute the derivative with respect to the original matrix K,
    defined as
        K = L L^T
    where L was obtained by Cholesky decomposition.
"""
dL_dK = np.tril(dL).copy()
N = L.shape[0]
for k in range(N - 1, -1, -1):
for j in range(k + 1, N):
for i in range(j, N):
dL_dK[i, k] -= dL_dK[i, j] * L[j, k]
dL_dK[j, k] -= dL_dK[i, j] * L[i, k]
for j in range(k + 1, N):
dL_dK[j, k] /= L[k, k]
dL_dK[k, k] -= L[j, k] * dL_dK[j, k]
dL_dK[k, k] /= (2 * L[k, k])
return dL_dK | 5,358,396 |
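# A hedged sketch: given K = L L^T and a made-up upstream gradient dL with
# respect to the Cholesky factor, back-propagate it to a gradient w.r.t. K.
import numpy as np

K = np.array([[4.0, 2.0], [2.0, 3.0]])
L = np.linalg.cholesky(K)
dL = np.tril(np.ones_like(L))          # placeholder upstream gradient
print(_backprop_gradient_pure(dL, L))  # lower-triangular dL/dK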
def get_random_instance() -> random.Random:
"""
Returns the Random instance in the random module level.
"""
return random._inst | 5,358,397 |
def to(cond, inclusive = True):
"""
Stream elements until the one that fits some condition.
Arguments:
cond -- Either a function or some other object. In the first case, the
function will be applied to each element; in the second case, the object
will be compared (using ==) with each element.
Keyword Arguments:
inclusive -- Whether the element first matching the criteria is streamed
(default True)
See Also:
:func:`dagpype.filt`
:func:`dagpype.from_`
:func:`dagpype.from_to`
:func:`dagpype.skip`
:func:`dagpype.nth`
:func:`dagpype.slice_`
:func:`dagpype.tail`
Examples:
>>> source([1, 2, 3, 4, 3, 2, 1]) | to(2) | to_list()
[1, 2]
>>> source([1, 2, 3, 4, 3, 2, 1]) | to(2, False) | to_list()
[1]
>>> source([1, 2, 3, 4, 3, 2, 1]) | to(lambda d: d % 3 == 0) | to_list()
[1, 2, 3]
"""
@filters
def _dagpype_internal_fn_act(target):
try:
if isinstance(cond, types.FunctionType):
while True:
e = (yield)
if cond(e):
break
target.send(e)
else:
while True:
e = (yield)
if e == cond:
break
target.send(e)
if inclusive:
target.send(e)
target.close()
except GeneratorExit:
target.close()
return _dagpype_internal_fn_act | 5,358,398 |
def model(X_train, Y_train, X_test, Y_test, num_iterations = 2000, learning_rate = 0.5, print_cost = False):
"""
Builds the logistic regression model by calling the function you've implemented previously
Arguments:
X_train -- training set represented by a numpy array of shape (num_px * num_px * 3, m_train)
Y_train -- training labels represented by a numpy array (vector) of shape (1, m_train)
X_test -- test set represented by a numpy array of shape (num_px * num_px * 3, m_test)
Y_test -- test labels represented by a numpy array (vector) of shape (1, m_test)
num_iterations -- hyperparameter representing the number of iterations to optimize the parameters
learning_rate -- hyperparameter representing the learning rate used in the update rule of optimize()
print_cost -- Set to true to print the cost every 100 iterations
Returns:
d -- dictionary containing information about the model.
"""
# initialize parameters with zeros (≈ 1 line of code)
w, b = initialize_with_zeros(X_train.shape[0])
print(w.shape)
print(b)
# Gradient descent (≈ 1 line of code)
parameters, grads, costs = optimize(b=b,learning_rate=learning_rate,num_iterations=num_iterations,print_cost=print_cost,w=w,X=X_train,Y=Y_train)
# Retrieve parameters w and b from dictionary "parameters"
w = parameters["w"]
b = parameters["b"]
# Predict test/train set examples (≈ 2 lines of code)
Y_prediction_test = predict(b=b,w=w,X=X_test)
Y_prediction_train = predict(b=b,w=w,X=X_train)
# Print train/test Errors
print("train accuracy: {} %".format(100 - np.mean(np.abs(Y_prediction_train - Y_train)) * 100))
print("test accuracy: {} %".format(100 - np.mean(np.abs(Y_prediction_test - Y_test)) * 100))
d = {"costs": costs,
"Y_prediction_test": Y_prediction_test,
"Y_prediction_train" : Y_prediction_train,
"w" : w,
"b" : b,
"learning_rate" : learning_rate,
"num_iterations": num_iterations}
return d | 5,358,399 |