Columns: content (string, lengths 22 to 815k) and id (int64, values 0 to 4.91M).
def _test():
    """Run the Bio.Motif module's doctests.

    This will try to locate the unit tests directory, and run the doctests
    from there in order that the relative paths used in the examples work.
    """
    import doctest
    import os
    if os.path.isdir(os.path.join("..", "..", "Tests")):
        print "Running doctests..."
        cur_dir = os.path.abspath(os.curdir)
        os.chdir(os.path.join("..", "..", "Tests"))
        doctest.testmod()
        os.chdir(cur_dir)
        del cur_dir
        print "Done"
1,300
def allocate_memory_addresses(instructions: list[Instruction], reserved_memory_names: set[str]):
    """
    Allocate memory addresses for SharedName and Slice, and replace AddressOf
    and MemoryOf with PrimitiveValue.
    """
    allocated_before: tuple[int, int] | None = None  # (bank_id, address)

    def malloc(size: int) -> tuple[str, int]:
        """
        Allocates consecutive memory addresses of size `size` and returns the
        starting address.
        """
        nonlocal allocated_before
        assert 1 <= size <= 512
        if allocated_before is not None and size <= 512 - allocated_before[1]:
            start = allocated_before[1]
            allocated_before = (allocated_before[0], allocated_before[1] + size)
            return f'bank{allocated_before[0]}', start
        for i in itertools.count((0 if allocated_before is None else allocated_before[0]) + 1):
            if f'bank{i}' not in reserved_memory_names:
                allocated_before = (i, size)
                return f'bank{i}', 0
        raise LogicError

    def process_argument(arg: OperandValue) -> OperandValue:
        match arg:
            case AddressOf(name=(SharedName() | MemorySlice()) as n) \
                    | MemoryOf(name=(SharedName() | MemorySlice()) as n):
                assert (n.memory is None and n.index is None) \
                    or (n.memory is not None and n.index is not None)
                if n.memory is None:
                    memory_name, memory_address = malloc(
                        n.size if isinstance(n, MemorySlice) else 1)
                    n.memory = LocalName(memory_name, processor=arg.processor)
                    n.index = memory_address
                if isinstance(arg, AddressOf):
                    return PrimitiveValue(arg.ctx, n.index)
                else:
                    return n.memory
            case _:
                return arg

    # First pass: allocate memory for every argument (replacements discarded).
    for ins in instructions:
        match ins:
            case Call():
                list(map(process_argument, ins.args))
            case Jump():
                list(map(process_argument, ins.args))
            case LongJump():
                process_argument(ins.arg)
            case Label():
                pass
            case _:
                raise LogicError(ins)

    # Second pass: substitute the processed argument values.
    for ins in instructions:
        match ins:
            case Call():
                ins.args = list(map(process_argument, ins.args))
            case Jump():
                ins.args = list(map(process_argument, ins.args))
            case LongJump():
                pass
            case Label():
                pass
            case _:
                raise LogicError(ins)
1,301
def find_blobs(B):
    """Find and return all blobs in the image, using eight-connectivity.
    Returns a labeled image, the bounding boxes of the blobs, and the blob
    masks cropped to those bounding boxes."""
    B = np.array(B).astype(bool)
    labeled, objects = label_blobs(B)
    blobs = [labeled[obj] == ix + 1 for ix, obj in enumerate(objects)]
    return labeled, objects, blobs
1,302
def showOperandLines(rh):
    """
    Produce help output related to operands.

    Input:
       Request Handle
    """
    if rh.function == 'HELP':
        rh.printLn("N", "  For the MakeVM function:")
    else:
        rh.printLn("N", "Sub-Function(s):")
    rh.printLn("N", "      directory     - " +
               "Create a virtual machine in the z/VM user directory.")
    rh.printLn("N", "      help          - Displays this help information.")
    rh.printLn("N", "      version       - " +
               "show the version of the makeVM function")
    if rh.subfunction != '':
        rh.printLn("N", "Operand(s):")
    rh.printLn("N", "      --cpus <cpuCnt>       - " +
               "Specifies the desired number of virtual CPUs the")
    rh.printLn("N", "                              " +
               "guest will have.")
    rh.printLn("N", "      --maxcpu <maxCpuCnt>  - " +
               "Specifies the maximum number of virtual CPUs the")
    rh.printLn("N", "                              " +
               "guest is allowed to define.")
    rh.printLn("N", "      --ipl <ipl>           - " +
               "Specifies an IPL disk or NSS for the virtual")
    rh.printLn("N", "                              " +
               "machine's directory entry.")
    rh.printLn("N", "      --dedicate <vdevs>    - " +
               "Specifies a device vdev list to dedicate to the ")
    rh.printLn("N", "                              " +
               "virtual machine.")
    rh.printLn("N", "      --loadportname <wwpn> - " +
               "Specifies a one- to eight-byte fibre channel port ")
    rh.printLn("N", "                              " +
               "name of the FCP-I/O device to define with a LOADDEV ")
    rh.printLn("N", "                              " +
               "statement in the virtual machine's definition")
    rh.printLn("N", "      --loadlun <lun>       - " +
               "Specifies a one- to eight-byte logical unit number ")
    rh.printLn("N", "                              " +
               "name of the FCP-I/O device to define with a LOADDEV ")
    rh.printLn("N", "                              " +
               "statement in the virtual machine's definition")
    rh.printLn("N", "      --logonby <byUsers>   - " +
               "Specifies a list of up to 8 z/VM userids who can log")
    rh.printLn("N", "                              " +
               "on to the virtual machine using their id and password.")
    rh.printLn("N", "      --maxMemSize <maxMem> - " +
               "Specifies the maximum memory the virtual machine")
    rh.printLn("N", "                              " +
               "is allowed to define.")
    rh.printLn("N", "      --setReservedMem      - " +
               "Set the additional memory space (maxMemSize - priMemSize)")
    rh.printLn("N", "                              " +
               "as reserved memory of the virtual machine.")
    rh.printLn("N", "      <password>            - " +
               "Specifies the password for the new virtual")
    rh.printLn("N", "                              " +
               "machine.")
    rh.printLn("N", "      <priMemSize>          - " +
               "Specifies the initial memory size for the new virtual")
    rh.printLn("N", "                              " +
               "machine.")
    rh.printLn("N", "      <privClasses>         - " +
               "Specifies the privilege classes for the new virtual")
    rh.printLn("N", "                              " +
               "machine.")
    rh.printLn("N", "      --profile <profName>  - " +
               "Specifies the z/VM PROFILE to include in the")
    rh.printLn("N", "                              " +
               "virtual machine's directory entry.")
    rh.printLn("N", "      <userid>              - " +
               "Userid of the virtual machine to create.")
    return
1,303
def star_dist(a, n_rays=32, mode='cpp'):
    """'a' assumed to be a label image with integer values that encode
    object ids. id 0 denotes background."""
    n_rays >= 3 or _raise(ValueError("need 'n_rays' >= 3"))

    if mode == 'python':
        return _py_star_dist(a, n_rays)
    elif mode == 'cpp':
        return _cpp_star_dist(a, n_rays)
    elif mode == 'opencl':
        return _ocl_star_dist(a, n_rays)
    else:
        _raise(ValueError("Unknown mode %s" % mode))
1,304
def main(source_file, dump_dir):
    """
    Read triplets from source_file and dump graph in dump_dir
    """
    print("Reading Entire Data...")
    all_triplets = list(tqdm(read_stuffie_output(source_file), ascii=True, disable=False))
    print("Reading Complete...")
    create_graphs_multicore(all_triplets, dump_dir)
    # g_core = timeit(create_graph)(all_triplets)
    # print(f"Both graphs are equivalent: {g_multi_core == g_core}")
    # print("Saving graph for future use...")
    # g_multi_core.save(target_file)
1,305
def run_tasks(simulation_tasks, local_working_dir, s3_io, logger,
              max_concurrency):
    """Runs a CBM3 project simulation task

    Example simulation_tasks::

        simulation_tasks = [
            {"project_code": "AB", "simulation_ids": [1, 2]},
            {"project_code": "BCB", "simulation_ids": [21, 22]}
        ]

    Args:
        simulation_tasks (list): list of simulation tasks to run.
        local_working_dir (str): writeable directory for processing CBM
            simulation
        s3_io (cbm3_aws.s3_io.S3IO): object for managing cbm3_aws uploads and
            downloads for AWS S3
        logger (logging.Logger): logger for this EC2 instance
        max_concurrency (int): maximum number of concurrent CBM simulations
            spawned by this process
    """
    # download resources
    logger.info("download resources")
    toolbox_env_path = os.path.join(
        local_working_dir, "toolbox_env")
    s3_io.download(
        local_path=toolbox_env_path, s3_key="resource",
        resource_name="toolbox_env")

    archive_index_path = os.path.join(
        local_working_dir, "archive_index.mdb")
    s3_io.download(
        local_path=archive_index_path, s3_key="resource",
        resource_name="archive_index_database")

    cbm_executables_dir = os.path.join(
        local_working_dir, "cbm_executables")
    s3_io.download(
        local_path=cbm_executables_dir, s3_key="resource",
        resource_name="cbm_executables")

    stand_recovery_rules_dir = os.path.join(
        local_working_dir, "stand_recovery_rules")
    s3_io.download(
        local_path=stand_recovery_rules_dir, s3_key="resource",
        resource_name="stand_recovery_rules")
    disturbance_rules_path = os.path.join(
        stand_recovery_rules_dir, "99a_disturbance_rules.csv")
    disturbance_classes_path = os.path.join(
        stand_recovery_rules_dir, "99b_disturbance_classes.csv")

    logger.info("download projects")
    # download projects
    local_project_dir = os.path.join(local_working_dir, "projects")
    if not os.path.exists(local_project_dir):
        os.makedirs(local_project_dir)
    required_projects = set([x["project_code"] for x in simulation_tasks])
    local_projects = {}
    for project_code in required_projects:
        local_project_path = os.path.join(
            local_project_dir, f"{project_code}.mdb")
        s3_io.download(
            local_path=local_project_path, s3_key="project",
            project_code=project_code)
        local_projects[project_code] = local_project_path

    local_results_dir = os.path.join(local_working_dir, "results")
    if not os.path.exists(local_results_dir):
        os.makedirs(local_results_dir)

    args_list = []
    tasks = list(
        iterate_tasks(simulation_tasks, local_projects, local_results_dir))
    for task in tasks:
        os.makedirs(os.path.dirname(task.results_database_path))
        args_list.append({
            "project_path": task.project_path,
            "project_simulation_id": task.simulation_id,
            "aidb_path": archive_index_path,
            "cbm_exe_path": cbm_executables_dir,
            "results_database_path": task.results_database_path,
            "tempfiles_output_dir": task.tempfiles_output_dir,
            "stdout_path": task.stdout_path,
            "copy_makelist_results": True,
            "dist_classes_path": disturbance_classes_path,
            "dist_rules_path": disturbance_rules_path
        })

    logger.info("starting CBM3 simulations")
    logger.info(dict(tasks=args_list))

    # Concurrency is capped at max_concurrency since this script is currently
    # run as one of many duplicate processes; adding further concurrency here
    # would make the worker too busy.
    list(projectsimulator.run_concurrent(
        args_list, toolbox_env_path, max_workers=max_concurrency))

    logger.info("CBM3 simulations finished")

    logger.info("Upload results")
    for task in tasks:
        logger.info(dict(
            project_code=task.project_code,
            simulation_id=task.simulation_id))
        s3_io.upload(
            local_path=task.results_database_path, s3_key="results",
            project_code=task.project_code,
            simulation_id=task.simulation_id)

        # remove the project db so it won't be uploaded in the next step
        os.unlink(task.results_database_path)

        # upload all other files and dirs where the project was loaded as
        # "tempfiles". This will include the run flat files, stdout file and
        # the run log.
        s3_io.upload(
            local_path=os.path.dirname(task.tempfiles_output_dir),
            s3_key="tempfiles",
            project_code=task.project_code,
            simulation_id=task.simulation_id)

    logger.info("CBM3 tasks finished")
1,306
def get_bond_enthalpy(element1: str, element2: str, bond='single bond') -> int:
    """Utility function that retrieves the bond enthalpy between element1 and
    element2 (regardless of order).

    An optional argument, bond, describing the bond (single, double, triple)
    can be specified; if not specified, bond defaults to 'single bond'.
    The optional argument exception is used to distinguish the double bond
    between carbon atoms in benzene."""
    enthalpies_dict = enthalpies[bond]
    if element1 in enthalpies_dict and element2 in enthalpies_dict[element1]:
        return enthalpies_dict[element1][element2]
    elif element2 in enthalpies_dict and element1 in enthalpies_dict[element2]:
        return enthalpies_dict[element2][element1]
    else:
        return 0
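A quick usage sketch. The real `enthalpies` table lives at module level in the source; the miniature table below is hypothetical, with illustrative values in kJ/mol, purely to show the order-independent lookup:

enthalpies = {
    'single bond': {'H': {'H': 432, 'C': 411}},
    'double bond': {'C': {'C': 602}},
}

print(get_bond_enthalpy('C', 'H'))                      # 411: order does not matter
print(get_bond_enthalpy('C', 'C', bond='double bond'))  # 602
print(get_bond_enthalpy('H', 'O'))                      # 0 for an unknown pair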
1,307
def approx_sample(num_items: int, num_samples: int) -> np.ndarray:
    """Fast approximate downsampling."""
    if num_items <= num_samples:
        return np.ones(num_items, dtype=np.bool_)
    np.random.seed(125)
    # Select each xy with probability (downsample_to / len(x)) to yield
    # approximately downsample_to selections.
    fraction_kept = float(num_samples) / num_items
    return np.random.sample(size=num_items) < fraction_kept
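A minimal usage sketch; the function seeds NumPy's global RNG, so the mask is reproducible across calls:

import numpy as np

mask = approx_sample(num_items=100_000, num_samples=1_000)
print(mask.shape, mask.dtype, int(mask.sum()))  # (100000,) bool, roughly 1000 True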
1,308
def load_accessions(args):
    """ Process a set of accession records and load to the database """
    AccessionRecord = namedtuple(
        "AccessionRecord",
        "batch sourcefile sourceline filename bytes timestamp md5 relpath"
    )

    def iter_accession_records_from(catalog_file):
        """ Load the accession catalog into batch, dirlist, & asset objects """
        with open(catalog_file, 'r') as handle:
            reader = csv.DictReader(handle, delimiter=',')
            for row in reader:
                yield AccessionRecord(**row)

    use_database_file(args.database)
    session = Session()

    # Process single file or directory of files
    print(f"Loading accessions from {args.source}")
    if os.path.isfile(args.source):
        filepaths = [args.source]
    elif os.path.isdir(args.source):
        filepaths = [
            os.path.join(args.source, f) for f in os.listdir(args.source)
        ]

    # Check whether batch exists and if not create it
    for sourcefile in filepaths:
        base = os.path.basename(sourcefile)
        batchname = os.path.splitext(base)[0]
        print(batchname)
        session.add(Batch(name=batchname))
    session.commit()
    batch_count = session.query(Batch).count()
    print(f"total {batch_count} batches")

    # Create all dirlist objects
    print("Adding dirlists...", end="")
    all_dirlists = set()
    for rec in iter_accession_records_from(args.source):
        dirlistname = rec.sourcefile
        batchname = rec.batch
        if dirlistname not in all_dirlists:
            all_dirlists.add(dirlistname)
            r, = session.query(Batch.id).filter(
                Batch.name == batchname).one()
            session.add(Dirlist(filename=dirlistname, batch_id=int(r)))
    session.commit()
    dirlist_count = session.query(Dirlist).count()
    print(f"added {dirlist_count} dirlists")

    # Create asset objects
    for rec in iter_accession_records_from(args.source):
        sourcefile_id, = session.query(Dirlist.id).filter(
            Dirlist.filename == rec.sourcefile).one()
        asset = Asset(md5=rec.md5,
                      timestamp=rec.timestamp,
                      filename=rec.filename,
                      bytes=rec.bytes,
                      dirlist_id=sourcefile_id,
                      dirlist_line=rec.sourceline
                      )
        session.add(asset)
        session.commit()
        added_count = session.query(Asset).count()
        print(f"Adding assets...added {added_count} assets", end="\r")
    print("")
1,309
def extract_out_cos(transmat, cos, state):
    """ Helper function for building HMMs from matrices: Used for transition
        matrices with 'cos' transition classes.

        Extract outgoing transitions for 'state' from the complete list of
        transition matrices.

        Allocates: .out_id vector and .out_a array (of size cos x N)
    """
    lis = []
    # parsing indices belonging to positive probabilities
    for j in range(cos):
        for i in range(len(transmat[j][state])):
            if transmat[j][state][i] != 0.0 and i not in lis:
                lis.append(i)
    # lis.sort()
    # print "lis: ", lis
    trans_id = ghmmwrapper.int_array_alloc(len(lis))
    probsarray = ghmmwrapper.double_matrix_alloc(cos, len(lis))  # C-function

    # creating list with positive probabilities
    for k in range(cos):
        for j in range(len(lis)):
            ghmmwrapper.double_matrix_setitem(probsarray, k, j,
                                              transmat[k][state][lis[j]])

    # initializing C state index array
    for i in range(len(lis)):
        ghmmwrapper.int_array_setitem(trans_id, i, lis[i])

    return [len(lis), trans_id, probsarray]
1,310
def import_statistics(sourcefile, starttime):
    """
    Forms a dictionary for MarkovModel from the source csv file.

    input
    --------
    sourcefile: Source csv file for Markov Model
    starttime : For which hour the optimization is run

    Returns a dictionary statistics2 whose keys are (time, iniState, finState),
    where time is the timestep.
    """
    statistics1 = {}
    statistics2 = {}
    with open(sourcefile, newline='') as myFile:
        reader = csv.reader(myFile)
        rw_nb = 0
        for row in reader:
            ts = rw_nb // 4
            statistics1[ts, int(row[1]), int(row[2])] = float(row[3])
            rw_nb += 1
            if row[0] == starttime:
                listTop = ts
    for tS, ini, fin in sorted(statistics1.keys()):
        if tS - listTop >= 0:
            statistics2[tS - listTop, ini, fin] = statistics1[tS, ini, fin]
        else:
            statistics2[tS - listTop + int(len(statistics1.keys()) / 4), ini, fin] = \
                statistics1[tS, ini, fin]
    return statistics2
1,311
def image_repository_validation(func):
    """
    Wrapper validation function that will run last, after all the CLI
    parameters have been loaded, to check for conditions surrounding
    `--image-repository`, `--image-repositories`, and `--resolve-image-repos`.

    The reason they are done last instead of in callback functions is that
    the options depend on each other, and this breaks cyclic dependencies.

    :param func: Click command function
    :return: Click command function after validation
    """

    def wrapped(*args, **kwargs):
        ctx = click.get_current_context()
        guided = ctx.params.get("guided", False) or ctx.params.get("g", False)
        image_repository = ctx.params.get("image_repository", False)
        image_repositories = ctx.params.get("image_repositories", False) or {}
        resolve_image_repos = ctx.params.get("resolve_image_repos", False)
        parameters_overrides = ctx.params.get("parameters_overrides", {})
        template_file = (
            ctx.params.get("t", False)
            or ctx.params.get("template_file", False)
            or ctx.params.get("template", False)
        )

        # Check if `--image-repository`, `--image-repositories`, or
        # `--resolve-image-repos` are required by looking for resources that
        # have an IMAGE based packagetype.
        required = any(
            [
                _template_artifact == IMAGE
                for _template_artifact in get_template_artifacts_format(template_file=template_file)
            ]
        )

        validators = [
            Validator(
                validation_function=lambda: bool(image_repository)
                + bool(image_repositories)
                + bool(resolve_image_repos)
                > 1,
                exception=click.BadOptionUsage(
                    option_name="--image-repositories",
                    ctx=ctx,
                    message="Only one of the following can be provided: '--image-repositories', "
                    "'--image-repository', or '--resolve-image-repos'. "
                    "Do you have multiple specified in the command or in a configuration file?",
                ),
            ),
            Validator(
                validation_function=lambda: not guided
                and not (image_repository or image_repositories or resolve_image_repos)
                and required,
                exception=click.BadOptionUsage(
                    option_name="--image-repositories",
                    ctx=ctx,
                    message="Missing option '--image-repository', '--image-repositories', "
                    "or '--resolve-image-repos'",
                ),
            ),
            Validator(
                validation_function=lambda: not guided
                and (
                    image_repositories
                    and not resolve_image_repos
                    and not _is_all_image_funcs_provided(template_file, image_repositories, parameters_overrides)
                ),
                exception=click.BadOptionUsage(
                    option_name="--image-repositories",
                    ctx=ctx,
                    message="Incomplete list of function logical ids specified for '--image-repositories'. "
                    "You can also add --resolve-image-repos to automatically create missing repositories.",
                ),
            ),
        ]
        for validator in validators:
            validator.validate()
        # Call original function after validation.
        return func(*args, **kwargs)

    return wrapped
1,312
def safe_encode(text, incoming=None, encoding='utf-8', errors='strict'):
    """Encodes incoming str/unicode using `encoding`.

    If incoming is not specified, text is expected to be encoded with
    current python's default encoding. (`sys.getdefaultencoding`)

    :param incoming: Text's current encoding
    :param encoding: Expected encoding for text (Default UTF-8)
    :param errors: Errors handling policy. See here for valid
        values http://docs.python.org/2/library/codecs.html
    :returns: text or a bytestring `encoding` encoded
        representation of it.
    :raises TypeError: If text is not an instance of str
    """
    if not isinstance(text, six.string_types):
        raise TypeError("%s can't be encoded" % type(text))

    if not incoming:
        incoming = (sys.stdin.encoding or
                    sys.getdefaultencoding())

    if isinstance(text, six.text_type):
        return text.encode(encoding, errors)
    elif text and encoding != incoming:
        # Decode text before encoding it with `encoding`
        text = safe_decode(text, incoming, errors)
        return text.encode(encoding, errors)

    return text
1,313
def _scan(fin):
    """Scan a clustal format MSA file and yield tokens.

    The basic file structure is:

        begin_document
            header?
            (begin_block
                (seq_id seq seq_index?)+
                match_line?
            end_block)*
        end_document

    Usage:
    for token in scan(clustal_file):
        do_something(token)
    """
    header, body, block = range(3)
    yield Token("begin")
    leader_width = -1
    state = header
    for L, line in enumerate(fin):
        if state == header:
            if line.isspace():
                continue
            m = header_line.match(line)
            state = body
            if m is not None:
                yield Token("header", m.group())
                continue
            # Just keep going and hope for the best.
            # else:
            #     raise ValueError("Cannot find required header")

        if state == body:
            if line.isspace():
                continue
            yield Token("begin_block")
            state = block
            # fall through to block

        if state == block:
            if line.isspace():
                yield Token("end_block")
                state = body
                continue

            m = match_line.match(line)
            if m is not None:
                yield Token("match_line", line[leader_width:-1])
                continue

            m = seq_line.match(line)
            if m is None:
                raise ValueError("Parse error on line: %d (%s)" % (L, line))
            leader_width = len(m.group(1))
            yield Token("seq_id", m.group(1).strip())
            yield Token("seq", m.group(2).strip())
            if m.group(3):
                yield Token("seq_num", m.group(3))
            continue

        # END state blocks. If I ever get here something has gone terribly wrong.
        raise RuntimeError()  # pragma: nocover

    if state == block:
        yield Token("end_block")
    yield Token("end")
    return
1,314
def create_source_location_str(frame, key):
    """Return string to use as source location key.

    Keyword arguments:
    frame -- List of frame records
    key -- Key of the frame with method call

    Takes frame and key (usually 1, since 0 is the frame in which
    getcurrentframe() was called).
    Extracts line number and code context, i.e. where TrackingNode's
    loc-function was called, and turns them into a string to use as key for
    the source location number.

    Returns the string.
    """
    o_frames = inspect.getouterframes(frame)
    sl_frame = o_frames.pop(key)
    sl_lineno = sl_frame.lineno
    # list with 1 element
    sl_cc = sl_frame.code_context.pop(0)
    # strip off leading whitespace
    sl_cc = sl_cc.lstrip()
    source_location = f"{sl_lineno} {sl_cc}"
    return source_location
1,315
def uv_lines(reglist, uv='uv', sty={}, npoints=1001, inf=50., eps=1e-24):
    """Add a rainbow-colored family of constant-u/v curves to every block of
    every region in reglist, and return reglist."""
    for reg in reglist:
        for b in reg.blocks:
            smin, smax, ds = -5., 25., 1.
            vals = np.arange(smin, smax, ds)
            cm = plt.cm.gist_rainbow
            cv = np.linspace(0, 1, len(vals))
            for i in range(len(vals)):
                style1 = dict(c=cm(cv[i]), lw=.8, ls='-', zorder=6000)
                style1.update(sty)
                b.add_curves_uv(xh.cm.uvlines(
                    [vals[i]], uv=uv, uvbounds=b.uvbounds, sty=style1,
                    c=0., inf=inf, npoints=npoints))
    return reglist
1,316
def test_init_handle_validation_error(
    cd_tmp_path: Path,
    cp_config: CpConfigTypeDef,
    mocker: MockerFixture,
) -> None:
    """Test init handle ValidationError."""
    mocker.patch(
        f"{MODULE}.Runway",
        spec=Runway,
        spec_set=True,
        init=Mock(side_effect=ValidationError([], Mock())),  # type: ignore
    )
    cp_config("min_required", cd_tmp_path)
    runner = CliRunner()
    result = runner.invoke(cli, ["init"])
    assert result.exit_code == 1
    assert "ValidationError" in result.output
1,317
def printPageHeader(pageName, pageTitle="", initScript=None, otherHeaders=[],
                    hideSavedSearches=False, hideNamedBugs=False,
                    showSavedSearchSaver=False, showNamedBugSaver=False,
                    bugView=None, bugid=None):
    """
    Print out the standard page headers.
    """
    global currentPage
    if currentPage:
        raise TiqitError("Already started printing a page: %s" % currentPage)
    currentPage = pageName

    timeMark("header")

    # Some prep
    prefs = loadPrefs()
    cfg = Config()

    if pageTitle:
        pageTitle += " - " + pages[pageName].site.titlefmt % cfg.section('general').get('sitetitle')
    else:
        pageTitle = pages[pageName].site.titlefmt % cfg.section('general').get('sitetitle')

    baseurl = getBaseHost()
    bodyClass = bugView and bugView.bodyClass or ''

    # Print the HTTP header, including any cookies.
    outgoingCookies = plugins.getOutgoingCookies()
    if outgoingCookies:
        for c in outgoingCookies:
            print c
    print "Set-Cookie: update=; Max-Age=0; path=%s" % getBasePath()
    print "Content-Type: text/html; charset=utf-8"
    print """
<html>
<head>
<title>%s</title>
<base href='%s'>
<link rel='apple-touch-icon' href='%s'>
<link rel='shortcut icon' type='%s' href='%s'>
<link rel='stylesheet' type='text/css' href='styles/print.css' media='print'>""" \
        % (pageTitle, baseurl, pages[pageName].site.appleiconurl,
           pages[pageName].site.imgtype, pages[pageName].site.imgurl)

    for head in otherHeaders:
        print head

    # Print styles
    print "<link rel='stylesheet' type='text/css' href='styles/tiqit.css' media='screen'>"
    for style in pages[pageName].styles:
        print "<link rel='stylesheet' type='text/css' href='styles/%s.css' media='screen'>" % style

    # Plugins may want to add styles too
    for style in plugins.getPageStyles(pageName):
        print "<link rel='stylesheet' type='text/css' href='styles/%s.css' media='screen'>" % style

    # Print custom styles (overrides any other styles)
    cfg_section = Config().section('general')
    if cfg_section.has_key('customstyles'):
        for style in cfg_section.getlist('customstyles'):
            print ("<link rel='stylesheet' type='text/css' " +
                   "href='%s' media='screen'>") % style

    print "<script type='text/javascript' src='scripts/tiqit.js'></script>"
    print "<script type='text/javascript' src='scripts/Sortable.js'></script>"
    for script in pages[pageName].scripts:
        print "<script type='text/javascript' src='scripts/%s.js'></script>" % script

    # Plugins may want to add scripts too
    for script in plugins.getPageScripts(pageName):
        print "<script type='text/javascript' src='scripts/%s.js'></script>" % script

    if bugView:
        print "<link rel='stylesheet' type='text/css' href='styles/%s.css' media='screen'>" % bugView.name
        print "<script type='text/javascript' src='scripts/%s.js'></script>" % bugView.name

    print """
<script type='text/javascript'>
<!--
  Tiqit.version = "%s";""" % VERSION_STRING
    print "  tiqitUserID = '%s'" % encodeHTML(os.environ['REMOTE_USER']) + ";"
    print """
  Tiqit.prefs = new Object();"""

    # Print all the lovely preferences
    for p in prefs.defaults:
        if type(prefs[p]) in (str, unicode):
            print "  Tiqit.prefs['%s'] = '%s';" % (encodeHTML(p), encodeHTML(prefs[p]))
        elif type(prefs[p]) == list:
            print "  Tiqit.prefs['%s'] = new Array('%s');" % (encodeHTML(p), "','".join(map(encodeHTML, prefs[p])))
        elif type(prefs[p]) == dict:
            # Don't include it @@@ if dict prefs ever needed in JS, add this
            None
        else:
            raise ValueError, "Don't support prefs of type %s" % type(prefs[p])

    print """
  Tiqit.config = new Object();"""

    # Print the configuration
    for s in cfg.cfg.sections():
        print "  Tiqit.config['%s'] = new Object();" % encodeHTML(s)
        for o, v in cfg.cfg.items(s, raw=True):
            print "  Tiqit.config['%s']['%s'] = '%s';" % (encodeHTML(s), encodeHTML(o), encodeHTML(v.replace('\n', '\\n')))

    # If the page requires an initialisation script, print it
    if initScript:
        print initScript
    else:
        print "  function init() { };"

    print """
-->
</script>
</head>
<body class='%s' onload='runInitOnce()'>
<!-- Attempt to use Application Cache
<iframe style='display: none' src='cache.py'></iframe>-->
<div id='tiqitContainer'%s>
<div id='tiqitHeader'>""" % (bodyClass,
                             " class='tiqitWatermark'" if prefs.miscHideWatermark == 'false' else "")

    # Now we're on the page proper. Print the relevant search box
    print pages[pageName].site.searchfunc()

    # Print the list of links for this page
    print "<p>"
    links = pages[pageName].links + [LINK_INFO]
    if prefs.miscToolbar == 'icons':
        print " | ".join(["<a href='%s' title='%s'><img src='%s' alt='%s'></a>"
                          % (x.target, x.tooltip, x.img, x.title) for x in links])
    elif prefs.miscToolbar == 'text':
        print " | ".join(["<a href='%s' title='%s'>%s</a>"
                          % (x.target, x.tooltip, x.title) for x in links])
    else:
        print " | ".join(["<a href='%s' title='%s'><img src='%s' alt='%s'>%s</a>"
                          % (x.target, x.tooltip, x.img, x.title, x.title) for x in links])
    print """</p>
</div>"""

    # Print Saved Searches and Named Bugs now
    if prefs.miscHideSavedSearches != 'on' and not hideSavedSearches:
        _printNamedPages('search', 'Saved Searches',
                         'results/%s/' % os.environ['REMOTE_USER'],
                         showSavedSearchSaver and 'Save Search as' or None,
                         "search")
    if prefs.miscHideNamedBugs != 'on' and not hideNamedBugs:
        _printNamedPages('namedBug', 'Named Bugs', 'view/',
                         showNamedBugSaver and 'Name this bug' or None,
                         "bug", bugid=bugid)

    print '\n'.join(plugins.printToolbars(pages[pageName]))

    print "<div id='tiqitContent'>"
1,318
def rand_initialisation(X, n_clusters, seed, cste):
    """ Initialize vector centers from X randomly """
    index = []
    repeat = n_clusters
    # Take one index
    if seed is None:
        idx = np.random.RandomState().randint(X.shape[0])
    else:
        idx = np.random.RandomState(seed + cste).randint(X.shape[0])
    while repeat != 0:
        # Let's check that we haven't taken this index yet
        if idx not in index:
            index.append(idx)
            repeat = repeat - 1
        # Draw a new candidate index (fresh randomness when seed is None,
        # deterministic reseeding otherwise).
        if seed is None:
            idx = np.random.RandomState().randint(X.shape[0])
        else:
            idx = np.random.RandomState(seed + cste + repeat).randint(X.shape[0])
    return X[index]
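A usage sketch; with seed=None the function keeps drawing fresh random indices until n_clusters distinct rows are collected:

import numpy as np

X = np.arange(200, dtype=float).reshape(100, 2)
centers = rand_initialisation(X, n_clusters=3, seed=None, cste=0)
print(centers.shape)  # (3, 2): three distinct rows of X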
1,319
import numpy as np
from numpy.linalg import norm


def l2_hinge_loss(X, Y, W, C, N):
    """
    Computes the L2 regularized Hinge Loss function, and its gradient, over a
    mini-batch of data.

    :param X: The feature matrix of size (F+1, N).
    :param Y: The label vector of size (N, 1).
    :param W: The weight vector of size (F+1, 1).
    :param C: A hyperparameter of SVM soft margin that determines the number
        of observations allowed in the margin.
    :param N: The number of samples in the mini-batch.
    :return: Loss (a scalar) and gradient (a vector of size (F+1, 1)).
    """
    l = 1 / N * C
    loss = (l / 2) * norm(W) ** 2 + 1 / N * np.sum(
        np.maximum(np.zeros((1, N)), 1 - np.multiply(Y.T, np.matmul(W.T, X))))
    grad = l * W + 1 / N * sum(
        [-Y[i] * X[:, i:i + 1] if Y[i] * np.matmul(W.T, X[:, i:i + 1]) < 1 else 0
         for i in range(N)])
    return loss, grad
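A small worked check, assuming the shapes documented above. At W = 0 every margin is violated, so the hinge term averages to exactly 1 and the regularizer vanishes:

import numpy as np

rng = np.random.default_rng(0)
N, F = 8, 3
X = np.vstack([rng.normal(size=(F, N)), np.ones((1, N))])  # (F+1, N), bias row appended
Y = rng.choice([-1.0, 1.0], size=(N, 1))
W = np.zeros((F + 1, 1))

loss, grad = l2_hinge_loss(X, Y, W, C=1.0, N=N)
print(loss)        # 1.0
print(grad.shape)  # (4, 1)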
1,320
def args():
    """Setup argument Parsing."""
    parser = argparse.ArgumentParser(
        usage='%(prog)s',
        description='OpenStack Inventory Generator',
        epilog='Inventory Generator Licensed "Apache 2.0"')

    parser.add_argument(
        '-f',
        '--file',
        help='Inventory file.',
        required=False,
        default='openstack_inventory.json'
    )
    parser.add_argument(
        '-s',
        '--sort',
        help='Sort items based on given key i.e. physical_host',
        required=False,
        default='component'
    )

    exclusive_action = parser.add_mutually_exclusive_group(required=True)
    exclusive_action.add_argument(
        '-r',
        '--remove-item',
        help='host name to remove from inventory, this can be used multiple'
             ' times.',
        action='append',
        default=[]
    )
    exclusive_action.add_argument(
        '-d',
        '--remove-group',
        help='group name to remove from inventory, this can be used multiple'
             ' times.',
        action='append',
        default=[]
    )
    exclusive_action.add_argument(
        '-l',
        '--list-host',
        help='',
        action='store_true',
        default=False
    )
    exclusive_action.add_argument(
        '-g',
        '--list-groups',
        help='List groups and containers in each group',
        action='store_true',
        default=False
    )
    exclusive_action.add_argument(
        '-G',
        '--list-containers',
        help='List containers and their groups',
        action='store_true',
        default=False
    )
    exclusive_action.add_argument(
        '-e',
        '--export',
        help='Export group and variable information per host in JSON.',
        action='store_true',
        default=False
    )
    exclusive_action.add_argument(
        '--clear-ips',
        help='''Clears IPs from the existing inventory, but leaves all
                other information intact. LXC interface files and load
                balancers will *not* be modified.''',
        action='store_true',
        default=False
    )

    return vars(parser.parse_args())
1,321
def assert_array_almost_equal(x, y, decimal=6, err_msg='', verbose=True):
    """
    Raise an assertion if two objects are not equal up to desired precision.

    The test verifies identical shapes and verifies values with
    abs(desired-actual) < 0.5 * 10**(-decimal)

    Given two array_like objects, check that the shape is equal and all
    elements of these objects are almost equal. An exception is raised at
    shape mismatch or conflicting values. In contrast to the standard usage
    in numpy, NaNs are compared like numbers, no assertion is raised if both
    objects have NaNs in the same positions.

    Parameters
    ----------
    x : array_like
        The actual object to check.
    y : array_like
        The desired, expected object.
    decimal : integer (decimal=6)
        desired precision
    err_msg : string
        The error message to be printed in case of failure.
    verbose : bool
        If True, the conflicting values are appended to the error message.

    Raises
    ------
    AssertionError
        If actual and desired are not equal up to specified precision.

    See Also
    --------
    assert_almost_equal: simple version for comparing numbers
    assert_array_equal: tests objects for equality

    Examples
    --------
    the first assert does not raise an exception

    >>> np.testing.assert_array_almost_equal([1.0,2.333,np.nan],
    ...                                      [1.0,2.333,np.nan])

    >>> np.testing.assert_array_almost_equal([1.0,2.33333,np.nan],
    ...                                      [1.0,2.33339,np.nan], decimal=5)
    ...
    <type 'exceptions.AssertionError'>:
    AssertionError:
    Arrays are not almost equal
    <BLANKLINE>
    (mismatch 50.0%)
     x: array([ 1.     ,  2.33333,      NaN])
     y: array([ 1.     ,  2.33339,      NaN])

    >>> np.testing.assert_array_almost_equal([1.0,2.33333,np.nan],
    ...                                      [1.0,2.33333, 5], decimal=5)
    <type 'exceptions.ValueError'>:
    ValueError:
    Arrays are not almost equal
     x: array([ 1.     ,  2.33333,      NaN])
     y: array([ 1.     ,  2.33333,  5.     ])

    """
    from numpy.core import around, number, float_
    from numpy.core.numerictypes import issubdtype
    from numpy.core.fromnumeric import any as npany

    def compare(x, y):
        try:
            if npany(gisinf(x)) or npany(gisinf(y)):
                xinfid = gisinf(x)
                yinfid = gisinf(y)
                if not xinfid == yinfid:
                    return False
                # if one item, x and y is +- inf
                if x.size == y.size == 1:
                    return x == y
                x = x[~xinfid]
                y = y[~yinfid]
        except TypeError:
            pass
        z = abs(x - y)
        if not issubdtype(z.dtype, number):
            z = z.astype(float_)  # handle object arrays
        return around(z, decimal) <= 10.0**(-decimal)

    assert_array_compare(compare, x, y, err_msg=err_msg, verbose=verbose,
                         header='Arrays are not almost equal')
1,322
def train_val_test_split(dataframe, val_ratio=.2, test_ratio=.2):
    """
    Takes a dataframe and returns a random train/validate/test split.

    param test_ratio: the percentage of data to put into the test portion
        - must be between 0.0 and 1.0
    param val_ratio: the percentage of data to put into the validation portion
        - must be between 0.0 and 1.0
    test_ratio + val_ratio must also be <= 1.0
        - if test_ratio + val_ratio == 1, train will be empty

    returns: train, validate, test
    """
    # ensure test_ratio is [0,1]
    if (test_ratio > 1.0) | (test_ratio < 0):
        raise ValueError('test_ratio must be between 0.0 and 1.0, found',
                         test_ratio)
    # ensure val_ratio is [0,1]
    if (val_ratio > 1.0) | (val_ratio < 0):
        raise ValueError('val_ratio must be between 0.0 and 1.0, found',
                         val_ratio)
    # ensure test + val <= 1
    if (test_ratio + val_ratio > 1.0):
        raise ValueError('test_ratio + val_ratio must be <= 1.0, found',
                         test_ratio + val_ratio)

    # split once to get test
    train, test = train_test_split(dataframe, test_ratio)

    # recalculate ratio and split again to get val
    train_ratio = 1 - (val_ratio + test_ratio)
    sub_ratio = val_ratio / (val_ratio + train_ratio)
    train, val = train_test_split(train, sub_ratio)

    # return the results
    return train, val, test
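A usage sketch; this assumes a module-level train_test_split(frame, ratio) helper with the positional signature used above (not scikit-learn's keyword-based one):

import pandas as pd

df = pd.DataFrame({'x': range(100)})
train, val, test = train_val_test_split(df, val_ratio=0.2, test_ratio=0.2)
print(len(train), len(val), len(test))  # roughly 60, 20, 20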
1,323
def diff_pf_potential(phi):
    """ Derivative of the phase field potential. """
    return phi**3 - phi
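This is consistent with the standard double-well potential F(phi) = (phi**2 - 1)**2 / 4, an assumption inferred from the derivative rather than stated in the source; sympy confirms:

import sympy as sp

phi = sp.Symbol('phi')
print(sp.expand(sp.diff((phi**2 - 1)**2 / 4, phi)))  # phi**3 - phi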
1,324
def process_image(image, label, height=224, width=224):
    """ Resize the images to a fixed input size, and rescale the input
    channels to a range of [0, 1].

    Args:
        image: "tensor, float32", image input.
        label: "tensor, int64", image label.
        height: "int64", (224, 224, 3) -> (height, 224, 3).
        width: "int64", (224, 224, 3) -> (224, width, 3).

    Returns:
        image input, image label.
    """
    image = tf.cast(image, tf.float32)
    image = image / 255.
    image = tf.image.resize(image, (height, width))
    return image, label
1,325
def run_server(hostname, port, commandQueue):
    """
    Runs a server and listens for commands sent to the crazyflie.

    :param hostname:
    :param port:
    :param commandQueue:
    :return:
    """
    server = HTTPServer((hostname, port), CrazyHandler)
    server.commandQueue = commandQueue
    server.serve_forever()
1,326
def astz(data):
    """
    [X] ASTZ - Request actual status

    STBY (dyno stopping) or SSIM (road load) or SMTR (constant speed) or
    SRPM (constant RPM) or SKZK (constant motor force)
    """
    responds = data.split(" ")
    if len(responds) > 2:
        if responds[1] == 'STBY':
            state = 0
        elif responds[1] in ["SSIM", "SMTR", "SRPM", "SKZK"]:
            state = 1
        else:
            print responds[1]
            state = 2
    else:
        state = 3
    return state
1,327
def get_peer_snappi_chassis(conn_data, dut_hostname):
    """
    Get the Snappi chassis connected to the DUT.
    Note that a DUT can only be connected to a single Snappi chassis.

    Args:
        conn_data (dict): the dictionary returned by conn_graph_fact.
            Example format of the conn_data is given below:

            {u'device_conn': {u'sonic-s6100-dut':
                {u'Ethernet64': {u'peerdevice': u'snappi-sonic',
                                 u'peerport': u'Card4/Port1',
                                 u'speed': u'100000'},
                 u'Ethernet68': {u'peerdevice': u'snappi-sonic',
                                 u'peerport': u'Card4/Port2',
                                 u'speed': u'100000'},
                 u'Ethernet72': {u'peerdevice': u'snappi-sonic',
                                 u'peerport': u'Card4/Port3',
                                 u'speed': u'100000'},
                 u'Ethernet76': {u'peerdevice': u'snappi-sonic',
                                 u'peerport': u'Card4/Port4',
                                 u'speed': u'100000'}}},
             u'device_console_info': {u'sonic-s6100-dut': {}},
             u'device_console_link': {u'sonic-s6100-dut': {}},
             u'device_info': {u'sonic-s6100-dut':
                {u'HwSku': u'Arista-7060CX-32S-C32', u'Type': u'DevSonic'}},
             u'device_pdu_info': {u'sonic-s6100-dut': {}},
             u'device_pdu_links': {u'sonic-s6100-dut': {}},
             u'device_port_vlans': {u'sonic-s6100-dut':
                {u'Ethernet64': {u'mode': u'Access', u'vlanids': u'2', u'vlanlist': [2]},
                 u'Ethernet68': {u'mode': u'Access', u'vlanids': u'2', u'vlanlist': [2]},
                 u'Ethernet72': {u'mode': u'Access', u'vlanids': u'2', u'vlanlist': [2]},
                 u'Ethernet76': {u'mode': u'Access', u'vlanids': u'2', u'vlanlist': [2]}}},
             u'device_vlan_list': {u'sonic-s6100-dut': [2, 2, 2, 2]},
             u'device_vlan_map_list': {u'sonic-s6100-dut': {u'19': 2}},
             u'device_vlan_range': {u'sonic-s6100-dut': [u'2']}}

        dut_hostname (str): hostname of the DUT

    Returns:
        The name of the peer Snappi chassis or None
    """
    device_conn = conn_data['device_conn']
    if dut_hostname not in device_conn:
        return None

    dut_device_conn = device_conn[dut_hostname]
    peer_devices = [dut_device_conn[port]['peerdevice'] for port in dut_device_conn]
    peer_devices = list(set(peer_devices))

    if len(peer_devices) == 1:
        return peer_devices[0]
    else:
        return None
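A usage sketch built from the docstring's own example data (trimmed to two ports):

conn_data = {
    'device_conn': {
        'sonic-s6100-dut': {
            'Ethernet64': {'peerdevice': 'snappi-sonic',
                           'peerport': 'Card4/Port1', 'speed': '100000'},
            'Ethernet68': {'peerdevice': 'snappi-sonic',
                           'peerport': 'Card4/Port2', 'speed': '100000'},
        }
    }
}
print(get_peer_snappi_chassis(conn_data, 'sonic-s6100-dut'))  # 'snappi-sonic'
print(get_peer_snappi_chassis(conn_data, 'missing-dut'))      # None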
1,328
def gen_hwpc_report():
    """
    Return a well-formatted HWPCReport
    """
    cpua = create_core_report('1', 'e0', '0')
    cpub = create_core_report('2', 'e0', '1')
    cpuc = create_core_report('1', 'e0', '2')
    cpud = create_core_report('2', 'e0', '3')
    cpue = create_core_report('1', 'e1', '0')
    cpuf = create_core_report('2', 'e1', '1')
    cpug = create_core_report('1', 'e1', '2')
    cpuh = create_core_report('2', 'e1', '3')

    socketa = create_socket_report('1', [cpua, cpub])
    socketb = create_socket_report('2', [cpuc, cpud])
    socketc = create_socket_report('1', [cpue, cpuf])
    socketd = create_socket_report('2', [cpug, cpuh])

    groupa = create_group_report('1', [socketa, socketb])
    groupb = create_group_report('2', [socketc, socketd])

    return create_report_root([groupa, groupb])
1,329
def special_crossentropy(y_true, y_pred):
    """Special cross-entropy loss.
    """
    task = K.cast(y_true < 1.5, K.floatx())
    mask = K.constant([[0, 0, 1, 1, 1]])
    y_pred_1 = y_pred - mask * 1e12
    y_pred_2 = y_pred - (1 - mask) * 1e12
    y_pred = task * y_pred_1 + (1 - task) * y_pred_2
    y_true = K.cast(y_true, 'int32')
    loss = K.sparse_categorical_crossentropy(y_true, y_pred, from_logits=True)
    return K.mean(loss)
1,330
def nonensembled_map_fns(data_config):
    """Input pipeline functions which are not ensembled."""
    common_cfg = data_config.common

    map_fns = [
        data_transforms.correct_msa_restypes,
        data_transforms.add_distillation_flag(False),
        data_transforms.cast_64bit_ints,
        data_transforms.squeeze_features,
        data_transforms.randomly_replace_msa_with_unknown(0.0),
        data_transforms.make_seq_mask,
        data_transforms.make_msa_mask,
        data_transforms.make_hhblits_profile,
        data_transforms.make_random_crop_to_size_seed,
    ]
    if common_cfg.use_templates:
        map_fns.extend([data_transforms.fix_templates_aatype,
                        data_transforms.make_pseudo_beta('template_')])
    map_fns.extend([data_transforms.make_atom14_masks])
    return map_fns
1,331
def _getMark(text):
    """
    Return the mark or text entry on a line. Praat escapes double-quotes
    by doubling them, so doubled double-quotes are read as single
    double-quotes. Newlines within an entry are allowed.
    """
    line = text.readline()

    # check that the line begins with a valid entry type
    if not re.match(r'^\s*(text|mark) = "', line):
        raise ValueError('Bad entry: ' + line)

    # read until the number of double-quotes is even
    while line.count('"') % 2:
        next_line = text.readline()
        if not next_line:
            raise EOFError('Bad entry: ' + line[:20] + '...')
        line += next_line

    entry = re.match(r'^\s*(text|mark) = "(.*?)"\s*$', line, re.DOTALL)
    return entry.groups()[1].replace('""', '"')
1,332
def get_screen_resolution_str():
    """
    Get a regexp like string with your current screen resolution.

    :return: String with your current screen resolution.
    """
    sizes = [
        [800, [600]],
        [1024, [768]],
        [1280, [720, 768]],
        [1366, [768]],
        [1920, [1080, 1200]],
    ]
    sizes_mobile = [[768, [1024]], [720, [1280]],
                    [768, [1280, 1366]], [1080, [1920]]]
    default_w = 1920
    default_h = 1080
    default_mobile_w = 1080
    default_mobile_h = 1920
    is_mobile = False
    window = Gtk.Window()
    screen = window.get_screen()
    nmons = screen.get_n_monitors()
    maxw = 0
    maxh = 0
    sizew = 0
    sizeh = 0
    if nmons == 1:
        maxw = screen.get_width()
        maxh = screen.get_height()
    else:
        for m in range(nmons):
            mg = screen.get_monitor_geometry(m)
            if mg.width > maxw or mg.height > maxh:
                maxw = mg.width
                maxh = mg.height
    if maxw > maxh:
        v_array = sizes
    else:
        v_array = sizes_mobile
        is_mobile = True
    for m in v_array:
        if maxw <= m[0]:
            sizew = m[0]
            sizeh = m[1][len(m[1]) - 1]
            for e in m[1]:
                if maxh <= e:
                    sizeh = e
                    break
            break
    if sizew == 0:
        if is_mobile:
            sizew = default_mobile_w
            sizeh = default_mobile_h
        else:
            sizew = default_w
            sizeh = default_h
    return r"%sx%s" % (sizew, sizeh)
1,333
def page_body_id(context):
    """
    Get the CSS class for a given page.
    """
    path = slugify_url(context.request.path)
    if not path:
        path = "home"
    return "page-{}".format(path)
1,334
def main():
    """Go Main Go"""
    mesosite = get_dbconn('mesosite')
    mcursor = mesosite.cursor()
    print(('%5s %5s %5s %5s %5s %5s %5s %5s'
           ) % ("NWSLI", "LOW", "ACTN", "BANK", "FLOOD", "MOD", "MAJOR",
                "REC"))
    net = sys.argv[1]
    nt = NetworkTable(net)
    for sid in nt.sts:
        process_site(mcursor, sid, net)
    mcursor.close()
    mesosite.commit()
    mesosite.close()
1,335
def check_product_out_of_range_formatter_ref_error_task(infiles, outfile,
                                                        ignored_filter):
    """
    {path[2][0]} when len(path) == 1
    """
    with open(outfile, "w") as p:
        pass
1,336
def load_centers(network, name, eps):
    """Load values of centers from the specified network by name.

    :param network: Network to load center values
    :param name: Name of parameter with centers
    :param eps: Small constant added under the square root for numerical
        stability
    :return: Normalized centers
    """
    assert name in network.params.keys(), 'Cannot find name: {} in params'.format(name)

    params = network.params[name]
    assert len(params) == 1

    centers = params[0].data

    norms = np.sqrt(np.sum(np.square(centers), axis=1, keepdims=True) + eps)
    normalized_centers = centers / norms

    return normalized_centers
1,337
def naive_sort_w_matrix(array):
    """
    :param array: array to be sorted
    :return: a sorted version of the array, greatest to least, with the
        appropriate permutation matrix
    """
    size = len(array)
    array = list(array)  # mutable working copy for the selection sort

    def make_transposition(i, j):
        mat = np.identity(size)
        mat[i, i] = 0
        mat[j, j] = 0
        mat[i, j] = 1
        mat[j, i] = 1
        return mat

    sort_array = np.zeros(size)
    permutation = np.identity(size)
    for i in range(size):
        big = -float("inf")
        ix = i
        for j in range(i, size):
            if array[j] > big:
                big = array[j]
                ix = j
        sort_array[i] = big
        # Swap the selected maximum into place so it is not found again.
        array[i], array[ix] = array[ix], array[i]
        permutation = make_transposition(i, ix) @ permutation
    return sort_array, permutation
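A quick check that the accumulated permutation matrix reproduces the sort when applied to the original array:

import numpy as np

a = np.array([3.0, 1.0, 2.0])
sorted_a, P = naive_sort_w_matrix(a)
print(sorted_a)  # [3. 2. 1.]
print(P @ a)     # [3. 2. 1.], identical to sorted_a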
1,338
def plot_route(cities, route, name='diagram.png', ax=None):
    """Draw the route."""
    mpl.rcParams['agg.path.chunksize'] = 10000

    if not ax:
        fig = plt.figure(figsize=(5, 5), frameon=False)
        axis = fig.add_axes([0, 0, 1, 1])
        axis.set_aspect('equal', adjustable='datalim')
        plt.axis('off')

        axis.scatter(cities['x'], cities['y'], color='red', s=4)
        route = cities.reindex(route)
        route.loc[route.shape[0]] = route.iloc[0]
        axis.plot(route['x'], route['y'], color='purple', linewidth=1)

        plt.savefig(name, bbox_inches='tight', pad_inches=0, dpi=200)
        plt.close()
    else:
        ax.scatter(cities['x'], cities['y'], color='red', s=4)
        route = cities.reindex(route)
        route.loc[route.shape[0]] = route.iloc[0]
        ax.plot(route['x'], route['y'], color='purple', linewidth=1)
        return ax
1,339
def sync(event, context):
    """Sync projects with cloud datastore."""
    del event, context  # unused

    client = ndb.Client()
    with client.context():
        github_client = Github(get_access_token())
        repo = github_client.get_repo('google/oss-fuzz')
        projects = get_projects(repo)
        sync_projects(projects)
1,340
def aes_cbc_mac(
        key: bytes,
        b: bytes,
        iv: bytes = None,
        pad=False
) -> bytes:
    """
    AES CBC-MAC.

    :param key: The verification key.
    :param b: The buffer to be authenticated.
    :param iv: The initial vector.
    :param pad: Whether to apply PKCS-7 padding to the buffer.
    :return: A valid MAC for b, with given key and IV.
    """
    if pad:
        b = matasano.blocks.pkcs_7(b, 16)
    return matasano.blocks.aes_cbc(
        key=key, b=b, iv=iv,
        decrypt=False, random_iv=False
    )[0][-16:]
1,341
def seamus():
    """
    Preview for Seamus page
    """
    context = make_context()

    # Read the books JSON into the page.
    with open('www/static-data/books.json', 'rb') as readfile:
        books_data = json.load(readfile)
    books = sorted(books_data, key=lambda k: k['title'])

    # Harvest long tag names
    for book in books:
        tag_list = []
        for tag in book['tags']:
            tag_list.append(context['COPY']['tags'][tag]['value'])
        book['tag_list'] = tag_list

    context['books'] = books

    return render_template('seamus-preview.html', **context)
1,342
def get_model_opt(model_path):
    """ Get the options to initialize a model evaluator """
    opt_path = os.path.dirname(model_path.rstrip('/')) + '/opt.json'
    # load the options used while training the model
    opt = json.load(open(opt_path))
    opt = dotdict(opt)
    opt.load_weights_folder = model_path
    return opt
1,343
def resolve_alias(term: str) -> str:
    """
    Resolves search term aliases (e.g., 'loc' for 'locations').
    """
    if term in ("loc", "location"):
        return "locations"
    elif term == "kw":
        return "keywords"
    elif term == "setting":
        return "setting"
    elif term == "character":
        return "characters"
    else:
        return term
1,344
def to_number(value: Union[Any, Iterable[Any]], default: Any = math.nan) -> Union[NumberType, List[NumberType]]:
    """ Attempts to convert the passed object to a number.

    Returns
    -------
    value: Scalar
        * list, tuple, set -> list of Number
        * int, float -> int, float
        * str -> int, float
        * generic -> float if float() works, else math.nan
    """
    if isinstance(value, str):
        return _convert_string_to_number(value, default)
    if isinstance(value, (list, tuple, set)):
        return [to_number(i, default) for i in value]
    try:
        converted_number = float(value)
    except (ValueError, TypeError):
        converted_number = default
    if not _is_null(converted_number) and math.floor(converted_number) == converted_number:
        converted_number = int(converted_number)
    return converted_number
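A usage sketch; `_convert_string_to_number` and `_is_null` are module-internal helpers, so the string outputs below assume they parse like float() and treat nan as null:

print(to_number("3.14"))      # 3.14
print(to_number(42.0))        # 42, floored to int because it is integral
print(to_number([1, "2.5"]))  # [1, 2.5]
print(to_number(object()))    # nan, the default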
1,345
def save_snapshot(graph, epoch, result_subdir, graph_ema=None,
                  model_name=None, is_best=False, state=None):
    """
    Save snapshot

    :param graph: model
    :type graph: torch.nn.Module
    :param epoch: epoch index
    :type epoch: int
    :param result_subdir: path to save
    :type result_subdir: str
    :param model_name: model name
    :type model_name: str
    :param is_best: whether or not is best model
    :type is_best: bool
    :param state: other state to save
    :type state: dict
    """
    state_to_save = {
        'graph': graph.module.state_dict() if hasattr(graph, 'module') else graph.state_dict(),
        'epoch': epoch
    }
    if state is not None:
        state_to_save.update(state)
    if graph_ema is not None:
        state_to_save['graph_ema'] = \
            graph_ema.module.state_dict() if hasattr(graph_ema, 'module') else graph_ema.state_dict()

    # save current state
    if model_name is None:
        model_name = get_model_name(epoch)
    save_path = os.path.join(result_subdir, model_name)
    torch.save(state_to_save, save_path)

    # save best state
    if is_best:
        best_path = os.path.join(result_subdir, get_best_model_name())
        shutil.copy(save_path, best_path)
1,346
def secretValue(value=None, bits=64):
    """ A single secret value

    bits: how many bits long the value is
    value: if not None, then a specific (concrete or symbolic) value which
        this value takes on
    """
    return AbstractNonPointer(bits=bits, value=value, secret=True)
1,347
def reload_api():
    """
    Reinitialize the API client with a new API key. This method may block
    if no valid keys are currently available.
    """
    global API
    API = TwitterSearch(*KEYS.advance().splitlines())
1,348
def year_frac(d_end, d_start=0, dates_string=False, trading_calendar=True):
    """
    :returns: year fraction between 2 (business) dates
    :params: dates are datetimes; if strings, pass dates_string=True
    """
    delta_days = days_between(d_end, d_start, dates_string, trading_calendar)
    year = 252. if trading_calendar else 365.25
    return delta_days / year
1,349
def _indent(s):
    # type: (str) -> int
    """
    Compute the indentation of s, or None for an empty line.

    Example:
    >>> _indent("foo")
    0
    >>> _indent("    bar")
    4
    >>> _indent("   ")
    >>> _indent("")
    """
    t = s.lstrip()
    return len(s) - len(t) if t else None
1,350
def normalize_middle_high_german(
    text: str,
    to_lower_all: bool = True,
    to_lower_beginning: bool = False,
    alpha_conv: bool = True,
    punct: bool = True,
):
    """Normalize input string.

    to_lower_all: convert whole text to lowercase
    alpha_conv: convert alphabet to canonical form
    punct: remove punctuation

    >>> from cltk.alphabet import gmh
    >>> from cltk.languages.example_texts import get_example_text
    >>> gmh.normalize_middle_high_german(get_example_text("gmh"))[:50]
    'ik gihorta ðat seggen\\nðat sih urhettun ænon muotin'
    """
    if to_lower_all:
        text = text.lower()
    if to_lower_beginning:
        text = text[0].lower() + text[1:]
        text = re.sub(r"(?<=[\.\?\!]\s)(\w)", lambda x: x.group(1).lower(), text)
    if alpha_conv:
        text = (
            text.replace("ē", "ê")
            .replace("ī", "î")
            .replace("ā", "â")
            .replace("ō", "ô")
            .replace("ū", "û")
        )
        text = text.replace("ae", "æ").replace("oe", "œ")
    if punct:
        text = re.sub(r"[\.\";\,\:\[\]\(\)!&?‘]", "", text)
    return text
1,351
def _convert_format(input_format, reverse=0):
    """Convert FITS format spec to record format spec.  Do the opposite if
    reverse = 1.
    """
    fmt = input_format
    (repeat, dtype, option) = _parse_tformat(fmt)
    if reverse == 0:
        if dtype in _fits2rec.keys():                            # FITS format
            if dtype == 'A':
                output_format = _fits2rec[dtype]+`repeat`
                # to accommodate both the ASCII table and binary table column
                # format spec, i.e. A7 in ASCII table is the same as 7A in
                # binary table, so both will produce 'a7'.
                if fmt.lstrip()[0] == 'A' and option != '':
                    # make sure option is integer
                    output_format = _fits2rec[dtype]+`int(option)`
            else:
                _repeat = ''
                if repeat != 1:
                    _repeat = `repeat`
                output_format = _repeat+_fits2rec[dtype]
        elif dtype == 'X':
            nbytes = ((repeat-1) / 8) + 1
            # use an array, even if it is only ONE u1 (i.e. use tuple always)
            output_format = _FormatX(`(nbytes,)`+'u1')
            output_format._nx = repeat
        elif dtype == 'P':
            output_format = _FormatP('2i4')
            output_format._dtype = _fits2rec[option[0]]
        elif dtype == 'F':
            output_format = 'f8'
        else:
            raise ValueError, "Illegal format %s" % fmt
    else:
        if dtype == 'a':
            output_format = option+_rec2fits[dtype]
        elif isinstance(dtype, _FormatX):
            print 'X format'
        elif dtype+option in _rec2fits.keys():                    # record format
            _repeat = ''
            if repeat != 1:
                _repeat = `repeat`
            output_format = _repeat+_rec2fits[dtype+option]
        else:
            raise ValueError, "Illegal format %s" % fmt

    return output_format
1,352
def _module(root_pkg, name):
    """Imports the module, catching `ImportError`

    Args:
        root_pkg (str): top level package
        name (str): unqualified name of the module to be imported

    Returns:
        module: imported module
    """
    def _match_exc(e):
        return re.search(
            ' {}$|{}'.format(
                # py2
                _module_from_cmd(name),
                # py3
                _module_name((root_pkg, name)),
            ),
            str(e),
        )

    try:
        return _import(root_pkg, name)
    except Exception as e:
        if (isinstance(e, ImportError) and _match_exc(e)
                or isinstance(e, (argh.CommandError, CommandError))):
            sys.stderr.write(str(e) + "\n")
        else:
            raise
    return None
1,353
def clean_elec_demands_dirpath(tmp_path: Path) -> Path:
    """Create a temporary, empty directory called 'processed'.

    Args:
        tmp_path (Path): see https://docs.pytest.org/en/stable/tmpdir.html

    Returns:
        Path: Path to a temporary, empty directory called 'processed'.
    """
    dirpath = tmp_path / "processed"
    mkdir(dirpath)
    return dirpath / "SM_electricity"
1,354
def get_file_from_project(proj: Project, file_path):
    """
    Returns a file object (or None, if error) from the HEAD of the default
    branch in the repo. The default branch is usually 'main'.
    """
    try:
        file = proj.files.raw(file_path=file_path, ref=proj.default_branch)
        LintReport.trace(f'Accessing \'{file_path}\' from {proj.name}.')
        return file
    except gitlab.GitlabGetError as _:
        LintReport.trace(
            f'Problem accessing \'{file_path}\' from {proj.name}.')
        return None
1,355
def test_dataset_compute_response(fractal_compute_server):
    """ Tests that the full compute response is returned when calling Dataset.compute """
    client = ptl.FractalClient(fractal_compute_server)

    # Build a dataset
    ds = ptl.collections.Dataset("ds", client, default_program="psi4",
                                 default_driver="energy",
                                 default_units="hartree")
    ds.add_entry("He1", ptl.Molecule.from_data("He -1 0 0\n--\nHe 0 0 1"))
    ds.add_entry("He2", ptl.Molecule.from_data("He -1.1 0 0\n--\nHe 0 0 1.1"))
    ds.save()

    # Compute fewer molecules than query limit
    response = ds.compute("HF", "sto-3g")
    assert len(response.ids) == 2

    # Compute more molecules than query limit
    client.query_limit = 1
    response = ds.compute("HF", "sto-3g")
    assert len(response.ids) == 2
1,356
def get_logger_by_name(name: str):
    """
    Gets the logger given the type of logger

    :param name: Name of the value function needed
    :type name: string
    :returns: Logger
    """
    if name not in logger_registry:
        raise NotImplementedError
    return logger_registry[name]
1,357
def gen_appr_():
    """
    16 consonants
    """
    appr_ = list(voiced_approximant)
    appr_.extend(unvoiced_approximant)
    appr_.extend(voiced_lateral_approximant)
    return appr_
1,358
def convert_unit(
    to_convert: Union[float, int, Iterable[Union[float, int, Iterable]]],
    old_unit: Union[str, float, int],
    new_unit: Union[str, float, int],
) -> Union[float, tuple]:
    """
    Convert a number or sequence of numbers from one unit to another.

    If either unit is a number it will be treated as the number of points
    per unit, so 72 would mean 1 inch.

    Args:
        to_convert (float, int, Iterable): The number / list of numbers, or
            points, to convert
        old_unit (str, float, int): A unit accepted by fpdf.FPDF or a number
        new_unit (str, float, int): A unit accepted by fpdf.FPDF or a number

    Returns:
        (float, tuple): to_convert converted from old_unit to new_unit or a
        tuple of the same
    """
    unit_conversion_factor = get_scale_factor(new_unit) / get_scale_factor(old_unit)
    if isinstance(to_convert, Iterable):
        return tuple(
            map(lambda i: convert_unit(i, 1, unit_conversion_factor), to_convert)
        )
    return to_convert / unit_conversion_factor
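A usage sketch, assuming `get_scale_factor` follows fpdf2's points-per-unit convention (72 per inch, 72/25.4 per millimetre):

print(convert_unit(25.4, "mm", "in"))      # 1.0
print(convert_unit((10, 20), "mm", "pt"))  # (28.346..., 56.692...)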
1,359
def image_preprocess(image, image_size: Union[int, Tuple[int, int]]):
    """Preprocess image for inference.

    Args:
        image: input image, can be a tensor or a numpy array.
        image_size: single integer of image size for square image or tuple of
            two integers, in the format of (image_height, image_width).

    Returns:
        (image, scale): a tuple of processed image and its scale.
    """
    input_processor = dataloader.DetectionInputProcessor(image, image_size)
    input_processor.normalize_image()
    input_processor.set_scale_factors_to_output_size()
    image = input_processor.resize_and_crop_image()
    image_scale = input_processor.image_scale_to_original
    return image, image_scale
1,360
def create_cluster_meta(cluster_groups):
    """Return a ClusterMeta instance with cluster group support."""
    meta = ClusterMeta()
    meta.add_field('group')

    cluster_groups = cluster_groups or {}
    data = {c: {'group': v} for c, v in cluster_groups.items()}
    meta.from_dict(data)
    return meta
1,361
def dict_has_key_and_value_include_str(the_dict, key, str):
    """Check that the dict contains the given key and that the key's value
    contains the given substring."""
    if key in the_dict:
        if str in the_dict[key]:
            return True
    return False
1,362
def get_confinement_values_by_hand():
    """MSDs in this dataset are too incomplete/noisy to be very confident of
    any automatic method to call confinement levels. This function allows you
    to specify them by hand."""
    confinements = {}
    cmap = cmap_from_list(mscd_unbound_only.reset_index()['meiosis'].unique())
    for label, d in mscd_unbound_only.groupby(['locus', 'genotype', 'meiosis']):
        plt.cla()
        plt.errorbar(d.reset_index()['delta'], d['mean'], d['ste'],
                     c=cmap(label[2]), label=str(label))
        plt.yscale('log')
        plt.xscale('log')
        plt.legend()
        ax = plt.gca()
        ax.yaxis.set_major_locator(MultipleLocator(0.1))
        ax.yaxis.set_major_formatter(FormatStrFormatter('%0.01f'))
        ax.yaxis.set_minor_locator(MultipleLocator(0.01))
        ax.yaxis.set_minor_formatter(FormatStrFormatter(''))
        plt.grid(True)
        plt.pause(0.01)
        ctype = input('confinement type: ')
        cval = float(input('approximate conf level: '))
        confinements[label] = {'type': str(ctype), 'R': cval}
    return confinements
1,363
def get_list_size(ls: List[Any]) -> float:
    """Return size in memory of a list and all its elements"""
    return reduce(lambda x, y: x + y, (sys.getsizeof(v) for v in ls), 0) \
        + sys.getsizeof(ls)
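A quick usage sketch (the defining module is assumed to import `reduce` from functools along with `sys`):

import sys

ls = ["a", "bb", "ccc"]
total = get_list_size(ls)
print(total > sys.getsizeof(ls))  # True: container size plus each element's size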
1,364
def get_wrapper_depth(wrapper):
    """Return depth of wrapper function."""
    return wrapper.__wrapped__.__wrappers__ + (1 - wrapper.__depth__)
1,365
def get_formsets(what, extra=0, **kwargs):
    """Returns a list of formset instances"""
    try:
        related_fields = {}
        relation_config = get_form_config('Relations', **kwargs)
        operation = 'create' if 'Create' in what else 'update'
        for relation in relation_config:
            field_config = relation_config[relation]
            related_fields[relation] = get_form_fields(operation, field_config)

        def get_related_model(relation):
            """Returns related model"""
            args = get_app_model_as_params(**kwargs)
            args.pop()
            args.append(relation)
            return apps.get_model(*args)

        return [inlineformset_factory(
            get_model(**kwargs),
            get_related_model(relation),
            fields=related_fields[relation],
            extra=extra
        ) for relation in related_fields]
    except KeyError:
        return []
1,366
def round_even(number): """Takes a number and returns it rounded even""" # decimal.getcontext() -> ROUND_HALF_EVEN is default return Decimal(number).quantize(0)
1,367
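# round_even above relies on the default context rounding, ROUND_HALF_EVEN,
# so ties go to the even neighbour ("banker's rounding"):
from decimal import Decimal
assert round_even("0.5") == Decimal("0")
assert round_even("1.5") == Decimal("2")
assert round_even("2.5") == Decimal("2")  # not 3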
def mquiver(xs, ys, v, **kw): """wrapper function for quiver xs and ys are arrays of x's and y's v is a function R^2 -> R^2, representing vector field kw are passed to quiver verbatim""" X,Y = np.meshgrid(xs, ys) V = [[v(x,y) for x in xs] for y in ys] VX = [[w[0] for w in q] for q in V] VY = [[w[1] for w in q] for q in V] plt.quiver(X, Y, VX, VY, **kw)
1,368
def _build_conditional_single(cond, vals, model_cls=None): """ Builds the single conditional portion of a where clause. Args: cond (()/[]): The tuple/list containing the elements for a single conditional statement. See Model.query_direct() docs for full details on the format. vals ({str:str/int/bool/datetime/enum/etc}): The mapping of variable names as they will be used within parameterized format (i.e. `%(<>)s` format) in the returned `clause`. This is expected to contain all variables already built into the where clause currently being processed and will be modified here if a value/variable is part of the conditional. model_cls (Class<Model<>> or None): The class itself of the model holding the valid column names. Can be None if skipping that check for increased performance, but this is ONLY recommended if the source of the column names in the structured `where` parameter is internally controlled and was not subject to external user input to avoid SQL injection attacks. Returns: (str): The portion of the clause that represents this single conditional. Any variables will be in parameterized format (i.e. `%(<>)s` format). Note that the `vals` provided will be modified by adding any new variables included in this portion of the clause. Raises: (NonexistentColumnError): Raised if the column provided in the `cond` does not exist in the official list of columns in the provided model (only possible if model_cls provided as non-None). (ValueError): Raised if the LogicOp provided as part of the `cond` is not a valid LogicOp option for this Orm. """ if model_cls is not None: _validate_cols([cond[0]], model_cls) if cond[1] is model_meta.LogicOp.NOT_NULL: return f'{cond[0]} NOT NULL' # The rest below have a value, so all would use same key val_key = f'wval{str(len(vals))}' if cond[1] is model_meta.LogicOp.EQ \ or cond[1] is model_meta.LogicOp.EQUAL \ or cond[1] is model_meta.LogicOp.EQUALS: vals[val_key] = cond[2] return f'{cond[0]} = %({val_key})s' if cond[1] is model_meta.LogicOp.LT \ or cond[1] is model_meta.LogicOp.LESS_THAN: vals[val_key] = cond[2] return f'{cond[0]} < %({val_key})s' if cond[1] is model_meta.LogicOp.LTE \ or cond[1] is model_meta.LogicOp.LESS_THAN_OR_EQUAL: vals[val_key] = cond[2] return f'{cond[0]} <= %({val_key})s' if cond[1] is model_meta.LogicOp.GT \ or cond[1] is model_meta.LogicOp.GREATER_THAN: vals[val_key] = cond[2] return f'{cond[0]} > %({val_key})s' if cond[1] is model_meta.LogicOp.GTE \ or cond[1] is model_meta.LogicOp.GREATER_THAN_OR_EQUAL: vals[val_key] = cond[2] return f'{cond[0]} >= %({val_key})s' err_msg = f'Invalid or Unsupported Logic Op: {cond[1]}' logger.error(err_msg) raise ValueError(err_msg)
1,369
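# Hedged sketch of the clauses _build_conditional_single above produces; the
# model_meta wiring is external, so the call below is illustrative only:
#
#   vals = {}
#   clause = _build_conditional_single(('price', model_meta.LogicOp.LTE, 9.99), vals)
#   # clause == 'price <= %(wval0)s' and vals == {'wval0': 9.99}
#   # ('deleted_at', model_meta.LogicOp.NOT_NULL) yields 'deleted_at NOT NULL'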
def isNullOutpoint(tx): """ isNullOutpoint determines whether or not a previous transaction output point is set. """ nullInOP = tx.txIn[0].previousOutPoint if ( nullInOP.index == wire.MaxUint32 and nullInOP.hash == ByteArray(0, length=HASH_SIZE) and nullInOP.tree == wire.TxTreeRegular ): return True return False
1,370
def cmyk_to_rgb(c, m, y, k):
    """Convert CMYK components (each in 0.0-1.0) to RGB components in 0.0-1.0."""
    r = (1.0 - c) * (1.0 - k)
    g = (1.0 - m) * (1.0 - k)
    b = (1.0 - y) * (1.0 - k)
    return r, g, b
1,371
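# Worked example for cmyk_to_rgb above (all components in 0.0-1.0):
assert cmyk_to_rgb(1.0, 0.0, 0.0, 0.0) == (0.0, 1.0, 1.0)  # pure cyan
assert cmyk_to_rgb(0.0, 0.0, 0.0, 0.5) == (0.5, 0.5, 0.5)  # 50% black scales all channels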
def get_by_id(group_id: int, db: Session = Depends(get_db), member: MemberModel = Depends(get_active_member)): """Get group by id""" item = service.get_by_id(db, group_id) return item
1,372
def get_jasperdr(version, model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs):
    """
    Create Jasper DR model with specific parameters.

    Parameters:
    ----------
    version : str
        Version of Jasper DR in the form '<blocks>x<repeat>' (e.g. '10x5').
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    import numpy as np

    blocks, repeat = tuple(map(int, version.split("x")))
    main_stage_repeat = blocks // 5

    channels_per_stage = [256, 256, 384, 512, 640, 768, 896, 1024]
    kernel_sizes_per_stage = [11, 11, 13, 17, 21, 25, 29, 1]
    dropout_rates_per_stage = [0.2, 0.2, 0.2, 0.2, 0.3, 0.3, 0.4, 0.4]
    stage_repeat = np.full((8,), 1)
    stage_repeat[1:-2] *= main_stage_repeat
    channels = sum([[a] * r for (a, r) in zip(channels_per_stage, stage_repeat)], [])
    kernel_sizes = sum([[a] * r for (a, r) in zip(kernel_sizes_per_stage, stage_repeat)], [])
    dropout_rates = sum([[a] * r for (a, r) in zip(dropout_rates_per_stage, stage_repeat)], [])

    net = JasperDr(
        channels=channels,
        kernel_sizes=kernel_sizes,
        dropout_rates=dropout_rates,
        repeat=repeat,
        **kwargs)

    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_model
        download_model(
            net=net,
            model_name=model_name,
            local_model_store_dir_path=root)

    return net
1,373
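# Worked example of the version parsing in get_jasperdr above:
# get_jasperdr("10x5") gives blocks=10, repeat=5, so the five middle stages
# (indices 1..5 of the eight) are each repeated 10 // 5 = 2 times, and each
# repeated block keeps its stage's channel count, kernel size and dropout rate.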
def productionadjustment():  # TODO: get route to display
    """Renders the production adjustment page."""
    return render_template(
        'productionadjustment.html',
        title='Production Adjustment',
        year=datetime.now().year,
        wellsel=bigagg
    )
1,374
def upload_directory_targetid(token, local_dir, target_folder_id, skip_existed=False, show_skip_info=True, fencrypt=None):
    """
    token: request_token
    local_dir: the local folder to upload, e.g. r"d:\to_be_uploaded"
    target_folder_id: id of the parent folder at the upload destination

    This function is not recursive itself; it uses os.walk, mkdir_p and upload
    to complete the transfer.
    If the target folder already exists, a [WARN] line is printed;
    if a target file already exists, the file is uploaded with "(1)" appended
    to its name instead of replacing the existing one!
    """
    # the local path must be an existing directory
    global a
    assert os.path.isdir(local_dir), "expected a folder, local_dir={local_dir}".format(**locals())
    name = getfilename(local_dir)  # name of the folder being uploaded
    fid = str(target_folder_id)
    target_folder_lsdir = lsdir(a, fid)
    # the parent folder has been listed; now create the target folder directly with mkdir
    try:
        targetfid = mkdir(token, name, parent_id=fid)
    except FileExistsError:
        targetfid = [i[1].split("_")[1] for i in target_folder_lsdir if i[0]==name and i[1].split("_")[0]=="folder"][0]
    print(targetfid)
    cache = {"fs":{}, "path":"/", "fid": targetfid}
    cache["fs"].update(generate_fscache("", fid=targetfid, prefix="/", cache=cache))
    target_folder_path = ""
    for root, dirs, files in os.walk(local_dir):
        for dir in dirs:
            dirname = root.replace(local_dir,"",1).replace("\\",'/')+"/"+dir  # "/Image/aha"
            mkdir_p(token, target_folder_path+dirname, cache)
        for filename in files:
            relative_root = root.replace(local_dir,"",1).replace("\\",'/')  # "/Image/aha" or ""
            remote_abs_folder = target_folder_path+relative_root  # "uploaded/Image/aha" or "uploaded"; despite the "abs" name, still relative to cache["path"]
            remote_abs_filepath = remote_abs_folder+"/"+safefilename(filename)  # "uploaded/Image/aha/example.jpg" or "uploaded/example.jpg"
            type, folder_id = path_to_typed_id(remote_abs_folder, cache)
            assert type=="folder", "expected folder {remote_abs_folder}".format(**locals())
            local_filepath = os.path.join(local_dir, relative_root[1:], filename)
            if skip_existed and remote_abs_filepath in cache["fs"]:
                if show_skip_info: print("skip existed file: {remote_abs_filepath}".format(**locals()))
                continue
            filesize = getsize(local_filepath)
            if filesize>BLOCKSIZE:
                data = block(open(local_filepath,"rb"), showhint=False)
            else:
                data = open(local_filepath,"rb").read()
            newfileid = upload(token,filename,data,filesize,folder_id=folder_id,fencrypt=fencrypt)
            cache["fs"][remote_abs_filepath] = ("file", newfileid, filesize)
    return targetfid
1,375
def thumbnail_download(data, k, url, mongo):
    """
    Download thumbnails.
    :return: list of local thumbnail files, or None
    """
    if data['thumbnail_urls'] is not None:
        logging.debug(r'starting thumbnail download: %s' % data['thumbnail_urls'])
        mongo.update(url, COD.REDTHU)
        try:
            referer = url if k != '163' else None  # NetEase (163) images must be fetched without a referer
            thumbnail_local_files = files_download(
                data['thumbnail_urls'], referer=referer
            )
        except Exception as e:
            logging.debug(r'thumbnail download failed')
            mongo.update(url, COD.THUERR)
            message = e.args
            raise AssertionError(r'{}: thumbnail download failed,\n message: {}'.format(url, message[0]))
        logging.debug(r'thumbnail download succeeded')
        mongo.update(url, COD.GETTHU)
        return thumbnail_local_files
    else:
        # thumbnails for videos should be generated with OpenCV; not implemented yet
        logging.debug(r'no thumbnail urls; OpenCV thumbnail generation is not implemented yet')
        mongo.update(url, COD.THUNIL)
        return None
1,376
def inMandelSet(x: float, y: float, max_iteration: int) -> int:
    """inMandelSet returns the escape iteration of complex(x, y), or
    max_iteration if the point never escapes and is taken to be in the
    mandelbrot set."""
    z = 0
    for k in range(max_iteration):
        z = z ** 2 + complex(x, y)
        if abs(z) > 2:
            return k
    return max_iteration
1,377
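# Tiny escape-time demo for inMandelSet above: points that survive all
# iterations (return value == max_iteration) are treated as inside the set.
for y in (-1, 0, 1):
    print("".join("#" if inMandelSet(x / 2, y / 2, 20) == 20 else "."
                  for x in range(-4, 2)))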
def pytest_configure(config): """Configure pytest options.""" # Fixtures for fixture in ('matplotlib_config',): config.addinivalue_line('usefixtures', fixture) warning_lines = r""" error:: ignore:.*`np.bool` is a deprecated alias.*:DeprecationWarning ignore:.*String decoding changed with h5py.*:FutureWarning ignore:.*SelectableGroups dict interface is deprecated.*:DeprecationWarning ignore:.*Converting `np.character` to a dtype is deprecated.*:DeprecationWarning ignore:.*distutils Version classes are deprecated.*:DeprecationWarning # for the persistence of metadata and Raw Annotations within mne-python # Epochs class ignore:.*There were no Annotations stored in.*:RuntimeWarning always::ResourceWarning """ # noqa: E501 for warning_line in warning_lines.split('\n'): warning_line = warning_line.strip() if warning_line and not warning_line.startswith('#'): config.addinivalue_line('filterwarnings', warning_line)
1,378
def isBinaryPalindrome(num):
    """assumes num is an integer
        returns True if num in binary form is a palindrome, else False"""
    binary = bin(num)[2:]
    return binary == binary[::-1]
1,379
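# Spot checks for isBinaryPalindrome above:
assert isBinaryPalindrome(5)       # 0b101
assert not isBinaryPalindrome(6)   # 0b110
assert isBinaryPalindrome(9)       # 0b1001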
def modify_last_edit(entry_id: int, database: str = None): """Updates the database to reflect the last time the given entry was changed :param entry_id: an int representing the given entry :param database: a Connection or str representing the database that is being modified """ db = connect(database) if database else connect(default_database()) with closing(db) as d: now = datetime.now() d.execute('UPDATE dates SET last_edit=? WHERE entry_id=?', (now, entry_id)) d.commit()
1,380
def main():
    """The main CLI entry-point."""
    import thermos.commands
    import os,pip
    options = docopt(__doc__, version=VERSION)
    def create_structure():
        app_name = options['<appname>']
        if not os.path.exists(app_name):
            os.makedirs(app_name)
            os.chdir(os.getcwd()+"/"+app_name)
            os.system('git init')
            os.system("touch .gitignore")
            os.system("touch README.md")
            with open('.gitignore','w+') as gitignore:
                gitignore.write('virtual/ \n *.pyc \n start.sh')
                gitignore.close()
            if not os.path.exists('tests'):
                os.makedirs('tests')
            config_file = 'class Config:\n\tpass\n\nclass ProdConfig(Config):\n\tpass\n\nclass DevConfig(Config):\n\tDEBUG = True\n\nconfig_options={"production":ProdConfig,"default":DevConfig}'
            manage_file = "from flask_script import Manager,Server\nfrom app import create_app,db\n\napp = create_app('default')\n\nmanager = Manager(app)\n\nmanager.add_command('server', Server)\n\nif __name__ == '__main__':\n\tmanager.run()"
            with open('config.py','w+') as config:
                config.write(config_file)
                config.close()
            with open('manage.py','w+') as manage:
                manage.write(manage_file)
                manage.close()
            if not os.path.exists('app'):
                os.makedirs('app')
                os.chdir('app')
                folders = ['static','templates','static/css','static/js','static/images']
                base_html = "{% extends 'bootstrap/base.html' %}\n<!doctype html>\n<html><head>{% block head %}<link rel='stylesheet' href=\"{{ url_for('static', filename='style.css') }}\"><title>{% block title %}{% endblock %} - My Webpage</title>{% endblock %}</head><body><div id='content'>{% block content %}{% endblock %}</div><div id='footer'>{% block footer %}&copy; Copyright 2010 by <a href='http://domain.invalid/'>you</a>.{% endblock %}</div></body></html>"
                for folder in folders:
                    if not os.path.exists(folder):
                        os.makedirs(folder)
                        if folder=='templates':
                            with open('templates/base.html','w+') as base_tem:
                                base_tem.write(base_html)
                                base_tem.close()
                init_file = "from flask import Flask\nfrom config import config_options\nfrom flask_bootstrap import Bootstrap\nfrom flask_sqlalchemy import SQLAlchemy\n\n\nbootstrap = Bootstrap()\ndb = SQLAlchemy()\ndef create_app(config_state):\n\tapp = Flask(__name__)\n\tapp.config.from_object(config_options[config_state])\n\n\n\tbootstrap.init_app(app)\n\tdb.init_app(app)\n\tfrom .main import main as main_blueprint\n\tapp.register_blueprint(main_blueprint)\n\treturn app"
                with open('__init__.py','w+') as init:
                    init.write(init_file)
                    init.close()
                with open('models.py','w+') as models:
                    models.write("#models")
                    models.close()
                if not os.path.exists('main'):
                    os.makedirs('main')
                    os.chdir('main')
                    main_init_file = "from flask import Blueprint\nmain = Blueprint('main',__name__)\n\nfrom . import views,error"
                    view_file = "from . import main\n\n@main.route('/')\ndef index():\n\treturn '<h1> Hello World </h1>'"
                    error_file = "from flask import render_template\nfrom . import main\n\n@main.app_errorhandler(404)\ndef for_Ow_four(error):\n\t'''\n\tFunction to render the 404 error page\n\t'''\n\treturn render_template('fourOwfour.html'),404"
                    blueprint_files = ['__init__.py', 'views.py', 'error.py']
                    for blueprint_file in blueprint_files:
                        if blueprint_file == '__init__.py':
                            with open(blueprint_file,'w+') as m_init:
                                m_init.write(main_init_file)
                                m_init.close()
                        elif blueprint_file == 'views.py':
                            with open(blueprint_file,'w+') as vw:
                                vw.write(view_file)
                                vw.close()
                        else:
                            with open(blueprint_file,'w+') as er:
                                er.write(error_file)
                                er.close()
                    os.chdir('..')
                os.chdir('..')
            with open('tests/__init__.py','a') as test_init:
                test_init.close()
            with open('start.sh','w+') as start:
                start.write('python3.6 manage.py server')
                start.close()
            os.system('chmod a+x start.sh')
            from platform import python_version
            version = str(python_version())[:3]
            virtual = "python%s -m venv virtual"%(version)
            os.system(virtual)
            os.system('. virtual/bin/activate')
            dependencies = ['flask','flask-script', 'flask-bootstrap','gunicorn','flask-wtf','flask-sqlalchemy']
            for dependency in dependencies:
                pip.main(['install',dependency])
            os.system('pip freeze > requirements.txt')
            with open('Procfile','w+') as proc:
                proc.write('web: gunicorn manage:app')
                proc.close()
            #ANIMATION CODE!
            screen = curses.initscr()
            width = screen.getmaxyx()[1]
            height = screen.getmaxyx()[0]
            size = width*height
            char = [" ", ".", ":", "^", "*", "x", "s", "S", "#", "$"]
            b = []
            curses.curs_set(0)
            curses.start_color()
            curses.init_pair(1,0,0)
            curses.init_pair(2,1,0)
            curses.init_pair(3,3,0)
            curses.init_pair(4,4,0)
            screen.clear()
            for i in range(size+width+1): b.append(0)
            for i in range(100):
                for i in range(int(width/9)): b[int((random.random()*width)+width*(height-1))]=65
                for i in range(size):
                    b[i]=int((b[i]+b[i+1]+b[i+width]+b[i+width+1])/4)
                    color=(4 if b[i]>15 else (3 if b[i]>9 else (2 if b[i]>4 else 1)))
                    if(i<size-1):
                        screen.addstr( int(i/width), i%width, char[(9 if b[i]>9 else b[i])], curses.color_pair(color) | curses.A_BOLD )
                screen.refresh()
                screen.timeout(30)
                if (screen.getch()!=-1): break
            curses.endwin()
            animation = "|/-\\"
            for i in range(20):
                time.sleep(0.1)
                sys.stdout.write("\r" + animation[i % len(animation)])
                sys.stdout.flush()
            #do something
            print("End!")
            cprint("\nCREATED APPLICATION FOLDER STRUCTURE\n HAPPY flasking :)\n","green")
            BASE_FOLDER = os.getcwd()
            app_folder = 'cd {}'.format(BASE_FOLDER)
            os.system(app_folder)
        else:
            cprint("\nAnother folder with same name already exists\nPlease try with another name\n","red")
    def check_app_is_flask():
        existing_file_folders = ['app','virtual','config.py','manage.py','Procfile','README.md','requirements.txt','start.sh']
        if all(os.path.exists(fl) for fl in existing_file_folders):
            return True
        else:
            cprint("\nPlease navigate into the flask folder\n","red")
            return False
    def create_blueprint(blueprint_name):
        os.makedirs(blueprint_name)
        os.chdir(blueprint_name)
        blueprint_name_init_file = "from flask import Blueprint\n{} = Blueprint('{}',__name__)\n\nfrom . import views,error".format(blueprint_name,blueprint_name)
        view_file = "from . import {}\n\n@{}.route('/')\ndef index():\n\treturn '<h1> Hello world </h1>'".format(blueprint_name,blueprint_name)
        error_file = "from flask import render_template\nfrom . import {}\n\n@{}.app_errorhandler(404)\ndef four_Ow_four(error):\n\t'''\n\tFunction to render the 404 error page\n\t'''\n\treturn render_template('fourOwfour.html'),404".format(blueprint_name,blueprint_name)
        blueprint_files = ['__init__.py', 'views.py', 'error.py']
        for blueprint_file in blueprint_files:
            if blueprint_file == '__init__.py':
                with open(blueprint_file,'w+') as b_init:
                    b_init.write(blueprint_name_init_file)
                    b_init.close()
            elif blueprint_file == 'views.py':
                with open(blueprint_file,'w+') as v:
                    v.write(view_file)
                    v.close()
            else:
                with open(blueprint_file,'w+') as err:
                    err.write(error_file)
                    err.close()
    def create_template(template_name):
        with open(template_name+'.html','w+') as template:
            template.write("{% extends 'base.html' %}")
            template.close()
    def add_blueprint():
        if check_app_is_flask():
            os.chdir('app')
            blueprint_name = options['<blueprintname>']
            if not os.path.exists(blueprint_name):
                create_blueprint(blueprint_name)
                temp_message = "Blueprint {} created!".format(blueprint_name)
            else:
                temp_message = "Blueprint {} already exists!".format(blueprint_name)
            cprint(temp_message,"magenta")
    def add_template():
        if check_app_is_flask():
            os.chdir('app')
            os.chdir('templates')
            template_name = options['<templatename>']
            if not os.path.exists(template_name+'.html'):
                create_template(template_name)
                temp_message = "Template {} created!".format(template_name)
            else:
                temp_message = "Template {} already exists!".format(template_name)
            cprint(temp_message,"magenta")
    if options['create']:
        try:
            if options['app'] and options['<appname>']:
                create_structure()
            if options['blueprint'] and options['<blueprintname>']:
                add_blueprint()
            if options['template'] and options['<templatename>']:
                add_template()
        except:
            cprint("\nOops! An error occurred\nPlease try again\n","red")
1,381
def mprv_from_entropy(entropy: GenericEntropy, passphrase: str, lang: str, xversion: bytes) -> bytes: """Return a BIP32 master private key from entropy.""" mnemonic = mnemonic_from_entropy(entropy, lang) mprv = mprv_from_mnemonic(mnemonic, passphrase, xversion) return mprv
1,382
def analyze_audio(audio_filename, target_freq=TARGET_FREQS, win_size=5000, step=200, min_delay=BEEP_DURATION, sensitivity=250, verbose=True):
    """
    Analyze the given audio file to find the tone markers, with the respective frequency
    and time position.

    :param str audio_filename: The audio filename to analyze to find the markers.
    :param tuple target_freq: A tuple containing the int frequencies ( in Hertz ) that the function
                              should recognize.
    :param int win_size: The size of the moving window for the analysis. Increasing the window
                         increases the accuracy but takes longer.
    :param int step: the increment between each window.
    :param float min_delay: Minimum duration, in seconds, of the beep to be recognized.
    :param int sensitivity: Minimum value of relative amplitude of the beep to be recognized.
    :param bool verbose: If true, print some info on the screen.
    :return: a list of dict containing the markers positions and frequencies.
    """
    print("Analyzing the Audio...")

    # Open the wav audio track
    # Get the sample rate (fs) and the sample data (data)
    fs, data = wavfile.read(audio_filename)

    # Calculate the duration, in seconds, of a sample
    sample_duration = 1.0 / fs

    # Get the total number of samples
    total_samples = data.shape[0]

    # Calculate the frequencies that the fourier transform can analyze
    frequencies = np.fft.fftfreq(win_size)

    # Convert them to Hertz
    hz_frequencies = frequencies * fs

    # Calculate the indexes of the frequencies that are compatible with the target_freq
    freq_indexes = []
    for freq in target_freq:
        # Find the index of the nearest element
        index = (np.abs(hz_frequencies - freq)).argmin()
        freq_indexes.append(index)

    # This will hold the duration of each frequency pulse
    duration_count = {}
    # Initialize the dictionary
    for freq in target_freq:
        duration_count[freq] = 0

    # Initialize the counter
    count = 0

    # This list will hold the analysis result
    results = []

    # Analyze the audio dividing the samples into windows, and analyzing each
    # one separately
    for window in mit.windowed(data, n=win_size, step=step, fillvalue=0):
        # Calculate the FFT of the current window
        fft_data = np.fft.fft(window)

        # Calculate the amplitude of the transform
        fft_abs = np.absolute(fft_data)

        # Calculate the mean of the amplitude
        fft_mean = np.mean(fft_abs)

        # Calculate the current time of the window
        ctime = count * sample_duration

        # Check, for each target frequency, if present
        for i, freq in enumerate(target_freq):
            # Get the relative amplitude of the current frequency
            freq_amplitude = abs(fft_data[freq_indexes[i]]) / fft_mean

            # If the amplitude is greater than the sensitivity,
            # Increase the duration counter for the current frequency
            if freq_amplitude > sensitivity:
                duration_count[freq] += step * sample_duration
            else:
                # If the duration is greater than the minimum delay, add the result
                if duration_count[freq] > min_delay:
                    results.append({'time': ctime, 'freq': freq})

                    # Print the result if verbose
                    if verbose:
                        print("--> found freq:", freq, "time:", ctime)

                duration_count[freq] = 0

        count += step

        # Print the progress every 100000 samples
        if verbose and count % 100000 == 0:
            percent = round((count/total_samples) * 100)
            print("\rAnalyzing {}% ".format(percent), end="")

    print()  # Reset the new line

    return results
1,383
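# Hedged usage sketch for analyze_audio above; the filename and frequencies
# are illustrative, and the TARGET_FREQS/BEEP_DURATION defaults come from
# module-level constants not shown here:
#
#   markers = analyze_audio("recording.wav", target_freq=(1000, 2000),
#                           min_delay=0.2, verbose=False)
#   for m in markers:
#       print("{} Hz beep near {:.2f}s".format(m["freq"], m["time"]))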
def read_usgs_file(file_name): """ Reads a USGS JSON data file (from https://waterdata.usgs.gov/nwis) Parameters ---------- file_name : str Name of USGS JSON data file Returns ------- data : pandas DataFrame Data indexed by datetime with columns named according to the parameter's variable description """ with open(file_name) as json_file: text = json.load(json_file) data = _read_usgs_json(text) return data
1,384
def action_about(sender): """Open the "About" view.""" about_view.present('sheet', hide_close_button=True)
1,385
def get_cantus_firmus(notes): """ Given a list of notes as integers, will return the lilypond notes for the cantus firmus. """ result = "" # Ensure the notes are in range normalised = [note for note in notes if note > 0 and note < 18] if not normalised: return result # Set the duration against the first note. result = NOTES[normalised[0]] + " 1 " # Translate all the others. result += " ".join([NOTES[note] for note in normalised[1:]]) # End with a double bar. result += ' \\bar "|."' # Tidy up double spaces. result = result.replace(" ", " ") return result
1,386
def get_wildcard_values(config): """Get user-supplied wildcard values.""" return dict(wc.split("=") for wc in config.get("wildcards", []))
1,387
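# Example for get_wildcard_values above:
assert get_wildcard_values({"wildcards": ["sample=A", "lane=1"]}) == \
    {"sample": "A", "lane": "1"}
assert get_wildcard_values({}) == {}  # missing key falls back to an empty list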
def predict(model_filepath, config, input_data): """Return prediction from user input.""" # Load model model = Model.load(model_filepath + config['predicting']['model_name']) # Predict prediction = int(np.round(model.predict(input_data), -3)[0]) return prediction
1,388
def Print_Beeper(scanDIM=1): """Prints pv to copy/paste into the beeper""" branch=CheckBranch() if branch == "c": print("29idcEA:det1:Acquire") scanIOC=BL_ioc() pv="29id"+scanIOC+":scan"+str(scanDIM)+".FAZE" print(pv) print("ID29:BusyRecord")
1,389
def main() -> int: """Runs protoc as configured by command-line arguments.""" parser = _argument_parser() args = parser.parse_args() if args.plugin_path is None and args.language not in BUILTIN_PROTOC_LANGS: parser.error( f'--plugin-path is required for --language {args.language}') args.out_dir.mkdir(parents=True, exist_ok=True) include_paths: List[str] = [] if args.include_file: include_paths = [f'-I{line.strip()}' for line in args.include_file] wrapper_script: Optional[Path] = None # On Windows, use a .bat version of the plugin if it exists or create a .bat # wrapper to use if none exists. if os.name == 'nt' and args.plugin_path: if args.plugin_path.with_suffix('.bat').exists(): args.plugin_path = args.plugin_path.with_suffix('.bat') _LOG.debug('Using Batch plugin %s', args.plugin_path) else: with tempfile.NamedTemporaryFile('w', suffix='.bat', delete=False) as file: file.write(f'@echo off\npython {args.plugin_path.resolve()}\n') args.plugin_path = wrapper_script = Path(file.name) _LOG.debug('Using generated plugin wrapper %s', args.plugin_path) cmd: Tuple[Union[str, Path], ...] = ( 'protoc', f'-I{args.compile_dir}', *include_paths, *DEFAULT_PROTOC_ARGS[args.language](args), *args.sources, ) try: process = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) finally: if wrapper_script: wrapper_script.unlink() if process.returncode != 0: _LOG.error('Protocol buffer compilation failed!\n%s', ' '.join(str(c) for c in cmd)) sys.stderr.buffer.write(process.stdout) sys.stderr.flush() return process.returncode
1,390
def gauss3D_FIT(xyz, x0, y0, z0, sigma_x, sigma_y, sigma_z):
    """
    gauss3D_FIT((x,y,z),x0,y0,z0,sigma_x,sigma_y,sigma_z)
    Returns the value of a gaussian at a 3D set of points for the given
    standard deviations with maximum normalized to 1. The Gaussian axes are
    assumed to be 90 degrees from each other.

    Note
    -----
    Be careful about the indexing used in meshgrid and the order in which
    you pass the x, y, z variables in.

    Parameters
    ----------
    xyz: tuple of ndarrays
        A tuple containing the 3D arrays of points (from meshgrid)
    x0, y0, z0: float
        The x, y, z centers of the Gaussian
    sigma_x, sigma_y, sigma_z: float
        The standard deviations of the Gaussian.

    Returns
    -------
    g3_norm: ndarray
        A flattened array for fitting.
    """
    x0 = float(x0)
    y0 = float(y0)
    z0 = float(z0)

    x = xyz[0]
    y = xyz[1]
    z = xyz[2]
    g3 = np.exp(
        -(
            (x - x0) ** 2 / (2 * sigma_x ** 2)
            + (y - y0) ** 2 / (2 * sigma_y ** 2)
            + (z - z0) ** 2 / (2 * sigma_z ** 2)
        )
    )
    g3_norm = g3 / np.max(g3.flatten())
    return g3_norm.ravel()
1,391
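# Hedged fitting sketch for gauss3D_FIT above, assuming only NumPy and SciPy.
# indexing='ij' keeps the axis order consistent with the meshgrid note above.
import numpy as np
from scipy.optimize import curve_fit

ax = np.linspace(-5, 5, 32)
x, y, z = np.meshgrid(ax, ax, ax, indexing="ij")
data = gauss3D_FIT((x, y, z), 0.5, -0.3, 0.0, 1.0, 1.5, 2.0)
popt, _ = curve_fit(gauss3D_FIT, (x, y, z), data, p0=(0, 0, 0, 1, 1, 1))
# popt should recover (0.5, -0.3, 0.0, 1.0, 1.5, 2.0) up to fit tolerance.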
async def tell(message: str) -> None: """Send a message to the user. Args: message: The message to send to the user. """ return await interaction_context().tell(message)
1,392
def PGetDim (inFFT): """ Get dimension of an FFT returns array of 7 elements * inFFT = Python Obit FFT """ ################################################################ # Checks if not PIsA(inFFT): raise TypeError("inFFT MUST be a Python Obit FFT") return Obit.FFTGetDim(inFFT.me) # end PGetDim
1,393
def score_detail(fpl_data):
    """
    Convert fpl_data into a Series indexed by a MultiIndex of
    (team, pos, player, opp, minutes).
    """
    l = []
    basic_index = ["player", "opp", "minutes"]
    for i in range(len(fpl_data["elements"])):
        ts = achived_from(fpl_data, i, True)
        name = (fpl_data["elements"][i]["first_name"] +
                fpl_data["elements"][i]["second_name"])
        if len(ts) == 0:
            continue
        ts = pd.concat([ts], keys=[name], names=basic_index)
        ele = pos_map(fpl_data)[fpl_data["elements"][i]['element_type']]
        ts = pd.concat([ts], keys=[ele], names=["pos"] + basic_index)
        team = team_map(fpl_data)[fpl_data["elements"][i]['team']]
        ts = pd.concat([ts], keys=[team], names=["team", "pos"] + basic_index)
        l.append(ts)
    return pd.concat(l)
1,394
def edition_view(measurement, workspace, exopy_qtbot):
    """Start plugins and add measurements before creating the edition view.

    """
    pl = measurement.plugin
    pl.edited_measurements.add(measurement)
    measurement.root_task.add_child_task(0, BreakTask(name='Test'))

    item = MeasurementEditorDockItem(workspace=workspace,
                                     measurement=measurement,
                                     name='test')

    return DockItemTestingWindow(widget=item)
1,395
def addPlayer(name, addr_text, address, ad): # pylint: disable=unused-argument # address is part of call back """Add the player name and mac address to players global variable and the name and rssi (if present) to on-screen list.""" rssi = ad.rssi if ad else None players.append((name, addr_text)) rps_display.addPlayer(name, rssi=rssi)
1,396
def menu_items():
    """ Add a menu item which allows users to specify their session directory
    """
    def change_session_folder():
        global session_dir
        path = str(QtGui.QFileDialog.getExistingDirectory(None,
                                        'Browse to new session folder -'))
        session_dir = path
        utils.setrootdir(path)
        writetolog("*" * 79 + "\n" + "*" * 79)
        writetolog(" output directory: " + session_dir)
        writetolog("*" * 79 + "\n" + "*" * 79)

    lst = []
    lst.append(("Change session folder", change_session_folder))
    return lst
1,397
def calculate_pair_energy_np(coordinates, i_particle, box_length, cutoff):
    """
    Calculates the interaction energy of one particle with all others in system.

    Parameters:
    ```````````
    coordinates : np.ndarray
        2D array of [x,y,z] coordinates for all particles in the system
    i_particle : int
        the particle row for which to calculate energy
    box_length : float
        the length of the simulation box
    cutoff : float
        the cutoff interaction length

    Returns:
    ````````
    e_total : float
        the pairwise energy between the i-th particle and other particles in system
    """
    particle = coordinates[i_particle][:]
    coordinates = np.delete(coordinates, i_particle, 0)
    dist = calculate_distance_np(particle, coordinates, box_length)
    # only pairs inside the cutoff contribute to the energy
    e_array = calculate_LJ_np(dist[dist < cutoff])
    e_total = e_array.sum()

    return e_total
1,398
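# Hedged usage sketch; calculate_distance_np and calculate_LJ_np are external
# helpers (minimum-image distances and the Lennard-Jones potential), so the
# call below only illustrates the intended shape of the inputs:
#
#   coords = np.random.uniform(0.0, 10.0, size=(500, 3))
#   e = calculate_pair_energy_np(coords, i_particle=0, box_length=10.0, cutoff=3.0)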
def inside_loop(iter): """ >>> inside_loop([1,2,3]) 3 >>> inside_loop([]) Traceback (most recent call last): ... UnboundLocalError: local variable 'i' referenced before assignment """ for i in iter: pass return i
1,399