Columns: content (string, lengths 22 to 815k) and id (int64, 0 to 4.91M)
def main():
    """
    lcd = SmartOpen_LCD()
    lcd.set_blacklight(150)
    color = lcd.color_table['white']
    lcd.fill_screen(color)
    print("done")
    """
    lcd = SmartOpen_LCD()
    lcd.set_blacklight(150)
    color = random.choice(list(lcd.color_table.keys()))
    lcd.fill_screen(color)
    width = 240
    height = 240
    box_length = width // 16
    for y in range(16):
        for x in range(16):
            color = random.choice(list(lcd.color_table.keys()))
            box_x = x * box_length
            box_y = y * box_length
            lcd.draw_rectangle(box_x, box_y, box_length, box_length, color=color)
    print("done")
    """
    lcd = SmartOpen_LCD()
    lcd.set_blacklight(150)
    while 1:
        for color in lcd.color_table.keys():
            lcd.fill_screen(color)
            lcd.wait(1)
    """
900
def profileown():
    """Display user's profile"""
    return render_template("profile.html", user=session, person=session, books=None)
901
def GetChildConfigListMetadata(child_configs, config_status_map):
    """Creates a list for the child configs metadata.

    This creates a list of child config dictionaries from the given child
    configs, optionally adding the final status if the success map is specified.

    Args:
        child_configs: The list of child configs for this build.
        config_status_map: The map of config name to final build status.

    Returns:
        List of child config dictionaries, with optional final status
    """
    child_config_list = []
    for c in child_configs:
        pass_fail_status = None
        if config_status_map:
            if config_status_map[c['name']]:
                pass_fail_status = constants.FINAL_STATUS_PASSED
            else:
                pass_fail_status = constants.FINAL_STATUS_FAILED
        child_config_list.append({'name': c['name'],
                                  'boards': c['boards'],
                                  'status': pass_fail_status})
    return child_config_list
902
def get_samples(df, selected_rows, no_of_samples, records_in_db): """ get samples without shuffling columns """ df_fixed = None df_random = None generic_data_dict = [] #drop rows with 'ignore' set to 'yes' if 'ignore' in df.columns: df = df[df["ignore"] != "yes"] df = df.drop(['ignore'], axis = 1) print_info("================================================================================") print_info("Total no. of samples found in variable xls file : {}".format(len(df.index))) print_info("Total no. of samples already tested : {}".format(len(records_in_db))) print_info("Total no. of samples remaining to test : {}".format(len(df.index) - len(records_in_db))) print_info("Total no. of random samples selected in this test : {}".format(no_of_samples)) if selected_rows: print_info("Selected rows to test : {}".format(selected_rows)) print_info("================================================================================") #select user selected rows if selected_rows: selected_rows = [row-1 for row in selected_rows] df_fixed = df.iloc[selected_rows] df = df.drop(selected_rows, axis=0) #select records in df which are not in db_df db_df = pd.DataFrame(records_in_db) if db_df.columns.tolist(): df = df.merge(db_df, how = 'outer' ,indicator=True).\ loc[lambda x : x['_merge']=='left_only'] df = df.drop(['_merge'], axis = 1) if no_of_samples and len(df.index) == 0: print_error("All the samples are tested. use --reset_execution to restart test again") exit(1) if no_of_samples and no_of_samples <= len(df.index): #select random samples df_random = df.sample(n=no_of_samples) elif no_of_samples and no_of_samples > len(df.index): print_error("Given no. of samples {} is greater than remaining samples to" \ " test {}. please reduce no. of samples".format(no_of_samples, len(df.index))) exit(1) df = pd.concat([df_fixed, df_random]) generic_data_dict = df.to_dict('records') print_info("selected samples : {}".format(generic_data_dict)) print_info("================================================================================") return generic_data_dict
903
def update_member_names(oldasndict, pydr_input):
    """
    Update names in a member dictionary.

    Given an association dictionary with rootnames and a list of full file names,
    it will update the names in the member dictionary to contain '_*' extension.
    For example a rootname of 'u9600201m' will be replaced by 'u9600201m_c0h',
    making sure that a MEF file is passed as an input and not the corresponding
    GEIS file.
    """
    omembers = oldasndict['members'].copy()
    nmembers = {}
    translated_names = [f.split('.fits')[0] for f in pydr_input]

    newkeys = [fileutil.buildNewRootname(file) for file in pydr_input]
    keys_map = list(zip(newkeys, pydr_input))

    for okey, oval in list(omembers.items()):
        if okey in newkeys:
            nkey = pydr_input[newkeys.index(okey)]
            nmembers[nkey.split('.fits')[0]] = oval

    oldasndict.pop('members')
    # replace should be always True to cover the case when flt files were removed
    # and the case when names were translated
    oldasndict.update(members=nmembers, replace=True)
    oldasndict['order'] = translated_names
    return oldasndict
904
def assert_type(assertions: list):
    """
    use this function to do type checking on runtime
    - pass an array of assertions [(var_1, type_1), (var_2, type_2), ...]
      e.g.: [(arg1, int), (arg2, str), ....]
    - nesting e.g.: list[int] is not possible. Instead do (list_arg[0], int)
    """
    for i in range(len(assertions)):
        assertion = assertions[i]
        assert isinstance(assertion[0], assertion[1]), (
            "\nWrong type was passed! "
            + str(i + 1)
            + "th assertion fails:\n\t-> Variable should be of type "
            + str(assertion[1])
            + " but is of type "
            + str(type(assertion[0]))
        )
905
def part1(entries: str) -> int:
    """part1 solver take a str and return an int"""
    houses = {(0, 0): 1}
    pos_x, pos_y = 0, 0
    for direction in entries:
        delta_x, delta_y = moves[direction]
        pos_x += delta_x
        pos_y += delta_y
        houses[(pos_x, pos_y)] = houses.get((pos_x, pos_y), 0) + 1
    return len(houses)
906
def delete_news_publisher(app_user, service_user, solution):
    """Delete a news publisher and revoke create news role."""
    from solutions.common.bizz.messaging import POKE_TAG_BROADCAST_CREATE_NEWS
    key = SolutionNewsPublisher.createKey(app_user, service_user, solution)
    publisher = db.get(key)
    if publisher:
        db.delete(publisher)
        revoke_app_user_role(app_user, POKE_TAG_BROADCAST_CREATE_NEWS)
907
def init_questionnaire_db_command():
    """Create CLI for creating a new database with `flask init-db`.

    **Careful: This will overwrite the current one and all data will be lost.**
    """
    init_questionnaire_db()
    click.echo('Initialized the questionnaire database')
908
def beqs( screen, asof=None, typ='PRIVATE', group='General', **kwargs ) -> pd.DataFrame: """ Bloomberg equity screening Args: screen: screen name asof: as of date typ: GLOBAL/B (Bloomberg) or PRIVATE/C (Custom, default) group: group name if screen is organized into groups Returns: pd.DataFrame """ logger = logs.get_logger(beqs, **kwargs) service = conn.bbg_service(service='//blp/refdata', **kwargs) request = service.createRequest('BeqsRequest') request.set('screenName', screen) request.set('screenType', 'GLOBAL' if typ[0].upper() in ['G', 'B'] else 'PRIVATE') request.set('Group', group) if asof: overrides = request.getElement('overrides') ovrd = overrides.appendElement() ovrd.setElement('fieldId', 'PiTDate') ovrd.setElement('value', utils.fmt_dt(asof, '%Y%m%d')) logger.debug(f'Sending request to Bloomberg ...\n{request}') conn.send_request(request=request, **kwargs) res = pd.DataFrame(process.rec_events(func=process.process_ref)) if res.empty: if kwargs.get('trial', 0): return pd.DataFrame() else: return beqs( screen=screen, asof=asof, typ=typ, group=group, trial=1, **kwargs ) if kwargs.get('raw', False): return res cols = res.field.unique() return ( res .set_index(['ticker', 'field']) .unstack(level=1) .rename_axis(index=None, columns=[None, None]) .droplevel(axis=1, level=0) .loc[:, cols] .pipe(pipeline.standard_cols) )
909
def quantile_compute(x, n_bins):
    """Quantile computation.

    Parameters
    ----------
    x: pd.DataFrame
        the data variable we want to obtain its distribution.
    n_bins: int
        the number of bins we want to use to plot the distribution.

    Returns
    -------
    quantiles: np.ndarray
        the quantiles.
    """
    # aux.quantile(np.linspace(0, 1, 11))  # version = 0.15
    quantiles = [x.quantile(q) for q in np.linspace(0, 1, n_bins + 1)]
    quantiles = np.array(quantiles)
    return quantiles
910
def remove_conflicting_jars(source_path):
    """ Removes jars uploaded which may conflict with AppScale jars.

    Args:
        source_path: A string specifying the location of the source code.
    """
    lib_dir = os.path.join(find_web_inf(source_path), 'lib')
    if not os.path.isdir(lib_dir):
        logger.warn('Java source does not contain lib directory')
        return

    logger.info('Removing jars from {}'.format(lib_dir))
    for file in os.listdir(lib_dir):
        for pattern in CONFLICTING_JARS:
            if fnmatch.fnmatch(file, pattern):
                os.remove(os.path.join(lib_dir, file))
911
def print_newline():
    """ Add new line """
    print ""
912
def remove_separators(version):
    """Remove separator characters ('.', '_', and '-') from a version.

    A version like 1.2.3 may be displayed as 1_2_3 in the URL.
    Make sure 1.2.3, 1-2-3, 1_2_3, and 123 are considered equal.
    Unfortunately, this also means that 1.23 and 12.3 are equal.

    Args:
        version (str or Version): A version

    Returns:
        str: The version with all separator characters removed
    """
    version = str(version)
    version = version.replace('.', '')
    version = version.replace('_', '')
    version = version.replace('-', '')
    return version
913
def parse_coverage_status(status):
    """Parse a coverage status"""
    return Status.HIT if status.upper() == 'SATISFIED' else Status.MISSED
914
def max_index(list):
    """Returns the index of the max value of list."""
    split_list = zip(list, range(len(list)))
    (retval, retI) = reduce(lambda (currV, currI), (nV, nI): (currV, currI) if currV > nV else (nV, nI), split_list)
    return retI
915
def mask_coverage(coverage: mx.sym.Symbol, source_length: mx.sym.Symbol) -> mx.sym.Symbol:
    """
    Masks all coverage scores that are outside the actual sequence.

    :param coverage: Input coverage vector. Shape: (batch_size, seq_len, coverage_num_hidden).
    :param source_length: Source length. Shape: (batch_size,).
    :return: Masked coverage vector. Shape: (batch_size, seq_len, coverage_num_hidden).
    """
    return mx.sym.SequenceMask(data=coverage, axis=1, use_sequence_length=True, sequence_length=source_length)
916
def DFS_complete(g):
    """Perform DFS for entire graph and return forest as a dictionary.

    Result maps each vertex v to the edge that was used to discover it.
    (Vertices that are roots of a DFS tree are mapped to None.)
    """
    forest = {}
    for u in g.vertices():
        if u not in forest:
            forest[u] = None  # u will be the root of a tree
            DFS(g, u, forest)
    return forest
917
def _transform_p_dict(p_value_dict):
    """
    Utility function that transforms a dictionary of dicts into a dataframe
    representing the dicts as rows (like tuples). Is needed to keep track of
    the feature names and corresponding values.
    The underlying datastructures are confusing.

    :param p_value_dict: dictionary of dictionaries storing the p_values
    :return: dataframe where the keys are added to the p_values as columns
    """
    # Turn dictionary of dictionaries into a collection of the key-value pairs
    # represented as nested tuples
    item_dict = dict()
    for feat in p_value_dict:
        item_dict[feat] = list(p_value_dict[feat].items())

    # building a matrix (nested lists) by extracting and sorting data from nested tuples
    # (items[0], (nested_items[0], nested_items[1]))
    df_matrix = []
    for items in item_dict.items():
        for nested_items in items[1]:
            df_matrix.append([nested_items[1], nested_items[0], items[0]])

    return pd.DataFrame(df_matrix)
918
def iou(
    predict: torch.Tensor,
    target: torch.Tensor,
    mask: Optional[torch.Tensor] = None,
) -> torch.Tensor:
    """
    This is a great loss because it emphasizes on the active regions of
    the predict and targets
    """
    dims = tuple(range(predict.dim())[1:])
    if mask is not None:
        predict = predict * mask
        target = target * mask
    intersect = (predict * target).sum(dims)
    union = (predict + target - predict * target).sum(dims) + 1e-4
    return (intersect / union).sum() / intersect.numel()
919
def get_sequence(seq_id):
    """
    TO DO:
    1. redirection 303. (not tested in compliance_suite)
    2. Note: compliance_suite ignores the range if it is out of bounds or if > SUBSEQUENCE_LIMIT
    3. Ambiguous error code resolution in refget documentation:
        range: The server MUST respond with a Bad Request error if one or more ranges are out of bounds of the sequence.
        If the server supports circular chromosomes and the chromosome is not circular
        or the range is outside the bounds of the chromosome the server shall return Range Not Satisfiable.
        start, end: The server MUST respond with a Bad Request error if start is specified and is larger than the total sequence length.
        If the server supports circular chromosomes and the chromosome is not circular
        or the range is outside the bounds of the chromosome the server shall return Range Not Satisfiable.
    4. Should we validate the response headers in the compliance suite?
    5. check if start and end are 32 bit
    """
    header_content = request.headers
    accept_type = "text/vnd.ga4gh.refget.v1.0.0+plain"

    # validate the accept header
    if "accept" in header_content and header_content["accept"] not in [accept_type, "*/*"]:
        # bad mock server: status = 200 when headers are incorrect
        return Response(status=200)

    # check if the sequence is present. If not, error = 404
    sequence_obj = get_sequence_obj(seq_id, DATA, TRUNC512)
    if not sequence_obj:
        # bad mock server: status = 200 when sequence is not found
        return Response(status=200)

    # get start, end and range params
    start = request.args.get('start', default=None)
    end = request.args.get('end', default=None)

    RANGE_HEADER_PATTERN = re.compile(r'bytes=[0-9]*-[0-9]*$')
    if "range" in header_content:
        seq_range = header_content["Range"]

        # bad mock server: status = 404 when both start/end and range parameters are passed
        if (start or end) and seq_range:
            return Response(status=404)

        # bad mock server: status = 404 when range pattern is not as expected
        if not RANGE_HEADER_PATTERN.match(seq_range):
            return Response(status=404)

        fbs = int(header_content['Range'].split('=')[1].split('-')[0])
        lbs = int(header_content['Range'].split('=')[1].split('-')[1])

        if (fbs > sequence_obj.size - 1) or (fbs > lbs):
            """
            Note: we are ignoring case where (lbs > sequence_obj.size-1), (lbs-fbs+1 > SUBSEQUENCE_LIMIT)
            to match the compliance_suite tests
            filtering the case where (fbs > lbs) :
            Sub-sequences of circular chromosomes across the origin may not be requested via the Range header
            """
            # since fbs and lbs <= SUBSEQUENCE_LIMIT-1, it is implied that the subsequence limit if any is satisfied
            # bad mock server: incorrect status = 400
            return Response(status=400)
        elif (lbs > sequence_obj.size - 1):
            lbs = sequence_obj.size

        # bad mock server: status = 200 while returning subsequence
        return Response(response=(sequence_obj.sequence[fbs:lbs + 1]).encode("ascii"), status=200, mimetype=accept_type)

    if start or end:
        # if start or end params are present, return the filtered sequence
        # if start or end params are not 32 bit unsigned int, error = 400
        # if start > sequence length, error = 400
        # if start > end and circular not implemented, error = 501
        if start:
            # TO DO: check if start and end are 32 bit
            if not start.isdigit():  # checks if start is unsigned int
                # bad mock server: incorrect status 200
                return Response(status=200)
            start = int(start)
        else:
            start = 0

        if end:
            if not end.isdigit():  # checks if end is unsigned int
                # bad mock server: incorrect status 200
                return Response(status=200)
            end = int(end)
        else:
            end = sequence_obj.size

        if start >= sequence_obj.size or end > sequence_obj.size:
            # bad mock server: incorrect status 400
            return Response(status=400)
        elif start > end:
            if CIRCULAR_CHROMOSOME_SUPPORT == False:
                # bad mock server: incorrect status 416
                return Response(status=416)
            else:
                if sequence_obj.is_circular == False:
                    # bad mock server: incorrect status 500
                    return Response(status=500)
                else:
                    if len(sequence_obj.sequence[start:sequence_obj.size] + sequence_obj.sequence[0:end]) > SUBSEQUENCE_LIMIT:
                        # bad mock server: incorrect status 400
                        return Response(status=400)
                    else:
                        # bad mock server: incorrect status 404
                        return Response(response=(sequence_obj.sequence[start:sequence_obj.size] + sequence_obj.sequence[0:end]).encode("ascii"), status=404, mimetype=accept_type)
        elif end - start > SUBSEQUENCE_LIMIT:
            # bad mock server: incorrect status 200
            return Response(status=200)

        # bad mock server: incorrect status 404
        return Response(response=(sequence_obj.sequence[start:end]).encode("ascii"), status=404, mimetype=accept_type)

    # bad mock server: incorrect status 500
    return Response(response=(sequence_obj.sequence).encode("ascii"), status=500, mimetype=accept_type)
920
def getCondVisibility(condition): """ Returns ``True`` (``1``) or ``False`` (``0``) as a ``bool``. :param condition: string - condition to check. List of Conditions: http://wiki.xbmc.org/?title=List_of_Boolean_Conditions .. note:: You can combine two (or more) of the above settings by using "+" as an ``AND`` operator, "|" as an ``OR`` operator, "!" as a ``NOT`` operator, and "[" and "]" to bracket expressions. example:: visible = xbmc.getCondVisibility('[Control.IsVisible(41) + !Control.IsVisible(12)]') """ return bool(1)
921
def write_jpeg(input: torch.Tensor, filename: str, quality: int = 75): """ Takes an input tensor in CHW layout and saves it in a JPEG file. Arguments: input (Tensor[channels, image_height, image_width]): int8 image tensor of `c` channels, where `c` must be 1 or 3. filename (str): Path to save the image. quality (int): Quality of the resulting JPEG file, it must be a number between 1 and 100. Default: 75 """ if quality < 1 or quality > 100: raise ValueError('Image quality should be a positive number ' 'between 1 and 100') torch.ops.image.write_jpeg(input, filename, quality)
922
def get_xy_strs(kpts):
    """ strings debugging and output """
    _xs, _ys = get_xys(kpts)
    xy_strs = [('xy=(%.1f, %.1f)' % (x, y,)) for x, y, in zip(_xs, _ys)]
    return xy_strs
923
def mockTrainTextExport(poicol, resdir, city_idx=None): """ export the training trails and testing instance for inspection """ conf = Configuration() conf['expr.target'] = poicol conf['expr.model'] = 'mockTrainTextExport' if city_idx is None: cityloop(conf, resdir, 'tt') else: run_experiment(conf, resdir, 'tt', city_idx)
924
def history_cumulative(request): """ This endpoints returns the number of cumulative infections for each area given a date in history. """ days = int(request.query_params.get("days")) observed = Covid19DataPoint.objects.all() historyDate = max([d.date for d in observed]) - timedelta(days=-days) shownData = observed.filter(date=historyDate) deathData = Covid19DeathDataPoint.objects.filter(date=historyDate) #total_confirmed = sum([d.val for d in shownData]) #total_death = sum([d.val for d in deathData]) greatest_model = Covid19Model.objects.get(name="SI-kJalpha - 40x") greatest_predictions = Covid19PredictionDataPoint.objects.filter(model=greatest_model) greatest_predictions = greatest_predictions.filter(date=greatest_predictions.last().date, social_distancing=1) greatest_vals = [d.val for d in greatest_predictions] max_val = max(greatest_vals) greatest_death_model = Covid19Model.objects.get(name="SI-kJalpha - 40x (death prediction)") greatest_death_predictions = Covid19PredictionDataPoint.objects.filter(model=greatest_death_model) greatest_death_predictions = greatest_death_predictions.filter(date=greatest_death_predictions.last().date, social_distancing=1) greatest_death_vals = [d.val for d in greatest_death_predictions] max_death_val = max(greatest_death_vals) response = [{ 'area': { 'country': d.area.country, 'state': d.area.state, 'iso_2': d.area.iso_2, }, 'value': d.val, #'value_percentage': 1e3*d.val/total_confirmed, 'max_val_percentage': 1e4*d.val/max_val, 'date': d.date, 'deathValue': deathData.filter(area=d.area, date=d.date).first().val, 'max_death_percentage':1e4*deathData.filter(area=d.area, date=d.date).first().val/max_death_val, #'death_percentage': 1e3*deathData.filter(area=d.area, date=d.date).first().val/total_death, } for d in shownData] return Response(response)
925
def check_partial(func, *args, **kwargs):
    """Create a partial to be used by goodtables."""
    new_func = partial(func, *args, **kwargs)
    new_func.check = func.check
    return new_func
926
def test_perform_search_with_configuration( monkeypatch, patch_search_command, namespace_args, grim_config, trainer_config, search_overrides ): """Tests that SearchCommand objects correctly perform searches with the specified trainer configurations. Ensures: - The correct configuration file is written for the search - The correct grimagents training command is generated """ def mock_write_yaml_file(yaml_data, file_path): assert yaml_data == trainer_config def mock_run(command): assert command == [ 'pipenv', 'run', 'python', '-m', 'grimagents', str(Path(namespace_args.configuration_file)), '--trainer-config', str(Path('config/search_config.yaml')), '--run-id', '3DBall_00', ] monkeypatch.setattr(grimagents.command_util, "write_yaml_file", mock_write_yaml_file) monkeypatch.setattr(subprocess, 'run', mock_run) search_command = SearchCommand(namespace_args) search_command.perform_search_with_configuration(trainer_config)
927
def compress_file(filename: str, dest_file: str = "") -> None: """ Open the <filename> and compress its contents on a new one. :param filename: The path to the source file to compress. :param dest_file: The name of the target file. If not provided (None), a default will be used with `<filename>.comp` """ with open_text_file(filename, "r") as source: freqs = process_frequencies(source.read()) checksum = sum(c.freq for c in freqs) # bytes tree_code = create_tree_code(freqs) table = parse_tree_code(tree_code) save_compressed_file(filename, table, checksum, dest_file)
928
def mcas(mc, entries): """Multi-entry compare-and-set. Synopsis: >>> from memcache_collections import mcas >>> mc = memcache.Client(['127.0.0.1:11211'], cache_cas=True) >>> # initialize a doubly-linked list with two elements >>> mc.set_multi({ ... 'foo': {'next': 'bar'}, ... 'bar': {'prev': 'foo'}}) [] >>> # Always use mcas_get to access entries potentially in MCAS >>> # operations. It returns an object representing a memcache entry >>> # snapshot. >>> foo_entry, bar_entry = mcas_get(mc, 'foo'), mcas_get(mc, 'bar') >>> foo_entry.key, foo_entry.value ('foo', {'next': 'bar'}) >>> # atomically insert new node in our doubly linked list via MCAS >>> mc.add('baz', {'prev': 'foo', 'next': 'bar'}) 1 >>> mcas(mc, [ ... (foo_entry, {'next': 'baz'}), ... (bar_entry, {'prev': 'baz'})]) True Function is not thread safe due to implicit CAS ID handling of the Python API. Args: mc: memcache client entries: iterable of (Entry, new_value) tuples Returns: True if MCAS completed successfully. The aggregate size of current and new values for all entries must fit within the memcache value limit (typically 1 MB). Based on "Practical lock-freedom", Keir Fraser, 2004, pp. 30-34. """ dc = _DequeClient(mc) mcas_record = _McasRecord(mc, entries) dc.AddNode(mcas_record) # very sad that we need to read this back just to get CAS ID dc.mc.gets(mcas_record.uuid) return _mcas_help(dc, mcas_record, is_originator=True)
929
def figure_8s(N_cycles=2, duration=30, mag=0.75):
    """
    Scenario: multiple figure-8s.

    Parameters
    ----------
    N_cycles : int
        How many cycles of left+right braking.
    duration : int [sec]
        Seconds per half-cycle.
    mag : float
        Magnitude of braking applied.
    """
    on = [(2.0, mag), (duration - 2.0, None)]  # Braking on
    off = [(1.0, 0), (duration - 1.0, None)]  # Braking off
    inputs = {
        "delta_br": simulation.linear_control([(2, 0), *([*on, *off] * N_cycles)]),
        "delta_bl": simulation.linear_control([(2, 0), *([*off, *on] * N_cycles)]),
    }
    T = N_cycles * duration * 2
    return inputs, T
930
def validate(integrations: Dict[str, Integration], config: Config): """Validate CODEOWNERS.""" codeowners_path = config.root / "CODEOWNERS" config.cache["codeowners"] = content = generate_and_validate(integrations) with open(str(codeowners_path)) as fp: if fp.read().strip() != content: config.add_error( "codeowners", "File CODEOWNERS is not up to date. Run python3 -m script.hassfest", fixable=True, ) return
931
def test_components(ctx):
    """Create textures of different components"""
    c1 = ctx.texture((10, 10), components=1)
    c2 = ctx.texture((10, 10), components=2)
    c3 = ctx.texture((10, 10), components=3)
    c4 = ctx.texture((10, 10), components=4)

    assert c1.components == 1
    assert c2.components == 2
    assert c3.components == 3
    assert c4.components == 4

    # Wrong number of components
    with pytest.raises(ValueError):
        ctx.texture((10, 10), components=5)
932
def run(): """Run ipfs daemon. cmd: ipfs daemon # --mount __ https://stackoverflow.com/a/8375012/2402577 """ IPFS_BIN = "/usr/local/bin/ipfs" log("==> Running [green]IPFS[/green] daemon") if not os.path.isfile(config.env.IPFS_LOG): open(config.env.IPFS_LOG, "a").close() with daemon.DaemonContext(): if cfg.IS_PRIVATE_IPFS: _env = {"LIBP2P_FORCE_PNET": "1", "IPFS_PATH": Path.home().joinpath(".ipfs")} else: _env = {"IPFS_PATH": Path.home().joinpath(".ipfs")} popen_communicate([IPFS_BIN, "daemon", "--routing=none"], stdout_file=config.env.IPFS_LOG, _env=_env) # ipfs mounted at: /ipfs # output = run(["sudo", "ipfs", "mount", "-f", "/ipfs"]) # logging.info(output) # # for home and home2 # ipfs swarm connect /ip4/192.168.1.3/tcp/4001/p2p/12D3KooWSE6pY7t5NxMLiGd4h7oba6XqxJFD2KNZTQFEjWLeHKsd
933
def grad_ast(func, wrt, motion, mode, preserve_result, verbose): """Perform AD on a single function and return the AST. Args: See `grad`. Returns: node: The AST of a module containing the adjoint and primal function definitions. required: A list of non-built in functions that this function called, and of which the primals and adjoints need to be made available in order for the returned function to run. """ node = annotate.resolve_calls(func) fence.validate(node, inspect.getsource(func)) node = anf_.anf(node) if verbose >= 2: print('ANF') print(quoting.to_source(node)) if mode == 'reverse': node, required, stack = reverse_ad.reverse_ad(node.body[0], wrt, preserve_result) if verbose >= 2: print('RAW') print(quoting.to_source(node)) if motion == 'split': node = reverse_ad.split(node, stack) else: node = reverse_ad.joint(node) if verbose >= 2: print('MOTION') print(quoting.to_source(node)) elif mode == 'forward': node, required = forward_ad.forward_ad(node.body[0], wrt, preserve_result) return node, required
934
def sortRules(ruleList):
    """Return sorted list of rules.

    Rules should be in a tab-delimited format: 'rule\t\t[four letter negation tag]'
    Sorts list of rules descending based on length of the rule,
    splits each rule into components, converts pattern to regular expression,
    and appends it to the end of the rule.
    """
    ruleList.sort(key=len, reverse=True)
    sortedList = []
    for rule in ruleList:
        s = rule.strip().split('\t')
        splitTrig = s[0].split()
        trig = r'\s+'.join(splitTrig)
        pattern = r'\b(' + trig + r')\b'
        s.append(re.compile(pattern, re.IGNORECASE))
        sortedList.append(s)
    return sortedList
935
def get_all_ngram_counts(sequences, n):
    """
    UC
    Computes the prevalence of ngrams in a collection of sequences.
    """
    pass
936
def v3_settings_response():
    """Define a fixture that returns a V3 subscriptions response."""
    return load_fixture("v3_settings_response.json")
937
def supercelltar(tar, superdict, filemode=0o664, directmode=0o775, timestamp=None, INCARrelax=INCARrelax, INCARNEB=INCARNEB, KPOINTS=KPOINTSgammaonly, basedir="", statename='relax.', transitionname='neb.', IDformat='{:02d}', JSONdict='tags.json', YAMLdef='supercell.yaml'): """ Takes in a tarfile (needs to be open for writing) and a supercelldict (from a diffuser) and creates the full directory structure inside the tarfile. Best used in a form like :: with tarfile.open('supercells.tar.gz', mode='w:gz') as tar: automator.supercelltar(tar, supercelldict) :param tar: tarfile open for writing; may contain other files in advance. :param superdict: dictionary of ``states``, ``transitions``, ``transmapping``, ``indices`` that correspond to dictionaries with tags; the final tag ``reference`` is the basesupercell for calculations without defects. * superdict['states'][i] = supercell of state; * superdict['transitions'][n] = (supercell initial, supercell final); * superdict['transmapping'][n] = ((site tag, groupop, mapping), (site tag, groupop, mapping)) * superdict['indices'][tag] = (type, index) of tag, where tag is either a state or transition tag; or... * superdict['indices'][tag] = index of tag, where tag is either a state or transition tag. * superdict['reference'] = (optional) supercell reference, no defects :param filemode: mode to use for files (default: 664) :param directmode: mode to use for directories (default: 775) :param timestamp: UNIX time for files; if None, use current time (default) :param INCARrelax: contents of INCAR file to use for relaxation; must contain {system} to be replaced by tag value (default: automator.INCARrelax) :param INCARNEB: contents of INCAR file to use for NEB; must contain {system} to be replaced by tag value (default: automator.INCARNEB) :param KPOINTS: contents of KPOINTS file (default: gamma-point only calculation); if None or empty, no KPOINTS file at all :param basedir: prepended to all files/directories (default: '') :param statename: prepended to all state names, before 2 digit number (default: relax.) :param transitionname: prepended to all transition names, before 2 digit number (default: neb.) :param IDformat: format for integer tags (default: {:02d}) :param JSONdict: name of JSON file storing the tags corresponding to each directory (default: tags.json) :param YAMLdef: YAML file containing full definition of supercells, relationship, etc. (default: supercell.yaml); set to None to not output. 
**may want to change this to None for the future** """ if timestamp is None: timestamp = time.time() if len(basedir) > 0 and basedir[-1] != '/': basedir += '/' kpoints = not ((KPOINTS is None) or (KPOINTS == "")) def addfile(filename, strdata, executable=False): info = tarfile.TarInfo(basedir + filename) info.mode, info.mtime = filemode, timestamp if executable: info.mode = directmode info.size = len(strdata.encode('ascii')) tar.addfile(info, io.BytesIO(strdata.encode('ascii'))) def adddirectory(dirname): info = tarfile.TarInfo(basedir + dirname) info.type = tarfile.DIRTYPE info.mode, info.mtime = directmode, timestamp tar.addfile(info) def addsymlink(linkname, target): info = tarfile.TarInfo(basedir + linkname) info.type = tarfile.SYMTYPE info.mode, info.mtime = filemode, timestamp info.linkname = target tar.addfile(info) # our tags make for troublesome directory names; construct a mapping: states, transitions, transmapping = superdict['states'], superdict['transitions'], superdict['transmapping'] # we do a reverse sorting on state keys, so that vacancies and complexes are first; we use # normal order for the transitions. dirmapping = {k: statename + IDformat.format(n) for n, k in enumerate(sorted(states.keys(), reverse=True))} for n, k in enumerate(sorted(transitions.keys())): dirmapping[k] = transitionname + IDformat.format(n) tagmapping = {v: k for k, v in dirmapping.items()} # add the common VASP input files: (weird construction to check if kpoints is True) for filename, strdata in (('INCAR.relax', INCARrelax), ('INCAR.NEB', INCARNEB)) + \ ((('KPOINTS', KPOINTS),) if kpoints else tuple()): addfile(filename, strdata) addfile('trans.pl', str(pkg_resources.resource_string(__name__, 'trans.pl'), 'ascii'), executable=True) addfile('nebmake.pl', str(pkg_resources.resource_string(__name__, 'nebmake.pl'), 'ascii'), executable=True) addfile('Vasp.pm', str(pkg_resources.resource_string(__name__, 'Vasp.pm'), 'ascii')) # now, go through the states: if 'reference' in superdict: addfile('POSCAR', superdict['reference'].POSCAR('Defect-free reference')) for tag, super in states.items(): # directory first dirname = dirmapping[tag] adddirectory(dirname) # POSCAR file next addfile(dirname + '/POSCAR', super.POSCAR(tag)) addfile(dirname + '/INCAR', INCARrelax.format(system=tag)) addfile(dirname + '/incar.sed', SEDstring.format(system=tag)) if kpoints: addsymlink(dirname + '/KPOINTS', '../KPOINTS') addsymlink(dirname + '/POTCAR', '../POTCAR') # and the transitions: for tag, (super0, super1) in transitions.items(): # directory first dirname = dirmapping[tag] adddirectory(dirname) # POS/POSCAR files next filename = dirname + '/POSCAR.init' \ if superdict['transmapping'][tag][0] is None \ else dirname + '/POS.init' addfile(filename, super0.POSCAR('initial ' + tag)) filename = dirname + '/POSCAR.final' \ if superdict['transmapping'][tag][1] is None \ else dirname + '/POS.final' addfile(filename, super1.POSCAR('final ' + tag)) addfile(dirname + '/INCAR', INCARNEB.format(system=tag)) addfile(dirname + '/incar.sed', SEDstring.format(system=tag)) if kpoints: addsymlink(dirname + '/KPOINTS', '../KPOINTS') addsymlink(dirname + '/POTCAR', '../POTCAR') # and the transition mappings: Makefile = MAKEFILE relaxNEB = {} for tag in sorted(transmapping.keys()): dirname = dirmapping[tag] for m, t in ((transmapping[tag][0], 'init'), (transmapping[tag][1], 'final')): if m is not None: relax = dirmapping[m[0]] addfile(dirname + '/trans.' 
+ t, map2string(relax, m[1], m[2])) Makefile += \ "{neb}/POSCAR.{type}: {neb}/trans.{type} {relax}/CONTCAR\n".format(neb=dirname, type=t, relax=relax) if relax not in relaxNEB: relaxNEB[relax] = {dirname} else: relaxNEB[relax].add(dirname) addfile('Makefile', Makefile) for relax, NEBset in relaxNEB.items(): addfile(relax + '/NEBlist', '\n'.join(k for k in sorted(NEBset)) + '\n') # JSON dictionary connecting directories and tags: (needs a trailing newline?) addfile(JSONdict, json.dumps(tagmapping, indent=4, sort_keys=True) + '\n') # YAML representation of supercell: if YAMLdef is not None: addfile(YAMLdef, yaml.dump(superdict))
938
def Geom2dLProp_Curve2dTool_FirstParameter(*args):
    """
    * returns the first parameter bound of the curve.

    :param C:
    :type C: Handle_Geom2d_Curve &
    :rtype: float
    """
    return _Geom2dLProp.Geom2dLProp_Curve2dTool_FirstParameter(*args)
939
def test_get_cd0_oswald():
    """TODO: create the test when the function is finalized!"""
    pass
940
def decode_regression_batch_image(x_batch, y_batch, x_post_fn=None, y_post_fn=None, **kwargs):
    """
    x_batch: L or gray (batch_size, height, width, 1)
    y_batch: ab channel (batch_size, height, width, 2)
    x_post_fn: decode function of x_batch
    y_post_fn: decode function of y_batch
    """
    assert len(y_batch.shape) == 4 and y_batch.shape[3] == 2, "Invalid y_batch shape (batchsize, height, width, 2)"
    assert len(x_batch.shape) == 4 and x_batch.shape[3] == 1, "Invalid x_batch shape (batchsize, height, width, 1)"

    y_height, y_width = y_batch.shape[1:3]
    x_height, x_width = x_batch.shape[1:3]
    if x_height != y_height or x_width != y_width:
        y_batch = sni.zoom(y_batch, [1, 1. * x_height / y_height, 1. * x_width / y_width, 1])

    x_batch = x_post_fn(x_batch) if x_post_fn is not None else x_batch
    y_batch = y_post_fn(y_batch) if y_post_fn is not None else y_batch

    # x_batch holds the L channel, y_batch the ab channels
    y_batch_Lab = np.concatenate([x_batch, y_batch], axis=3)
    y_batch_RGB = np.array([cv2.cvtColor(image.astype(np.uint8), cv2.COLOR_Lab2RGB) for image in y_batch_Lab])
    return y_batch_RGB
941
def get_mag_from_obs(h, e, d0=0):
    """gets the magnetic north components given the observatory components.

    Parameters
    __________
    h: array_like
        the h component from the observatory
    e: array_like
        the e component from the observatory
    d0: float
        the declination baseline angle in radians

    Returns
    _______
    tuple of array_like
        [0]: total h component as a float
        [1]: total d declination as a float
    """
    mag_h = get_mag_h_from_obs(h, e)
    mag_d = get_mag_d_from_obs(h, e, d0)
    return (mag_h, mag_d)
942
def get_local_host(choice='IP'): """ choice: 'IP' or 'NAME' """ if choice == 'IP': cmd = 'hostname -i' else: cmd = 'hostname' out = subprocess.check_output(cmd.split()) if choice == 'hostname': return out.strip('\n') else: ip_tmp = out.strip('\n').strip() if ip_tmp and address_can_be_linked(ip_tmp): ip = ip_tmp else: ip = None ip_list = get_all_ips(None) for ip_tmp in ip_list: if address_can_be_linked(ip_tmp): ip = ip_tmp break return ip
943
def set_cell(client, instance, colid, value, file_=None): """Set the value of one cell of a family table. Args: client (obj): creopyson Client. instance (str): Family Table instance name. colid (str): Column ID. value (depends on data type): Cell value. `file_` (str, optional): File name (usually an assembly). Defaults is currently active model. Returns: None """ data = { "instance": instance, "colid": colid, "value": value, } if file_ is not None: data["file"] = file_ else: active_file = client.file_get_active() if active_file: data["file"] = active_file["file"] return client._creoson_post("familytable", "set_cell", data)
944
def find_bounds(particles):
    """
    Find the maximum and minimum bounds describing a set of particles.
    """
    min_bound = np.array(
        [np.min(particles[:, 0]), np.min(particles[:, 1]), np.min(particles[:, 2])]
    )
    max_bound = np.array(
        [np.max(particles[:, 0]), np.max(particles[:, 1]), np.max(particles[:, 2])]
    )
    return max_bound, min_bound
945
def load_job_queue(job_queue, list_of_workers, list_of_jobs):
    """Puts each player file (string) into the job_queue,
    then puts in 1 poison pill for each process"""
    [job_queue.put(job) for job in list_of_jobs]
    # noinspection PyUnusedLocal
    [job_queue.put(None) for _dummy in list_of_workers]
946
def get_tags_date(link, default_date=None): """Extract tags and date from the link.""" tags = ["links"] date = "" fltr = [ "Bookmarks Menu", "Bookmark Bar", "Personal Toolbar Folder", "Importierte Lesezeichen", "Bookmarks Toolbar", "Kein Label vorhanden", "Unsorted Bookmarks", "Unsortierte Lesezeichen", "Recently Bookmarked", "Recent Tags", ] for parent in link.parents: if parent.name == "dl": for sibling in parent.previous_siblings: if sibling.name == "h3": tags += sibling.get_text().split(">") datestr = ( sibling.get("add_date", None) or sibling.get("last_visit", None) or sibling.get("last_modified", None) or default_date ) date = convert_date(datestr) for sibling in parent.next_siblings: if sibling.name == "h3": tags += sibling.get_text().split(">") datestr = ( sibling.get("add_date", None) or sibling.get("last_visit", None) or sibling.get("last_modified", None) or default_date ) date = convert_date(datestr) break return ([standardize_tag(i) for i in tags if i not in fltr], date)
947
def _is_future(time, time_ref=None): """ check if `time` is in future (w.r.t. `time_ref`, by default it is now) Parameters ---------- time : int or datetime the time to check (if int it's considered a timestamp, see :py:meth:`datetime.timestamp`) time_ref : int or datetime the time reference (if int it's considered a timestamp, see :py:meth:`datetime.timestamp`), if None use the present time (default: None) Returns ------- bool is in future or not """ time = _parse_time_from_input(time, "time") if time_ref is None: time_ref = datetime.now() else: time_ref = _parse_time_from_input(time_ref, "time_ref") return time > time_ref
948
def test_init_descriptor_always_initted():
    """We should be able to get a height and width even on no-tty Terminals."""
    t = Terminal(stream=StringIO())
    eq_(type(t.height), int)
949
def afs(debugger, command, context, result, _): """ Get the address for a symbol regardless of the ASLR offset (lldb) afs viewDidLoad -[UIViewController viewDidLoad]: 0x1146c79a4 ... (lldb) afs -[MyViewController viewDidLoad]: 0x1146c79a4 """ # TODO: Allow user to pass module for symbol symbol = command.strip() if "?" in symbol: result.AppendWarning( "Symbol must not have Swift syntax sugar '{}'".format(symbol) ) result.SetStatus(lldb.eReturnStatusFailed) return target = debugger.GetSelectedTarget() # Search for functions with both the original, and auto, arguments # The default to this argument is "any" yet it seems as though you # don't actually search for any. For example: # argument | swift symbol | result # ---------+--------------------------------------------+------------- # any | ViewController.viewDidLoad | valid symbol # any | module.ViewController.viewDidLoad () -> () | none # full | ViewController.viewDidLoad | none # full | module.ViewController.viewDidLoad () -> () | valid symbol # # In this case, I would expect any + a full symbol to still return # the full symbol. This is also supposed to be a bitwise field, so I've # also tried passing multiple options, with no success functions = target.FindFunctions(symbol, lldb.eFunctionNameTypeFull) for symbol_context in target.FindFunctions(symbol): functions.Append(symbol_context) if len(functions) == 0: result.AppendWarning("No symbol found for '{}'".format(symbol)) result.SetStatus(lldb.eReturnStatusFailed) elif len(functions) == 1: result.AppendMessage(_address_for_symbol(target, functions[0], False)) result.SetStatus(lldb.eReturnStatusSuccessFinishResult) else: for context in functions: result.AppendMessage(_address_for_symbol(target, context, True)) result.SetStatus(lldb.eReturnStatusSuccessFinishResult)
950
def create_and_train_model(x_learn, y_learn, model, n_cores):
    """General method to create and train model"""
    print(model.fit(x_learn, y_learn))
    start_time = datetime.now()
    c_val = cross_val_score(model, x_learn, y_learn, cv=10, n_jobs=n_cores)
    end_time = datetime.now()
    print(type(model).__name__, "with n_jobs =", n_cores, "took:",
          (end_time.second - start_time.second), "seconds")
    print(type(model).__name__, "cross_val_score:", c_val.mean())
    return model, c_val
951
def cardidolizedimageurl(context, card, idolized, english_version=False): """ Returns an image URL for a card in the context of School Idol Contest """ prefix = 'english_' if english_version else '' if card.is_special or card.is_promo: idolized = True if idolized: if getattr(card, prefix + 'round_card_idolized_image'): return _imageurl(getattr(card, prefix + 'round_card_idolized_image'), context=context) if getattr(card, prefix + 'card_idolized_image'): return _imageurl(getattr(card, prefix + 'card_idolized_image'), context=context) return _imageurl('static/default-' + card.attribute + '.png', context=context) if getattr(card, prefix + 'round_card_image'): return _imageurl(getattr(card, prefix + 'round_card_image'), context=context) if getattr(card, prefix + 'card_image'): return _imageurl(getattr(card, prefix + 'card_image'), context=context) return _imageurl('static/default-' + card.attribute + '.png', context=context)
952
def extract_simple_tip(e):
    """
    """
    emin = e.min()
    emax = e.max()
    indices = [nearest_index(emin), nearest_index(emax)]
    indices.sort()
    imin, imax = indices
    imax += 1  # for python style indexing
    return imin, imax
953
def adjust_learning_rate_local(optimizer, epoch, lr0): """Sets the learning rate to the initial LR decayed by 10 every 30 epochs""" # if lr0<=1e-4: # if epoch < 90: # lr = lr0 * (0.99**(epoch//30)) # elif epoch < 180: # lr = lr0 * (0.9**(epoch//30)) # elif epoch < 270: # lr = lr0 * (0.7**(epoch//30)) # if lr0==1e-3*0.5: # if epoch < 90: # lr = lr0 * (0.9**(epoch//7)) # elif epoch < 180: # lr = lr0 * (0.9**(epoch//7)) # elif epoch < 270: # lr = lr0 * (0.88**(epoch//7)) # lr = lr0 * (0.99**(epoch//30)) lr = lr0 * (0.1 ** epoch) for param_group in optimizer.param_groups: param_group['lr'] = lr
954
def convert(ctx, amount: float=1, from_currency=None, to_currency=None): """ Convert. Convert between currencies. Defaults to geolocated currencies. """ _update_currencies(ctx.config.app_id, ctx.storage) if amount is None: amount = 1 if from_currency is None and to_currency is None: ctx.respond(ctx._("You haven't specified a currency pair.")) return if from_currency is None or to_currency is None: try: geocode = ctx.provider_for("geocode") except KeyError: ctx.respond(ctx._("Sorry, I don't have a geocode provider loaded, and you haven't specified both currencies.")) return user_data = yield ctx.bot.defer_from_thread(UserData.lookup_default, ctx.client, ctx.origin) if "location" not in user_data: ctx.respond(ctx._("You don't have location data set, so I can't guess what currency you want.")) return result = (yield geocode(ctx.origin))[0] currency = ccy.countryccy( [component["short_name"] for component in result["address_components"] if "country" in component["types"]][0] ) if currency is None: ctx.respond(ctx._("I don't know the currency for your location.")) return if from_currency is None: from_currency = currency if to_currency is None: to_currency = currency from_currency = from_currency.upper() to_currency = to_currency.upper() for currency in [from_currency, to_currency]: if currency not in ctx.storage.rates: ctx.respond(ctx._("I don't know the exchange rate for {currency}.").format( currency=currency )) return den = ctx.storage.rates[from_currency] num = ctx.storage.rates[to_currency] converted = amount * num / den ctx.respond(ctx._("{amount:.4f} {from_currency} ({from_currency_name}) = {converted:.4f} {to_currency} ({to_currency_name})").format( amount=amount, from_currency=from_currency, from_currency_name=ctx.storage.names[from_currency], converted=converted, to_currency=to_currency, to_currency_name=ctx.storage.names[to_currency] ))
955
def batch_generator(adjs, nbrs, nnlbls, atts, labels, N_map, E_map, Y_map, V_num=28, bsize=32, dim_f=0, dim_a=0, dim_e=0, nY=2, k=3, pk=10): """graph is processed(add padding) as needed""" epch = 0 N = len(labels) while True: order = np.random.permutation(N) for i in range(0, N-bsize, bsize): Xs = [prepare_G(adjs[x], nbrs[x], nnlbls[x], atts[x], V_num, E_map, N_map, k, dim_f, dim_a, pk=pk) for x in order[i:i+bsize]] adj, nbr, nnlbl, att, lbls = [[Xs[x][0] for x in range(len(Xs))]], [Xs[x][1] for x in range(len(Xs))], [Xs[x][2] for x in range(len(Xs))], [Xs[x][3] for x in range(len(Xs))], onehot([labels[x] for x in order[i:i+bsize]], Y_map) adj = np.swapaxes(adj, 0,1).reshape((1, bsize, V_num+1, pk*pk*dim_e)) nbr = np.swapaxes(nbr, 0,1).reshape((1, bsize, V_num+1, k)) nnlbl = np.swapaxes(nnlbl, 0,1).reshape((1, bsize, V_num+1, pk*dim_f)) att = np.swapaxes(att, 0,1).reshape((1, bsize, V_num+1, pk*dim_a)) yield [adj, nbr, nnlbl, att, lbls, epch] epch += 1
956
def moveresize(win, x=None, y=None, w=None, h=None, window_manager=None): """ This function attempts to properly move/resize a window, accounting for its decorations. It doesn't rely upon _NET_FRAME_EXTENTS, but instead, uses the actual parent window to adjust the width and height. (I've found _NET_FRAME_EXTENTS to be wildly unreliable.) :param win: Window identifier. :param x: Top left x coordinate. :param y: Top left y coordinate. :param w: Client width. :param h: Client height. :param window_manager: A class variable from Window.WindowManagers :type window_manager: int :rtype: void """ if window_manager is WindowManagers.KWin: tomove = get_parent_window(get_parent_window(win)) else: tomove = get_parent_window(win) if tomove: cx, cy, cw, ch = __get_geometry(win) px, py, pw, ph = __get_geometry(tomove) w -= pw - cw h -= ph - ch ewmh.request_moveresize_window(win, x=x, y=y, width=max(1, w), height=max(1, h), source=2)
957
def parse_pipeline_config(pipeline_config_file):
    """Returns pipeline config and meta architecture name."""
    with tf.gfile.GFile(pipeline_config_file, 'r') as config_file:
        config_str = config_file.read()
    pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
    text_format.Merge(config_str, pipeline_config)
    meta_arch = pipeline_config.model.WhichOneof('model')
    return pipeline_config, meta_arch
958
def get_pwl(time_series, pwl_epsilon): """ This is a wrapper function for getting a bounded piecewise linear approximation of the data """ if not isinstance(pwl_epsilon, (int, float)): raise TypeError("pwl_epsilon must be a numeric type!") if not (isinstance(time_series, pd.DataFrame) or isinstance(time_series, list)): raise TypeError("The argument time_series must be a Pandas Dataframe, or a list!") if isinstance(time_series, pd.DataFrame): # just how hakimis algorithm wants the data polyline_from_data = list(zip(time_series.index.tolist(), time_series[construct_variable_name(1)].values.tolist())) else: polyline_from_data = time_series if math.isclose(pwl_epsilon, 0.0): return polyline_from_data else: approx_grap = create_approximation_graph(timeseries=polyline_from_data, epsilon=pwl_epsilon) shortest_path_gen =\ nx.all_shortest_paths(approx_grap, tuple(polyline_from_data[0]), tuple(polyline_from_data[-1])) # this avoids generating all paths, since we take just the first one (saves memory and time) return next(shortest_path_gen)
959
def to_array(string):
    """Converts a string to an array relative to its spaces.

    Args:
        string (str): The string to convert into array

    Returns:
        str: New array
    """
    try:
        new_array = string.split(" ")  # Convert the string into array
        while "" in new_array:  # Check if the array contains empty strings
            new_array.remove("")
        return new_array
    except:
        print("The parameter string is not a str")
        return string
960
def convert_atoms_to_pdb_molecules(atoms: t.List[Atom]) -> t.List[str]: """ This function converts the atom list into pdb blocks. Parameters ---------- atoms : t.List[Atom] List of atoms Returns ------- t.List[str] pdb strings of that molecule """ # 1) GROUP ATOMS BT MOLECULES molecules = defaultdict(list) for a in atoms: molecules[a.resi].append(a) # 2) CONSTUCT PDB BLOCKS #ref: https://www.cgl.ucsf.edu/chimera/docs/UsersGuide/tutorials/pdbintro.html pdb_format = "ATOM {:>5d} {:<2}{:1}{:>3} {:1}{:>3d}{:1} {:>7.3f}{:>7.3f}{:>7.3f}{:>5}{:>6}{:<3}{:>2} {:>2d}" dummy_occupancy= dummy_bfactor= dummy_charge = 0.0 dummy_alt_location= dummy_chain= dummy_insertion_code= dummy_segment = "" pdb_molecules: t.List[str] = [] for m_ID in sorted(molecules): m = molecules[m_ID] atoms_as_lines: t.List[str] = [] for a in sorted(m, key= lambda x: x.id): atoms_as_lines.append(pdb_format.format(int(a.id), a.name, dummy_alt_location, a.resn, dummy_chain, int(a.resi), dummy_insertion_code, a.x, a.y, a.z, dummy_occupancy, dummy_bfactor, dummy_segment, a.elem, int(dummy_charge))) # Sort by Id: => convert str up do first space to int #atoms_as_lines = sorted(atoms_as_lines, key=lambda x: int(x[:x.index('\t')])) molecule_as_str = "TITLE "+a.resn+"\n"+'\n'.join(atoms_as_lines) + '\nEND' # molecule_as_str = molecule_as_str.replace('\t',' ') pdb_molecules.append(molecule_as_str) print(pdb_molecules[-1]) return pdb_molecules
961
def unvoiced_features(sig,fs,vcont,sil_cont): """ Unvoiced segment features. Requires voiced and silence/pauses segment detection. """ #Unvoiced features uv_seg,_,_ = unvoiced_seg(sig,fs,vcont,sil_cont) lunvoiced = [] for uv in uv_seg: lunvoiced.append(len(uv)/fs)#Length of unvoiced segment uunvoiced = np.mean(lunvoiced)#Average length # sunvoiced = np.std(lunvoiced)#variation of length uvrate = (len(uv_seg)*fs)/len(sig)#Unvoiced segments per second numuv = len(uv_seg) rPVI,nPVI = get_pvi(lunvoiced) pGPI,dGPI = get_gpi(lunvoiced,len(sig)/fs) # feats_unvoiced = np.hstack([numuv,uvrate,uunvoiced,rPVI,nPVI,pGPI,dGPI]) feats_unvoiced = {'Unvoiced_counts':numuv, 'Unvoiced_rate':uvrate, 'Unvoiced_duration':uunvoiced, 'Unvoiced_rPVI':rPVI, 'Unvoiced_nPVI':nPVI, 'Unvoiced_dGPI':dGPI} return feats_unvoiced
962
def test_markerfacecolors_allclose(axis):
    """Are the markerfacecolors almost correct?"""
    err = 1e-12
    markerfacecolor = np.array([0.1, 1, 1])
    axis.plot([1, 2.17, 3.3, 4], [2.5, 3.25, 4.4, 5], markerfacecolor=list(markerfacecolor + err))

    pc = LinePlotChecker(axis)
    with pytest.raises(AssertionError):
        pc.assert_markerfacecolors_equal([markerfacecolor])
    with pytest.raises(AssertionError):
        pc.assert_markerfacecolors_allclose([markerfacecolor], rtol=1e-13)
    pc.assert_markerfacecolors_allclose([markerfacecolor])
963
def civic_methods(method001, method002, method003):
    """Create test fixture for methods."""
    return [method001, method002, method003]
964
def _generate_tags(encoding_type, number_labels=4):
    """
    :param encoding_type: e.g. BIOES, BMES, BIO
    :param number_labels: how many labels, greater than 1
    :return:
    """
    vocab = {}
    for i in range(number_labels):
        label = str(i)
        for tag in encoding_type:
            if tag == 'O':
                if tag not in vocab:
                    vocab['O'] = len(vocab) + 1
                continue
            vocab['{}-{}'.format(tag, label)] = len(vocab) + 1  # what this actually expresses is the count of this tag
    return vocab
965
def test_delete_topic(host): """ Check if can delete topic """ # Given topic_name = get_topic_name() ensure_topic( host, topic_defaut_configuration, topic_name ) time.sleep(0.3) # When test_topic_configuration = topic_defaut_configuration.copy() test_topic_configuration.update({ 'state': 'absent' }) ensure_idempotency( ensure_topic, host, test_topic_configuration, topic_name ) time.sleep(0.3) # Then for kafka_host, host_vars in kafka_hosts.items(): kfk_addr = "%s:9092" % \ host_vars['ansible_eth0']['ipv4']['address']['__ansible_unsafe'] check_configured_topic(kafka_host, test_topic_configuration, topic_name, kfk_addr)
966
def interrupts():
    """
    En - Re-enables interrupts [after they've been disabled by noInterrupts()]
    Fr - Revalide les interruptions [après qu'elles aient été désactivées par noInterrupts()]
    """
    fichier = open(SWIRQ_PATH, 'r')
    irqnum = 0x0
    ret = ioctl(fichier, SWIRQ_ENABLE, struct.pack("@B", irqnum))
    irqnum = 0x1
    ret = ioctl(fichier, SWIRQ_ENABLE, struct.pack("@B", irqnum))
    fichier.close()
967
def delete_link_tag(api_client, link_id, tag_key, **kwargs): # noqa: E501 """delete_link_tag # noqa: E501 Delete link tag by key This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> response = await api.delete_link_tag(client, link_id, tag_key, async_req=True) :param link_id str: str of link. e.g. lnk0 :param tag_key str: key of tag :param async_req bool: execute request asynchronously :param bool sorted: Sort resources :param _return_http_data_only: response data without head status code and headers :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: APIResponse or awaitable if async """ local_var_params = locals() request_params = [] # noqa: E501 collection_formats = {} path_params = {"link_id": link_id, "tag_key": tag_key} query_params = [] for param in [p for p in request_params if local_var_params.get(p) is not None]: query_params.append((param, local_var_params[param])) # noqa: E501 header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params["Accept"] = api_client.select_header_accept( ["application/json"] ) # noqa: E501 # Authentication setting auth_settings = ["ApiTokenAuth", "basicAuth"] # noqa: E501 return api_client.call_api( "/links/{link_id}/tags/{tag_key}", "DELETE", path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type="object", # noqa: E501 auth_settings=auth_settings, async_req=local_var_params.get("async_req"), _return_http_data_only=local_var_params.get( "_return_http_data_only" ), # noqa: E501 _preload_content=local_var_params.get("_preload_content", True), _request_timeout=local_var_params.get("_request_timeout"), collection_formats=collection_formats, )
968
def test_sortie(mission): """ :type mission: MissionReport """ data = { 'aircraft_id': 10011, 'bot_id': 10012, 'pos': {'x': 7500.0, 'y': 0.0, 'z': 7500.0}, 'account_id': '76638c27-16d7-4ee2-95be-d326a9c499b7', 'profile_id': '8d8a0ac5-095d-41ea-93b5-09599a5fde4c', 'name': 'John Doe', 'aircraft_name': 'La-5 ser.8', 'country_id': 101, 'coal_id': 1, 'airfield_id': None, 'airstart': False, 'parent_id': None, 'payload_id': 1, 'fuel': 50, 'skin': '', 'weapon_mods_id': [], 'tik': 20, 'cartridges': 500, 'shells': 100, 'bombs': 2, 'rockets': 6, } sortie = Sortie(mission=mission, **data) assert sortie.index == 0 assert sortie.mission == mission assert sortie.aircraft_id == data['aircraft_id'] assert sortie.bot_id == data['bot_id'] assert sortie.aircraft is None assert sortie.bot is None assert sortie.pos_start == data['pos'] assert sortie.account_id == data['account_id'] assert sortie.profile_id == data['profile_id'] assert sortie.nickname == data['name'] assert sortie.aircraft_name == data['aircraft_name'].lower() assert sortie.cls == 'aircraft_light' assert sortie.cls_base == 'aircraft' assert sortie.country_id == data['country_id'] assert sortie.coal_id == 1 assert sortie.airfield_id == data['airfield_id'] assert sortie.is_airstart == data['airstart'] assert sortie.parent_id == data['parent_id'] assert sortie.payload_id == data['payload_id'] assert sortie.fuel == data['fuel'] assert sortie.skin == data['skin'] assert sortie.weapon_mods_id == data['weapon_mods_id'] assert sortie.tik_spawn == data['tik'] assert sortie.tik_takeoff is None assert sortie.tik_landed is None assert sortie.tik_end is None assert sortie.tik_last == data['tik'] assert sortie.used_cartridges == data['cartridges'] assert sortie.used_shells == data['shells'] assert sortie.used_bombs == data['bombs'] assert sortie.used_rockets == data['rockets'] assert sortie.hit_bullets == 0 assert sortie.hit_bombs == 0 assert sortie.hit_rockets == 0 assert sortie.hit_shells == 0 assert sortie.ratio == 1 assert sortie.is_disco is False assert sortie.is_ended is False assert mission.lost_aircraft[data['aircraft_id']] == sortie assert mission.lost_bots[data['bot_id']] == sortie # assert sortie in mission.active_sorties[sortie.coal_id] # assert sortie in mission.sorties # assert mission.sorties_aircraft[sortie.aircraft_id] == sortie # assert mission.sorties_bots[sortie.bot_id] == sortie # assert mission.sorties_accounts[sortie.account_id] == sortie sortie.update_ratio(current_ratio=1.2) sortie.update_ratio(current_ratio=1) assert sortie.ratio == 1.1 sortie.ending(tik=1000, cartridges=100, shells=75, bombs=1, rockets=4) assert sortie.is_ended assert sortie.tik_end == 1000 assert sortie.used_cartridges == 400 assert sortie.used_bombs == 1 assert sortie.used_shells == 25 assert sortie.used_rockets == 2 # assert sortie not in mission.active_sorties[sortie.coal_id] sortie.ending(tik=1200, cartridges=100, shells=75, bombs=1, rockets=4) assert sortie.tik_end == 1000 assert sortie.is_bailout is False assert sortie.is_captured is False assert sortie.killboard == {} assert sortie.assistboard == {} assert sortie.aircraft_damage == 0 assert sortie.bot_damage == 0 assert sortie.sortie_status == SortieStatus() assert sortie.bot_status == BotLifeStatus()
969
def test_user_details(user, mocker): """ Test for: User.biography property User.media_count property User.follower_count property User.following_count property User.user_detail method User.full_info method """ user_details = { "biography": random_string(), "media_count": random_int(), "follower_count": random_int(), "following_count": random_int(), **user.as_dict(), } full_info = {"user_detail": {"user": user_details}} details_mock = mocker.patch("instapi.client.client.user_detail_info", return_value=full_info) assert user.biography == user_details["biography"] assert user.media_count == user_details["media_count"] assert user.follower_count == user_details["follower_count"] assert user.following_count == user_details["following_count"] assert user.user_detail() == user_details assert user.full_info() == full_info details_mock.assert_called_once_with(user.pk)
970
def infect_graph(g, title):
    """
    Function to infect the graph using the SI model.

    Parameters:
      g: Graph
      title: name used for the saved plot files
    Returns:
      G : Infected graph
      sorted_dict : Diffusion time of each node, sorted by infection time
                    (-1 means the node was never infected)
      source_nodes : Nodes that were infected at the start of the simulation
    """
    G = g

    # Model selection - diffusion time
    model = ep.SIModel(G)

    # Model Configuration
    config = mc.Configuration()
    config.add_model_parameter('beta', 0.03)
    config.add_model_parameter("fraction_infected", 0.05)
    model.set_initial_status(config)

    # Simulation execution
    iterations = model.iteration_bunch(200)

    # Record the iteration at which each node became infected
    diffusionTime = {node: -1 for node in G.nodes()}
    for i in iterations:
        for j in i['status']:
            if i['status'][j] == 1:
                diffusionTime[j] = i['iteration']

    nodeColor = []
    source_nodes = []
    for i in G.nodes():
        if iterations[0]["status"][i] == 1:
            nodeColor.append('red')
            source_nodes.append(i)
        else:
            nodeColor.append('blue')

    # Order nodes by their diffusion (infection) time
    sorted_dict = dict(sorted(diffusionTime.items(), key=lambda item: item[1]))

    plt.clf()
    nx.draw(G, node_color=nodeColor, with_labels=True)
    plt.title('Initial Phase')
    plt.savefig(f'./plots/{title}_Initial-infect.png')
    plt.clf()
    nx.draw(G, node_color=[diffusionTime[n] for n in G.nodes()], cmap=plt.cm.Reds, with_labels=True)
    plt.title('Final Phase')
    plt.savefig(f'./plots/{title}_Final-infect.png')

    return (G, sorted_dict, source_nodes)
971
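# A minimal usage sketch for `infect_graph` above. It assumes the module-level
# imports the function relies on (networkx as nx, matplotlib.pyplot as plt,
# ndlib.models.epidemics as ep, ndlib.models.ModelConfig as mc) and that a
# ./plots directory exists for the saved figures; the graph and title below
# are illustrative only.
import networkx as nx

demo_graph = nx.erdos_renyi_graph(n=50, p=0.1, seed=42)
G, diffusion_times, sources = infect_graph(demo_graph, title="demo")
print("initially infected nodes:", sources)
# diffusion_times maps node -> iteration of infection (-1 = never infected)
print("earliest entries:", list(diffusion_times.items())[:5])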
def main_func_SHORT(): """ Func. called by the main T """ sleep(SHORT) return True
972
@contextlib.contextmanager
def RedirectStdoutStderr(filename):
    """Context manager that replaces stdout and stderr streams."""
    # NOTE: assumed to be decorated with contextlib.contextmanager in the
    # original module; the docstring and yield-based structure imply it.
    if filename is None:
        yield
        return
    with open(filename, 'a') as stream:
        old_stdout = sys.stdout
        old_stderr = sys.stderr
        sys.stdout = stream
        sys.stderr = stream
        util.CheckStdoutForColorSupport()
        try:
            yield
        finally:
            sys.stdout = old_stdout
            sys.stderr = old_stderr
            util.CheckStdoutForColorSupport()
973
def split_train_test(X: pd.DataFrame, y: pd.Series, train_proportion: float = .75) \ -> Tuple[pd.DataFrame, pd.Series, pd.DataFrame, pd.Series]: """ Randomly split given sample to a training- and testing sample Parameters ---------- X : DataFrame of shape (n_samples, n_features) Data frame of samples and feature values. y : Series of shape (n_samples, ) Responses corresponding samples in data frame. train_proportion: Fraction of samples to be split as training set Returns ------- train_X : DataFrame of shape (ceil(train_proportion * n_samples), n_features) Design matrix of train set train_y : Series of shape (ceil(train_proportion * n_samples), ) Responses of training samples test_X : DataFrame of shape (floor((1-train_proportion) * n_samples), n_features) Design matrix of test set test_y : Series of shape (floor((1-train_proportion) * n_samples), ) Responses of test samples """ no_of_train_rows = math.ceil(train_proportion * X.shape[0]) X : pd.DataFrame = pd.DataFrame.join(X,y) train_data = X.sample(n=no_of_train_rows, axis=0) test_data = X.loc[X.index.difference(train_data.index), ] train_y = train_data[y.name] test_y = test_data[y.name] train_data.drop(columns=y.name,inplace=True) test_data.drop(columns=y.name,inplace=True) # print(train_data.shape) # print(test_data.shape) # print(train_y.shape) # print(test_y.shape) return train_data, train_y, test_data, test_y
974
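# A short usage sketch for `split_train_test` above, assuming pandas, numpy and
# math are imported at module level as the function requires. The toy frame and
# column names are illustrative only.
import numpy as np
import pandas as pd

rng = np.random.default_rng(0)
X_demo = pd.DataFrame({"feat_a": rng.normal(size=100), "feat_b": rng.normal(size=100)})
y_demo = pd.Series(rng.integers(0, 2, size=100), name="label")

train_X, train_y, test_X, test_y = split_train_test(X_demo, y_demo, train_proportion=0.75)
print(train_X.shape, test_X.shape)  # expected: (75, 2) (25, 2)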
def split_a_book(file_name, outfilename=None):
    """Split a file into separate sheets

    :param str file_name: an accessible file name
    :param str outfilename: optional suffix for the generated files; defaults
                            to the input file name. Each sheet is saved as
                            "<sheet name>_<suffix>".
    """
    book = get_book(file_name=file_name)
    if outfilename:
        saveas = outfilename
    else:
        saveas = file_name
    for sheet in book:
        filename = "%s_%s" % (sheet.name, saveas)
        sheet.save_as(filename)
975
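# Hedged usage sketch for `split_a_book` above. It relies on pyexcel's
# `get_book` being importable in this module; the file names are placeholders.
split_a_book("workbook.xlsx", "split.xlsx")
# With sheets named "Sheet1" and "Sheet2", this writes Sheet1_split.xlsx and
# Sheet2_split.xlsx to the current directory.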
def s2sdd(s): """ Converts a 4-port single-ended S-parameter matrix to a 2-port differential mode representation. Reference: https://www.aesa-cortaillod.com/fileadmin/documents/knowledge/AN_150421_E_Single_ended_S_Parameters.pdf """ sdd = np.zeros((2, 2), dtype=np.complex128) sdd[0, 0] = 0.5*(s[0, 0] - s[0, 2] - s[2, 0] + s[2, 2]) sdd[0, 1] = 0.5*(s[0, 1] - s[0, 3] - s[2, 1] + s[2, 3]) sdd[1, 0] = 0.5*(s[1, 0] - s[1, 2] - s[3, 0] + s[3, 2]) sdd[1, 1] = 0.5*(s[1, 1] - s[1, 3] - s[3, 1] + s[3, 3]) return sdd
976
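# Numerical sketch for `s2sdd` above: convert a 4x4 single-ended S-parameter
# matrix (ports 1/3 forming one pair and 2/4 the other, as the index pattern in
# the function implies) into its 2x2 differential-mode block. The matrix below
# is random and for illustration only.
import numpy as np

rng = np.random.default_rng(1)
s_single = rng.normal(size=(4, 4)) + 1j * rng.normal(size=(4, 4))
sdd = s2sdd(s_single)
print(sdd.shape)   # (2, 2)
print(sdd[1, 0])   # differential-mode transmission term Sdd21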
def plot_gdf(gdf, map_f=None, maxitems=-1, style_func_args={}, popup_features=[], tiles='cartodbpositron', zoom=6, geom_col='geometry', control_scale=True): """ :param gdf: GeoDataFrame GeoDataFrame to visualize. :param map_f: folium.Map `folium.Map` object where the GeoDataFrame `gdf` will be plotted. If `None`, a new map will be created. :param maxitems: int maximum number of tiles to plot. If `-1`, all tiles will be plotted. :param style_func_args: dict dictionary to pass the following style parameters (keys) to the GeoJson style function of the polygons: 'weight', 'color', 'opacity', 'fillColor', 'fillOpacity', 'radius' :param popup_features: list when clicking on a tile polygon, a popup window displaying the information in the columns of `gdf` listed in `popup_features` will appear. :param tiles: str folium's `tiles` parameter. :param zoom: int initial zoom. :param geom_col: str name of the geometry column of `gdf`. :param control_scale: bool if `True`, add scale information in the bottom left corner of the visualization. The default is `True`. Returns ------- `folium.Map` object with the plotted GeoDataFrame. """ if map_f is None: # initialise map lon, lat = np.mean(np.array(list(gdf[geom_col].apply(utils.get_geom_centroid).values)), axis=0) map_f = folium.Map(location=[lat, lon], tiles=tiles, zoom_start=zoom, control_scale=control_scale) count = 0 for k in gdf.index: g = gdf.loc[k] if type(g[geom_col]) == gpd.geoseries.GeoSeries: for i in range(len(g[geom_col])): map_f = add_to_map(g[geom_col].iloc[i], g.iloc[i], map_f, popup_features=popup_features, style_func_args=style_func_args) else: map_f = add_to_map(g[geom_col], g, map_f, popup_features=popup_features, style_func_args=style_func_args) count += 1 if count == maxitems: break return map_f
977
def set_catflap_cat_inside(request, catflap_uuid): """GET so it can be used as an email link.""" catflap = CatFlap.objects.get(uuid=catflap_uuid) if not catflap.cat_inside: catflap.cat_inside = True catflap.save() track_manual_intervention(catflap, cat_inside=True) return redirect_to_status_page(request, catflap_uuid)
978
def feature_list(): """Check the library for compile-time features. The list of features are maintained in libinfo.h and libinfo.cc Returns ------- list List of :class:`.Feature` objects """ lib_features_c_array = ctypes.POINTER(Feature)() lib_features_size = ctypes.c_size_t() check_call(_LIB.MXLibInfoFeatures(ctypes.byref(lib_features_c_array), ctypes.byref(lib_features_size))) features = [lib_features_c_array[i] for i in range(lib_features_size.value)] return features
979
def _symm_herm(C): """To get rid of NaNs produced by _scalar2array, symmetrize operators where C_ijkl = C_jilk*""" nans = np.isnan(C) C[nans] = np.einsum('jilk', C)[nans].conj() return C
980
def cat(self, dim=0): """Map of 'cat' pytorch method.""" x = self dim = _dim_explicit(x[0].shape, dim) return P.concat(x, dim)
981
def survey(nara_file=None):
    """
    Generate a summary of the data structure.

    :param nara_file: path to a NARA JSON export; defaults to
        nara-export-latest.json next to this module.
    :return: None. Writes nara-summary.json and nara-digitised.json and prints
        the number of digitised records.
    """
    if not nara_file:
        nara_file = os.path.abspath(
            os.path.join(os.path.dirname(__file__), "nara-export-latest.json")
        )
    with open(nara_file, "r") as f:
        nara_json = json.load(f)
    lists = {}
    strings = {}
    dicts = {}
    digitised = []
    for record in nara_json:
        for k, v in record.items():
            if type(v) == list:
                if not lists.get(k):
                    lists[k] = {"length": 0, "values": []}
                if len(v) > lists[k]["length"]:
                    lists[k]["length"] = len(v)
                for x in v:
                    if type(x) != dict:
                        lists[k]["values"].append(x)
                    else:
                        lists[k]["values"].append(json.dumps(x))
                lists[k]["values"] = list(set(lists[k]["values"]))
            elif type(v) == str:
                if not strings.get(k):
                    strings[k] = {"values": []}
                strings[k]["values"].append(v)
                strings[k]["values"] = list(set(strings[k]["values"]))
            elif type(v) == dict:
                dicts[k] = summarise(v)
        if record.get("objects"):
            digitised.append(
                {
                    "documentIndex": record.get("documentIndex"),
                    "title": record.get("title"),
                    "url": record.get("url"),
                    "date": record.get("productionDates"),
                }
            )
    summary = {"strings": strings, "lists": lists, "dicts": dicts}
    nara_summary = os.path.abspath(os.path.join(os.path.dirname(__file__), "nara-summary.json"))
    nara_digitised = os.path.abspath(
        os.path.join(os.path.dirname(__file__), "nara-digitised.json")
    )
    with open(nara_summary, "w") as f:
        json.dump(summary, f, indent=4, sort_keys=True)
    with open(nara_digitised, "w") as d:
        json.dump(digitised, d, indent=4, sort_keys=True)
    print(len(digitised))
982
def test_ticker_gains_negative_balance(transactions, exchange_rates_mock): """If the first transaction added is a sell, it is illegal since this causes a negative balance, which is impossible""" sell_transaction = transactions[2] tg = TickerGains(sell_transaction.ticker) er = ExchangeRate('USD', transactions[2].date, transactions[2].date) er_map = {'USD': er} with pytest.raises(ClickException) as excinfo: tg.add_transactions([sell_transaction], er_map) assert excinfo.value.message == "Transaction caused negative share balance"
983
def test_usage(): """usage""" rv, out = getstatusoutput(prg) assert rv > 0 assert out.lower().startswith('usage')
984
def _name_xform(o):
    """transform names to lowercase, without symbols (except underscore)

    Any chars other than alphanumeric are converted to an underscore
    """
    return re.sub(r"\W", "_", o.lower())
985
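# Quick illustration of `_name_xform` above (assumes `re` is imported at module
# level, as the function requires).
assert _name_xform("Total Sales (USD)") == "total_sales__usd_"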
def runner(app):
    """Create a test runner for invoking the Click commands registered on the application."""
    return app.test_cli_runner()
986
def remove_all_objects(scene): """ Given a planning scene, remove all known objects. """ for name in scene.get_known_object_names(): scene.remove_world_object(name)
987
def create_activation_cache(model):
    """Creates an activation cache for the tensors of a model."""
    input_quantizer = quantized_relu(8, 0)
    output_cache = {}
    # If using a Sequential model, the input layer is hidden. Therefore, add the
    # input quantization to the cache if the first layer is not an input layer
    if not isinstance(model.layers[0], InputLayer):
        output_cache[model.layers[0].input.experimental_ref()] = input_quantizer
    # cache graph tensors' activations; every branch below assigns the entry
    # for l.output, so no default assignment is needed beforehand
    for l in model.layers:
        if isinstance(l, QActivation) or isinstance(l, QAdaptiveActivation):
            output_cache[l.output.experimental_ref()] = l.quantizer
        elif isinstance(l, InputLayer):
            # assume the input is 8-bit positive value
            output_cache[l.output.experimental_ref()] = input_quantizer
        elif l.__class__.__name__ in [
            "QDense", "QConv2D", "QConv1D", "QDepthwiseConv2D"
        ]:
            output_cache[l.output.experimental_ref()] = l.activation
        else:
            if isinstance(l.input, list):
                # right now, we just get the first one - we assume this is the
                # leading one.
                all_q = [
                    output_cache.get(l.input[i].experimental_ref())
                    for i in range(len(l.input))
                ]
                q = all_q[0]
            else:
                q = output_cache.get(l.input.experimental_ref(), None)
            output_cache[l.output.experimental_ref()] = q
            if q is None:
                raise ValueError("Unknown operation in {}".format(l.name))
    return output_cache
988
def feature_scatterplot(fset_path, features_to_plot): """Create scatter plot of feature set. Parameters ---------- fset_path : str Path to feature set to be plotted. features_to_plot : list of str List of feature names to be plotted. Returns ------- (str, str) Returns (docs_json, render_items) json for the desired plot. """ fset, data = featurize.load_featureset(fset_path) fset = fset[features_to_plot] colors = cycle(palette[5]) plots = np.array([[figure(width=300, height=200) for j in range(len(features_to_plot))] for i in range(len(features_to_plot))]) for (j, i), p in np.ndenumerate(plots): if (j == i == 0): p.title.text = "Scatterplot matrix" p.circle(fset.values[:,i], fset.values[:,j], color=next(colors)) p.xaxis.minor_tick_line_color = None p.yaxis.minor_tick_line_color = None p.ygrid[0].ticker.desired_num_ticks = 2 p.xgrid[0].ticker.desired_num_ticks = 4 p.outline_line_color = None p.axis.visible = None plot = gridplot(plots.tolist(), ncol=len(features_to_plot), mergetools=True, responsive=True, title="Test") # Convert plot to json objects necessary for rendering with bokeh on the # frontend render_items = [{'docid': plot._id, 'elementid': make_id()}] doc = Document() doc.add_root(plot) docs_json_inner = doc.to_json() docs_json = {render_items[0]['docid']: docs_json_inner} docs_json = serialize_json(docs_json) render_items = serialize_json(render_items) return docs_json, render_items
989
def model1(v, va, vb, ka, Wa, Wb, pa):
    """
    A translation of the equation from Sandström's Dynamic NMR Spectroscopy,
    p. 14, for the uncoupled 2-site exchange simulation.

    v: frequency whose amplitude is to be calculated
    va, vb: frequencies of a and b singlets (slow exchange limit) (va > vb)
    ka: rate constant for state A --> state B
    Wa, Wb: peak widths at half height of the a and b singlets (slow exchange
        limit), used to derive the transverse relaxation times T2a and T2b
        via T2 = 1 / (pi * W)
    pa: fraction of population in state A
    dv: frequency difference (va - vb) between a and b singlets (slow
        exchange), computed internally
    returns: amplitude at frequency v
    """
    pi = np.pi
    pb = 1 - pa
    tau = pb / ka
    dv = va - vb
    Dv = (va + vb) / 2 - v
    T2a = 1 / (pi * Wa)
    T2b = 1 / (pi * Wb)

    P = tau * ((1 / (T2a * T2b)) - 4 * (pi ** 2) * (Dv ** 2) + (pi ** 2) * (dv ** 2))
    P += ((pa / T2a) + (pb / T2b))

    Q = tau * (2 * pi * Dv - pi * dv * (pa - pb))
    R = 2 * pi * Dv * (1 + tau * ((1 / T2a) + (1 / T2b)))
    R += pi * dv * tau * ((1 / T2b) - (1 / T2a)) + pi * dv * (pa - pb)

    I = (P * (1 + tau * ((pb / T2a) + (pa / T2b))) + Q * R) / (P ** 2 + R ** 2)
    return I
990
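# Worked sketch for `model1` above: simulate a two-site exchange lineshape over
# a frequency axis. Assumes numpy is imported as np (as the function requires);
# the parameter values are illustrative only.
import numpy as np

freqs = np.linspace(-100, 300, 2000)   # Hz
spectrum = [model1(v, va=200.0, vb=0.0, ka=50.0, Wa=1.0, Wb=1.0, pa=0.5)
            for v in freqs]
peak_freq = freqs[int(np.argmax(spectrum))]
print(f"maximum intensity near {peak_freq:.1f} Hz")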
def calculate_laminar_flame_speed( initial_temperature, initial_pressure, species_dict, mechanism, phase_specification="", unit_registry=_U ): """ This function uses cantera to calculate the laminar flame speed of a given gas mixture. Parameters ---------- initial_temperature : pint.Quantity Initial temperature of gas mixture initial_pressure : pint.Quantity Initial pressure of gas mixture species_dict : dict Dictionary with species names (all caps) as keys and moles as values mechanism : str String of mechanism to use (e.g. "gri30.cti") phase_specification : str Phase specification for cantera solution unit_registry : pint.UnitRegistry Unit registry for managing units to prevent conflicts with parent unit registry Returns ------- pint.Quantity Laminar flame speed in m/s as a pint quantity """ gas = ct.Solution(mechanism, phase_specification) quant = unit_registry.Quantity tools.check_pint_quantity( initial_pressure, "pressure", ensure_positive=True ) tools.check_pint_quantity( initial_temperature, "temperature", ensure_positive=True ) # ensure species dict isn't empty if len(species_dict) == 0: raise ValueError("Empty species dictionary") # ensure all species are in the mechanism file bad_species = "" good_species = gas.species_names for species in species_dict: if species not in good_species: bad_species += species + "\n" if len(bad_species) > 0: raise ValueError("Species not in mechanism:\n" + bad_species) gas.TPX = ( initial_temperature.to("K").magnitude, initial_pressure.to("Pa").magnitude, species_dict ) # find laminar flame speed flame = ct.FreeFlame(gas) flame.set_refine_criteria(ratio=3, slope=0.1, curve=0.1) flame.solve(loglevel=0) return quant(flame.u[0], "m/s")
991
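# Hedged usage sketch for `calculate_laminar_flame_speed` above, assuming the
# surrounding module provides `ct` (cantera) and `tools` as the function
# requires. The stoichiometric H2/O2 mixture and the "gri30.cti" mechanism are
# illustrative choices; solving the flame can take a little while.
import pint

ureg = pint.UnitRegistry()
flame_speed = calculate_laminar_flame_speed(
    initial_temperature=ureg.Quantity(300, "K"),
    initial_pressure=ureg.Quantity(1, "atm"),
    species_dict={"H2": 2, "O2": 1},
    mechanism="gri30.cti",
    unit_registry=ureg,
)
print(flame_speed.to("m/s"))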
@contextlib.contextmanager
def random_seed(seed):
    """Execute code inside this with-block using the specified random seed.

    Sets the seed for random, numpy.random and torch (CPU).

    WARNING: torch GPU seeds are NOT set!

    Does not affect the state of random number generators outside this block.
    Not thread-safe.

    Args:
        seed (int)
    """
    # NOTE: assumed to be decorated with contextlib.contextmanager in the
    # original module, since it is documented and used as a with-block.
    state = RandomState()
    random.seed(seed)  # alter state
    np.random.seed(seed)
    torch.manual_seed(seed)
    yield
    state.set_global()
992
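# Usage sketch for the `random_seed` context manager above (assumes the module
# imports random, numpy as np, torch and defines RandomState as the function
# expects).
import numpy as np

with random_seed(1234):
    reproducible_draw = np.random.rand(3)   # same values on every run
outside_draw = np.random.rand(3)            # unaffected by the seeded block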
def extrapolate_coverage(lines_w_status): """ Given the following input: >>> lines_w_status = [ (1, True), (4, True), (7, False), (9, False), ] Return expanded lines with their extrapolated line status. >>> extrapolate_coverage(lines_w_status) == [ (1, True), (2, True), (3, True), (4, True), (5, None), (6, None), (7, False), (8, False), (9, False), ] """ lines = [] prev_lineno = 0 prev_status = True for lineno, status in lines_w_status: while (lineno - prev_lineno) > 1: prev_lineno += 1 if prev_status is status: lines.append((prev_lineno, status)) else: lines.append((prev_lineno, None)) lines.append((lineno, status)) prev_lineno = lineno prev_status = status return lines
993
def get_image_features(filename): """ Param: Path to image Returns: Desired features of image in the form of a dictionary (key = feature_name, value = feature_value) """ array, metadata = nrrd.read(filename) return {k: f(array, metadata, filename) for k, f in image_feature_functions.items()}
994
def get_DCT_transform_matrix(N):
    """
    Return the normalised N-by-N discrete cosine transform (DCT) matrix.

    Applying the returned transform matrix to a vector x: D.dot(x) yields the
    DCT of x. Applying the returned transform matrix to a matrix A: D.dot(A)
    applies the DCT to the columns of A. Taking D.dot(A.dot(D.T)) applies the
    DCT to both columns and rows, i.e. a full 2D separable DCT transform. The
    inverse transform (the 1D IDCT) is D.T.

    Parameters
    ----------
    N : int
        The size of the DCT transform matrix to return.

    Returns
    -------
    D : ndarray
        The DCT transform matrix.

    Notes
    -----
    The returned DCT matrix is normalised such that it constitutes an
    orthonormal transform as given by equations (2.119) and (2.120) in [1]_.

    References
    ----------
    .. [1] A.N. Akansu, R.A. Haddad, and P.R. Haddad, *Multiresolution Signal
       Decomposition: Transforms, Subbands, and Wavelets*, Academic Press,
       2000.

    Examples
    --------
    For example, get a 5-by-5 DCT matrix

    >>> import numpy as np
    >>> from magni.imaging.dictionaries import get_DCT_transform_matrix
    >>> D = get_DCT_transform_matrix(5)
    >>> np.round(np.abs(D), 4)
    array([[ 0.4472,  0.4472,  0.4472,  0.4472,  0.4472],
           [ 0.6015,  0.3717,  0.    ,  0.3717,  0.6015],
           [ 0.5117,  0.1954,  0.6325,  0.1954,  0.5117],
           [ 0.3717,  0.6015,  0.    ,  0.6015,  0.3717],
           [ 0.1954,  0.5117,  0.6325,  0.5117,  0.1954]])

    and apply the 2D DCT transform to a dummy image

    >>> np.random.seed(6021)
    >>> img = np.random.randn(5, 5)
    >>> img_dct = D.dot(img.dot(D.T))
    >>> np.round(img_dct, 4)
    array([[-0.5247, -0.0225,  0.9098,  0.369 , -0.477 ],
           [ 1.7309, -0.4142,  1.9455, -0.6726, -1.3676],
           [ 0.6987,  0.5355,  0.7213, -0.8498, -0.1023],
           [ 0.0078, -0.0545,  0.3649, -1.4694,  1.732 ],
           [-1.5864,  0.156 ,  0.8932, -0.8091,  0.5056]])

    """

    @_decorate_validation
    def validate_input():
        _numeric('N', 'integer', range_='[1;inf)')

    validate_input()

    nn, rr = np.meshgrid(*map(np.arange, (N, N)))
    D = np.cos((2 * nn + 1) * rr * np.pi / (2 * N))
    D[0, :] /= np.sqrt(N)
    D[1:, :] /= np.sqrt(N / 2)

    return D
995
def get_reduce_nodes(name, nodes):
    """
    Get nodes that combine the reduction variable with a sentinel variable.
    Recognizes the first node that combines the reduction variable with
    another variable.
    """
    reduce_nodes = None
    for i, stmt in enumerate(nodes):
        lhs = stmt.target.name
        rhs = stmt.value
        if isinstance(stmt.value, ir.Expr):
            in_vars = set(v.name for v in stmt.value.list_vars())
            if name in in_vars:
                args = get_expr_args(stmt.value)
                args.remove(name)
                assert len(args) == 1
                replace_vars_inner(stmt.value,
                                   {args[0]: ir.Var(stmt.target.scope,
                                                    name + "#init",
                                                    stmt.target.loc)})
                reduce_nodes = nodes[i:]
                break
    assert reduce_nodes, "Invalid reduction format"
    return reduce_nodes
996
def extract_mesh_descriptor_id(descriptor_id_str: str) -> int: """ Converts descriptor ID strings (e.g. 'D000016') into a number ID (e.g. 16). """ if len(descriptor_id_str) == 0: raise Exception("Empty descriptor ID") if descriptor_id_str[0] != "D": raise Exception("Expected descriptor ID to start with 'D', {}".format(descriptor_id_str)) return int(descriptor_id_str[1:])
997
def process_sources(sources_list): """ This function processes the sources result :param sources_list: A list of dictionaries :return: A list of source objects """ sources_results = [] for sources_item in sources_list: id = sources_item.get('id') name = sources_item.get('name') description = sources_item.get('description') url = sources_item.get('url') category = sources_item.get('category') language = sources_item.get('language') country = sources_item.get('country') print(sources_item) sources_object = Sources(id, name, description, url) sources_results.append(sources_object) return sources_results
998
def encrypt(message_text, key): """Method Defined for ENCRYPTION of a Simple \ String message into a Cipher Text Using \ 2x2 Hill Cipher Technique \nPARAMETERS\n message_text: string to be encrypted key: string key for encryption with length <= 4 \nRETURNS\n cipher_text: encrypted Message string """ # for 2x2 Hill Cipher length of key must be <= 4 # print("Warning: All Spaces with be lost!") cipher_text = "" key_matrix = None if len(key) <= 4: key_matrix = string_to_Matrix_Z26(key, 2, 2) else: print("Key Length must be <= 4 in 2x2 Hill Cipher") return pairs = math.ceil((len(message_text)/2)) matrix = string_to_Matrix_Z26(message_text, 2, pairs) key_inverse = matrix_inverse_Z26(key_matrix) if type(key_inverse) == type(None): print("NOTE: The provided Key is NOT Invertible,") print("To avoid failure while decryption,") print("Try again with an invertible Key") return None for i in range(pairs): result_char = (key_matrix*matrix[:, i]) % 26 cipher_text += ENGLISH_ALPHABETS[ result_char[0, 0] ] cipher_text += ENGLISH_ALPHABETS[ result_char[1, 0] ] return cipher_text
999
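# Standalone sketch of the 2x2 Hill cipher arithmetic that `encrypt` above
# performs, written directly with numpy so it does not depend on the module's
# string_to_Matrix_Z26 / matrix_inverse_Z26 / ENGLISH_ALPHABETS helpers. The
# key matrix and message are illustrative.
import numpy as np

alphabet = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
key = np.array([[3, 3], [2, 5]])              # invertible mod 26 (det = 9)
message = "HELP"
nums = [alphabet.index(c) for c in message]   # [7, 4, 11, 15]
cipher = ""
for i in range(0, len(nums), 2):
    pair = np.array(nums[i:i + 2])
    enc = key.dot(pair) % 26                  # encrypt each column vector
    cipher += alphabet[enc[0]] + alphabet[enc[1]]
print(cipher)  # -> HIAT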