Dataset columns: content (string, lengths 22 to 815k) and id (int64, 0 to 4.91M).
def lean_left(context: TreeContext, operators: AbstractSet[str]):
    """
    Turns a right leaning tree into a left leaning tree:

        (op1 a (op2 b c))  ->  (op2 (op1 a b) c)

    If a left-associative operator is parsed with a right-recursive parser,
    `lean_left` can be used to rearrange the tree structure so that it
    properly reflects the order of association.

    ATTENTION: This transformation function moves forward recursively, so
    grouping nodes must not be eliminated during traversal! This must be
    done in a second pass.
    """
    node = context[-1]
    assert node._children and len(node._children) == 2
    assert node.tag_name in operators
    right = node._children[1]
    if right.tag_name in operators:
        assert right._children and len(right._children) == 2
        a, b, c = node._children[0], right._children[0], right._children[1]
        op1 = node.tag_name
        op2 = right.tag_name
        right.result = (a, b)
        right.tag_name = op1
        node.result = (right, c)
        node.tag_name = op2
        swap_attributes(node, right)
        # continue recursively on the left branch
        lean_left([right], operators)
1,400
def _pattern_data_from_form(form, point_set):
    """Handles the form in which the user determines which algorithms to run
    with the uploaded file, and computes the algorithm results.

    Args:
        form: The form data
        point_set: Point set representation of the uploaded file.

    Returns:
        Musical pattern discovery results of the algorithms chosen by the user.
    """
    pattern_data = []

    # SIATEC
    min_pattern_length = form.getlist('siatec-min-pattern-length')
    min_pattern_length = [int(x) for x in min_pattern_length]
    for i in range(len(min_pattern_length)):
        pattern_data.append(
            siatec.compute(
                point_set=point_set,
                min_pattern_length=min_pattern_length[i]
            )
        )

    # timewarp-invariant algorithm
    window = form.getlist('timewarp-window')
    window = [int(x) for x in window]
    min_pattern_length = form.getlist('timewarp-min-pattern-length')
    min_pattern_length = [int(x) for x in min_pattern_length]
    for i in range(len(window)):
        pattern_data.append(
            time_warp_invariant.compute(
                point_set=point_set,
                window=window[i],
                min_pattern_length=min_pattern_length[i]
            )
        )

    return pattern_data
1,401
def run_proc(*args, **kwargs):
    """Runs a process, dumping output if it fails."""
    sys.stdout.flush()
    proc = subprocess.Popen(
        stdout=subprocess.PIPE, stderr=subprocess.PIPE, *args, **kwargs
    )
    stdout, stderr = proc.communicate()
    if proc.returncode != 0:
        print("\n%s: exited with %d" % (' '.join(args[0]), proc.returncode))
        print("\nstdout:\n%s" % stdout.decode('utf-8'))
        print("\nstderr:\n%s" % stderr.decode('utf-8'))
        sys.exit(-1)
1,402
def test_retries_jitter_single_jrc_down(mock_random):
    """Jitter with single float input and jrc < 1."""
    j = pypyr.retries.jitter(sleep=100, jrc=0.1)
    assert j(0) == 999
    assert j(1) == 999
    assert j(2) == 999

    assert mock_random.mock_calls == [call(10, 100),
                                      call(10, 100),
                                      call(10, 100)]
1,403
def group(help_doc):
    """Creates a group options instance in the module options instance"""
    return __options.group(help_doc)
1,404
def test_bfgs6(dtype, func_x0):
    """
    Feature: ALL TO ALL
    Description: test cases for bfgs in PYNATIVE mode
    Expectation: the result match scipy
    """
    func, x0 = func_x0
    x0 = x0.astype(dtype)
    x0_tensor = Tensor(x0)
    ms_res = msp.optimize.minimize(func(mnp), x0_tensor, method='BFGS',
                                   options=dict(maxiter=None, gtol=1e-6))
    scipy_res = osp.optimize.minimize(func(onp), x0, method='BFGS')
    match_array(ms_res.x.asnumpy(), scipy_res.x, error=5, err_msg=str(ms_res))
1,405
def convert_pdf_to_txt(path, pageid=None): """ This function scrambles the text. There may be values for LAParams that fix it but that seems difficult so see getMonters instead. This function is based on convert_pdf_to_txt(path) from RattleyCooper's Oct 21 '14 at 19:47 answer edited by Trenton McKinney Oct 4 '19 at 4:10 on <https://stackoverflow.com/a/26495057>. Keyword arguments: pageid -- Only process this page id. """ rsrcmgr = PDFResourceManager() retstr = StringIO() codec = 'utf-8' laparams = LAParams() try: device = TextConverter(rsrcmgr, retstr, codec=codec, laparams=laparams) except TypeError as ex: if ("codec" in str(ex)) and ("unexpected keyword" in str(ex)): device = TextConverter(rsrcmgr, retstr, laparams=laparams) fp = open(path, 'rb') interpreter = PDFPageInterpreter(rsrcmgr, device) password = "" maxpages = 0 caching = True pagenos = set() for page in PDFPage.get_pages(fp, pagenos, maxpages=maxpages, password=password,caching=caching, check_extractable=True): # print("page: {}".format(dir(page))) if (pageid is None) or (pageid == page.pageid): print("page.pageid: {}".format(page.pageid)) interpreter.process_page(page) if pageid is not None: break text = retstr.getvalue() print(text) fp.close() device.close() retstr.close() return text
1,406
def get_available_port(preferred_port: Optional[int] = None) -> int:
    """Finds an available port for use in webviz on localhost. If a reload process,
    it will reuse the same port as found in the parent process by using an
    inherited environment variable.

    If preferred_port is given, ports in the range
    [preferred_port, preferred_port + 20) will be tried first, before an OS
    provided random port is used as fallback.
    """

    def is_available(port: int) -> bool:
        with socket.socket() as sock:
            try:
                sock.bind(("localhost", port))
                return True
            except OSError:
                return False

    if os.environ.get("WEBVIZ_PORT") is None:
        port = None
        if preferred_port is not None:
            for port_to_test in range(preferred_port, preferred_port + 20):
                if is_available(port_to_test):
                    port = port_to_test
                    break
        if port is None:
            with socket.socket() as sock:
                sock.bind(("localhost", 0))
                port = sock.getsockname()[1]
        os.environ["WEBVIZ_PORT"] = str(port)
        return port
    return int(os.environ.get("WEBVIZ_PORT"))
1,407
def safe_str(val, default=None):
    """Safely cast value to str.

    Optional: Pass default value. Returned if casting fails.

    Args:
        val: The value to cast.
        default: Value returned if casting fails.

    Returns:
        The value as str, or the default.
    """
    if val is None:
        return default if default is not None else ''
    return safe_cast(val, str, default)
1,408
def mod2():
    """
    Create a simple model for incorporation tests
    """

    class mod2(mod1):

        def __init__(self, name, description):
            super().__init__(name, "Model 1")

            self.a = self.createVariable("a", dimless, "a")
            self.b = self.createVariable("b", dimless, "b")
            self.c = self.createParameter("c", dimless, "c")
            self.c.setValue(2.)

            eq21 = self.a() + self.b() + self.c()
            eq22 = self.b() - self.f()

            self.createEquation("eq21", "Generic equation 2.1", eq21)
            self.createEquation("eq22", "Generic equation 2.2", eq22)

    mod = mod2("M2", "Model 2")
    mod()

    return mod
1,409
def plot_numu_hists(events, ev, save_path, energy=r"E (GeV)"): """Plot the eff and pur plot vs neutrino energy. Args: events (dict): events output ev (pd.DataFrame): events DataFrame save_path (str): path to save plot to """ fig, axs = plt.subplots(1, 1, figsize=(12, 8)) bins = np.arange(0.25, 10.25, 0.5) styles = ["solid", "dashed", "dotted", "dashdot"] for i in range(len(events)): axs.errorbar( bins, events[i]["fom_effs"][2][0][0] * 100, yerr=events[i]["fom_effs"][2][0][1] * 100, color="tab:green", linestyle=styles[i], linewidth=2, ) axs.errorbar( bins, events[i]["fom_effs"][2][1][0] * 100, yerr=events[i]["fom_effs"][2][1][1] * 100, color="tab:olive", linestyle=styles[i], linewidth=2, ) axs.errorbar( bins, events[i]["fom_effs"][2][2][0] * 100, yerr=events[i]["fom_effs"][2][2][1] * 100, color="tab:blue", linestyle=styles[i], linewidth=2, ) axs.errorbar( bins, events[i]["fom_effs"][2][3][0] * 100, yerr=events[i]["fom_effs"][2][3][1] * 100, color="tab:red", linestyle=styles[i], linewidth=2, ) axs.errorbar( bins, events[i]["fom_purs"][2][0] * 100, yerr=events[i]["fom_purs"][2][1] * 100, color="black", linestyle=styles[i], linewidth=2, ) axs.hist( ev[ev["t_comb_cat"] == 1]["t_nu_energy"] / 1000, range=(0, 10), bins=20, color="tab:blue", density=False, alpha=0.3, weights=ev[ev["t_comb_cat"] == 1]["w"] * 0.15, ) axs.set_xlabel(energy, fontsize=30) axs.set_ylabel("Metric (\%)", fontsize=30) axs.set_ylim([0, 100]) axs.set_xlim([0.5, 10]) axs.grid() nuel = Line2D( [0], [0], color="tab:olive", linewidth=2, linestyle="solid", label=r"Beam CC $\nu_{e}$ efficiency", ) osc_nuel = Line2D( [0], [0], color="tab:green", linewidth=2, linestyle="solid", label=r"Appeared CC $\nu_{e}$ efficiency", ) numu = Line2D( [0], [0], color="tab:blue", linewidth=2, linestyle="solid", label=r"Survived CC $\nu_{\mu}$ efficiency", ) nc = Line2D( [0], [0], color="tab:red", linewidth=2, linestyle="solid", label=r"NC efficiency", ) purity = Line2D( [0], [0], color="black", linewidth=2, linestyle="solid", label=r"Survived CC $\nu_{\mu}$ purity", ) axs.legend( handles=[osc_nuel, numu, nuel, nc, purity], loc="center right", fontsize=24 ) save(save_path)
1,410
def zero_order(freq, theta, lcandidat, NumTopic):
    """
    Calculate the Zero-Order Relevance

    Parameters:
    ----------
    freq : Array containing the frequency of occurrences of each word in the whole corpus
    theta : Array containing the frequency of occurrences of each word in each topic
    lcandidat : Array containing each label candidate
    NumTopic : The number of the topic

    Returns:
    -------
    topCandidate : Array containing the name of the top 10 score candidates for a given topic
    """
    # W: matrix containing the score of each word for each topic
    W = np.log(theta / freq)

    # score of all the candidates for topic NumTopic
    score = np.array([])
    for indice in range(len(lcandidat)):
        candidat = lcandidat[indice].split(" ")
        i = id2word.doc2idx(candidat)
        # remove the -1 entries (meaning the word was not found)
        i[:] = [v for v in i if v != -1]
        score = np.append(score, np.sum(W[NumTopic, i]))

    # topValue, topCandidate = top10Score(score, lcandidat)
    dicti = top10ScoreCandidat(score, lcandidat)

    return dicti
1,411
def compute_check_letter(dni_number: str) -> str:
    """
    Given a DNI number, obtain the correct check letter.

    :param dni_number: a valid dni number.
    :return: the check letter for the number as an uppercase, single character string.
    """
    return UPPERCASE_CHECK_LETTERS[int(dni_number) % 23]
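A minimal usage sketch of the check-letter computation above. The UPPERCASE_CHECK_LETTERS table is not shown in this snippet, so the standard 23-letter Spanish DNI sequence is assumed here purely for illustration.

# Assumption: the module defines the standard DNI check-letter table.
UPPERCASE_CHECK_LETTERS = "TRWAGMYFPDXBNJZSQVHLCKE"

print(compute_check_letter("12345678"))  # 12345678 % 23 == 14 -> 'Z'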
1,412
def print_properties_as_table(
        cmd_ctx, properties, table_format, show_list=None):
    """
    Print properties in tabular output format.

    The order of rows is ascending by property name.

    The spinner is stopped just before printing.

    Parameters:

      cmd_ctx (CmdContext): Context object of the command.

      properties (dict): The properties.

      table_format (string): Supported table formats are:
         - "table" -> same like "psql"
         - "plain"
         - "simple"
         - "psql"
         - "rst"
         - "mediawiki"
         - "html"
         - "latex"

      show_list (iterable of string): The property names to be shown.
        If `None`, all properties are shown.
    """
    headers = ['Field Name', 'Value']
    out_str = dict_as_table(properties, headers, table_format, show_list)
    cmd_ctx.spinner.stop()
    click.echo(out_str)
1,413
def p_boolean_expression(p):
    """boolean_expression : expression
    """
1,414
def uptime_check(delay=1):
    """Performs uptime checks to two URLs

    Args:
        delay: The number of seconds delay between two uptime checks,
            optional, defaults to 1 second.

    Returns:
        A dictionary, where the keys are the URL checked, the values are
        the corresponding status (1=UP, 0=DOWN)
    """
    urls = ["https://httpstat.us/503", "https://httpstat.us/200"]
    url_status = {}
    for url in urls:
        url_status[url] = check_url(url)[0]
        time.sleep(delay)
    return url_status
1,415
def entropy_column(input):
    """returns column entropy of entropy matrix. input is motifs"""
    nucleotides = {'A': 0, 'T': 0, 'C': 0, 'G': 0}
    for item in input:
        nucleotides[item] = nucleotides[item] + 1
    for key in nucleotides:
        temp_res = nucleotides[key] / len(input)
        if temp_res > 0:
            nucleotides[key] = temp_res * abs(log2(temp_res))
        else:
            continue
    sum = 0
    for key in nucleotides:
        sum = sum + nucleotides[key]
    # print(nucleotides)
    return sum
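A short, self-contained example of the column entropy above; the motif column is made up for illustration, and log2 comes from the standard library as the function assumes.

from math import log2

column = ['A', 'A', 'T', 'G']  # one aligned motif column
print(entropy_column(column))  # 0.5*1 + 0.25*2 + 0.25*2 = 1.5 bits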
1,416
def assert_array_almost_equal_nulp(x: numpy.float64, y: numpy.float64):
    """
    usage.scipy: 1
    """
    ...
1,417
def truncate(path, length, **kwargs):
    """
    Truncate the file corresponding to path, so that it is at most length bytes in size.

    :param path: The path for the file to truncate.
    :param length: The length in bytes to truncate the file to.
    :param kwargs: Common SMB Session arguments for smbclient.
    """
    with open_file(path, mode='ab', **kwargs) as fd:
        fd.truncate(length)
1,418
def get_partner_from_token(access_token):
    """
    Walk the token->client->user->partner chain so we can connect
    the `LinkageEntity` row to a `PartnerEntity`
    """
    tok = OauthAccessTokenEntity.query.filter_by(
        access_token=access_token).one_or_none()
    log.debug("get_partner_from_token found: {}".format(tok))
    return tok.client.user.partner
1,419
def sumVoteCount(instance):
    """ Returns the sum of the vote count of the instance.

        :param instance: The instance.
        :type instance: preflibtools.instance.preflibinstance.PreflibInstance

        :return: The sum of vote count of the instance.
        :rtype: int
    """
    return instance.sumVoteCount
1,420
def left_d_threshold_sequence(n, m):
    """
    Create a skewed threshold graph with a given number
    of vertices (n) and a given number of edges (m).

    The routine returns an unlabeled creation sequence
    for the threshold graph.

    FIXME: describe algorithm
    """
    cs = ['d'] + ['i'] * (n - 1)  # create sequence with n isolated nodes

    # m < n: not enough edges, make disconnected
    if m < n:
        cs[m] = 'd'
        return cs

    # too many edges
    if m > n * (n - 1) / 2:
        raise ValueError("Too many edges for this many nodes.")

    # Connected case when m > n - 1
    cs[n - 1] = 'd'
    sum = n - 1
    ind = 1
    while sum < m:
        cs[ind] = 'd'
        sum += ind
        ind += 1
    if sum > m:  # be sure not to change the first vertex
        cs[sum - m] = 'i'
    return cs
1,421
def write_json(obj, filename):
    """
    Write a json file, if the output directory exists.
    """
    if not os.path.exists(os.path.dirname(filename)):
        return
    return write_file(sjson.dump(obj), filename)
1,422
def get_user_solutions(username):
    """Returns all solutions submitted by the specified user.

    Args:
        username: The username.

    Returns:
        A solution list.

    Raises:
        KeyError: If the specified user is not found.
    """
    user = _db.users.find_one({'_id': username})
    if not user:
        raise KeyError('User not found: %s' % username)
    solutions = _db.solutions.find(
        {'owner': user['_id']},
        projection=('resemblance_int', 'solution_size', 'problem_id', '_id'))
    # manually select the best (and oldest) solution
    table = {}
    for solution in solutions:
        problem_id = solution['problem_id']
        if problem_id in table:
            old_solution = table[problem_id]
            if solution['resemblance_int'] > old_solution['resemblance_int'] or \
               (solution['resemblance_int'] == old_solution['resemblance_int'] and
                    solution['_id'] < old_solution['_id']):
                table[problem_id] = solution
        else:
            table[problem_id] = solution
    # sort by problem_id
    solutions = sorted(table.values(),
                       key=lambda solution: solution['problem_id'])
    return solutions
1,423
def get_terms_kullback_leibler(output_dir):
    """Returns all zero-order TERMs propensities of structure"""
    if output_dir[-1] != '/':
        output_dir += '/'
    frag_path = output_dir + 'fragments/'
    designscore_path = output_dir + 'designscore/'
    terms_propensities = dict()
    terms = [f.split('.')[0] for f in os.listdir(frag_path)]
    for term in terms:
        rns = get_resnums(frag_path + term + '.pdb')
        rn = int(term.split('_')[-1][1:])
        seq_dict = zero_order_freq(rn, rns, designscore_path + 't1k_' + term + '.seq')
        si_dict = calc_kullback_leibler(seq_dict)
        terms_propensities[rn] = si_dict
    return terms_propensities
1,424
def add_standard_attention_hparams(hparams): """Adds the hparams used by get_standadized_layers.""" # All hyperparameters ending in "dropout" are automatically set to 0.0 # when not in training mode. # hparams used and which should have been defined outside (in # common_hparams): # Global flags # hparams.mode # hparams.hidden_size # Pre-post processing flags # hparams.layer_preprocess_sequence # hparams.layer_postprocess_sequence # hparams.layer_prepostprocess_dropout # hparams.norm_type # hparams.norm_epsilon # Mixture-of-Expert flags # hparams.moe_hidden_sizes # hparams.moe_num_experts # hparams.moe_k # hparams.moe_loss_coef # Attention layers flags hparams.add_hparam("num_heads", 8) hparams.add_hparam("attention_key_channels", 0) hparams.add_hparam("attention_value_channels", 0) hparams.add_hparam("attention_dropout", 0.0) # Attention: Local hparams.add_hparam("attention_loc_block_length", 256) # Attention: Local (unmasked only): How much to look left. hparams.add_hparam("attention_loc_block_width", 128) # Attention: Memory-compressed hparams.add_hparam("attention_red_factor", 3) hparams.add_hparam("attention_red_type", "conv") hparams.add_hparam("attention_red_nonlinearity", "none") # Fully connected layers flags # To be more consistent, should use filter_size to also control the MOE # size if moe_hidden_sizes not set. hparams.add_hparam("filter_size", 2048) hparams.add_hparam("relu_dropout", 0.0) return hparams
1,425
def make_conv2d_layer_class(strides, padding): """Creates a `Conv2DLayer` class. Args: strides: A 2-tuple of positive integers. Strides for the spatial dimensions. padding: A Python string. Can be either 'SAME' or 'VALID'. Returns: conv2d_layer_class: A new `Conv2DLayer` class that closes over the args to this function. """ # TODO(siege): We do this rather than storing parameters explicitly inside the # class because we want to avoid having to use a CompositeTensor-like # functionality, as that requires annotating a large porting of TFP with # expand_composites=True. This isn't worth doing for this experimental # library. class Conv2DLayer(collections.namedtuple('Conv2DLayer', [ 'kernel', ])): """2-dimensional convolution (in the standard deep learning sense) layer. See `tf.nn.conv` for the mathematical details of what this does. Attributes: kernel: A floating point Tensor with shape `[width, height, in_channels, out_channels]`. """ __slots__ = () @property def strides(self): """Strides for the spatial dimensions.""" return strides @property def padding(self): """Padding.""" return padding def __call__(self, x): """Applies the layer to an input. Args: x: A floating point Tensor with shape `[batch, height, width, in_channels]`. Returns: y: A floating point Tensor with shape `[batch, height', width', out_channels]`. The output `width'` and `height'` depend on the value of `padding`. """ @functools.partial( vectorization_util.make_rank_polymorphic, core_ndims=(4, 4)) # In an ideal world we'd broadcast the kernel shape with the batch shape # of the input, but the hardware does not like that. def do_conv(kernel, x): return tf.nn.conv2d( x, filters=kernel, strides=(1,) + self.strides + (1,), padding=self.padding, ) return do_conv(self.kernel, x) return Conv2DLayer
1,426
def get_df1_df2(X: np.array, y: np.array) -> [DataFrame, DataFrame]:
    """
    Get DataFrames for points with labels 1 and -1
    :param X:
    :param y:
    :return:
    """
    x1 = np.array([X[:, i] for i in range(y.shape[0]) if y[i] == 1]).T
    x2 = np.array([X[:, i] for i in range(y.shape[0]) if y[i] == -1]).T
    df1 = DataFrame({'x': list(), 'y': list()})
    df2 = DataFrame({'x': list(), 'y': list()})
    if len(x1) > 0:
        df1 = DataFrame({'x': x1[0], 'y': x1[1]})
    if len(x2) > 0:
        df2 = DataFrame({'x': x2[0], 'y': x2[1]})
    return [df1, df2]
1,427
def olog_savefig(**kwargs):
    """Save a pyplot figure and place it in the Olog

    The **kwargs are all passed onto the :func savefig: function
    and then onto the :func olog: function

    :returns: None
    """
    fig = save_pyplot_figure(**kwargs)
    if 'attachments' in kwargs:
        if isinstance(kwargs['attachments'], list):
            kwargs['attachments'].append(fig)
        else:
            kwargs['attachments'] = [kwargs['attachments'], fig]
    else:
        kwargs['attachments'] = fig
    olog(**kwargs)
    return
1,428
def directory_iterator(source, target):
    """
    Iterates through a directory and symlinks all contained files in a target
    directory with the same structure

    :param source: Directory to iterate through
    :param target: Directory to symlink files to
    """
    for file in os.listdir(source):
        filename = os.fsdecode(file)
        path_source = source + bytes("/", 'utf-8') + file
        path_target = target + bytes("/", 'utf-8') + file
        if os.path.isdir(path_source):
            directory_maker(os.fsdecode(path_target))
            directory_iterator(os.fsencode(path_source), os.fsencode(path_target))
        elif os.path.isfile(path_source):
            try:
                os.symlink(path_source, path_target)
            except:
                print("Symlink Error")
        elif os.access(path_source, os.X_OK):
            try:
                os.symlink(path_source, path_target)
            except:
                print("Symlink Error")
        elif os.path.islink(path_source):
            continue
        else:
            print("Special file ", path_source)
1,429
def dnsip6encode(data):
    """
    encodes the data as a single IPv6 address
    :param data: data to encode
    :return: encoded form
    """
    if len(data) != 16:
        print_error("dnsip6encode: data is more or less than 16 bytes, cannot encode")
        return None
    res = b''
    reslen = 0
    for i in range(len(data)):
        res += base64.b16encode(data[i:i+1])
        reslen += 1
        if reslen % 2 == 0:
            res += b':'
    return res[:-1]
1,430
def gcm_send_bulk_message(registration_ids, data, encoding='utf-8', **kwargs):
    """
    Standalone method to send bulk gcm notifications
    """
    messenger = GCMMessenger(registration_ids, data, encoding=encoding, **kwargs)
    return messenger.send_bulk()
1,431
def apply_net_video(net, arr, argmax_output=True, full_faces='auto'): """Apply a preloaded network to input array coming from a video of one eye. Note that there is (intentionally) no function that both loads the net and applies it; loading the net should ideally only be done once no matter how many times it is run on arrays. Arguments: net: Network loaded by load_net arr: numpy array of shape (h, w, 3) or (batch_size, h, w, 3) with colors in RGB order generally (h, w) = (4000, 6000) for full faces and (4000, 3000) for half-faces although inputs are all resized to (256, 256) argmax_output: if True, apply argmax to output values to get categorical mask full_faces: whether inputs are to be treated as full faces; note that the networks take half-faces By default, base decision on input size Returns: Segmentation mask and potentially regression output. Regression output present if a regression-generating network was used Segmentation mask a numpy array of shape (batch_size, h, w) if argmax_output else (batch_size, h, w, num_classes) Regression output a numpy array of shape (batch_size, 4) for half-faces or (batch_size, 8) for full faces; one iris's entry is in the format (x,y,r,p) with p the predicted probability of iris presence; for full faces, each entry is (*right_iris, *left_iris)""" if len(arr.shape)==3: arr = arr[np.newaxis] tens = torch.tensor(arr.transpose(0,3,1,2), dtype=torch.float) orig_tens_size = tens.size()[2:] input_tensor = F.interpolate(tens, size=(256,256), mode='bilinear', align_corners=False) input_tensor = input_tensor.cuda() with torch.no_grad(): output = net(input_tensor) if 'reg' in net.outtype: seg, reg = output reg = reg.detach().cpu().numpy() reg = np.concatenate([reg[:,:3], sigmoid(reg[:,3:])], 1) else: seg = output segmentation = seg.detach().cpu() segmentation = F.interpolate(segmentation, size=orig_tens_size, mode='bilinear', align_corners=False) seg_arr = segmentation.numpy().transpose(0,2,3,1) seg_arr = cleanupseg(seg_arr) if argmax_output: seg_arr = np.argmax(seg_arr, 3) if 'reg' in net.outtype: return seg_arr, reg else: return seg_arr
1,432
def Storeligandnames(csv_file):
    """It identifies the names of the ligands in the csv file

    PARAMETERS
    ----------
    csv_file : filename of the csv file with the ligands

    RETURNS
    -------
    lig_list : list of ligand names (list of strings)
    """
    Lig = open(csv_file, "rt")
    lig_aux = []
    for ligand in Lig:
        lig_aux.append(ligand.replace(" ", "_").replace("\n", "").lower())
    return lig_aux
1,433
def chunk_to_rose(station): """ Builds data suitable for Plotly's wind roses from a subset of data. Given a subset of data, group by direction and speed. Return accumulator of whatever the results of the incoming chunk are. """ # bin into three different petal count categories: 8pt, 16pt, and 26pt bin_list = [ list(range(5, 356, 10)), list(np.arange(11.25, 349, 22.5)), list(np.arange(22.5, 338, 45)), ] bname_list = [ list(range(1, 36)), list(np.arange(2.25, 34, 2.25)), list(np.arange(4.5, 32, 4.5)), ] # Accumulator dataframe. proc_cols = [ "sid", "direction_class", "speed_range", "count", "frequency", "decade", "pcount", ] accumulator = pd.DataFrame(columns=proc_cols) for bins, bin_names, pcount in zip(bin_list, bname_list, [36, 16, 8]): # Assign directions to bins. # We'll use the exceptional 'NaN' class to represent # 355º - 5º, which would otherwise be annoying. # Assign 0 to that direction class. ds = pd.cut(station["wd"], bins, labels=bin_names) station = station.assign(direction_class=ds.cat.add_categories("0").fillna("0")) # First compute yearly data. # For each direction class... directions = station.groupby(["direction_class"]) for direction, d_group in directions: # For each wind speed range bucket... for bucket, bucket_info in speed_ranges.items(): d = d_group.loc[ ( station["ws"].between( bucket_info["range"][0], bucket_info["range"][1], inclusive=True, ) == True ) ] count = len(d.index) full_count = len(station.index) frequency = 0 if full_count > 0: frequency = round(((count / full_count) * 100), 2) accumulator = accumulator.append( { "sid": station["sid"].values[0], "direction_class": direction, "speed_range": bucket, "count": count, "frequency": frequency, "decade": station["decade"].iloc[0], "month": station["month"].iloc[0], "pcount": pcount, }, ignore_index=True, ) accumulator = accumulator.astype( {"direction_class": np.float32, "count": np.int32, "frequency": np.float32,} ) return accumulator
1,434
def loadKiosk(eventid):
    """Renders kiosk for specified event."""
    event = Event.get_by_id(eventid)
    return render_template("/events/eventKiosk.html",
                           event=event,
                           eventid=eventid)
1,435
def bson_encode(data: ENCODE_TYPES) -> bytes:
    """
    Encodes ``data`` to bytes. BSON records in list are delimited by '\u241E'.
    """
    if data is None:
        return b""
    elif isinstance(data, list):
        encoded = BSON_RECORD_DELIM.join(_bson_encode_single(r) for r in data)
        # We are going to put a delimiter right at the head as a signal that this is
        # a list of bson files, even if it is only one record
        encoded = BSON_RECORD_DELIM + encoded
        return encoded
    else:
        return _bson_encode_single(data)
1,436
def _GetTailStartingTimestamp(filters, offset=None):
    """Returns the starting timestamp to start streaming logs from.

    Args:
      filters: [str], existing filters, should not contain timestamp constraints.
      offset: int, how many entries ago we should pick the starting timestamp.
        If not provided, unix time zero will be returned.

    Returns:
      str, A timestamp that can be used as lower bound or None if no lower bound
        is necessary.
    """
    if not offset:
        return None
    entries = list(logging_common.FetchLogs(log_filter=' AND '.join(filters),
                                            order_by='DESC',
                                            limit=offset))
    if len(entries) < offset:
        return None
    return list(entries)[-1].timestamp
1,437
def main(data_config_file, app_config_file):
    """Print delta table schemas."""
    logger.info('data config: ' + data_config_file)
    logger.info('app config: ' + app_config_file)

    # load configs
    ConfigSet(name=DATA_CFG, config_file=data_config_file)
    cfg = ConfigSet(name=APP_CFG, config_file=app_config_file)

    # get list of delta tables to load
    tables = cfg.get_value(DATA_CFG + '::$.load_delta')

    for table in tables:
        path = table['path']
        spark = SparkConfig().spark_session(config_name=APP_CFG, app_name="grapb_db")
        df = spark.read.format('delta').load(path)
        df.printSchema()

    return 0
1,438
def porosity_to_n(porosity, GaN_n, air_n):
    """Convert a porosity to a refractive index
    using the volume averaging theory"""
    porous_n = np.sqrt((1 - porosity) * GaN_n * GaN_n + porosity * air_n * air_n)
    return porous_n
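A quick numeric check of the volume-averaging formula above, n_eff = sqrt((1-p)*n_GaN^2 + p*n_air^2). The index values below are illustrative assumptions, not values from the source.

import numpy as np

# Assumed indices: GaN ~2.4, air 1.0, with 30% porosity.
print(porosity_to_n(0.3, 2.4, 1.0))  # sqrt(0.7*5.76 + 0.3*1.0) ~= 2.08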
1,439
def _indexing_coordi(data, coordi_size, itm2idx):
    """
    function: fashion item numbering
    """
    print('indexing fashion coordi')
    vec = []
    for d in range(len(data)):
        vec_crd = []
        for itm in data[d]:
            ss = np.array([itm2idx[j][itm[j]] for j in range(coordi_size)])
            vec_crd.append(ss)
        vec_crd = np.array(vec_crd, dtype='int32')
        vec.append(vec_crd)
    return np.array(vec, dtype='int32')
1,440
def plot_precentile(arr_sim, arr_ref, num_bins=1000, show_top_percentile=1.0): """ Plot top percentile (as specified by show_top_percentile) of best restults in arr_sim and compare against reference values in arr_ref. Args: ------- arr_sim: numpy array Array of similarity values to evaluate. arr_ref: numpy array Array of reference values to evaluate the quality of arr_sim. num_bins: int Number of bins to divide data (default = 1000) show_top_percentile Choose which part to plot. Will plot the top 'show_top_percentile' part of all similarity values given in arr_sim. Default = 1.0 """ start = int(arr_sim.shape[0] * show_top_percentile / 100) idx = np.argpartition(arr_sim, -start) starting_point = arr_sim[idx[-start]] if starting_point == 0: print("not enough datapoints != 0 above given top-precentile") # Remove all data below show_top_percentile low_as = np.where(arr_sim < starting_point)[0] length_selected = arr_sim.shape[0] - low_as.shape[0] # start+1 data = np.zeros((2, length_selected)) data[0, :] = np.delete(arr_sim, low_as) data[1, :] = np.delete(arr_ref, low_as) data = data[:, np.lexsort((data[1, :], data[0, :]))] ref_score_cum = [] for i in range(num_bins): low = int(i * length_selected / num_bins) # high = int((i+1) * length_selected/num_bins) ref_score_cum.append(np.mean(data[1, low:])) ref_score_cum = np.array(ref_score_cum) fig, ax = plt.subplots(figsize=(6, 6)) plt.plot( (show_top_percentile / num_bins * (1 + np.arange(num_bins)))[::-1], ref_score_cum, color='black') plt.xlabel("Top percentile of spectral similarity score g(s,s')") plt.ylabel("Mean molecular similarity (f(t,t') within that percentile)") return ref_score_cum
1,441
def set_xfce4_shortcut_avail(act_args, key, progs):
    """Set the shortcut associated with the given key to the first available program"""
    for cmdline in progs:
        # Split the command line to find the used program
        cmd_split = cmdline.split(None, 1)
        cmd_split[0] = find_prog_in_path(cmd_split[0])
        if cmd_split[0] is not None:
            return set_xfce4_shortcut(act_args, key, ' '.join(cmd_split))
    logger.warning("no program found for shortcut %s", key)
    return True
1,442
def accesscontrol(check_fn):
    """Decorator for access controlled callables.

    In the example scenario where access control is based solely on user
    names (user objects are `str`), the following is an example usage of
    this decorator::

        @accesscontrol(lambda user: user == 'bob')
        def only_bob_can_call_this():
            pass

    Class methods are decorated in the same way.

    :param check_fn: A callable, taking a user object argument, and
        returning a boolean value, indicating whether the user (user object
        argument) is allowed access to the decorated callable."""
    if not callable(check_fn):
        raise TypeError(check_fn)

    def decorator(wrapped):
        @wraps(wrapped)
        def decorated(*args, **kwargs):
            if ACL.current_user is None:
                raise AccessDeniedError(decorated)
            if not ACL.managed_funcs[decorated](ACL.current_user):
                raise AccessDeniedError(decorated)
            return wrapped(*args, **kwargs)
        ACL.managed_funcs[decorated] = check_fn
        return decorated
    return decorator
1,443
def load_dataset(path):
    """
    Load data from the file

    :param: path: path to the data
    :return: pd dataframes, train & test data
    """
    if '.h5' in str(path):
        dataframe = pd.read_hdf(path)
    elif '.pkl' in str(path):
        dataframe = pd.read_pickle(path)
    else:
        print('Wrong file')
        sys.exit()

    # Make it multiindex
    dataframe['event'] = dataframe.index
    dataframe = dataframe.set_index(['sample_nr', 'event'])
    dataframe = dataframe.reset_index('event', drop=True)
    dataframe = dataframe.set_index(dataframe.groupby(level=0).cumcount().rename('event'), append=True)

    return dataframe
1,444
def positionPctProfit():
    """
    Position Percent Profit

    The percentage profit/loss of each position. Returns a dictionary with
    market symbol keys and percent values.

    :return: dictionary
    """
    psnpct = dict()
    for position in portfolio:
        # Strings are returned from API; convert to floating point type
        current = float(position.current_price)
        entry = float(position.avg_entry_price)
        psnpct[position.symbol] = ((current - entry) / entry) * 100
    return psnpct
1,445
def _parse_fields(vel_field, corr_vel_field):
    """ Parse and return the radar fields for dealiasing. """
    if vel_field is None:
        vel_field = get_field_name('velocity')
    if corr_vel_field is None:
        corr_vel_field = get_field_name('corrected_velocity')
    return vel_field, corr_vel_field
1,446
def get_species_charge(species):
    """
    Returns the species charge (only electrons so far).
    """
    if species == "electron":
        return qe
    else:
        raise ValueError(f'get_species_charge: Species "{species}" is not supported.')
1,447
def orjson_dumps(
    obj: Dict[str, Any], *, default: Callable[..., Any] = pydantic_encoder
) -> str:
    """Default `json_dumps` for TIA.

    Args:
        obj (BaseModel): The object to 'dump'.
        default (Callable[..., Any], optional): The default encoder.
            Defaults to pydantic_encoder.

    Returns:
        str: The json formatted string of the object.
    """
    return orjson.dumps(obj, default=default).decode("utf-8")
1,448
def KNN_classification(dataset, filename): """ Classification of data with k-nearest neighbors, followed by plotting of ROC and PR curves. Parameters --- dataset: the input dataset, containing training and test split data, and the corresponding labels for binding- and non-binding sequences. filename: an identifier to distinguish different plots from each other. Returns --- stats: array containing classification accuracy, precision and recall """ # Import and one hot encode training/test set X_train, X_test, y_train, y_test = prepare_data(dataset) # Fitting classifier to the training set KNN_classifier = KNeighborsClassifier( n_neighbors=100, metric='minkowski', p=2) KNN_classifier.fit(X_train, y_train) # Predicting the test set results y_pred = KNN_classifier.predict(X_test) y_score = KNN_classifier.predict_proba(X_test) # ROC curve title = 'KNN ROC curve (Train={})'.format(filename) plot_ROC_curve( y_test, y_score[:, 1], plot_title=title, plot_dir='figures/KNN_ROC_Test_{}.png'.format(filename) ) # Precision-recall curve title = 'KNN Precision-Recall curve (Train={})'.format(filename) plot_PR_curve( y_test, y_score[:, 1], plot_title=title, plot_dir='figures/KNN_P-R_Test_{}.png'.format(filename) ) # Calculate statistics stats = calc_stat(y_test, y_pred) # Return statistics return stats
1,449
def projection_error(pts_3d: np.ndarray, camera_k: np.ndarray,
                     pred_pose: np.ndarray, gt_pose: np.ndarray):
    """
    Average distance of projections of object model vertices [px]
    :param pts_3d: model points, shape of (n, 3)
    :param camera_k: camera intrinsic matrix, shape of (3, 3)
    :param pred_pose: predicted rotation and translation, shape (3, 4), [R|t]
    :param gt_pose: ground truth rotation and translation, shape (3, 4), [R|t]
    :return: the returned error, unit is pixel
    """
    # projection shape (n, 2)
    pred_projection: np.ndarray = project_3d_2d(pts_3d=pts_3d, camera_intrinsic=camera_k,
                                                transformation=pred_pose)
    gt_projection: np.ndarray = project_3d_2d(pts_3d=pts_3d, camera_intrinsic=camera_k,
                                              transformation=gt_pose)
    error = np.linalg.norm(gt_projection - pred_projection, axis=1).mean()
    return error
1,450
def test_uploads_listings(client, benchmark_id): """Tests uploading and retrieving a file.""" # -- Setup ---------------------------------------------------------------- # Create new user and submission. Then upload a single file. user_1, token_1 = create_user(client, '0000') headers = {HEADER_TOKEN: token_1} url = CREATE_SUBMISSION.format(config.API_PATH(), benchmark_id) r = client.post(url, json={labels.GROUP_NAME: 'S1'}, headers=headers) submission_id = r.json[labels.GROUP_ID] data = dict() with open(SMALL_FILE, 'rb') as f: data['file'] = (io.BytesIO(f.read()), 'names.txt') url = SUBMISSION_FILES.format(config.API_PATH(), submission_id) r = client.post( url, data=data, content_type='multipart/form-data', headers=headers ) assert r.status_code == 201 file_id = r.json[flbls.FILE_ID] # -- Listing uploaded files ----------------------------------------------- r = client.get(url, headers=headers) assert r.status_code == 200 doc = r.json assert len(doc[flbls.FILE_LIST]) == 1 # Delete the file. url = SUBMISSION_FILE.format(config.API_PATH(), submission_id, file_id) r = client.delete(url, headers=headers) assert r.status_code == 204 url = SUBMISSION_FILES.format(config.API_PATH(), submission_id) r = client.get(url, headers=headers) assert r.status_code == 200 doc = r.json assert len(doc[flbls.FILE_LIST]) == 0
1,451
def fetch_response(update, context):
    """Echo the user message."""
    response = update.message.text
    if response.startswith("sleep-"):
        update.message.reply_text(response.split('-')[1])
        measure_me(update, context, state='body')
    elif response.startswith("body-"):
        update.message.reply_text(response.split('-')[1])
        measure_me(update, context, state='mind')
    elif response.startswith("mind-"):
        update.message.reply_text(response.split('-')[1])
        measure_me(update, context, state='motivation')
    elif response.startswith("motivation-"):
        update.message.reply_text(response.split('-')[1])
        measure_me(update, context, state='dream')
    elif response.startswith("dream-"):
        update.message.reply_text(response.split('-')[1])
        measure_me(update, context, state='done')
    else:
        update.message.reply_text(update.message.text)
1,452
def ph_update(dump, line, ax, high_contrast):
    """
    :param dump: Believe this is needed as garbage data goes into first parameter
    :param line: The line to be updated
    :param ax: The plot the line is currently on
    :param high_contrast: This specifies the color contrast of the map.
        0=regular contrast, 1=heightened contrast

    Description: Updates the ph line plot after pulling new data.
    """
    plt.cla()
    update_data()
    values = pd.Series(dataList[3])
    if high_contrast:
        line = ax.plot(values, linewidth=3.0)
    else:
        line = ax.plot(values)
    return line
1,453
def get_percent_match(uri, ucTableName):
    """
    Get percent match from USEARCH

    Args:
        uri: URI of part
        ucTableName: UClust table

    Returns:
        Percent match if available, else -1
    """
    with open(ucTableName, 'r') as read:
        uc_reader = read.read()
        lines = uc_reader.splitlines()
        for line in lines:
            line = line.split()
            if line[9] == uri:
                return line[3]
    return -1
1,454
def get_rm_rf(earliest_date, symbol='000300'):
    """
    Rm-Rf (market return minus risk-free return)

    Benchmark stock index return minus the 1-month treasury bill return.

    Returns a tuple of pd.Series indexed by date: ('Mkt-RF', 'RF').
    """
    start = '1990-1-1'
    end = pd.Timestamp('today')
    benchmark_returns = get_cn_benchmark_returns(symbol).loc[earliest_date:]
    treasury_returns = get_treasury_data(start, end)['1month'][earliest_date:]
    # fill in missing values
    treasury_returns = treasury_returns.reindex(
        benchmark_returns.index, method='ffill')
    return benchmark_returns, treasury_returns
1,455
async def detect_custom(model: str = Form(...), image: UploadFile = File(...)):
    """
    Performs a prediction for a specified image using one of the available models.
    :param model: Model name or model hash
    :param image: Image file
    :return: Model's Bounding boxes
    """
    draw_boxes = False
    try:
        output = await dl_service.run_model(model, image, draw_boxes)
        error_logging.info('request successful;' + str(output))
        return output
    except ApplicationError as e:
        error_logging.warning(model + ';' + str(e))
        return ApiResponse(success=False, error=e)
    except Exception as e:
        error_logging.error(model + ' ' + str(e))
        return ApiResponse(success=False, error='unexpected server error')
1,456
def main():
    """Generate HDL"""
    args = parse_args()

    # Output file name may have to change
    output_hdl = join(args.hdl_root, "{0}.v".format(args.top_module))
    # Language should always be verilog, but who knows what can happen
    hdl_lang = "verilog"

    g = tree(args.width, args.start)
    for a in args.transforms.split("_")[1:]:
        getattr(g, a.split("@")[0])(*[int(x) for x in a.split("@")[1].split(",")])
    g.hdl(out=output_hdl, mapping=args.mapping, language=hdl_lang, top_module=args.top_module)
1,457
def transcribe(audio_path, model_path, transcr_folder="./tmp/piano_env_tmp/", transcr_filename="transcription.flac", verbose=VERBOSE): """Transcribing audio using trained model. """ audio, sr = soundfile.read(audio_path, dtype='int16') audio = torch.FloatTensor([audio]).div_(32768.0) q_net = models.DeepQNetwork() q_net.load_state_dict(torch.load(model_path)) q_net.eval() env = environments.PianoTranscription(verbose=verbose, tmp_directory=transcr_folder) state = env.initialize_sequence(audio) done = False step = 0 while not done: with torch.no_grad(): action = q_net(state).argmax().view(1,1) if verbose > 0: print("Step: {}, Action: {}".format(step, action)) # Add actions without computing reward # (No simulation necessary after each action) state, done = env.take_action_without_reward(action) step += 1 env.create_simulated_audio(audio_file_name=transcr_filename)
1,458
def _get_top_ranking_propoals(probs):
    """Get top ranking proposals by k-means"""
    dev = probs.device
    kmeans = KMeans(n_clusters=5).fit(probs.cpu().numpy())
    high_score_label = np.argmax(kmeans.cluster_centers_)

    index = np.where(kmeans.labels_ == high_score_label)[0]

    if len(index) == 0:
        index = np.array([np.argmax(probs)])

    return torch.from_numpy(index).to(dev)
1,459
def get_available_configs(config_dir, register=None):
    """
    Return (or update) a dictionary *register* that contains all config files in *config_dir*.
    """
    if register is None:
        register = dict()

    for config_file in os.listdir(config_dir):
        if config_file.startswith('_') or not config_file.lower().endswith('.yaml'):
            continue
        name = os.path.splitext(config_file)[0]
        config = load_yaml(os.path.join(config_dir, config_file))
        config['base_catalog_dir'] = base_catalog_dir
        if 'fn' in config:
            config['fn'] = os.path.join(base_catalog_dir, config['fn'])
        register[name] = config

    return register
1,460
def test_simulated_annealing_for_valid_solution_warning_raised(slots, events):
    """
    Test that a warning is given if a lower bound is passed and not reached
    in given number of iterations.
    """
    def objective_function(array):
        return len(list(array_violations(array, events, slots)))

    array = np.array([
        [1, 0, 0, 0, 0, 0, 0],
        [0, 0, 1, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 1, 0]
    ])
    assert objective_function(array) == 2

    np.random.seed(0)
    with warnings.catch_warnings(record=True) as w:
        X = simulated_annealing(initial_array=array,
                                objective_function=objective_function,
                                lower_bound=0,
                                max_iterations=1)

        assert objective_function(X) == 1
        assert len(w) == 1
1,461
async def find_quote_by_attributes(quotes: Dict[str, Quote], attribute: str,
                                   values: List[str]) -> Quote or None:
    """
    Find a quote by its attributes

    :param quotes: The dict containing all current quotes
    :param attribute: the attribute by which to find the quote
    :param values: the values of the attribute the quote has to match
    :return: the Quote that has been found, None otherwise
    """
    # TODO: implement this :)
    return None
1,462
def externals_test_setup(sbox): """Set up a repository in which some directories have the externals property, and set up another repository, referred to by some of those externals. Both repositories contain greek trees with five revisions worth of random changes, then in the sixth revision the first repository -- and only the first -- has some externals properties set. ### Later, test putting externals on the second repository. ### The arrangement of the externals in the first repository is: /A/B/ ==> ^/A/D/gamma gamma /A/C/ ==> exdir_G <scheme>:///<other_repos>/A/D/G ../../../<other_repos_basename>/A/D/H@1 exdir_H /A/D/ ==> ^/../<other_repos_basename>/A exdir_A //<other_repos>/A/D/G/ exdir_A/G/ exdir_A/H -r 1 <scheme>:///<other_repos>/A/D/H /<some_paths>/A/B x/y/z/blah A dictionary is returned keyed by the directory created by the external whose value is the URL of the external. """ # The test itself will create a working copy sbox.build(create_wc = False) svntest.main.safe_rmtree(sbox.wc_dir) wc_init_dir = sbox.add_wc_path('init') # just for setting up props repo_dir = sbox.repo_dir repo_url = sbox.repo_url other_repo_dir, other_repo_url = sbox.add_repo_path('other') other_repo_basename = os.path.basename(other_repo_dir) # Get a scheme relative URL to the other repository. scheme_relative_other_repo_url = other_repo_url[other_repo_url.find(':')+1:] # Get a server root relative URL to the other repository by trimming # off the first three /'s. server_relative_other_repo_url = other_repo_url for i in range(3): j = server_relative_other_repo_url.find('/') + 1 server_relative_other_repo_url = server_relative_other_repo_url[j:] server_relative_other_repo_url = '/' + server_relative_other_repo_url # These files will get changed in revisions 2 through 5. mu_path = os.path.join(wc_init_dir, "A/mu") pi_path = os.path.join(wc_init_dir, "A/D/G/pi") lambda_path = os.path.join(wc_init_dir, "A/B/lambda") omega_path = os.path.join(wc_init_dir, "A/D/H/omega") # These are the directories on which `svn:externals' will be set, in # revision 6 on the first repo. B_path = os.path.join(wc_init_dir, "A/B") C_path = os.path.join(wc_init_dir, "A/C") D_path = os.path.join(wc_init_dir, "A/D") # Create a working copy. svntest.actions.run_and_verify_svn(None, None, [], 'checkout', repo_url, wc_init_dir) # Make revisions 2 through 5, but don't bother with pre- and # post-commit status checks. svntest.main.file_append(mu_path, "Added to mu in revision 2.\n") svntest.actions.run_and_verify_svn(None, None, [], 'ci', '-m', 'log msg', '--quiet', wc_init_dir) svntest.main.file_append(pi_path, "Added to pi in revision 3.\n") svntest.actions.run_and_verify_svn(None, None, [], 'ci', '-m', 'log msg', '--quiet', wc_init_dir) svntest.main.file_append(lambda_path, "Added to lambda in revision 4.\n") svntest.actions.run_and_verify_svn(None, None, [], 'ci', '-m', 'log msg', '--quiet', wc_init_dir) svntest.main.file_append(omega_path, "Added to omega in revision 5.\n") svntest.actions.run_and_verify_svn(None, None, [], 'ci', '-m', 'log msg', '--quiet', wc_init_dir) # Get the whole working copy to revision 5. expected_output = svntest.wc.State(wc_init_dir, { }) svntest.actions.run_and_verify_update(wc_init_dir, expected_output, None, None) # Now copy the initial repository to create the "other" repository, # the one to which the first repository's `svn:externals' properties # will refer. After this, both repositories have five revisions # of random stuff, with no svn:externals props set yet. 
svntest.main.copy_repos(repo_dir, other_repo_dir, 5) # This is the returned dictionary. external_url_for = { } external_url_for["A/B/gamma"] = "^/A/D/gamma" external_url_for["A/C/exdir_G"] = other_repo_url + "/A/D/G" external_url_for["A/C/exdir_H"] = "../../../" + \ other_repo_basename + \ "/A/D/H@1" # Set up the externals properties on A/B/, A/C/ and A/D/. externals_desc = \ external_url_for["A/B/gamma"] + " gamma\n" change_external(B_path, externals_desc, commit=False) externals_desc = \ "exdir_G " + external_url_for["A/C/exdir_G"] + "\n" + \ external_url_for["A/C/exdir_H"] + " exdir_H\n" change_external(C_path, externals_desc, commit=False) external_url_for["A/D/exdir_A"] = "^/../" + other_repo_basename + "/A" external_url_for["A/D/exdir_A/G/"] = scheme_relative_other_repo_url + \ "/A/D/G/" external_url_for["A/D/exdir_A/H"] = other_repo_url + "/A/D/H" external_url_for["A/D/x/y/z/blah"] = server_relative_other_repo_url + "/A/B" externals_desc = \ external_url_for["A/D/exdir_A"] + " exdir_A" + \ "\n" + \ external_url_for["A/D/exdir_A/G/"] + " exdir_A/G/" + \ "\n" + \ "exdir_A/H -r 1 " + external_url_for["A/D/exdir_A/H"] + \ "\n" + \ external_url_for["A/D/x/y/z/blah"] + " x/y/z/blah" + \ "\n" change_external(D_path, externals_desc, commit=False) # Commit the property changes. expected_output = svntest.wc.State(wc_init_dir, { 'A/B' : Item(verb='Sending'), 'A/C' : Item(verb='Sending'), 'A/D' : Item(verb='Sending'), }) expected_status = svntest.actions.get_virginal_state(wc_init_dir, 5) expected_status.tweak('A/B', 'A/C', 'A/D', wc_rev=6, status=' ') svntest.actions.run_and_verify_commit(wc_init_dir, expected_output, expected_status, None, wc_init_dir) return external_url_for
1,463
def six_bus(vn_high=20, vn_low=0.4, length_km=0.03, std_type='NAYY 4x50 SE', battery_locations=[3, 6], init_soc=0.5, energy_capacity=20.0, static_feeds=None, gen_locations=None, gen_p_max=0.0, gen_p_min=-50.0, storage_p_max=50.0, storage_p_min=-50.0): """This function creates the network model for the 6 bus POC network from scratch. Buses and lines are added to an empty network based on a hard-coded topology and parameters from the config file (seen as inputs). The only controllable storage added in this network are batteries, and the input static_feeds is used to add loads and static generators which are not controlled by the agent. The first value in the series is taken for initialization of those elements. """ net = pp.create_empty_network(name='6bus', f_hz=60., sn_kva=100.) # create buses for i in range(8): nm = 'bus{}'.format(i) if i == 0: pp.create_bus(net, name=nm, vn_kv=vn_high) elif i == 1: pp.create_bus(net, name=nm, vn_kv=vn_low) else: if i <= 4: zn = 'Side1' else: zn = 'Side2' pp.create_bus(net, name=nm, zone=zn, vn_kv=vn_low) # create grid connection pp.create_ext_grid(net, 0) # create lines pp.create_line(net, 0, 1, length_km=length_km, std_type=std_type, name='line0') pp.create_line(net, 1, 2, length_km=length_km, std_type=std_type, name='line1') pp.create_line(net, 2, 3, length_km=length_km, std_type=std_type, name='line2') pp.create_line(net, 2, 4, length_km=length_km, std_type=std_type, name='line3') pp.create_line(net, 1, 5, length_km=length_km, std_type=std_type, name='line4') pp.create_line(net, 5, 6, length_km=length_km, std_type=std_type, name='line5') pp.create_line(net, 5, 7, length_km=length_km, std_type=std_type, name='line6') # add controllable storage for idx, bus_number in enumerate(battery_locations): energy_capacity_here = energy_capacity init_soc_here = init_soc if np.size(energy_capacity) > 1: energy_capacity_here = energy_capacity[idx] if np.size(init_soc) > 1: init_soc_here = init_soc[idx] add_battery(net, bus_number=bus_number, p_init=0.0, energy_capacity=energy_capacity_here, init_soc=init_soc_here, max_p=storage_p_max, min_p=storage_p_min) # Add controllable generator if gen_locations is not None: for idx, bus_number in enumerate(gen_locations): pp.create_gen(net, bus_number, p_kw=0.0, min_q_kvar=0.0, max_q_kvar=0.0, min_p_kw=gen_p_min, max_p_kw=gen_p_max) ##### TODO : Have different limits for different generators and storage ##### # add loads and static generation if static_feeds is None: print('No loads or generation assigned to network') else: if len(static_feeds) > 0: for key, val in static_feeds.items(): init_flow = val[0] print('init_flow: ', init_flow, 'at bus: ', key) if init_flow > 0: pp.create_load(net, bus=key, p_kw=init_flow, q_kvar=0) else: pp.create_sgen(net, bus=key, p_kw=init_flow, q_kvar=0) return net
1,464
def _add_exccess_het_filter( b: hb.Batch, input_vcf: hb.ResourceGroup, overwrite: bool, excess_het_threshold: float = 54.69, interval: Optional[hb.ResourceGroup] = None, output_vcf_path: Optional[str] = None, ) -> Job: """ Filter a large cohort callset on Excess Heterozygosity. The filter applies only to large callsets (`not is_small_callset`) Requires all samples to be unrelated. ExcessHet estimates the probability of the called samples exhibiting excess heterozygosity with respect to the null hypothesis that the samples are unrelated. The higher the score, the higher the chance that the variant is a technical artifact or that there is consanguinuity among the samples. In contrast to Inbreeding Coefficient, there is no minimal number of samples for this annotation. Returns: a Job object with a single output j.output_vcf of type ResourceGroup """ job_name = 'Joint genotyping: ExcessHet filter' if utils.can_reuse(output_vcf_path, overwrite): return b.new_job(job_name + ' [reuse]') j = b.new_job(job_name) j.image(utils.GATK_IMAGE) j.memory('8G') j.storage(f'32G') j.declare_resource_group( output_vcf={'vcf.gz': '{root}.vcf.gz', 'vcf.gz.tbi': '{root}.vcf.gz.tbi'} ) j.command( f"""set -euo pipefail # Captring stderr to avoid Batch pod from crashing with OOM from millions of # warning messages from VariantFiltration, e.g.: # > JexlEngine - ![0,9]: 'ExcessHet > 54.69;' undefined variable ExcessHet gatk --java-options -Xms3g \\ VariantFiltration \\ --filter-expression 'ExcessHet > {excess_het_threshold}' \\ --filter-name ExcessHet \\ {f'-L {interval} ' if interval else ''} \\ -O {j.output_vcf['vcf.gz']} \\ -V {input_vcf['vcf.gz']} \\ 2> {j.stderr} """ ) if output_vcf_path: b.write_output(j.output_vcf, output_vcf_path.replace('.vcf.gz', '')) return j
1,465
def _get_config_and_script_paths(
    parent_dir: Path,
    config_subdir: Union[str, Tuple[str, ...]],
    script_subdir: Union[str, Tuple[str, ...]],
    file_stem: str,
) -> Dict[str, Path]:
    """Returns the node config file and its corresponding script file."""
    if isinstance(config_subdir, tuple):
        config_subpath = Path(*config_subdir)
    else:
        config_subpath = Path(config_subdir)
    if isinstance(script_subdir, tuple):
        script_subpath = Path(*script_subdir)
    else:
        script_subpath = Path(script_subdir)

    return {
        "config": parent_dir / config_subpath / f"{file_stem}.yml",
        "script": parent_dir / script_subpath / f"{file_stem}.py",
    }
1,466
def mongodb_get_users():
    """Connects to mongodb and returns users collection"""
    # TODO parse: MONGOHQ_URL
    connection = Connection(env['MONGODB_HOST'], int(env['MONGODB_PORT']))
    if 'MONGODB_NAME' in env and 'MONGODB_PW' in env:
        connection[env['MONGODB_DBNAME']].authenticate(env['MONGODB_NAME'], env['MONGODB_PW'])
    return connection[env['MONGODB_DBNAME']].users
1,467
def macro_china_hk_cpi_ratio() -> pd.DataFrame:
    """
    Eastmoney - economic data overview - Hong Kong (China) - CPI year-over-year rate
    https://data.eastmoney.com/cjsj/foreign_8_1.html
    :return: CPI year-over-year rate
    :rtype: pandas.DataFrame
    """
    url = "https://datainterface.eastmoney.com/EM_DataCenter/JS.aspx"
    params = {
        "type": "GJZB",
        "sty": "HKZB",
        "js": "({data:[(x)],pages:(pc)})",
        "p": "1",
        "ps": "2000",
        "mkt": "8",
        "stat": "1",
        "pageNo": "1",
        "pageNum": "1",
        "_": "1621332091873",
    }
    r = requests.get(url, params=params)
    data_text = r.text
    data_json = demjson.decode(data_text[1:-1])
    temp_df = pd.DataFrame([item.split(",") for item in data_json["data"]])
    temp_df.columns = [
        "时间",
        "前值",
        "现值",
        "发布日期",
    ]
    temp_df['前值'] = pd.to_numeric(temp_df['前值'])
    temp_df['现值'] = pd.to_numeric(temp_df['现值'])
    temp_df['时间'] = pd.to_datetime(temp_df['时间']).dt.date
    temp_df['发布日期'] = pd.to_datetime(temp_df['发布日期']).dt.date
    return temp_df
1,468
def structuringElement(path):
    """Load a structuring element (matrix and center) from a JSON file."""
    with open(path) as f:
        data = json.load(f)

    data['matrix'] = np.array(data['matrix'])
    data['center'] = tuple(data['center'])

    return data
1,469
def ptsToDist(pt1, pt2):
    """Computes the distance between two points"""
    if None in pt1 or None in pt2:
        dist = None
    else:
        vx, vy = points_to_vec(pt1, pt2)
        dist = np.linalg.norm([(vx, vy)])
    return dist
1,470
def init_check(func):
    """
    Decorator for confirming the KAOS_STATE_DIR is present (i.e. initialized correctly).
    """
    def wrapper(*args, **kwargs):
        if not os.path.exists(KAOS_STATE_DIR):
            click.echo("{} - {} directory does not exist - first run {}".format(
                click.style("Warning", bold=True, fg='yellow'),
                click.style(os.path.split(KAOS_STATE_DIR)[-1], bold=True, fg='red'),
                click.style("kaos init", bold=True, fg='green')))
            sys.exit(1)
        if not os.path.exists(CONFIG_PATH):
            click.echo("{} - {} does not exist - run {}".format(
                click.style("Warning", bold=True, fg='yellow'),
                click.style("./kaos/config", bold=True, fg='red'),
                click.style("kaos init", bold=True, fg='green')))
            sys.exit(1)
        func(*args, **kwargs)

    return wrapper
1,471
def d6_to_RotMat(aa: torch.Tensor) -> torch.Tensor:  # take (...,6) --> (...,9)
    """Converts 6D to a rotation matrix, from:
    https://github.com/papagina/RotationContinuity/blob/master/Inverse_Kinematics/code/tools.py"""
    a1, a2 = torch.split(aa, (3, 3), dim=-1)
    a3 = torch.cross(a1, a2, dim=-1)
    return torch.cat((a1, a2, a3), dim=-1)
1,472
def send_mail(subject, email_template_name, context, to_email):
    """
    Sends a django.core.mail.EmailMultiAlternatives to `to_email`.
    """

    # append context processors, some variables from settings
    context = {**context, **context_processors.general()}

    # prepend the marketplace name to the settings subject
    subject = '{} {}'.format(settings.NAME_PRETTY, subject.strip())

    # generate html email
    html_email = loader.render_to_string(email_template_name, context)

    # attach text and html message
    email_message = EmailMultiAlternatives(subject, strip_tags(html_email),
                                           settings.FROM_EMAIL_ADDRESS, [to_email])
    email_message.attach_alternative(html_email, 'text/html')

    # send
    try:
        email_message.send()
    except SMTPException:
        with open("mailcrash.log", "a") as stream:
            stream.write("Mail to {} could not be sent:\n{}\n".format(to_email, html_email))
    except ConnectionRefusedError:
        if settings.DEBUG:
            print("Send mail refused!")
        else:
            with open("mailcrash.log", "a") as stream:
                stream.write("Mail to {} could not be sent:\n{}\n".format(to_email, html_email))
1,473
def encrypt(key, pt, Nk=4):
    """Encrypt a plain text block."""
    assert Nk in {4, 6, 8}
    rkey = key_expand(key, Nk)
    ct = cipher(rkey, pt, Nk)
    return ct
1,474
def upload(filename, url, token=None):
    """
    Upload a file to a URL
    """
    headers = {}
    if token:
        headers['X-Auth-Token'] = token

    try:
        with open(filename, 'rb') as file_obj:
            response = requests.put(url, data=file_obj, timeout=120, headers=headers, verify=False)
    except requests.exceptions.RequestException as err:
        logging.warning('RequestException when trying to upload file %s: %s', filename, err)
        return None
    except IOError as err:
        logging.warning('IOError when trying to upload file %s: %s', filename, err)
        return None

    if response.status_code == 200 or response.status_code == 201:
        return True

    return None
1,475
def cost(states, sigma=0.25): """Pendulum-v0: Same as OpenAI-Gym""" l = 0.6 goal = Variable(torch.FloatTensor([0.0, l]))#.cuda() # Cart position cart_x = states[:, 0] # Pole angle thetas = states[:, 2] # Pole position x = torch.sin(thetas)*l y = torch.cos(thetas)*l positions = torch.stack([cart_x + x, y], 1) squared_distance = torch.sum((goal - positions)**2, 1) squared_sigma = sigma**2 cost = 1 - torch.exp(-0.5*squared_distance/squared_sigma) return cost
1,476
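For reference, a reading of the cost above (a sketch of the math, not an addition to the code): the pole-tip position is computed from the cart position and pole angle, and a saturating exponential cost is applied to its distance from the upright goal p* = (0, l):

c(s) = 1 - \exp\!\left( -\frac{\lVert p(s) - p^{*} \rVert^{2}}{2\sigma^{2}} \right),
\qquad p(s) = \left( x_{\mathrm{cart}} + l\sin\theta,\; l\cos\theta \right)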
def check(tc: globus_sdk.TransferClient, files):
    """Tries to find the paths in the Globus endpoints that match the supplied
    file names, sizes and last-modified attributes"""
    tc.endpoint_search(filter_scope="shared-with-me")
1,477
def createGame(data): """Create a new Game object""" gm = Game.info(data['name']) room = gm.game_id GAME_ROOMS[room] = gm emit('join_room', {'room': GAME_ROOMS[room].to_json()}) emit('new_room', {'room': GAME_ROOMS[room].to_json()}, broadcast=True)
1,478
def flatmap(fn, seq):
    """
    Map fn over each element of seq and flatten the resulting sublists
    into a single list.
    """
    result = []
    for lst in map(fn, seq):
        for elt in lst:
            result.append(elt)
    return result
1,479
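A quick illustrative call (values made up for the example):

flatmap(lambda x: [x, 10 * x], [1, 2, 3])  # -> [1, 10, 2, 20, 3, 30]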
def the_test_file(): """the test file.""" filename = 'tests/resources/grype.json' script = 'docker-grype/parse-grype-json.py' return { 'command': f'{script} {filename}', 'host_url': 'local://' }
1,480
def build_stations() -> tuple[dict, dict]: """Builds the station dict from source file""" stations, code_map = {}, {} data = csv.reader(_SOURCE["airports"].splitlines()) next(data) # Skip header for station in data: code = get_icao(station) if code and station[2] in ACCEPTED_STATION_TYPES: stations[code] = format_station(code, station) code_map[station[0]] = code return stations, code_map
1,481
def terminate_process(proc): """ Recursively kills a process and all of its children :param proc: Result of `subprocess.Popen` Inspired by http://stackoverflow.com/a/25134985/358873 TODO Check if terminate fails and kill instead? :return: """ process = psutil.Process(proc.pid) for child in process.children(recursive=True): child.terminate() process.terminate()
1,482
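A minimal usage sketch under the assumption that psutil is installed and subprocess is imported; the command is only a placeholder.

import subprocess

proc = subprocess.Popen(["sleep", "60"])  # placeholder long-running command
terminate_process(proc)                   # terminates the process and any children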
def vox_mesh_iou(voxelgrid, mesh_size, mesh_center, points, points_occ, vox_side_len=24, pc=None): """LeoZDong addition: Compare iou between voxel and mesh (represented as points sampled uniformly inside the mesh). Everything is a single element (i.e. no batch dimension). """ # Un-rotate voxels to pointcloud orientation voxelgrid = voxelgrid.copy() voxelgrid = np.flip(voxelgrid, 1) voxelgrid = np.swapaxes(voxelgrid, 0, 1) # voxelgrid = np.swapaxes(voxelgrid, 0, 2) # Find voxel centers as if they are in a [-0.5, 0.5] bbox vox_center = get_vox_centers(voxelgrid) # Rescale points so that the mesh object is 0-centered and has longest side # to be 0.75 (vox_side_len=24 / 32) points += vox_center - mesh_center scale = (vox_side_len / voxelgrid.shape[0]) / mesh_size points *= scale # import ipdb; ipdb.set_trace() cond = np.stack((points.min(1) > -0.5, points.max(1) < 0.5), 0) in_bounds = np.all(cond, 0) vox_occ = np.zeros_like(points_occ) vox_occ[in_bounds] = points_occ_in_voxel(voxelgrid, points[in_bounds, :]) # Find occupancy in voxel for the query points # vox_occ = points_occ_in_voxel(voxelgrid, points) iou = occ_iou(points_occ, vox_occ) #### DEBUG #### # vox_occ_points = points[vox_occ > 0.5] # gt_occ_points = points[points_occ > 0.5] # int_occ_points = points[(vox_occ * points_occ) > 0.5] # save_dir = '/viscam/u/leozdong/shape2prog/output/chair/GA_24/meshes/table/cd5f235344ff4c10d5b24cafb84903c7' # save_ply(vox_occ_points, os.path.join(save_dir, 'vox_occ_points.ply')) # save_ply(gt_occ_points, os.path.join(save_dir, 'gt_occ_points.ply')) # save_ply(int_occ_points, os.path.join(save_dir, 'int_occ_points.ply')) # print("iou:", iou) return iou
1,483
async def test_response_no_charset_with_iso_8859_1_content(send_request):
    """
    We don't support iso-8859-1 by default, following conversations about the encoding flow
    """
    response = await send_request(
        request=HttpRequest("GET", "/encoding/iso-8859-1"),
    )
    assert response.text() == "Accented: �sterreich"
    assert response.encoding is None
1,484
def test_forward(device): """Do the scene models have the right shape""" echellogram = Echellogram(device=device) t0 = time.time() scene_model = echellogram.forward(1) t1 = time.time() net_time = t1 - t0 print(f"\n\t{echellogram.device}: {net_time:0.5f} seconds", end="\t") assert scene_model.shape == echellogram.xx.shape assert scene_model.dtype == echellogram.xx.dtype
1,485
def qhxl_attr_2_bcp47(hxlatt: str) -> str:
    """qhxl_attr_2_bcp47 Convert an HXL attribute part to a BCP47 language tag

    Args:
        hxlatt (str): HXL attribute containing a '+i_<language>+is_<script>' pair

    Returns:
        str: the corresponding BCP47 tag, '<language>-<Script>'
    """
    resultatum = ''
    tempus1 = hxlatt.replace('+i_', '')
    tempus1 = tempus1.split('+is_')
    resultatum = tempus1[0] + '-' + tempus1[1].capitalize()
    # @TODO: test better cases with +ix_
    resultatum = resultatum.replace('+ix_', '-x-')
    return resultatum
1,486
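An illustrative input/output pair (the attribute value is hypothetical, but it follows the '+i_…+is_…' shape the function parses):

qhxl_attr_2_bcp47('+i_por+is_latn')  # -> 'por-Latn'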
def _(output): """Handle the output of a bash process.""" logger.debug('bash handler: subprocess output: {}'.format(output)) if output.returncode == 127: raise exceptions.ScriptNotFound() return output
1,487
def process_row(row, fiscal_fields):
    """Add missing fiscal fields (as None) and drop surplus keys so the row
    matches the fiscal schema exactly.
    """
    surplus_keys = set(row) - set(fiscal_fields)
    missing_keys = set(fiscal_fields) - set(row)
    for key in missing_keys:
        row[key] = None
    for key in surplus_keys:
        del row[key]
    assert set(row) == set(fiscal_fields)
    return row
1,488
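A small made-up example of the conforming behaviour: surplus keys are dropped and missing fiscal fields are filled with None.

row = {"amount": 10, "note": "drop me"}
fiscal_fields = ["amount", "currency"]
process_row(row, fiscal_fields)  # -> {'amount': 10, 'currency': None}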
def DefaultTo(default_value, msg=None): """Sets a value to default_value if none provided. >>> s = Schema(DefaultTo(42)) >>> s(None) 42 """ def f(v): if v is None: v = default_value return v return f
1,489
def load_files(file_list, inputpath):
    """
    Load the data from potentially multiple CSV files into a single pandas DataFrame.
    """
    df = None

    # loop through files and append
    for i, file in enumerate(file_list):
        path = f"{inputpath}/{file}"
        print(path)
        df_i = pd.read_csv(path)

        if i == 0:
            df = df_i
        else:
            df = pd.concat([df, df_i], axis=0, ignore_index=True)

    return df
1,490
def list_all(): """ List all systems List all transit systems that are installed in this Transiter instance. """ return systemservice.list_all()
1,491
def putText(image: np.ndarray, text: str, org=(0, 0), font=_cv2.FONT_HERSHEY_PLAIN, fontScale=1, color=(0, 0, 255), thickness=1, lineType=_cv2.LINE_AA, bottomLeftOrigin=False) -> np.ndarray: """Add text to `cv2` image, with default values. :param image: image array :param text: text to be added :param org: origin of text, from top left by default :param font: font choice :param fontScale: font size :param color: BGR color, red by default :param thickness: font thickness :param lineType: line type of text :param bottomLeftOrigin: True to start from bottom left, default False :return: image with text added """ return _cv2.putText(image, text, org, font, fontScale, color, thickness, lineType, bottomLeftOrigin)
1,492
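A short usage sketch, assuming cv2 is importable as _cv2 and numpy as np, matching the snippet's defaults (red text, top-left origin):

import numpy as np

canvas = np.zeros((120, 320, 3), dtype=np.uint8)              # black BGR canvas
canvas = putText(canvas, "hello", org=(10, 60), fontScale=2)  # red text by default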
def gaussFilter(fx: int, fy: int, sigma: int):
    """Build a normalized 2-D Gaussian filter of size (fy, fx)."""
    x = tf.range(-int(fx / 2), int(fx / 2) + 1, 1)
    y = tf.range(-int(fy / 2), int(fy / 2) + 1, 1)
    X, Y = tf.meshgrid(x, y)  # shapes (fy, fx)
    denom = -2 * (sigma**2)
    z = tf.cast(tf.add(tf.square(X), tf.square(Y)), tf.float32)
    k = 2 * tf.exp(tf.divide(z, denom))  # constant factor cancels after normalization
    # normalize so the kernel sums to 1
    k = tf.divide(k, tf.reduce_sum(k))
    return k
1,493
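A brief check of the filter above (assuming TensorFlow 2.x eager execution): a 5x5 kernel with sigma=1 should sum to 1 after normalization.

k = gaussFilter(5, 5, 1)
print(k.shape)                  # (5, 5)
print(float(tf.reduce_sum(k)))  # ~1.0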
def char_buffered(pipe): """Force the local terminal ``pipe`` to be character, not line, buffered.""" if win32 or env.get('disable_char_buffering', 0) or not sys.stdin.isatty(): yield else: old_settings = termios.tcgetattr(pipe) tty.setcbreak(pipe) try: yield finally: termios.tcsetattr(pipe, termios.TCSADRAIN, old_settings)
1,494
def do_something(param=None): """ Several routes for the same function FOO and BAR have different documentation --- """ return "I did something with {}".format(request.url_rule), 200
1,495
def extract_text( pattern: re.Pattern[str] | str, source_text: str, ) -> str | Literal[False]: """Match the given pattern and extract the matched text as a string.""" match = re.search(pattern, source_text) if not match: return False match_text = match.groups()[0] if match.groups() else match.group() return match_text
1,496
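Two illustrative calls (patterns and text invented for the example): with a capturing group the first group is returned, otherwise the whole match, and False signals no match.

extract_text(r"version (\d+\.\d+)", "running version 3.11 here")  # -> '3.11'
extract_text(r"\bERROR\b", "no problems found")                   # -> False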
def _checksum_paths(): """Returns dict {'dataset_name': 'path/to/checksums/file'}.""" dataset2path = {} for dir_path in _CHECKSUM_DIRS: for fname in _list_dir(dir_path): if not fname.endswith(_CHECKSUM_SUFFIX): continue fpath = os.path.join(dir_path, fname) dataset_name = fname[:-len(_CHECKSUM_SUFFIX)] dataset2path[dataset_name] = fpath return dataset2path
1,497
def test_calculate_allocation_from_cash3():
    """
    Checks current allocation is 0 when spot_price is less than 0 (i.e. bankrupt)
    """
    last_cash_after_trade = 30.12
    last_securities_after_transaction = 123.56
    spot_price = -5.12

    out_actual = calculate_allocation_from_cash(last_cash_after_trade, last_securities_after_transaction, spot_price)
    out_expect = 0.0

    assert(out_actual == out_expect)
1,498
def get_merged_message_df(messages_df, address_book, print_debug=False): """ Merges a message dataframe with the address book dataframe to return a single dataframe that contains all messages with detailed information (e.g. name, company, birthday) about the sender. Args: messages_df: a dataframe containing all transmitted messages address_book: a dataframe containing the address book as loaded via this module print_debug: true if we should print out the first row of each intermediary table as it's created Returns: a dataframe that contained all messages with info about their senders """ phones_with_message_id_df = __get_address_joined_with_message_id(address_book) if print_debug: print('Messages Dataframe') display(messages_df.head(1)) print('Address Book Dataframe') display(address_book.head(1)) print('Phones/emails merged with message IDs via chats Dataframe') display(phones_with_message_id_df.head(1)) return messages_df.merge(phones_with_message_id_df, how='left', suffixes=['_messages_df', '_other_join_tbl'], left_index=True, right_on='message_id', indicator='merge_chat_with_address_and_messages')
1,499