Columns: content (string, lengths 22 to 815k characters), id (int64, 0 to 4.91M)
def remove_apostrophe(text):
    """Replace apostrophes in text with spaces."""
    return text.replace("'", " ")
5,357,900
def generate_initials(text):
    """
    Extract initials from a string

    Args:
        text(str): The string to extract initials from

    Returns:
        str: The initials extracted from the string
    """
    if not text:
        return None

    text = text.strip()
    if text:
        split_text = text.split(" ")
        if len(split_text) > 1:
            return (split_text[0][0] + split_text[-1][0]).upper()
        else:
            return split_text[0][0].upper()
    return None
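A few quick checks of the behaviour described in the docstring (a sketch; assumes generate_initials is in scope):

# Two words -> first letters of the first and last word, uppercased
assert generate_initials("ada lovelace") == "AL"
# Single word -> its first letter
assert generate_initials("plato") == "P"
# Whitespace-only input falls through to None
assert generate_initials("  ") is None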
5,357,901
def nmf_manifold_vec_update(X, U, V, k_to_W, k_to_D, k_to_L, k_to_feat_inds,
                            n_steps=10, gamma=1.0, delta=1.0, i=0,
                            verbose=False, norm_X=None):
    """
    Perform <n_steps> update steps with a fixed Laplacian matrix for each latent factor

    Parameters
    ----------
    X : np.array
        data to factor
    U : np.array
        previous setting of U to update
    V : np.array
        previous setting of V to update
    k_to_W : dict
        mapping of latent factor to weighted adjacency matrix
    k_to_D : dict
        mapping of latent factor to diagonal matrix that is the sum of W along a row (or column)
    k_to_L : dict
        mapping of latent factor to L = D - W
    n_steps : int
        number of update steps to perform
    gamma : float
        relative importance of manifold regularization term
    delta : float
        relative importance of ignoring manifold penalty
    i : int
        number of previous iterations
    verbose : bool
        if True, print objective function value after each iteration
    norm_X : float or None
        stored value of the norm of X
    """
    obj_data = None
    m, k_latent = U.shape
    n, k_latent = V.shape
    for n_step in range(n_steps):
        U_up_num = X.dot(V)
        U_up_denom = U.dot(V.transpose().dot(V)) + U
        U = np.multiply(U, np.divide(U_up_num, U_up_denom,
                                     out=np.ones_like(U_up_num),
                                     where=U_up_denom != 0))  # 0 / 0 := 1

        V_up_num_recon = X.transpose().dot(U)
        V_up_denom_recon = V.dot(U.transpose().dot(U))

        # update each column vector of V separately to accommodate different Laplacians
        V_up_num_man = np.zeros((n, k_latent))
        V_up_denom_man = np.zeros((n, k_latent))
        V_up_num_ign = np.zeros((n, k_latent))
        for k in range(k_latent):
            W = k_to_W[k]
            D = k_to_D[k]
            V_up_num_man[:, k] = gamma * W.dot(V[:, k])
            V_up_denom_man[:, k] = gamma * D.dot(V[:, k])
            nz_inds = k_to_feat_inds[k]
            V_up_num_ign[nz_inds, k] = delta * np.power(V[nz_inds, k] + 1, -2)

        V_up_num = V_up_num_recon + (V_up_num_man + V_up_num_ign)
        V_up_denom = V_up_denom_recon + V_up_denom_man
        V_up_denom[V_up_denom < EPSILON] = EPSILON
        V = np.multiply(V, np.divide(V_up_num, V_up_denom,
                                     out=np.ones_like(V_up_num),
                                     where=V_up_denom != 0))
        V[V < EPSILON] = EPSILON

        obj_data = nmf_manifold_vec_obj(X, U, V, k_to_L, k_to_feat_inds,
                                        gamma=gamma, delta=delta)
        print(i + n_step + 1, obj_data['obj'])
        if verbose:
            print(obj_data)
    return U, V, obj_data
5,357,902
def check_xyz_species_for_drawing(xyz, species):
    """A helper function to avoid repetitive code."""
    if species is not None and xyz is None:
        xyz = species.final_xyz
    if species is not None and not isinstance(species, ARCSpecies):
        raise InputError('Species must be an ARCSpecies instance. Got {0}.'.format(type(species)))
    if species is not None and not species.final_xyz:
        raise InputError('Species {0} has an empty final_xyz attribute.'.format(species.label))
    return xyz
5,357,903
def test_simple_single_sitemap():
    """Tests a single sitemap."""
    with test_sitemap() as sitemap:
        sitemap.add_section("articles")
        for url in urls_iterator():
            sitemap.add_url(url)
        print(sitemap)
        assert len(sitemap) == 10
        assert "(10 URLs)" in repr(sitemap)
        assert sitemap.sitemaps == ["sitemap-001-articles.xml.gz"]
5,357,904
def GetUDPStreamSample(command_out, sending_vm, receiving_vm,
                       request_bandwidth, network_type, iteration):
    """Get a sample from the nuttcp string results.

    Args:
        command_out: the nuttcp output.
        sending_vm: vm sending the UDP packets.
        receiving_vm: vm receiving the UDP packets.
        request_bandwidth: the requested bandwidth in the nuttcp sample.
        network_type: the type of the network, external or internal.
        iteration: the run number of the test.

    Returns:
        sample from the results of the nuttcp tests.
    """
    data_line = command_out.split('\n')[0].split(' ')
    data_line = [val for val in data_line if val]

    actual_bandwidth = float(data_line[6])
    units = data_line[7]
    packet_loss = data_line[16]

    metadata = {
        'receiving_machine_type': receiving_vm.machine_type,
        'receiving_zone': receiving_vm.zone,
        'sending_machine_type': sending_vm.machine_type,
        'sending_zone': sending_vm.zone,
        'packet_loss': packet_loss,
        'bandwidth_requested': request_bandwidth,
        'network_type': network_type,
        'iteration': iteration,
    }
    return sample.Sample('bandwidth', actual_bandwidth, units, metadata)
5,357,905
def _mixed_compare_sample(train_sample: Tuple, predict_sample: Tuple):
    """
    For models relying on MixedCovariates.

    Parameters
    ----------
    train_sample
        (past_target, past_covariates, historic_future_covariates, future_covariates, future_target)
    predict_sample
        (past_target, past_covariates, historic_future_covariates, future_covariates,
        future_past_covariates, ts_target)
    """
    # datasets; we skip future_target for train and predict, and skip
    # future_past_covariates for predict datasets
    ds_names = ['past_target', 'past_covariates', 'historic_future_covariates', 'future_covariates']

    train_has_ds = [ds is not None for ds in train_sample[:-1]]
    predict_has_ds = [ds is not None for ds in predict_sample[:4]]

    train_datasets = train_sample[:-1]
    predict_datasets = predict_sample[:4]

    tgt_train, tgt_pred = train_datasets[0], predict_datasets[0]
    raise_if_not(
        tgt_train.shape[-1] == tgt_pred.shape[-1],
        'The provided target has a dimension (width) that does not match the dimension '
        'of the target this model has been trained on.'
    )

    for idx, (ds_in_train, ds_in_predict, ds_name) in enumerate(zip(train_has_ds, predict_has_ds, ds_names)):
        # NOTE: the original conditions were `ds_in_train and not ds_in_predict and ds_in_train`
        # (redundant) and `ds_in_train and not ds_in_predict and ds_in_predict` (always False);
        # the intended checks are:
        raise_if(
            ds_in_train and not ds_in_predict,
            f'This model has been trained with {ds_name}; some {ds_name} of matching dimensionality are needed '
            f'for prediction.'
        )
        raise_if(
            not ds_in_train and ds_in_predict,
            f'This model has been trained without {ds_name}; no {ds_name} should be provided for prediction.'
        )
        raise_if(
            ds_in_train and ds_in_predict and train_datasets[idx].shape[-1] != predict_datasets[idx].shape[-1],
            f'The provided {ds_name} must have dimensionality matching that of the {ds_name} used for training '
            f'the model.'
        )
5,357,906
def initial_checks():
    """Perform a series of checks to be sure the race data is good."""
    # TODO: check if user-config.py is present
    print("{} race editions found in {}.".format(len(race_editions), root_dir + races_dir))
    filecount_errors = 0
    for race in race_editions:
        files_count = len(os.listdir(race))
        if files_count != 5:
            print("The folder {} has {} files in it.".format(race, files_count))
            filecount_errors += 1
    if filecount_errors > 0:
        print("{} race folders don't have the expected number of files.".format(filecount_errors))
    else:
        print("All race folders have the expected number of files.")
5,357,907
def test_load_from_both_py_and_pyi_files():
    """Check that the loader is able to merge data loaded from `*.py` and `*.pyi` files."""
    with temporary_pypackage("package", ["mod.py", "mod.pyi"]) as tmp_package:
        tmp_package.path.joinpath("mod.py").write_text(
            dedent(
                """
                CONST = 0

                class Class:
                    class_attr = True

                    def function1(self, arg1):
                        pass

                    def function2(self, arg1=2.2):
                        pass
                """
            )
        )
        tmp_package.path.joinpath("mod.pyi").write_text(
            dedent(
                """
                from typing import Sequence, overload

                CONST: int

                class Class:
                    class_attr: bool

                    @overload
                    def function1(self, arg1: str) -> Sequence[str]: ...

                    @overload
                    def function1(self, arg1: bytes) -> Sequence[bytes]: ...

                    def function2(self, arg1: float) -> float: ...
                """
            )
        )
        loader = GriffeLoader(search_paths=[tmp_package.tmpdir])
        package = loader.load_module(tmp_package.name)
        loader.resolve_aliases()

        assert "mod" in package.members
        mod = package["mod"]
        assert mod.filepath.suffix == ".py"

        assert "CONST" in mod.members
        const = mod["CONST"]
        assert const.value == "0"
        assert const.annotation.source == "int"

        assert "Class" in mod.members
        class_ = mod["Class"]

        assert "class_attr" in class_.members
        class_attr = class_["class_attr"]
        assert class_attr.value == "True"
        assert class_attr.annotation.source == "bool"

        assert "function1" in class_.members
        function1 = class_["function1"]
        assert len(function1.overloads) == 2

        assert "function2" in class_.members
        function2 = class_["function2"]
        assert function2.returns.source == "float"
        assert function2.parameters["arg1"].annotation.source == "float"
        assert function2.parameters["arg1"].default == "2.2"
5,357,908
def setSwaggerParamDesc(swagger, searchParams):
    """
    Set the Swagger GET parameter description to what is stored in the
    search parameters, using a helper function.
    """
    for id in range(len(swagger['tags'])):
        # Paths are prefaced with a forward slash
        idName = '/' + swagger['tags'][id]['name']
        # Filter out the Capability statement
        if idName != '/CapabilityStatement':
            for paramId in range(len(swagger['paths'][idName]['get']['parameters'])):
                # Get the parameter name to use the getParamDesc function
                paramName = swagger['paths'][idName]['get']['parameters'][paramId]['name']
                # Set description to what is returned from the search parameters
                swagger['paths'][idName]['get']['parameters'][paramId]['description'] = \
                    getParamDesc(searchParams, idName, paramName)
    swagger = removeFormatParam(swagger)
    return swagger
5,357,909
def parse_args(args=[], doc=False):
    """Handle parsing of arguments and flags. Generates docs using help from `ArgParser`.

    Args:
        args (list): argv passed to the binary
        doc (bool): If the function should generate and return a manpage

    Returns:
        Processed args and a copy of the `ArgParser` object if not `doc`,
        else a `string` containing the generated manpage
    """
    parser = ArgParser(prog=__COMMAND__, description=f"{__COMMAND__} - {__DESCRIPTION__}")
    parser.add_argument("username", help="Username of the new user to add")
    parser.add_argument("-p", dest="password", help="Password for the new user")
    parser.add_argument("-n", dest="noninteractive", action="store_false",
                        help="Don't ask for user input")
    parser.add_argument("--version", action="store_true", help="print program version")
    args = parser.parse_args(args)

    arg_helps_with_dups = parser._actions
    arg_helps = []
    [arg_helps.append(x) for x in arg_helps_with_dups if x not in arg_helps]

    NAME = f"**NAME*/\n\t{__COMMAND__} - {__DESCRIPTION__}"
    SYNOPSIS = f"**SYNOPSIS*/\n\t{__COMMAND__} [OPTION]... "
    DESCRIPTION = f"**DESCRIPTION*/\n\t{__DESCRIPTION_LONG__}\n\n"

    for item in arg_helps:
        # It's a positional argument
        if len(item.option_strings) == 0:
            # If the argument is optional:
            if item.nargs == "?":
                SYNOPSIS += f"[{item.dest.upper()}] "
            elif item.nargs == "+":
                SYNOPSIS += f"[{item.dest.upper()}]... "
            else:
                SYNOPSIS += f"{item.dest.upper()} "
        else:
            # Boolean flag
            if item.nargs == 0:
                if len(item.option_strings) == 1:
                    DESCRIPTION += f"\t**{' '.join(item.option_strings)}*/\t{item.help}\n\n"
                else:
                    DESCRIPTION += f"\t**{' '.join(item.option_strings)}*/\n\t\t{item.help}\n\n"
            elif item.nargs == "+":
                DESCRIPTION += f"\t**{' '.join(item.option_strings)}*/=[{item.dest.upper()}]...\n\t\t{item.help}\n\n"
            else:
                DESCRIPTION += f"\t**{' '.join(item.option_strings)}*/={item.dest.upper()}\n\t\t{item.help}\n\n"

    if doc:
        return f"{NAME}\n\n{SYNOPSIS}\n\n{DESCRIPTION}\n\n"
    return args, parser
5,357,910
def interact(u, v):
    """Compute element-wise mean(s) from two arrays."""
    return tuple(mean(array([u, v]), axis=0))
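A minimal usage sketch, assuming `mean` and `array` are NumPy imports in the enclosing module:

# The column-wise mean of [[1, 2], [3, 4]] is [2.0, 3.0]
assert interact([1.0, 2.0], [3.0, 4.0]) == (2.0, 3.0)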
5,357,911
def part_allocation_count(build, part, *args, **kwargs):
    """Return the total number of <part> allocated to <build>."""
    return build.getAllocatedQuantity(part)
5,357,912
def _disable_tracing():
    """Disable system-wide tracing, if we specifically switched it on."""
    global _orig_sys_trace
    if _orig_sys_trace is None:
        sys.settrace(None)
5,357,913
def stat_threshold(Z, mce='fdr_bh', a_level=0.05, side='two', copy=True):
    """
    Threshold z maps.

    Parameters
    ----------
    mce: multiple comparison error correction method, should be one of the
        options below [default: 'fdr_bh']. The options are from the
        statsmodels package:

        `b`, `bonferroni` : one-step correction
        `s`, `sidak` : one-step correction
        `hs`, `holm-sidak` : step down method using Sidak adjustments
        `h`, `holm` : step-down method using Bonferroni adjustments
        `sh`, `simes-hochberg` : step-up method (independent)
        `hommel` : closed method based on Simes tests (non-negative)
        `fdr_i`, `fdr_bh` : Benjamini/Hochberg (non-negative)
        `fdr_n`, `fdr_by` : Benjamini/Yekutieli (negative)
        `fdr_tsbh` : two stage fdr correction (Benjamini/Hochberg)
        `fdr_tsbky` : two stage fdr correction (Benjamini/Krieger/Yekutieli)
        `fdr_gbs` : adaptive step-down fdr correction (Gavrilov, Benjamini, Sarkar)
    """
    if copy:
        Z = Z.copy()

    if side == 'one':
        sideflag = 1
    elif side in ('two', 'double'):  # the original `side=='two' or 'double'` was always truthy
        sideflag = 2

    Idx = np.triu_indices(Z.shape[0], 1)
    Zv = Z[Idx]

    Pv = sp.norm.cdf(-np.abs(Zv)) * sideflag
    Hv, adjpvalsv = smmt.multipletests(Pv, method=mce)[:2]

    adj_pvals = np.zeros(Z.shape)
    Zt = np.zeros(Z.shape)

    Zv[np.invert(Hv)] = 0
    Zt[Idx] = Zv
    Zt = Zt + Zt.T

    adj_pvals[Idx] = adjpvalsv
    adj_pvals = adj_pvals + adj_pvals.T
    adj_pvals[range(Z.shape[0]), range(Z.shape[0])] = 0

    return Zt, binarize(Zt), adj_pvals
5,357,914
def esmf_grid(lon, lat, periodic=False, mask=None):
    """
    Create an ESMF.Grid object, for constructing ESMF.Field and ESMF.Regrid.

    Parameters
    ----------
    lon, lat : 2D numpy array
        Longitude/Latitude of cell centers.

        Recommend Fortran-ordering to match ESMPy internal.

        Shape should be ``(Nlon, Nlat)`` for rectilinear grid,
        or ``(Nx, Ny)`` for general quadrilateral grid.
    periodic : bool, optional
        Periodic in longitude? Default to False.
        Only useful for source grid.
    mask : 2D numpy array, optional
        Grid mask. According to the ESMF convention, masked cells
        are set to 0 and unmasked cells to 1.

        Shape should be ``(Nlon, Nlat)`` for rectilinear grid,
        or ``(Nx, Ny)`` for general quadrilateral grid.

    Returns
    -------
    grid : ESMF.Grid object
    """
    # ESMPy expects Fortran-ordered array.
    # Passing C-ordered array will slow down performance.
    for a in [lon, lat]:
        warn_f_contiguous(a)

    warn_lat_range(lat)

    # ESMF.Grid can actually take 3D array (lon, lat, radius),
    # but regridding only works for 2D array
    assert lon.ndim == 2, 'Input grid must be 2D array'
    assert lon.shape == lat.shape, 'lon and lat must have same shape'

    staggerloc = ESMF.StaggerLoc.CENTER  # actually just integer 0

    if periodic:
        num_peri_dims = 1
    else:
        num_peri_dims = None

    # ESMPy documentation claims that if staggerloc and coord_sys are None,
    # they will be set to default values (CENTER and SPH_DEG).
    # However, they actually need to be set explicitly,
    # otherwise grid._coord_sys and grid._staggerloc will still be None.
    grid = ESMF.Grid(
        np.array(lon.shape),
        staggerloc=staggerloc,
        coord_sys=ESMF.CoordSys.SPH_DEG,
        num_peri_dims=num_peri_dims,
    )

    # The grid object points to the underlying Fortran arrays in ESMF.
    # To modify lat/lon coordinates, need to get pointers to them
    lon_pointer = grid.get_coords(coord_dim=0, staggerloc=staggerloc)
    lat_pointer = grid.get_coords(coord_dim=1, staggerloc=staggerloc)

    # Use [...] to avoid overwriting the object. Only change array values.
    lon_pointer[...] = lon
    lat_pointer[...] = lat

    # Follows SCRIP convention where 1 is unmasked and 0 is masked.
    # See https://github.com/NCPP/ocgis/blob/61d88c60e9070215f28c1317221c2e074f8fb145/src/ocgis/regrid/base.py#L391-L404
    if mask is not None:
        # remove fractional values
        mask = np.where(mask == 0, 0, 1)
        # convert array type to integer (ESMF compat)
        grid_mask = mask.astype(np.int32)
        if not (grid_mask.shape == lon.shape):
            raise ValueError(
                'mask must have the same shape as the latitude/longitude '
                'coordinates, got: mask.shape = %s, lon.shape = %s' % (mask.shape, lon.shape))
        grid.add_item(ESMF.GridItem.MASK, staggerloc=ESMF.StaggerLoc.CENTER,
                      from_file=False)
        grid.mask[0][:] = grid_mask

    return grid
5,357,915
def magic_series(grid):
    """Check if grid satisfies the definition series[k] == sum(series[i] == k)."""
    logging.debug("Grid:\n{}".format(grid))
    magic = (grid.sum(1) == np.where(grid.T)[1])
    logging.debug("Magic check:\n{}".format(magic))
    return magic.all()
5,357,916
def convert_to_numeral(decimal_integer: int, roman_format="brackets"):
    """Convert decimal to Roman numeral.

    roman_format is a str containing either 'brackets' or 'latex'.

    The default option, 'brackets', converts 3,000,000,000 to [[MMM]] and
    3,000,000 to [MMM]. 'latex' outputs a LaTeX formula for the numeral.
    """
    def barfunction_latex(prefix: str, unbarred_string: str, num_of_bars: int,
                          separator_size: int = 2):
        """Return a LaTeX-renderable representation of overline bars."""
        bars_before = (r"\overline{" * num_of_bars) + r"\text{"
        bars_after = r"}" + ("}" * num_of_bars)
        if prefix:
            separation = f"\\hspace{{{separator_size}pt}}"
        else:
            separation = ""
        return prefix + separation + bars_before + unbarred_string + bars_after

    def barfunction_brackets(prefix: str, unbarred_string: str, num_of_bars: int):
        """Represent bars as (possibly nested) square brackets.

        For example, 3,000,000,000 is converted to [[MMM]].
        """
        bars_before = "[" * num_of_bars
        bars_after = "]" * num_of_bars
        return prefix + bars_before + unbarred_string + bars_after

    def latex_surround_with_dollars(string):
        """Surround LaTeX math expression with dollar signs."""
        return "$" + string + "$"

    def list_occurring_roman_symbols(roman_symbols, integer_value):
        """List symbols that occur in Roman representation of number.

        + roman_symbols is [(int, str)], a list of tuples, each of which
          representing one Roman symbol and its corresponding integer value.
          For example, (3, 'III').
        + integer_value is the value to be converted.

        Return: remainder, list_of_occurring_symbols
        + remainder: what remains from the number, which was too small to
          represent with the provided symbols
        + list_of_occurring_symbols: a list of the symbols present in the
          Roman representation of the number.
        """
        remainder = integer_value
        list_of_occurring_symbols = []
        for integer_value, str_roman_symbol in roman_symbols:
            repetitions, remainder = divmod(remainder, integer_value)
            list_of_occurring_symbols.append(str_roman_symbol * repetitions)
        return remainder, list_of_occurring_symbols

    def apply_barfunction(list_of_occurring_symbols, barfunction,
                          numeral_string, num_of_bars):
        """Build up Roman numeral representation applying barfunction.

        The barfunction is only applied if list_of_occurring_symbols is not
        empty, otherwise the original numeral_string is returned untouched.
        """
        unbarred_string = "".join(list_of_occurring_symbols)
        if unbarred_string:
            numeral_string = barfunction(numeral_string, unbarred_string, num_of_bars)
        return numeral_string

    if roman_format == 'latex':
        barfunction = barfunction_latex
    elif roman_format == 'brackets':
        barfunction = barfunction_brackets
    else:
        raise ValueError('roman_format should be either "latex" or "brackets"')

    remainder = decimal_integer
    numeral_string = ""
    for symbolset in ROMAN_NUMERAL_TABLE:
        num_of_bars = symbolset["bars"]
        symbols = symbolset["symbols"]
        remainder, list_of_occurring_symbols = list_occurring_roman_symbols(
            symbols, remainder)
        numeral_string = apply_barfunction(list_of_occurring_symbols, barfunction,
                                           numeral_string, num_of_bars)

    if roman_format == 'latex':
        return latex_surround_with_dollars(numeral_string)
    return numeral_string
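The snippet references a module-level ROMAN_NUMERAL_TABLE that is not shown. The following is a hypothetical reconstruction, consistent only with how the loop above consumes it; the original project's table may differ:

# Hypothetical ROMAN_NUMERAL_TABLE (not part of the original snippet): each
# entry carries a bar count and (value, symbol) pairs scaled by 1000**bars,
# ordered from most bars to none, largest values first.
_BASE = [(1000, "M"), (900, "CM"), (500, "D"), (400, "CD"), (100, "C"),
         (90, "XC"), (50, "L"), (40, "XL"), (10, "X"), (9, "IX"),
         (5, "V"), (4, "IV"), (1, "I")]
ROMAN_NUMERAL_TABLE = [
    {"bars": bars,
     # above zero bars, keep only values >= 4 * 1000**bars so the plain
     # M..I set still handles the low part of the number
     "symbols": [(v * 1000 ** bars, s) for v, s in _BASE if bars == 0 or v >= 4]}
    for bars in (2, 1, 0)
]

# With this table, convert_to_numeral(3_000_000) == '[MMM]' and
# convert_to_numeral(3_000_000_000) == '[[MMM]]', matching the docstring.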
5,357,917
def transpose(x):
    """Tensor transpose."""
    return np.transpose(x)
5,357,918
def greedy_reduction_flat(m: Mat2) -> Optional[List[Tuple[int, int]]]:
    """Returns a list of tuples (r1, r2) that specify which row should be added
    to which other row in order to reduce one row of m to only contain a single 1.

    In contrast to :func:`greedy_reduction`, it performs the brute-force search
    starting with the highest indices, and places the row operations in such a
    way that the resulting depth is log_2 of the number of rows that have to be
    added together. Used in :func:`lookahead_extract_base`."""
    indicest = find_minimal_sums(m, True)
    if indicest is None:
        return indicest
    return flat_indices(m, list(indicest))[0]
5,357,919
def validate_access_and_security_params(config):
    """Checks the presence of access_and_security parameters."""
    logger.info("checking basic_authentication params")
    sec_params = config_utils.get_k8s_dict(config).get(consts.ACCESS_SEC_KEY)
    if consts.AUTH_KEY in sec_params:
        auth_key = sec_params[consts.AUTH_KEY]
        if (consts.BASIC_AUTH_KEY not in auth_key
                or consts.TOKEN_AUTH_KEY not in auth_key):
            raise ValidationException(
                "At least one out of basic_authentication or "
                "token_authentication must be present")
        else:
            return
    else:
        raise ValidationException("authentication is not present")
5,357,920
def estimate_psd(vec, num_segs=DEFAULT_NUM_SEGS, overlap=DEFAULT_OVERLAP,
                 dt=DEFAULT_DT, tukey_alpha=DEFAULT_TUKEY_ALPHA, one_sided=True):
    """
    Estimates the PSD using a DFT.

    Divides vec into "num_segs" segments with a fractional overlap of "overlap"
    between neighbors and returns the average PSD from these samples
    (arithmetic mean).

    If one_sided, returns the one-sided PSD. Otherwise, returns the two-sided
    PSD (one half the one-sided PSD).

    WARNING: your logic on how to split segments may be fragile...
    """
    N = len(vec)
    if overlap > N - num_segs:
        raise ValueError("overlap is too big!")

    n = N / (1. + (num_segs - 1.) * (1. - overlap))  # number of entries per segment
    overlap = int(n * overlap)  # number of overlapping entries
    n = int(n)
    seglen = dt * n

    # compute dfts for each segment separately
    psds = np.empty((n // 2, num_segs), complex)
    for segNo in range(num_segs):
        start = segNo * (n - overlap)
        psds[:, segNo], freqs = dft(vec[start:start + n] * tukey(n, tukey_alpha), dt=dt)

    # average (arithmetic mean)
    mean_psd = np.sum(psds.real**2 + psds.imag**2, axis=1) / (seglen * num_segs)
    if one_sided:
        # multiply by 2 to account for the power at negative frequencies
        mean_psd *= 2

    return mean_psd, freqs
5,357,921
def load_data(connection_string: str):
    """
    Load data from a source. The source can be:

    - A JSON file
    - A MongoDB database

    Load data from a file
    ---------------------

    If you want to load data from a file, you must provide this connection string:

    >>> connection_string = "/path/to/my/file.json"

    or using the URI format:

    >>> connection_string = "file:///path/to/my/file.json"

    Load data from a MongoDB
    ------------------------

    If you want to load data from a MongoDB database, you must provide a
    connection string like:

    >>> connection_string = "mongodb://mongo.example.com:27017"

    Or even more complicated:

    >>> connection_string = "mongodb://db1.example.net,db2.example.net:2500/?replicaSet=test"

    :param connection_string: path or URI of the data source
    :type connection_string: str
    :return: an APITest instance built from the loaded data
    :rtype: APITest
    """
    assert isinstance(connection_string, str)

    if connection_string.startswith("mongodb://"):
        data = _load_from_mongo(connection_string)
    elif connection_string.startswith("file://"):
        data = _load_from_file(connection_string)
    else:
        data = _load_from_file("file://{}".format(connection_string))

    # Load JSON info
    return APITest(**data)
5,357,922
def himmelblau(xy):
    """Himmelblau's function, as a set of residuals (cost = sum(residuals**2)).

    The standard Himmelblau's function is with data as [11, 7], and has four
    minima at (3.0, 2.0), ~(-2.8, 3.1), ~(-3.8, -3.3), ~(3.6, -1.8).

    Himmelblau's function is a quadratic model in both x and y. Its data-space
    dimension (2) is equal to its model-space dimension (2), so there is only
    parameter-effect curvature.

    Parameters
    ----------
    xy : 2-element list-like
        The x, y parameters of the model.

    Returns
    -------
    2-element list-like
        The residuals of the model.

    Notes
    -----
    https://en.wikipedia.org/wiki/Himmelblau%27s_function
    """
    x, y = xy
    r1 = x * x + y
    r2 = y * y + x
    return np.array([r1, r2])
5,357,923
def parse_instrument_data(smoothie_response: str) -> Dict[str, bytearray]:
    """Parse instrument data.

    Args:
        smoothie_response: A string containing a mount prefix (L or R)
            followed by ':' and a hex string.

    Returns:
        Mapping of the mount prefix to the hex string.
    """
    try:
        items = smoothie_response.split("\n")[0].strip().split(":")
        mount = items[0]
        if mount not in {"L", "R"}:
            raise ParseError(
                error_message=f"Invalid mount '{mount}'",
                parse_source=smoothie_response,
            )
        # data received from Smoothieware is stringified HEX values
        # because of how Smoothieware handles GCODE messages
        data = bytearray.fromhex(items[1])
    except (ValueError, IndexError, TypeError, AttributeError):
        raise ParseError(
            error_message="Unexpected argument to parse_instrument_data",
            parse_source=smoothie_response,
        )
    return {mount: data}
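A usage sketch with a made-up response string (the payload here is the two bytes 0xBE 0xEF):

result = parse_instrument_data("L:beef\n")
assert result == {"L": bytearray(b"\xbe\xef")}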
5,357,924
def count_frames(directory):
    """Counts the number of consecutive pickled frames in directory.

    Args:
        directory: str of directory

    Returns:
        0 for none, otherwise > 0
    """
    for i in itertools.count(start=0):
        pickle_file = os.path.join(directory, f"{str(i).zfill(12)}.pickle")
        if not os.path.isfile(pickle_file):
            return i
5,357,925
def _volume_sum_check(props: PropsDict, sum_to=1, atol=1e-3) -> bool:
    """Check arrays all sum to no more than 1."""
    check_broadcastable(**props)
    sum_ar = np.zeros((1,))
    for prop in props:
        sum_ar = sum_ar + props[prop]
    try:
        assert sum_ar.max() <= sum_to + atol
    except AssertionError:
        raise ValueError(f"Volume fractions for {props.keys()} sum to greater than one")
    return True
5,357,926
def list_partition_deltas(
        partition: Partition,
        include_manifest: bool = False,
        *args,
        **kwargs) -> ListResult[Delta]:
    """
    Lists a page of deltas committed to the given partition.

    To conserve memory, the deltas returned do not include manifests by
    default. The manifests can either be optionally retrieved as part of this
    call or lazily loaded via subsequent calls to `get_delta_manifest`.
    """
    raise NotImplementedError("list_partition_deltas not implemented")
5,357,927
def main():
    """Entry point."""
    parser = argparse.ArgumentParser(description="Analyse Maven dependency trees")
    parser.add_argument("--files", required=False,
                        default="target/dependency-reports/*-tree.txt",
                        help="Maven dependency plugin output files to analyse")
    parser.add_argument("--mvnhome", required=False,
                        default=os.path.expanduser("~/.m2"),
                        help="Maven home directory to search for JAR files")
    args = parser.parse_args()

    first = True
    for tree_filename in glob.glob(args.files):
        if not first:
            print("\n")
        first = False
        print("Analyzing {0}:\n".format(tree_filename))
        analyse_tree(tree_filename, args.mvnhome, sys.stdout)
5,357,928
def HLA_flag4and8_hunter_killer_OLD(photfilename):
    """This function searches through photometry catalogs for sources whose
    flags contain both bits 4 (multi-pixel saturation) and 8 (faint magnitude
    limit). If found, the subroutine removes the "8" bit value from the set of
    flags for that source.

    Parameters
    ----------
    photfilename : string
        name of sourcelist to process

    Returns
    -------
    nothing!
    """
    inf = open(photfilename)
    phot_lines = inf.readlines()
    inf.close()
    fout = open(photfilename, 'w')
    conf_ctr = 0
    log.info("Searching {} for flag 4 + flag 8 conflicts....".format(photfilename))
    for phot_line in phot_lines:
        phot_line = phot_line.strip()
        parse_pl = phot_line.split(',')
        x = parse_pl[0]
        if x[0] == 'X':
            out_line = phot_line
        else:
            flagval = int(parse_pl[-1])
            if (flagval & 4 > 0) and (flagval & 8 > 0):
                conf_ctr += 1
                parse_pl[-1] = str(int(parse_pl[-1]) - 8)
            out_line = ""
            for item in parse_pl:
                out_line = out_line + "{},".format(item)
            out_line = out_line[:-1]
        fout.write("%s\n" % (out_line))
    fout.close()
    if conf_ctr == 0:
        log.info("No conflicts found.")
    if conf_ctr == 1:
        log.info("{} conflict fixed.".format(conf_ctr))
    if conf_ctr > 1:
        log.info("{} conflicts fixed.".format(conf_ctr))
5,357,929
def get_management_confs_in_domain(body=None):  # noqa: E501
    """Get management configuration items and expected values in the domain.

    :param body: domain info
    :type body: dict | bytes

    :rtype: ConfFiles
    """
    if connexion.request.is_json:
        body = DomainName.from_dict(connexion.request.get_json())  # noqa: E501

    # Check whether the domain exists
    domain = body.domain_name

    # check the input domain
    checkRes = Format.domainCheck(domain)
    if not checkRes:
        num = 400
        base_rsp = BaseResponse(num, "Failed to verify the input parameter, "
                                     "please check the input parameters.")
        return base_rsp, num

    isExist = Format.isDomainExist(domain)
    if not isExist:
        base_rsp = BaseResponse(400, "The current domain does not exist")
        return base_rsp, 400

    # The parameters of the initial return value assignment
    expected_conf_lists = ConfFiles(domain_name=domain, conf_files=[])

    # get the path in domain
    domainPath = os.path.join(TARGETDIR, domain)

    # When there is a file, the path is treated as a configuration item path
    for root, dirs, files in os.walk(domainPath):
        if len(files) > 0 and len(root.split('/')) > 3:
            if "hostRecord.txt" in files:
                continue
            for d_file in files:
                d_file_path = os.path.join(root, d_file)
                contents = Format.get_file_content_by_read(d_file_path)
                feature = os.path.join(root.split('/')[-1], d_file)
                yang_modules = YangModule()
                d_module = yang_modules.getModuleByFeature(feature)
                file_lists = yang_modules.getFilePathInModdule(yang_modules.module_list)
                file_path = file_lists.get(d_module.name()).split(":")[-1]
                conf = ConfFile(file_path=file_path, contents=contents)
                expected_conf_lists.conf_files.append(conf)

    print("expected_conf_lists is :{}".format(expected_conf_lists))

    if len(expected_conf_lists.domain_name) > 0:
        base_rsp = BaseResponse(200, "Get management configuration items and expected "
                                     "values in the domain successfully")
    else:
        base_rsp = BaseResponse(400, "The file is Null in this domain")

    return expected_conf_lists
5,357,930
def small_view(data, attribute):
    """
    Extract a downsampled view from a dataset, for quick statistical summaries.
    """
    shp = data.shape
    view = tuple([slice(None, None, np.intp(max(s / 50, 1))) for s in shp])
    return data[attribute, view]
5,357,931
def playerStandings():
    """Returns a list of the players and their win records, sorted by wins.

    The first entry in the list should be the player in first place, or a
    player tied for first place if there is currently a tie.

    Returns:
      A list of tuples, each of which contains (id, name, wins, matches):
        id: the player's unique id (assigned by the database)
        name: the player's full name (as registered)
        wins: the number of matches the player has won
        matches: the number of matches the player has played
    """
    # connect to the database
    db = connect()
    # create a cursor object
    c = db.cursor()
    # get the standings from the matches table using the SQL query below
    query = '''
        SELECT wins_table.id, wins_table.team_name, wins_table.wins,
               wins_table.wins + loses_table.loses AS total
        FROM
            (SELECT TEAMS.*,
                    (SELECT COUNT(*) FROM MATCHES
                     WHERE MATCHES.winner = TEAMS.id) AS WINS
             FROM TEAMS) AS wins_table,
            (SELECT TEAMS.*,
                    (SELECT COUNT(*) FROM MATCHES
                     WHERE MATCHES.loser = TEAMS.id) AS LOSES
             FROM TEAMS) AS loses_table
        WHERE wins_table.id = loses_table.id
        ORDER BY wins_table.wins DESC;
    '''
    # execute the query
    c.execute(query)
    # fetch the result
    result = c.fetchall()
    # close the connection to the database
    db.close()
    return result
5,357,932
def get_all_feature_names(df: pd.DataFrame, target: str = None) -> list:
    """Get a list of all feature names in a dataframe.

    Args:
        df (pd.DataFrame): dataframe of features and target variable
        target (str): name of target column in df

    Returns:
        all_feature_names (list): list of all feature names
    """
    # if using the main df
    if target in df.columns.tolist():
        df = df.loc[:, ~df.columns.isin([target])]
        all_feature_names = df.columns.tolist()
    # if using samples_df with true and predicted labels
    else:
        df = df.loc[:, ~df.columns.isin(['true_label', 'predicted_label'])]
        all_feature_names = df.columns.tolist()
    return all_feature_names
5,357,933
def skipUnlessAddressSanitizer(func):
    """Decorate the item to skip test unless Clang -fsanitize=address is supported."""

    def is_compiler_with_address_sanitizer(self):
        compiler_path = self.getCompiler()
        compiler = os.path.basename(compiler_path)
        f = tempfile.NamedTemporaryFile()
        if lldbplatformutil.getPlatform() == 'windows':
            return "ASAN tests not compatible with 'windows'"
        cmd = "echo 'int main() {}' | %s -x c -o %s -" % (compiler_path, f.name)
        if os.popen(cmd).close() is not None:
            return None  # The compiler cannot compile at all, let's *not* skip the test
        cmd = "echo 'int main() {}' | %s -fsanitize=address -x c -o %s -" % (compiler_path, f.name)
        if os.popen(cmd).close() is not None:
            return "Compiler cannot compile with -fsanitize=address"
        return None

    return skipTestIfFn(is_compiler_with_address_sanitizer)(func)
5,357,934
def generate_enhancer_promoter_pair(ep_df):
    """Generate true enhancer-promoter pairs and an equal number of fake pairs."""
    std_ep_pair = ep_df[['chrom-Enh', 'chromStart', 'chromEnd', 'TSS']]
    min_ep_gap = abs((std_ep_pair['chromEnd'] - std_ep_pair['chromStart']).min())
    max_ep_gap = abs((std_ep_pair['chromEnd'] - std_ep_pair['chromStart']).max())

    fake_samples = []
    for enhancer in std_ep_pair[['chrom-Enh', 'chromStart', 'chromEnd']].values:
        for promoter in std_ep_pair['TSS'].values:
            gap = abs(enhancer[-1] - promoter)
            if gap > min_ep_gap and gap < max_ep_gap:
                current_sample = np.r_[enhancer, promoter]
                fake_samples.append(current_sample)
    fake_samples = random.sample(fake_samples, std_ep_pair.shape[0])
    fake_ep_pair = pd.DataFrame(fake_samples,
                                columns=['chrom-Enh', 'chromStart', 'chromEnd', 'TSS'])
    return std_ep_pair, fake_ep_pair
5,357,935
def constant_lrs(
    draw, return_kwargs: bool = False
) -> Union[
    st.SearchStrategy[lr_scheduler_pb2.ConstantLR],
    st.SearchStrategy[Tuple[lr_scheduler_pb2.ConstantLR, Dict]],
]:
    """Returns a SearchStrategy for a ConstantLR plus maybe the kwargs."""
    kwargs: Dict = {}

    # initialise and return
    all_fields_set(lr_scheduler_pb2.ConstantLR, kwargs)
    constant_lr = lr_scheduler_pb2.ConstantLR(**kwargs)
    if not return_kwargs:
        return constant_lr
    return constant_lr, kwargs
5,357,936
def get_read_only_storage_manager():
    """Get the current Flask app's read only storage manager, create if necessary."""
    return current_app.config.setdefault('read_only_storage_manager',
                                         ReadOnlyStorageManager())
5,357,937
def change_base(x: int, base: int):
    """Change numerical base of input number x to base.

    Return string representation after the conversion. Base numbers are less
    than 10.

    >>> change_base(8, 3)
    '22'
    >>> change_base(8, 2)
    '1000'
    >>> change_base(7, 2)
    '111'

    Example solution:
    # line 1
    ret = ""
    # line 2
    while x > 0:
    # line 3
        ret = str(x % base) + ret
    # line 4
        x /= base
    # line 5
    return ret
    """
    # Please print out which line of the above program contains an error.
    # E.g. if the bug is on line 4 then print 4
    # END OF CONTEXT
    print("4")
    # END OF SOLUTION
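For reference, a corrected version of the buggy helper from the docstring: line 4 uses true division, so in Python 3 x becomes a float and `x % base` stops producing clean digits; floor division fixes it (the `change_base_fixed` name is ours, not from the original):

def change_base_fixed(x: int, base: int) -> str:
    ret = ""
    while x > 0:
        ret = str(x % base) + ret
        x //= base  # floor division keeps x an int
    return ret

assert change_base_fixed(8, 3) == "22"
assert change_base_fixed(7, 2) == "111"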
5,357,938
def __parse_sql(sql_rows):
    """Parse sqlite3 database output.

    Modify this function if you have a different database setup.
    Helper function for sql_get().

    Parameters:
        sql_rows (str): output from SQL SELECT query.

    Returns:
        dict
    """
    column_names = ['id', 'requester', 'item_name', 'custom_name', 'quantity',
                    'crafting_discipline', 'special_instruction', 'status',
                    'rarity', 'resource_provided', 'pub-date', 'crafter', 'stats']
    request_dict = {
        str(row[0]): {column_names[i]: row[i] for i, _ in enumerate(column_names)}
        for row in sql_rows
    }
    return request_dict
5,357,939
def assert_almost_equal(
    actual: Tuple[numpy.float64, numpy.float64],
    desired: List[numpy.float64],
    decimal: int,
    err_msg: Literal["(0, 30)agresti_coull"],
):
    """
    usage.statsmodels: 1
    """
    ...
5,357,940
def _generate_to(qubo, seed, oct_upper_bound, bias=0.5):
    """
    Given a QUBO, an upper bound on oct, and a bias of bipartite vertices,
    generate an Erdos-Renyi graph such that oct_upper_bound number of vertices
    form an OCT set and the remaining vertices are partitioned into partites
    (left partite set with probability of "bias"). Edges within each partite
    set are then removed.
    """
    # Compute parameters needed for ER
    n = qubo.order()
    p = qubo.size() / scipy.special.binom(n, 2)

    # Generate graph
    graph = nx.erdos_renyi_graph(n=n, p=p, seed=seed)
    random.seed(seed)

    # Compute partite sets on the remaining vertices
    nodes = list(graph.nodes())[oct_upper_bound:]
    partite1 = set()
    partite2 = set()
    for node in nodes:
        if random.random() < bias:
            partite1.add(node)
        else:
            partite2.add(node)

    # Remove edges within a partite set
    for edge in chain(combinations(partite1, 2), combinations(partite2, 2)):
        if graph.has_edge(*edge):
            graph.remove_edge(*edge)

    # Name the graph
    graph.graph['name'] = '{}-{}-{}'.format(qubo.graph['name'], 'to', seed)

    # Sanitize the graph and return
    graph = reset_labels(graph)
    return graph
5,357,941
def sqlCreate(fields=None, extraFields=None, addCoastGuardFields=True, dbType='postgres'):
    """Return the sqlhelp object to create the table.

    @param fields: which fields to put in the create. Defaults to all.
    @param extraFields: A sequence of tuples containing (name, sql type) for additional fields
    @param addCoastGuardFields: Add the extra fields that come after the NMEA checksum from the USCG N-AIS format
    @type addCoastGuardFields: bool
    @param dbType: Which flavor of database we are using so that the create is tailored ('sqlite' or 'postgres')
    @return: An object that can be used to generate a create
    @rtype: sqlhelp.create
    """
    if fields is None:
        fields = fieldList
    c = sqlhelp.create('waterlevel', dbType=dbType)
    c.addPrimaryKey()
    if 'MessageID' in fields: c.addInt('MessageID')
    if 'RepeatIndicator' in fields: c.addInt('RepeatIndicator')
    if 'UserID' in fields: c.addInt('UserID')
    if 'Spare' in fields: c.addInt('Spare')
    if 'dac' in fields: c.addInt('dac')
    if 'fid' in fields: c.addInt('fid')
    if 'month' in fields: c.addInt('month')
    if 'day' in fields: c.addInt('day')
    if 'hour' in fields: c.addInt('hour')
    if 'min' in fields: c.addInt('min')
    if 'stationid' in fields: c.addVarChar('stationid', 7)
    if 'waterlevel' in fields: c.addInt('waterlevel')
    if 'datum' in fields: c.addInt('datum')
    if 'sigma' in fields: c.addInt('sigma')
    if 'source' in fields: c.addInt('source')

    if addCoastGuardFields:
        # c.addInt('cg_s_rssi')      # Relative signal strength indicator
        # c.addInt('cg_d_strength')  # dBm receive strength
        # c.addVarChar('cg_x', 10)   # Idonno
        c.addInt('cg_t_arrival')     # Receive timestamp from the AIS equipment 'T'
        c.addInt('cg_s_slotnum')     # Slot received in
        c.addVarChar('cg_r', 15)     # Receiver station ID - should usually be an MMSI, but sometimes is a string
        c.addInt('cg_sec')           # UTC seconds since the epoch
        c.addTimestamp('cg_timestamp')  # UTC decoded cg_sec - not actually in the data stream

    return c
5,357,942
def _section_cohort_management(course, access):
    """Provide data for the corresponding cohort management section."""
    course_key = course.id
    ccx_enabled = hasattr(course_key, 'ccx')
    section_data = {
        'section_key': 'cohort_management',
        'section_display_name': _('Cohorts'),
        'access': access,
        'ccx_is_enabled': ccx_enabled,
        'course_cohort_settings_url': reverse(
            'course_cohort_settings',
            kwargs={'course_key_string': str(course_key)}
        ),
        'cohorts_url': reverse('cohorts', kwargs={'course_key_string': str(course_key)}),
        'upload_cohorts_csv_url': reverse('add_users_to_cohorts', kwargs={'course_id': str(course_key)}),
        'verified_track_cohorting_url': reverse(
            'verified_track_cohorting',
            kwargs={'course_key_string': str(course_key)}
        ),
    }
    return section_data
5,357,943
def rivers_by_station_number(stations, N):
    """Returns a list of N tuples of the form (river name, number of stations
    on the river). The tuples are sorted in decreasing order of station count.
    If several rivers have the same number of stations as the Nth river, these
    are also included."""
    riversList = stations_by_river(stations)  # get the list of rivers to consider
    riverNumber = []
    for River in riversList:
        # build a tuple of (river name, number of stations)
        riverNumber.append((River, len(riversList[River])))
    riverNumber.sort(key=lambda x: x[1], reverse=True)  # sort into decreasing numerical order

    # Include any rivers with a station count equal to the 'final' one being output.
    extraStations = 0
    # search through the next few rivers to see how many have the same number of stations
    for i in range(N, len(riverNumber)):
        if riverNumber[i][1] == riverNumber[N - 1][1]:
            extraStations += 1
        else:
            break  # items are pre-sorted, so exit once the count differs
    N += extraStations  # adjust the value of N
    return riverNumber[:N]
5,357,944
def load_default_data() -> dict[str, str]:
    """Finds and opens a .json file with streamer data.

    Reads from the file and assigns the data to streamer_list.

    Args:
        None

    Returns:
        A dict mapping keys (Twitch usernames) to their corresponding URLs.
        Each row is represented as a separate streamer. For example:

        {
            "GMHikaru": "https://www.twitch.tv/GMHikaru"
        }
    """
    # use forward slashes so the Windows-style backslashes are not
    # interpreted as escape sequences
    with open("statum/static/streamers.json", "r") as default_streamers:
        streamer_list: dict[str, str] = json.load(default_streamers)
    return streamer_list
5,357,945
def get_value_key(generator, name):
    """
    Return a key for the given generator and name pair.

    If name is None, no key is generated.
    """
    if name is not None:
        return f"{generator}+{name}"
    return None
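A quick usage sketch:

assert get_value_key("uuid4", "user_id") == "uuid4+user_id"
assert get_value_key("uuid4", None) is None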
5,357,946
def wav_to_log_spectrogram_clips(wav_file):
    """Convert audio into a logarithmic spectrogram, then chop it into 2D segments of 100 frames."""
    # convert audio into a spectrogram
    sound, sr = librosa.load(wav_file, sr=SR, mono=True)
    stft = librosa.stft(sound, n_fft=N_FFT, hop_length=HOP_LEN, win_length=WIN_LEN)
    mag, phase = librosa.magphase(stft)
    db_spectro = librosa.amplitude_to_db(mag)

    # chop magnitude of spectrogram into clips, each with FREQ_BINS bins and 100 frames
    db_spectro_clips = np.empty((0, FREQ_BINS, 100))
    for i in range(math.floor(mag.shape[1] / 100)):
        db_spectro_clips = np.concatenate(
            (db_spectro_clips, db_spectro[np.newaxis, :, i * 100: (i + 1) * 100]))
    return db_spectro_clips
5,357,947
def main():
    """Main entry point into the SeisFlows package."""
    # Easy way to convert strings to functions
    acceptable_args = {"submit": submit, "resume": resume, "clean": clean,
                       "restart": restart, "debug": debug}

    try:
        main_arg = get_args().main_args[0]
    except IndexError:
        ok_args = list(acceptable_args.keys()) + SeisShows()._public_methods
        sys.exit(f"\n\tseisflows command requires an argument. "
                 f"available arguments are:\n\t{ok_args}\n"
                 f"\ttype 'seisflows -h' for a help message\n")

    if main_arg in acceptable_args.keys():
        acceptable_args[main_arg]()
    else:
        SeisShows()
5,357,948
def get_title(mods):
    """
    Takes the object's MODS and extracts and returns the text of the title.
    """
    title = mods.find("{{{0}}}titleInfo/{{{0}}}title".format(MODS_NS))
    if title is not None:
        return title.text
5,357,949
def get_from_identity(session, key, passive):
    """Look up the given key in the given session's identity map and check
    the object for expired state if found.
    """
    instance = session.identity_map.get(key)
    if instance is not None:
        state = attributes.instance_state(instance)

        # expired - ensure it still exists
        if state.expired:
            if not passive & attributes.SQL_OK:
                # TODO: no coverage here
                return attributes.PASSIVE_NO_RESULT
            elif not passive & attributes.RELATED_OBJECT_OK:
                # this mode is used within a flush and the instance's
                # expired state will be checked soon enough, if necessary
                return instance
            try:
                state._load_expired(state, passive)
            except orm_exc.ObjectDeletedError:
                session._remove_newly_deleted([state])
                return None
        return instance
    else:
        return None
5,357,950
def sample_ingredient(user, name='Cinnamon'):
    """
    Create and return a sample ingredient

    :param user: User(custom) object
    :param name: name of the ingredient
    :return: Ingredient object
    """
    return Ingredient.objects.create(user=user, name=name)
5,357,951
def check_if_string(data):
    """Takes data as an argument and checks whether it is a string instance.

    Args:
        data: Data to check for.

    Returns:
        result: A boolean, True if the data provided is a string instance.
    """
    if sys.version_info[0] == 2:
        return isinstance(data, basestring)
    return isinstance(data, str)
5,357,952
def viterbi(O, S, Y, pi, A, B):
    """Generates a path which is a sequence of most likely states that
    generates the given observation Y.

    Args:
        O (numpy.ndarray): observation space. Size: 1 X N
        S (numpy.ndarray): state space. Size: 1 X K
        Y (list): observation sequence. Size: 1 X T
        pi (numpy.ndarray): initial probabilities. Size: 1 X K
        A (numpy.ndarray): transition matrix. Size: K X K
        B (numpy.ndarray): emission matrix. Size: N X K

    Returns:
        list: list of most likely sequence of POS tags
    """
    # Reference: https://en.wikipedia.org/wiki/Viterbi_algorithm#Pseudocode
    # **************************************************************************
    # Example data for trial
    # input
    # O = np.arange(1, 7)  # observation space # uniq words # Size = 1 X N
    # S = np.asarray([0, 1, 2])  # state space # uniq POS tags # Size = 1 X K
    # Y = np.array([0, 2, 0, 2, 2, 1]).astype(np.int32)  # observation sequence
    #                                                    # Size = 1 X T
    # pi = np.array([0.6, 0.2, 0.2])  # initial probability # Size = 1 X K
    # A = np.array([[0.8, 0.1, 0.1],
    #               [0.2, 0.7, 0.1],
    #               [0.1, 0.3, 0.6]])  # transition matrix # Size = K X K
    # B = np.array([[0.7, 0.0, 0.3],
    #               [0.1, 0.9, 0.0],
    #               [0.0, 0.2, 0.8]])  # emission matrix # Size = K X N
    # output
    # X = [0, 0, 0, 2, 2, 1]  # most likely path/sequence
    # **************************************************************************
    N = len(O)
    K = len(S)
    T = len(Y)

    T1 = np.zeros(shape=(K, T))
    T2 = np.zeros(shape=(K, T))
    for i in range(K):
        T1[i, 0] = pi[i] * B[i, Y[0]]
        T2[i, 0] = 0

    for j in range(1, T):
        for i in range(K):
            if Y[j] == -1:
                # Unknown word handling. Set B[i, Y[j]] = 1 for all tags if
                # Y[j] == -1, i.e. the word was not found in the train set.
                next_prob = T1[:, j - 1] * A[:, i] * 1
            else:
                next_prob = T1[:, j - 1] * A[:, i] * B[i, Y[j]]
            T1[i, j] = np.max(next_prob)
            T2[i, j] = np.argmax(next_prob)

    Z = [None] * T
    X = [None] * T

    # Backpointer
    Z[T - 1] = np.argmax(T1[:, T - 1])
    X[T - 1] = S[Z[T - 1]]
    for j in reversed(range(1, T)):
        Z[j - 1] = T2[int(Z[j]), j]
        X[j - 1] = S[int(Z[j - 1])]

    return X
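A driver built from the worked example embedded in the comments above (the expected output is taken directly from those comments):

import numpy as np

O = np.arange(1, 7)                                  # observation space
S = np.asarray([0, 1, 2])                            # state space
Y = np.array([0, 2, 0, 2, 2, 1]).astype(np.int32)    # observation sequence
pi = np.array([0.6, 0.2, 0.2])                       # initial probabilities
A = np.array([[0.8, 0.1, 0.1],
              [0.2, 0.7, 0.1],
              [0.1, 0.3, 0.6]])                      # transition matrix
B = np.array([[0.7, 0.0, 0.3],
              [0.1, 0.9, 0.0],
              [0.0, 0.2, 0.8]])                      # emission matrix

print(viterbi(O, S, Y, pi, A, B))  # expected per the comments: [0, 0, 0, 2, 2, 1]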
5,357,953
def int_to_bigint(value):
    """Convert integers larger than 64 bits to bytes.

    Smaller integers are left alone.
    """
    if value.bit_length() > 63:
        return value.to_bytes((value.bit_length() + 9) // 8, 'little', signed=True)
    return value
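A round-trip sketch; note that `int.to_bytes` returns `bytes` for wide values, while small integers pass through untouched:

big = 1 << 70
packed = int_to_bigint(big)
assert isinstance(packed, bytes)
assert int.from_bytes(packed, 'little', signed=True) == big
assert int_to_bigint(42) == 42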
5,357,954
def test_contact(client):
    """Test the contact."""
    # all fields are required:
    rv = client.post("/contact", data=dict(name="user"))
    assert b"Something fishy in the request" in rv.data

    # email address must be `valid`:
    rv = client.post(
        "/contact",
        data=dict(
            name="",
            email="user@testcom",
            message="",
            captcha="",
            browser_time="",
            win_res="",
            newsletter_subscription=0,
            privacy_policy=True,
        ),
    )
    assert b"Email required" in rv.data

    # a message must be written:
    rv = client.post(
        "/contact",
        data=dict(
            name="",
            email="[email protected]",
            message="",
            captcha="",
            browser_time="",
            win_res="",
            newsletter_subscription=0,
            privacy_policy=True,
        ),
    )
    assert b"Please write something" in rv.data

    # all good now:
    rv = client.post(
        "/contact",
        data=dict(
            name="",
            email="us [email protected]",  # whitespaces should be removed
            message="Once upon a time...",
            captcha="",  # not needed in testing mode
            browser_time="",  # could be anything (should not be trusted)
            win_res="",  # could be anything (should not be trusted)
            newsletter_subscription=0,
            privacy_policy=True,
        ),
    )
    assert b"Thanks a lot for your message" in rv.data
    assert b"thanks for subscribing to the newsletter" not in rv.data

    # cannot send another message for a while:
    rv = client.post(
        "/contact",
        data=dict(
            name="",
            email="[email protected]",
            message="Once upon a time...",
            captcha="",
            browser_time="",
            win_res="",
            newsletter_subscription=0,
            privacy_policy=True,
        ),
    )
    assert b"Please wait before sending me another message" in rv.data
    time.sleep(2)

    # can send again and try this time with a specified name:
    with client:
        name = "Mozart"
        rv = client.post(
            "/contact",
            data=dict(
                name=name,
                email="[email protected]",
                message="Once upon a time...",
                captcha="",
                browser_time="",
                win_res="",
                newsletter_subscription=1,  # subscribe
                privacy_policy=True,
            ),
        )
        assert ("Thank you " + name).encode("utf-8") in rv.data
        # check the subscription as well:
        assert b"See you soon" in rv.data
        cursor = get_db().cursor()
        cursor.execute("SELECT username FROM members WHERE email='[email protected]'")
        data = cursor.fetchone()
        assert cursor.rowcount == 1
        assert data[0] == name
    time.sleep(2)

    # send again with a custom subject:
    data = dict(
        name=name,
        email="[email protected]",
        message="I forgot to tell you that...",
        captcha="",
        browser_time="",
        win_res="",
        newsletter_subscription=1,  # subscribe
        privacy_policy=True,
    )
    data["subjects[]"] = ["subject-is-hello", "subject-is-fine-art-print-enquiry"]
    rv = client.post("/contact", data=data)
    assert ("Thank you " + name).encode("utf-8") in rv.data
    # already subscribed but the feedback is the same to avoid guessing the email address book
    assert b"See you soon" not in rv.data
5,357,955
def xcorr(S, dtmax=10):
    """
    Cross correlate each pair of columns in S at offsets up to dtmax.
    """
    (T, N) = S.shape
    H = np.zeros((N, N, dtmax))

    # Compute cross correlation at each time offset
    for dt in np.arange(dtmax):
        # Compute correlation in sections to conserve memory
        chunksz = 16
        for n1 in np.arange(N, step=chunksz):
            for n2 in np.arange(N, step=chunksz):
                n1c = min(n1 + chunksz, N)
                n2c = min(n2 + chunksz, N)
                # Corr coef is a bit funky. We want the upper right quadrant
                # of this matrix. The result is ((n1c-n1)+(n2c-n2)) x ((n1c-n1)+(n2c-n2))
                H[n1:n1c, n2:n2c, dt] = np.corrcoef(S[:T - dt, n1:n1c].T,
                                                    S[dt:, n2:n2c].T)[:(n1c - n1), (n1c - n1):]

        # Set diagonal to zero at zero offset (obviously perfectly correlated)
        if dt == 0:
            H[:, :, 0] = H[:, :, 0] - np.diag(np.diag(H[:, :, 0]))

    return H
5,357,956
def register_hooks():
    """Exec all the rules files. Gather the hooks from them and load them into
    the hook dict for later use.
    """
    global HOOKS_LOADED
    for name, path in load_rules().items():
        globals = {}
        with open(path) as f:
            exec(compile(f.read(), path, 'exec'), globals)
        DESCRIPTIONS[name] = globals['__doc__']
        for hook_name in HOOKS.keys():
            if hook_name in globals:
                HOOKS[hook_name].append(globals[hook_name])
    HOOKS_LOADED = True
    return HOOKS
5,357,957
def jboss_status(jboss_cli_home, server_ip, jboss_admin_port, jboss_admin,
                 jboss_admin_pwd, timeout='60000'):
    """
    | Purpose: query JBoss status
    | Parameters: see the argument list
    | Return value: result of the CLI call
    | Logic: invoke jboss-cli.sh with the "deployment-info" command
    | Author: jhuang
    | Date:
    """
    time_start = time.time()
    jboss_cli = 'jboss-cli.sh'
    if jboss_cli_home[-1] != '/':
        jboss_cli_home = jboss_cli_home + '/'
    ret = exec_shell(
        'sh %sbin/%s --connect --controller=%s:%s --user=%s --password=%s '
        '--command="deployment-info" --timeout=%s' % (
            jboss_cli_home, jboss_cli, server_ip, jboss_admin_port,
            jboss_admin, jboss_admin_pwd, timeout))
    logger.debug('Fetching JBoss status took: %s' % (time.time() - time_start))
    return ret
5,357,958
def proxy_rotator():
    """Return a cycle object of proxy dict."""
    return Proxy.get_proxy_rotator()
5,357,959
def xpdacq_list_grid_scan(detectors: list, *args,
                          snake_axes: typing.Union[bool, typing.Iterable[bool], None] = None,
                          per_step: typing.Callable = xpdacq_per_step,
                          md: typing.Union[dict, None] = None) -> typing.Generator:
    """
    Scan over a mesh; each motor is on an independent trajectory.

    Parameters
    ----------
    detectors: list
        list of 'readable' objects
    args: list
        patterned like (``motor1, position_list1,``
                        ``motor2, position_list2,``
                        ``motor3, position_list3,``
                        ``...,``
                        ``motorN, position_listN``)

        The first motor is the "slowest", the outer loop. ``position_list``'s
        are lists of positions; all lists must have the same length. Motors
        can be any 'settable' object (motor, temp controller, etc.).
    snake_axes: boolean or iterable, optional
        which axes should be snaked, either ``False`` (do not snake any axes),
        ``True`` (snake all axes) or a list of axes to snake. "Snaking" an
        axis is defined as following a snake-like, winding trajectory instead
        of a simple left-to-right trajectory. The elements of the list are
        motors that are listed in `args`. The list must not contain the
        slowest (first) motor, since it can't be snaked.
    per_step: callable, optional
        hook for customizing action of inner loop (messages per step).
        See docstring of :func:`bluesky.plan_stubs.one_nd_step` (the default)
        for details.
    md: dict, optional
        metadata
    """
    yield from bp.list_grid_scan(detectors, *args, snake_axes=snake_axes,
                                 per_step=per_step, md=md)
5,357,960
def pow(a, b):
    """Return an attribute that represents a ^ b."""
    return multiplyDivide(a, b, MultiplyDivideOperation.POWER)
5,357,961
async def send_simple_embed_to_channel(bot: commands.Bot, channel_name: str, message: str,
                                       color: str = config["colors"]["default"]) -> discord.Message:
    """Send a simple embed message to the channel with the given name in the given guild,
    using the given message and an optional colour.

    Args:
        bot (commands.Bot): The bot containing the guild with the channel to send the message to.
        channel_name (str): The name of the channel to send the message to.
        message (str): The contents of the message.
        color (str, optional): The colour that will be used in the embed.
            Defaults to config["colors"]["default"].

    Returns:
        discord.Message: The embed message that was sent.
    """
    guild: discord.Guild = bot_util.get_guild(bot, config["guild-id"])
    channel: discord.TextChannel = guild_util.get_channel_by_name(guild, channel_name)
    return await channel.send(embed=discord.Embed(description=message, color=int(color, 0)))
5,357,962
def retry_on_server_errors_timeout_or_quota_issues_filter(exception):
    """Retry on server, timeout and 403 errors.

    403 errors can be accessDenied, billingNotEnabled, and also quotaExceeded,
    rateLimitExceeded."""
    if HttpError is not None and isinstance(exception, HttpError):
        if exception.status_code == 403:
            return True
    return retry_on_server_errors_and_timeout_filter(exception)
5,357,963
def load_all_data(data_path):
    """Load all mode data."""
    image_list = []
    for cam in os.listdir(data_path):
        image_dir = os.path.join(data_path, cam, 'dets')
        cam_image_list = glob(image_dir + '/*.png')
        cam_image_list = sorted(cam_image_list)
        print(f'{len(cam_image_list)} images for {cam}')
        image_list += cam_image_list
    print(f'{len(image_list)} images in total')
    return image_list
5,357,964
def cmd_line_parser():
    """This function parses the command line parameters and arguments."""
    parser = argparse.ArgumentParser(
        usage="python " + sys.argv[0] + " [-h] [passive/active] -d [Domain] [Options]",
        epilog='\tExample: \r\npython ' + sys.argv[0] + " passive -d baidu.com -o html")
    parser._optionals.title = "OPTIONS"
    parser._positionals.title = "POSITION OPTIONS"
    parser.add_argument("scan_model", type=str, help="active or passive")

    # active part
    active = parser.add_argument_group("active", "active scan configuration options")
    active.add_argument("-x", "--xxxxx", dest="load_config_file", default=False,
                        action="store_true", help="xxxxxxxxxxxx")

    # passive part
    passive = parser.add_argument_group("passive", "passive scan configuration options")
    passive.add_argument("-w", "--word-list", default=False,
                         help="Custom brute force dictionary path")

    # other
    parser.add_argument("-d", "--domain", dest="domain", default=False,
                        help="Target to scan")
    parser.add_argument("-m", "--multi-domain", dest="domains_file", default=False,
                        help="Multi Target to scan")
    parser.add_argument("-o", "--format", default=False,
                        help="The format of the output file")

    if len(sys.argv) == 1:
        sys.argv.append("-h")
    return parser.parse_args()
5,357,965
def CSourceForElfSymbolTable(variable_prefix, names, str_offsets):
    """Generate C source definition for an ELF symbol table.

    Args:
        variable_prefix: variable name prefix
        names: List of symbol names.
        str_offsets: List of symbol name offsets in string table.

    Returns:
        String containing C source fragment.
    """
    out = (
        r'''// NOTE: ELF32_Sym and ELF64_Sym have very different layout.
#if UINTPTR_MAX == UINT32_MAX  // ELF32_Sym
#  define DEFINE_ELF_SYMBOL(name, name_offset, address, size) \
    { (name_offset), (address), (size), ELF_ST_INFO(STB_GLOBAL, STT_FUNC), \
      0 /* other */, 1 /* shndx */ },
#else  // ELF64_Sym
#  define DEFINE_ELF_SYMBOL(name, name_offset, address, size) \
    { (name_offset), ELF_ST_INFO(STB_GLOBAL, STT_FUNC), \
      0 /* other */, 1 /* shndx */, (address), (size) },
#endif  // !ELF64_Sym

''')
    out += 'static const ELF::Sym k%sSymbolTable[] = {\n' % variable_prefix
    out += '  { 0 },  // ST_UNDEF\n'
    out += '  LIST_ELF_SYMBOLS_%s(DEFINE_ELF_SYMBOL)\n' % variable_prefix
    out += '};\n'
    out += '#undef DEFINE_ELF_SYMBOL\n'
    return out
5,357,966
def _kahan_reduction(x, y):
    """Implements the Kahan summation reduction."""
    (s, c), (s1, c1) = x, y
    for val in -c1, s1:
        u = val - c
        t = s + u
        # TODO(b/173158845): XLA:CPU reassociates-to-zero the correction term.
        c = (t - s) - u
        s = t
    return s, c
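A usage sketch: folding (value, compensation) pairs through the reduction preserves small addends that a naive float sum rounds away:

from functools import reduce

# Each input enters as (value, 0.0); the pair carries
# (running sum, running compensation).
pairs = [(v, 0.0) for v in (1e16, 1.0, 1.0)]
total, _ = reduce(_kahan_reduction, pairs)
assert total == 1e16 + 2.0            # compensated: both 1.0s survive
assert sum((1e16, 1.0, 1.0)) == 1e16  # naive float sum drops them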
5,357,967
def get_endpoint_access(endpoint_name: Optional[str] = None,
                        opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetEndpointAccessResult:
    """
    Resource schema for a Redshift-managed VPC endpoint.

    :param str endpoint_name: The name of the endpoint.
    """
    __args__ = dict()
    __args__['endpointName'] = endpoint_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    __ret__ = pulumi.runtime.invoke('aws-native:redshift:getEndpointAccess', __args__,
                                    opts=opts, typ=GetEndpointAccessResult).value

    return AwaitableGetEndpointAccessResult(
        address=__ret__.address,
        endpoint_create_time=__ret__.endpoint_create_time,
        endpoint_status=__ret__.endpoint_status,
        port=__ret__.port,
        vpc_endpoint=__ret__.vpc_endpoint,
        vpc_security_group_ids=__ret__.vpc_security_group_ids,
        vpc_security_groups=__ret__.vpc_security_groups)
5,357,968
def wait_until_active(tol=5):
    """
    Block while the system is inactive; return once user activity is detected.

    Polls the timestamp of the last input event, doubling the delay between
    polls up to a cap of ``tol`` seconds.

    Parameters
    ----------
    tol : float, optional
        Upper bound on the polling delay, in seconds. The default is 5.

    Returns
    -------
    None.
    """
    liinfo = LASTINPUTINFO()
    liinfo.cbSize = ctypes.sizeof(liinfo)
    lasttime = None
    delay = 1
    maxdelay = int(tol * 1000)
    while True:
        GetLastInputInfo(ctypes.byref(liinfo))
        if lasttime is None:
            lasttime = liinfo.dwTime
        if lasttime != liinfo.dwTime:
            break
        delay = min(2 * delay, maxdelay)
        Sleep(delay)
5,357,969
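# The function above relies on Win32 bindings defined elsewhere in its module.
# A minimal sketch of what they plausibly look like (my reconstruction, not
# part of the source; the names match the usage above):
import ctypes
from ctypes import wintypes

class LASTINPUTINFO(ctypes.Structure):
    # Mirrors the Win32 LASTINPUTINFO struct consumed by GetLastInputInfo.
    _fields_ = [("cbSize", wintypes.UINT),
                ("dwTime", wintypes.DWORD)]

GetLastInputInfo = ctypes.windll.user32.GetLastInputInfo  # tick count of last input
Sleep = ctypes.windll.kernel32.Sleep                      # sleep for N milliseconds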
def predictor(
    service: MLFlowDeploymentService,
    data: np.ndarray,
) -> Output(predictions=np.ndarray):
    """Run an inference request against a prediction service"""

    service.start(timeout=10)  # should be a NOP if already started
    prediction = service.predict(data)
    prediction = prediction.argmax(axis=-1)  # class index with the highest score
    return prediction
5,357,970
def test_datasets_provenance_after_remove(
    runner, client, directory_tree, load_dataset_with_injection, get_datasets_provenance_with_injection
):
    """Test datasets provenance is updated after removing a dataset."""
    assert 0 == runner.invoke(cli, ["dataset", "add", "my-data", "-c", str(directory_tree)]).exit_code

    dataset = load_dataset_with_injection("my-data", client)

    assert 0 == runner.invoke(cli, ["dataset", "rm", "my-data"]).exit_code

    with get_datasets_provenance_with_injection(client) as datasets_provenance:
        current_version = datasets_provenance.get_by_name("my-data")
        provenance = datasets_provenance.get_provenance()

    assert current_version is None
    # NOTE: We only keep the tail of the provenance chain for each dataset in the provenance
    assert 1 == len(provenance)

    last_version = next(d for d in provenance)

    assert last_version.is_removed() is True
    assert_dataset_is_mutated(old=dataset, new=last_version)
5,357,971
def test_block_template_with_missing_template(app_with_mail): """Test that a missing template raises an exception.""" with app_with_mail.app_context(): with pytest.raises(TemplateNotFound): BlockTemplatedMessage("missing.html")
5,357,972
def get_domain(ns, domain):
    """
    Return LMIInstance of given LMI_SSSDDomain.

    :param ns: LMI namespace used to look up the instance.
    :type domain: string
    :param domain: Name of the domain to find.
    :rtype: LMIInstance of LMI_SSSDDomain
    """
    keys = {'Name': domain}
    try:
        inst = ns.LMI_SSSDDomain.new_instance_name(keys).to_instance()
    except wbem.CIMError as err:
        if err.args[0] == wbem.CIM_ERR_NOT_FOUND:
            raise LmiFailed("Cannot find the domain: %s" % domain)
        raise
    return inst
5,357,973
def mmd_loss(embedding, auxiliary_labels, weights_pos, weights_neg, params):
    """Compute the MMD loss; weighted if weights are provided, else unweighted."""
    if weights_pos is None:
        return mmd_loss_unweighted(embedding, auxiliary_labels, params)
    return mmd_loss_weighted(embedding, auxiliary_labels,
                             weights_pos, weights_neg, params)
5,357,974
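# For reference, a minimal sketch of what an unweighted MMD estimate typically
# computes: an RBF-kernel two-sample statistic. This is my illustration of the
# standard formula, not the project's `mmd_loss_unweighted`.
import numpy as np

def rbf_mmd2(x, y, sigma=1.0):
    """Biased MMD^2 estimate between samples x and y with an RBF kernel."""
    def k(a, b):
        d2 = ((a[:, None, :] - b[None, :, :]) ** 2).sum(-1)
        return np.exp(-d2 / (2.0 * sigma ** 2))
    return k(x, x).mean() + k(y, y).mean() - 2.0 * k(x, y).mean()

x = np.random.randn(100, 8)        # embeddings with auxiliary label 0
y = np.random.randn(100, 8) + 0.5  # embeddings with auxiliary label 1
print(rbf_mmd2(x, y))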
def perdidas(n_r, n_inv, n_x, **kwargs):
    """Compute the combined equipment losses (product of all efficiency factors)."""
    n_t = n_r * n_inv * n_x
    for value in kwargs.values():  # iterate the values; iterating the dict yields keys
        n_t = n_t * value
    return n_t
5,357,975
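# Usage sketch (illustrative values of my own): each keyword argument is an
# extra efficiency factor folded into the product.
n_total = perdidas(0.90, 0.95, 0.98, cableado=0.97, suciedad=0.99)
print(n_total)  # 0.90 * 0.95 * 0.98 * 0.97 * 0.99 ~= 0.805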
def _symlink_dep_cmd(lib, deps_dir, in_runfiles): """ Helper function to construct a command for symlinking a library into the deps directory. """ lib_path = lib.short_path if in_runfiles else lib.path return ( "ln -sf " + relative_path(deps_dir, lib_path) + " " + deps_dir + "/" + lib.basename + "\n" )
5,357,976
def boundingBoxEdgeLengths(domain): """ Returns the edge lengths of the bounding box of a domain :param domain: a domain :type domain: `escript.Domain` :rtype: ``list`` of ``float`` """ return [ v[1]-v[0] for v in boundingBox(domain) ]
5,357,977
def get_user(isamAppliance, user): """ Get permitted features for user NOTE: Getting an unexplained error for this function, URL maybe wrong """ return isamAppliance.invoke_get("Get permitted features for user", "/authorization/features/users/{0}/v1".format(user))
5,357,978
def autoupdate( config_file: str, store: Store, tags_only: bool, freeze: bool, repos: Sequence[str] = (), add_unused_hooks: bool = False, ) -> int: """Auto-update the pre-commit config to the latest versions of repos.""" migrate_config(config_file, quiet=True) retv = 0 rev_infos: List[Optional[RevInfo]] = [] changed = False config = load_config(config_file) for repo_config in config['repos']: if repo_config['repo'] in {LOCAL, META}: continue info = RevInfo.from_config(repo_config) if repos and info.repo not in repos: rev_infos.append(None) continue output.write(f'Updating {info.repo} ... ') new_info = info.update(tags_only=tags_only, freeze=freeze) try: _check_hooks_still_exist_at_rev(repo_config, new_info, store) except RepositoryCannotBeUpdatedError as error: output.write_line(error.args[0]) rev_infos.append(None) retv = 1 continue if new_info.rev != info.rev: changed = True if new_info.frozen: updated_to = f'{new_info.frozen} (frozen)' else: updated_to = new_info.rev msg = f'updating {info.rev} -> {updated_to}.' output.write_line(msg) rev_infos.append(new_info) else: output.write_line('already up to date.') rev_infos.append(None) if add_unused_hooks: unused_hooks = _get_unused_hooks(repo_config, new_info, store) if unused_hooks: changed = True for unused_hook in unused_hooks: repo_config['hooks'].append({'id': unused_hook}) if changed: _write_new_config(config_file, rev_infos) return retv
5,357,979
def plot_energy_resolution_cta_performance(cta_site, ax=None, **kwargs): """ Plot the cta performances (June 2018) for the true_energy resolution Parameters ---------- cta_site: string see `ctaplot.ana.cta_performance` ax: `matplotlib.pyplot.axes` kwargs: args for `matplotlib.pyplot.plot` Returns ------- ax: `matplotlib.pyplot.axes` """ ax = plt.gca() if ax is None else ax cta_req = ana.cta_performance(cta_site) e_cta, ar_cta = cta_req.get_energy_resolution() kwargs.setdefault('label', "CTA performance {}".format(cta_site)) ax.set_ylabel(r"$(\Delta energy/energy)_{68}$") ax.set_xlabel(rf'$E_R$ [{e_cta.unit.to_string("latex")}]') with quantity_support(): ax.plot(e_cta, ar_cta, **kwargs) ax.set_xscale('log') ax.grid(True, which='both') ax.legend() return ax
5,357,980
def _n_nested_blocked_random_indices(sizes, n_iterations): """ Returns indices to randomly resample blocks of an array (with replacement) in a nested manner many times. Here, "nested" resampling means to randomly resample the first dimension, then for each randomly sampled element along that dimension, randomly resample the second dimension, then for each randomly sampled element along that dimension, randomly resample the third dimension etc. Parameters ---------- sizes : OrderedDict Dictionary with {names: (sizes, blocks)} of the dimensions to resample n_iterations : int The number of times to repeat the random resampling """ shape = [s[0] for s in sizes.values()] indices = OrderedDict() for ax, (key, (_, block)) in enumerate(sizes.items()): indices[key] = _get_blocked_random_indices( shape[: ax + 1] + [n_iterations], ax, block ) return indices
5,357,981
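# `_get_blocked_random_indices` is not shown in this snippet. Below is a
# plausible reconstruction of the behaviour the function above depends on
# (my sketch, under assumptions -- the real helper may differ, e.g. by using
# circular blocks):
import numpy as np
from collections import OrderedDict

def _get_blocked_random_indices(shape, ax, block):
    """Random indices of `shape` whose values index axis `ax`, drawn as
    contiguous blocks of length `block` with replacement.
    Assumes 1 <= block <= shape[ax]."""
    n = shape[ax]
    n_blocks = -(-n // block)  # ceil(n / block)
    # Independent block starts for every position of the other dimensions.
    starts = np.random.randint(0, n - block + 1,
                               size=shape[:ax] + [n_blocks] + shape[ax + 1:])
    # Expand each start into a contiguous block on a new axis after `ax`,
    # then fold the blocks back into axis `ax` and trim to length n.
    offsets = np.arange(block).reshape(
        [block if i == ax + 1 else 1 for i in range(len(shape) + 1)])
    merged = (np.expand_dims(starts, ax + 1) + offsets).reshape(
        shape[:ax] + [n_blocks * block] + shape[ax + 1:])
    return np.take(merged, np.arange(n), axis=ax)

# Example: nested resampling of 100 time steps (blocks of 10) and 5 members.
sizes = OrderedDict([("time", (100, 10)), ("member", (5, 1))])
indices = _n_nested_blocked_random_indices(sizes, n_iterations=1000)
print({k: v.shape for k, v in indices.items()})
# {'time': (100, 1000), 'member': (100, 5, 1000)}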
def check_if_folder_is_empty_true(mocker):
    """
    Test must NOT call 'os.mkdir'
    :param mocker:
    :return:
    """
    mock_os = Mock()
    mocker.patch('src.io.utils.os', mock_os)
    mock_os.listdir.return_value = []  # listdir lives on os, not os.path

    folder_path = 'bla/bla1/bla2/bla3'
    is_empty = check_if_folder_is_empty(folder_path=folder_path)

    is_empty_expected = True
    assert is_empty == is_empty_expected
    mock_os.mkdir.assert_not_called()  # the requirement stated in the docstring
5,357,982
def phase_type_from_parallel_erlang2(theta1, theta2, n1, n2): """Returns initial probabilities :math:`\\alpha` and generator matrix :math:`S` for a phase-type representation of two parallel Erlang channels with parametrisation :math:`(\\theta_1, n_1)` and :math:`(\\theta_2, n_2)` (rate and steps of Erlang channels). `Note`: To obtain a phase-type density pass the results of this method into the method `utils.phase_type_pdf`. `Note`: The two Erlang channels split at the first substep into each channel. The parametrisation implies the rate :math:`n\\cdot\\theta` on the individual exponentially-distributed substeps for the respective channel. Parameters ---------- theta1 : float Rate parameter of the first complete Erlang channel (inverse of the mean Erlang waiting time). theta2 : float Rate parameter of the second complete Erlang channel (inverse of the mean Erlang waiting time). n1 : int or float Number of steps of the first Erlang channel (shape parameter). n2 : int or float Number of steps of the second Erlang channel (shape parameter). Returns ------- alpha : 1d numpy.ndarray The initial probability vector of the phase-type distribution (with shape `(1,m)` where :math:`m=n_1+n_2-1`). S : 2d numpy.ndarray The transient generator matrix of the phase-type distribution (with shape `(m,m)` where :math:`m=n_1+n_2-1`). """ ### self-written, copied from env_PHdensity notebook ### butools can then be used to get density and network image with: ### 1) pdf = ph.PdfFromPH(a, A, x) ### 2) ph.ImageFromPH(a, A, 'display') # some checks for theta in (theta1, theta2): if not isinstance(theta, float): raise ValueError('Float expected for theta.') for n in (n1, n2): if isinstance(n, int): pass elif isinstance(n, float) and n.is_integer(): pass else: raise ValueError('Integer number expected for n.') if n<1: raise ValueError('Steps n expected to be 1 or more.') # preallocate initial probs and subgenerator matrix alpha = np.zeros((1, int(n1 + n2)-1)) S = np.zeros((int(n1 + n2)-1, int(n1 + n2)-1)) # first index sets source alpha[0, 0] = 1.0 # substep rates r1 = n1 * theta1 r2 = n2 * theta2 # outflux from source # (from competing channels) S[0, 0] = -(r1+r2) # fill matrix (first channel) l = [0] + list(range(1, int(n1))) for i, inext in zip(l[0:-1], l[1:]): S[i, inext] = r1 S[inext, inext] = -r1 # fill matrix (second channel) l = [0] + list(range(int(n1), int(n1+n2)-1)) for i, inext in zip(l[0:-1], l[1:]): S[i, inext] = r2 S[inext, inext] = -r2 return alpha, S
5,357,983
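# Self-contained check of the construction above (my sketch): instead of going
# through `utils.phase_type_pdf`, the density can be evaluated directly from
# the standard phase-type formula f(x) = alpha @ expm(S*x) @ s, with exit
# vector s = -S @ 1.
import numpy as np
from scipy.linalg import expm

alpha, S = phase_type_from_parallel_erlang2(0.5, 1.0, n1=3, n2=2)
s_exit = -S.sum(axis=1)  # exit rates into the absorbing state

for x in np.linspace(0.5, 5.0, 4):
    density = (alpha @ expm(S * x) @ s_exit).item()
    print(f"f({x:.2f}) = {density:.6f}")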
def get_regions(max_time_value): """ Partition R into a finite collection of one-dimensional regions depending on the appearing max time value. """ regions = [] bound = 2 * max_time_value + 1 for i in range(0, bound + 1): if i % 2 == 0: temp = i // 2 r = Constraint('[' + str(temp) + ',' + str(temp) + ']') regions.append(r) else: temp = (i - 1) // 2 if temp < max_time_value: r = Constraint('(' + str(temp) + ',' + str(temp + 1) + ')') regions.append(r) else: r = Constraint('(' + str(temp) + ',' + '+' + ')') regions.append(r) return regions
5,357,984
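# Illustration (assuming `Constraint` simply stores the interval string):
# for max_time_value = 1 the partition is [0,0], (0,1), [1,1], (1,+).
for region in get_regions(1):
    print(region)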
def test_input_wsp(): """ Test putting constructor attributes in a default sub workspaces """ wsp = Workspace(input_wsp="cakes", flapjack=4, fruit=3, defaults=[]) assert(wsp.cakes is not None) assert(wsp.cakes.flapjack == 4) assert(wsp.cakes.fruit == 3) assert(wsp.flapjack is None) assert(wsp.fruit is None)
5,357,985
def write_nb(nb_node, nb_filename): """Rewrites notebook.""" nbformat.write(nb_node, nb_filename)
5,357,986
def label_edges(g: nx.DiGraph) -> nx.DiGraph: """Label all the edges automatically. Args: g: the original directed graph. Raises: Exception: when some edge already has attribute "label_". Returns: The original directed graph with all edges labelled. """ g_labelled = nx.DiGraph(g) i = 1 for edge in g_labelled.edges.data(): if _ATTR_LABEL in edge[2]: raise Exception( f"The edge {edge[0]}-{edge[1]} already has the {_ATTR_LABEL} attribute." ) else: edge[2][_ATTR_LABEL] = f"e{i}" i += 1 return g_labelled
5,357,987
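# Usage sketch (assuming _ATTR_LABEL == "label_", as the docstring indicates):
import networkx as nx

g = nx.DiGraph([("a", "b"), ("b", "c"), ("a", "c")])
g_labelled = label_edges(g)
print(nx.get_edge_attributes(g_labelled, "label_"))
# e.g. {('a', 'b'): 'e1', ('a', 'c'): 'e2', ('b', 'c'): 'e3'}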
def report_charts(request, report, casetype='Call'):
    """Return charts for the last 4 days based on the Call Summary Data"""
    # The usual filters.
    query = request.GET.get('q', '')
    interval = request.GET.get('interval', 'daily')
    category = request.GET.get('category', '')
    if report == 'categorysummary':
        y_axis = 'category'
    elif report == 'dailysummary':
        y_axis = 'daily'
    else:
        y_axis = request.GET.get('y_axis', '')
    datetime_range = request.GET.get("datetime_range")
    agent = request.GET.get("agent")
    form = ReportFilterForm(request.GET)
    # Update the search url to chart based views.
    search_url = reverse('report_charts', kwargs={'report': report})
    # Convert date range string to datetime object
    e = None
    if datetime_range:
        try:
            a, b = [datetime_range.split(" - ")[0],
                    datetime_range.split(" - ")[1]]
            from_date = datetime.strptime(a, '%m/%d/%Y %I:%M %p')
            to_date = datetime.strptime(b, '%m/%d/%Y %I:%M %p')
            current = from_date
            delta = to_date - from_date
            date_list = []
            if interval == 'hourly':
                for i in range(int(delta.total_seconds()//3600)):
                    date_list.append(from_date + timedelta(seconds=i*3600))
            elif interval == 'monthly':
                while current <= to_date:
                    current += relativedelta(months=1)
                    date_list.append(current)
            elif interval == 'weekly':
                while current <= to_date:
                    current += relativedelta(weeks=1)
                    date_list.append(current)
            else:
                while current <= to_date:
                    current += relativedelta(days=1)
                    date_list.append(current)
            epoch_list = [date_item.strftime('%m/%d/%Y %I:%M %p')
                          for date_item in date_list]
            # Add filter to ajax query string.
        except Exception as exc:
            e = exc  # keep the error so it can be reported in the context
            from_date = None
            to_date = None
    else:
        from_date = None
        to_date = None
        # Start date
        base = datetime.today()
        date_list = [base - timedelta(days=x) for x in range(0, 3)]
        epoch_list = [date_item.strftime('%m/%d/%Y %I:%M %p')
                      for date_item in date_list]
        epoch_list.reverse()
    datetime_ranges = pairwise(epoch_list)
    callsummary_data = []
    total_calls = 0
    for datetime_range in datetime_ranges:
        # The datetime list comes back descending; we want ascending.
        datetime_range_string = " - ".join(datetime_range)
        if y_axis == 'category':
            categories = [i[0] for i in
                          Category.objects.values_list('hl_category').distinct()]
            for category in categories:
                report_data = report_factory(report='chartreport',
                                             datetime_range=datetime_range_string,
                                             agent=agent,
                                             query=query,
                                             category=category,
                                             casetype=casetype)
                # Append data to tables list.
                callsummary_data.append(report_data)
                total_calls = total_calls + report_data.get('total_offered').get('count')
        else:
            report_data = report_factory(report='chartreport',
                                         datetime_range=datetime_range_string,
                                         agent=agent,
                                         query=query,
                                         category=category,
                                         casetype=casetype)
            # Append data to tables list.
            callsummary_data.append(report_data)
            total_calls = total_calls + report_data.get('total_offered').get('count')
    # Multibar chart page.
    if y_axis != 'daily':
        summary_table = CallSummaryTable(callsummary_data)
    tooltip_date = "%d %b %Y %H:%M:%S %p"
    extra_serie = {"tooltip": {"y_start": "There are ", "y_end": " calls"},
                   "date_format": tooltip_date}
    if y_axis == 'category':
        categories = [i[0] for i in
                      Category.objects.values_list('hl_category').distinct()]
        chartdata = {
            'x': epoch_list,
        }
        for i in range(len(categories)):
            chartdata['name%s' % str(i+1)] = categories[i]
            category_related = []
            for data in callsummary_data:
                if data.get('category') == categories[i]:
                    category_related.append(data)
            chartdata['y%s' % str(i+1)] = [d.get('total_offered').get('count')
                                           for d in category_related]
            chartdata['extra%s' % str(i+1)] = extra_serie
    elif y_axis == 'daily':
        daysummary_data = []
        month_names = []
        day_names = list(calendar.day_name)
        chartdata = {}
        day_related = {}
        for day_name in day_names:
            day_related[day_name] = []
        for i in range(len(day_names)):
            day_summary = {}
            chartdata['name%s' % str(i+1)] = day_names[i]
            day_total_offered = 0
            month_name = 'None'
            for data in callsummary_data:
                if data.get('day') == day_names[i]:
                    day_related[day_names[i]].append(data)
                    day_total_offered = day_total_offered + data.get('total_offered').get('count')
                    day_related[day_names[i]][-1]['day_total_offered'] = day_total_offered
                    month_name = data.get('month')
            day_summary['month'] = month_name
            month_names.append(month_name)
            day_summary['%s' % (day_names[i].lower())] = day_total_offered
            chartdata['y%s' % str(i+1)] = [d.get('day_total_offered')
                                           for d in day_related[day_names[i]]]
            chartdata['extra%s' % str(i+1)] = extra_serie
            chartdata['x'] = month_names
            daysummary_data.append(day_summary)
    else:
        ydata = [d.get('total_offered').get('count') for d in callsummary_data]
        ydata2 = [d.get('total_answered') for d in callsummary_data]
        ydata3 = [d.get('total_abandoned') for d in callsummary_data]
        chartdata = {
            'x': epoch_list,
            'name1': 'Total Offered', 'y1': ydata, 'extra1': extra_serie,
            'name2': 'Total Answered', 'y2': ydata2, 'extra2': extra_serie,
            'name3': 'Total Abandoned', 'y3': ydata3, 'extra3': extra_serie,
        }
    charttype = "multiBarChart"
    chartcontainer = 'multibarchart_container'  # container name
    if y_axis == 'daily':
        summary_table = DaySummaryTable(daysummary_data)
    export_format = request.GET.get('_export', None)
    if TableExport.is_valid_format(export_format):
        exporter = TableExport(export_format, summary_table)
        return exporter.response('table.{}'.format(export_format))
    data = {
        'title': 'callsummary',
        'form': form,
        'summary_table': summary_table,
        'datetime_ranges_number': len(datetime_ranges),
        'error': e,
        'y_axis': y_axis,
        'search_url': search_url,
        'total_calls': total_calls,
        'charttype': charttype,
        'casetype': casetype,
        'chartdata': chartdata,
        'chartcontainer': chartcontainer,
        'extra': {
            'name': 'Call data',
            'x_is_date': False,
            'x_axis_format': '',
            'tag_script_js': True,
            'jquery_on_ready': True,
        },
    }
    if report == 'ajax':
        return render(request, 'helpline/report_charts_factory.html', data)
    else:
        return render(request, 'helpline/report_charts.html', data)
5,357,988
def test_nslookup(): """ Test if it query DNS for information about a domain or ip address """ ret = ( "Server: ct-dc-3-2.cybage.com\n" "Address: 172.27.172.12\n" "Non-authoritative answer:\n" "Name: google.com\n" "Addresses: 2404:6800:4007:806::200e\n" "216.58.196.110\n" ) mock = MagicMock(return_value=ret) with patch.dict(win_network.__salt__, {"cmd.run": mock}): assert win_network.nslookup("google.com") == [ {"Server": "ct-dc-3-2.cybage.com"}, {"Address": "172.27.172.12"}, {"Name": "google.com"}, {"Addresses": ["2404:6800:4007:806::200e", "216.58.196.110"]}, ]
5,357,989
def print_table(my_dict: list, col_list: list = None):
    """ Pretty print a list of dictionaries as a dynamically sized table.
    :param my_dict: The list of dictionaries to print
    :type my_dict: list
    :param col_list: The list of columns to include that correspond to keys in the dictionaries
    :type col_list: list
    """
    # Derived from https://stackoverflow.com/a/40389411/314051
    # Author: Thierry Husson - Use it as you want but don't blame me.
    if not col_list:
        col_list = list(my_dict[0].keys() if my_dict else [])
    my_list = [col_list]  # 1st row = header
    for item in my_dict:
        my_list.append([str(item[col] if item[col] is not None else '') for col in col_list])
    col_size = [max(map(len, col)) for col in zip(*my_list)]
    format_str = ' | '.join(["{{:<{}}}".format(i) for i in col_size])
    my_list.insert(1, ['-' * i for i in col_size])  # Separating line
    for item in my_list:
        print(format_str.format(*item))
5,357,990
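# Usage example -- column widths adapt to the longest cell (output approximate):
rows = [
    {"name": "alpha", "count": 3},
    {"name": "beta", "count": None},
]
print_table(rows, col_list=["name", "count"])
# name  | count
# ----- | -----
# alpha | 3
# beta  |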
def test_backend_write_digital_state() -> None: """Test that we can write the digital state of a pin.""" backend = SBArduinoHardwareBackend("COM0", SBArduinoSerial) serial = cast(SBArduinoSerial, backend._serial) serial.check_data_sent_by_constructor() # This should put the pin into the most recent (or default) output state. backend.set_gpio_pin_mode(2, GPIOPinMode.DIGITAL_OUTPUT) serial.check_sent_data(b"W 2 L\n") backend.write_gpio_pin_digital_state(2, True) serial.check_sent_data(b"W 2 H\n") backend.write_gpio_pin_digital_state(2, False) serial.check_sent_data(b"W 2 L\n") serial.check_all_received_data_consumed()
5,357,991
def _get_optimizer(learning_rate: float, gradient_clip_norm: float): """Gets model optimizer.""" kwargs = {'clipnorm': gradient_clip_norm} if gradient_clip_norm > 0 else {} return tf.keras.optimizers.Adagrad(learning_rate, **kwargs)
5,357,992
def package_context(target, action='install'): """ A context for installing the build dependencies for a given target (or targets). Uses apt. Removes the dependencies when the context is exited. One may prevent the removal of some or all packages by modifying the list within the context. """ target = ' '.join(always_iterable(target)) status = sudo('apt {action} -q -y {target}'.format(**vars())) packages = jaraco.apt.parse_new_packages(status) try: yield packages finally: remove_packages(packages)
5,357,993
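# Usage sketch (hypothetical package and build step; relies on fabric's `sudo`
# and jaraco.apt exactly as the context manager above does):
with package_context('libxml2-dev') as packages:
    build_extension()  # hypothetical step that needs the -dev headers
    # To keep the packages installed after the context exits:
    # packages.clear()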
def is_my_message(msg):
    """
    Check which bot a message is addressed to, so that we do not react
    to commands meant for other bots.

    :param msg: The message object to check.
    """
    text = msg.text.split()[0].split("@")
    if len(text) > 1:
        if text[1] != config.bot_name:
            return False
    return True
5,357,994
def execute_search_query(client: Client, query: Any, data_range: str) -> Dict[str, Any]:
    """Execute a search job and wait for its results

    :type client: ``Client``
    :param client: Http client

    :type query: ``Any``
    :param query: Search query

    :type data_range: ``str``
    :param data_range: HTTP URL query for getting a range of data

    :return: Search result
    :rtype: ``Dict[str, Any]``
    """
    response = client.varonis_execute_search(query)
    location = get_search_result_path(response)
    search_result = client.varonis_get_search_result(location, data_range, SEARCH_RESULT_RETRIES)
    return search_result
5,357,995
def static(directory: str) -> WSGIApp: """Return a WSGI app that serves static files under the given directory. Powered by WhiteNoise. """ app = WhiteNoise(empty_wsgi_app()) if exists(directory): app.add_files(directory) return app
5,357,996
def check_filter(id):
    """ Helper function returning the weight for a crime id: the stored
    importance times 30 if the id is in ``important_crime``, else the
    default weight of 30 """
    if id not in important_crime:
        return 30
    else:
        return important_crime[id] * 30
5,357,997
def test_notebook_basics_lesson2(): """ Regression test data from notebook example. Data created on 5/30/2015 with np.savez('data_notebook_basics_lesson2.npz',t_resamp=t_resamp,hp_resamp=hp_resamp,hc_resamp=hc_resamp) After gwtools changes to constants (8/24/2018), _v2 of this data was created""" #t, hp, hc = EOBNRv2_sur(q=1.2) t_resamp, hp_resamp, hc_resamp = \ EOBNRv2_sur(1.2,times=np.linspace(EOBNRv2_sur.tmin-1000,EOBNRv2_sur.tmax+1000,num=3000)) # load regression data reg_data = np.load('test/gws_regression_data_v2/data_notebook_basics_lesson2_v2.npz') np.testing.assert_allclose(t_resamp,reg_data['t_resamp'], rtol=rtol, atol=atol) np.testing.assert_allclose(hp_resamp,reg_data['hp_resamp'], rtol=rtol, atol=atol) np.testing.assert_allclose(hc_resamp,reg_data['hc_resamp'], rtol=rtol, atol=atol)
5,357,998
def get_attendees_from_order(transaction): """ GET /v1/orders/{identifier}/attendees :param transaction: :return: """ with stash['app'].app_context(): order = OrderFactory() order.identifier = "7201904e" db.session.add(order) db.session.commit()
5,357,999