content: string (35–416k chars)
sha1: string (40 chars)
id: int64 (0–710k)
def test_bitwise_and(a, b): """ >>> test_bitwise_and(0b01, 0b10) 0L >>> test_bitwise_and(0b01, 0b11) 1L >>> test_bitwise_and(0b01, 2.0) Traceback (most recent call last): ... NumbaError: 27:15: Expected an int, or object, or bool >>> test_bitwise_and(2.0, 0b01) Traceback (most recent call last): ... NumbaError: 27:11: Expected an int, or object, or bool """ return a & b
0855921300751368eb0ad3f3cba37b6ddac759fd
4,494
def offer_in_influencing_offers(offerId, influencing_offers):
    """
    Find if a passed offerId is in the influencing_offers list

    Parameters
    ----------
    offerId : Offer Id from portfolio dataframe.
    influencing_offers : List of offers found for a customer

    Returns
    -------
    1 if offer is found, 0 if not found
    """
    if offerId in influencing_offers:
        return 1
    else:
        return 0
81c4a8bcb7432222a1fc5175449192681002539c
4,496
def compara_dv(cpf, primeiro_dv, segundo_dv):
    """Validate that the computed check digits match the ones given in the CPF."""
    return "válido" if primeiro_dv == int(cpf[9]) and segundo_dv == int(cpf[10]) else "inválido"
4b1794f466ce8c00e91c8c5f281996ea262591f8
4,497
def write_file(file_name, data, line_length):
    """
    Writes the results to a text file using a name based on file_name
    input: string, list, int
    returns: int
    """
    pos = file_name.rfind('.')
    fn_o = file_name[:pos] + '.OUT' + file_name[pos:]
    f = open(fn_o, "w")
    for fsn, sequence in data:
        f.write(fsn + '\n')
        # a line_length of 0 means "write the whole sequence on one line"
        l_length = len(sequence) if line_length == 0 else line_length
        for p in range(0, len(sequence), l_length):
            f.write(sequence[p:p + l_length] + '\n')
    f.close()
    return len(data)
0ad1b25106a6c9120289e8d55caafbebf475f9d5
4,498
def poisson_moment(k, n):
    """
    returns the moment of x**n with expectation value k

    CURRENTLY A SET OF HARD CODED EXPRESSIONS! VERY FRAGILE!
    --> would be *much* better if we could do this algorithmically
    """
    if n == 0:
        return 1
    elif n == 1:
        return k
    elif n == 2:
        return k**2 + k
    elif n == 3:
        return k**3 + 3*k**2 + k
    elif n == 4:
        return k**4 + 6*k**3 + 7*k**2 + k
    elif n == 5:
        return k**5 + 10*k**4 + 25*k**3 + 15*k**2 + k
    elif n == 6:
        return k**6 + 15*k**5 + 65*k**4 + 90*k**3 + 31*k**2 + k
    elif n == 7:
        return k**7 + 21*k**6 + 140*k**5 + 350*k**4 + 301*k**3 + 63*k**2 + k
    elif n == 8:
        # coefficients are Stirling numbers of the second kind S(8, j)
        return k**8 + 28*k**7 + 266*k**6 + 1050*k**5 + 1701*k**4 + 966*k**3 + 127*k**2 + k
    else:
        raise NotImplementedError('currently only support n<=8')
d2af07d550b0cf6ac9a410296b4ec12c78cc1505
4,499
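The docstring above asks for an algorithmic version. A minimal sketch, assuming the standard fact that the n-th raw moment of a Poisson(k) variable is sum_j S(n, j) * k**j, where S(n, j) are Stirling numbers of the second kind (the name poisson_moment_general is hypothetical):

def poisson_moment_general(k, n):
    # Build S(n, j) with the recurrence S(n, j) = j*S(n-1, j) + S(n-1, j-1).
    stirling = [[0] * (n + 1) for _ in range(n + 1)]
    stirling[0][0] = 1
    for i in range(1, n + 1):
        for j in range(1, i + 1):
            stirling[i][j] = j * stirling[i - 1][j] + stirling[i - 1][j - 1]
    return sum(stirling[n][j] * k**j for j in range(n + 1))

For n <= 8 this reproduces the hard-coded polynomials above, e.g. poisson_moment_general(k, 2) == k**2 + k.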
import calendar


def generate_days(year):
    """Generates all tuples (YYYY, MM, DD) of days in a year"""
    cal = calendar.Calendar()
    days = []
    for m in range(1, 13):
        days.extend(list(cal.itermonthdays3(year, m)))
    days = [d for d in set(days) if d[0] == year]
    days.sort()
    return days
6d87910572957d21c9d5df668dfb5f2d02627817
4,501
import asyncio


async def start(actual_coroutine):
    """
    Start the testing coroutine and wait 2 seconds for it to complete.

    :raises asyncio.CancelledError when the coroutine fails to finish its
        work in 2 seconds.
    :returns: the return value of the actual_coroutine.
    :rtype: Any
    """
    try:
        return await asyncio.wait_for(actual_coroutine, 2)
    except asyncio.CancelledError:
        pass
26e3737091ca798dbf8c0f6f2a18a1de4b0ec42b
4,502
def generate_input_types():
    """
    Define the different input types that are used in the factory
    :return: list of items
    """
    input_types = ["Angle_irons", "Tubes", "Channels", "Mig_wire", "Argon_gas",
                   "Galvanised_sheets", "Budget_locks", "Welding_rods",
                   "Body_filler", "Grinding_discs", "Drill_bits", "Primer",
                   "Paints", "Thinner", "Sand_paper", "Masking_tapes", "Carpet",
                   "Pop_rivets", "Electrical_wires", "Bulbs", "Switch",
                   "Insulation_tapes", "Fasteners", "Adhesives", "Reflectors",
                   "Accessories", "Rubbers", "Aluminum_mouldings", "Glasses",
                   "Window_locks"]
    return input_types
d9e10624daaf5dae92f15512c9b19c47af002139
4,503
import os


def get_project_path_info():
    """Get the project paths.

    project_path is the root directory of the whole git project;
    poseidon_path is the directory named "poseidon" inside the git project.
    """
    _poseidon_path = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
    _project_path = os.path.dirname(_poseidon_path)
    return {"project_path": _project_path, "poseidon_path": _poseidon_path}
758748df0befedc46ae913c0b9193d3ddb175d95
4,504
def prepare_data(song: dict) -> dict:
    """
    Prepares song data for database insertion to cut down on duplicates
    :param song: Song data
    :return: The song data
    """
    song['artist'] = song['artist'].upper().strip()
    song['title'] = song['title'].upper().strip()
    return song
f8f8c9a3a0fe510cb3fb2e7d6d5bd361721337e7
4,505
import networkx
import torch


def generate_erdos_renyi_netx(p, N):
    """ Generate random Erdos Renyi graph """
    g = networkx.erdos_renyi_graph(N, p)
    W = networkx.adjacency_matrix(g).todense()
    return g, torch.as_tensor(W, dtype=torch.float)
fbb8e293a1b35958301c2e376a03c30012b0c33b
4,506
from typing import Union
from pathlib import Path

import yaml


def load_cfg(cfg_file: Union[str, Path]) -> dict:
    """Load the PCC algs config file in YAML format with custom tag !join.

    Parameters
    ----------
    cfg_file : `Union[str, Path]`
        The YAML config file.

    Returns
    -------
    `dict`
        A dictionary object loaded from the YAML config file.
    """
    # [ref.] https://stackoverflow.com/a/23212524

    ## define custom tag handler
    def join(loader, node):
        seq = loader.construct_sequence(node)
        return ''.join([str(i) for i in seq])

    ## register the tag handler
    yaml.add_constructor('!join', join)

    with open(cfg_file, 'r') as f:
        cfg = yaml.load(f, Loader=yaml.FullLoader)

    return cfg
c9137c5052adf8fa62913c352df2bfe9e79fc7ce
4,507
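A minimal usage sketch for load_cfg above; the file path and keys are made up. With a config such as

    root: &root /data/pcc
    out_dir: !join [*root, /results]

the custom !join tag concatenates the sequence into one string:

    cfg = load_cfg('configs/example.yaml')  # hypothetical path
    cfg['out_dir']  # -> '/data/pcc/results'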
def get_model_defaults(cls):
    """
    This function receives a model class and returns the default values
    for the class in the form of a dict.

    If the default value is a function, the function will be executed. This is
    meant for simple functions such as datetime and uuid.

    Args:
        cls: (obj) : A Model class.

    Returns:
        defaults: (dict) : A dictionary of the default values.
    """
    tmp = {}
    for key in cls.__dict__.keys():
        col = cls.__dict__[key]
        if hasattr(col, "expression"):
            if col.expression.default is not None:
                arg = col.expression.default.arg
                if callable(arg):
                    tmp[key] = arg(cls.db)
                else:
                    tmp[key] = arg
    return tmp
93c29af27446c558b165159cee4bb41bbb3cad4d
4,508
def create_property_map(cls, property_map=None):
    """ Helper function for creating property maps """
    _property_map = None
    if property_map:
        if callable(property_map):
            _property_map = property_map(cls)
        else:
            _property_map = property_map.copy()
    else:
        _property_map = {}
    return _property_map
b67d0fdcd75c592f3443993f2948a2686e22322d
4,510
def read_k_bytes(sock, remaining=0):
    """
    Read exactly `remaining` bytes from the socket.
    Blocks until the required bytes are available and
    returns the data read as raw bytes.

    Arguments
    ---------
    sock : Socket to inspect
    remaining : Number of bytes to read from socket.
    """
    ret = b""  # Return byte buffer
    while remaining > 0:
        d = sock.recv(remaining)
        if not d:
            # the peer closed the connection; without this guard the loop
            # would spin forever on empty reads
            raise ConnectionError(
                "socket closed with %d bytes still expected" % remaining)
        ret += d
        remaining -= len(d)
    return ret
3d75eaa43b84ac99ac37b4b1a048f1a6615901b1
4,511
def total_minutes(data):
    """
    Compute the total number of minutes over the submitted talks.
    """
    soma = 0
    for item in data.keys():
        soma += item * len(data[item])
    return soma
c85f6ac0a1d58b67d1e53ae5ff87b8762e3d050c
4,512
def rowcount_fetcher(cursor):
    """ Return the rowcount returned by the cursor. """
    return cursor.rowcount
21b30665391aa16d158083ccb37149bd6ec0f548
4,513
def xsthrow_format(formula):
    """formats the string to follow the xstool_throw convention for toy vars"""
    return (formula.
            replace('accum_level[0]', 'accum_level[xstool_throw]').
            replace('selmu_mom[0]', 'selmu_mom[xstool_throw]').
            replace('selmu_theta[0]', 'selmu_theta[xstool_throw]'))
b36183df77e681b967ce48a9164fe37861ffd11c
4,515
def getParInfo(sourceOp, pattern='*', names=None,
               includeCustom=True, includeNonCustom=True):
    """
    Returns parInfo dict for sourceOp. Filtered in the following order:
    pattern is a pattern match string
    names can be a list of names to include, default None includes all
    includeCustom to include custom parameters
    includeNonCustom to include non-custom parameters
    parInfo is {<parName>:(par.val, par.expr, par.mode string, par.bindExpr,
    par.default)...}
    """
    parInfo = {}
    for p in sourceOp.pars(pattern):
        if (names is None or p.name in names) and \
                ((p.isCustom and includeCustom) or
                 (not p.isCustom and includeNonCustom)):
            parInfo[p.name] = [p.val, p.expr if p.expr else '', p.mode.name,
                               p.bindExpr, p.default]
    return parInfo
01eafb065ef98e1fd4676898aeb8d0c5a7a74b9d
4,516
def _landstat(landscape, updated_model, in_coords):
    """
    Compute the statistic for transforming coordinates onto an existing
    "landscape" of "mountains" representing source positions. Since the
    landscape is an array and therefore pixellated, the precision is limited.

    Parameters
    ----------
    landscape: nD array
        synthetic image representing locations of sources in reference plane
    updated_model: Model
        transformation (input -> reference) being investigated
    in_coords: nD array
        input coordinates

    Returns
    -------
    float:
        statistic representing quality of fit to be minimized
    """
    def _element_if_in_bounds(arr, index):
        try:
            return arr[index]
        except IndexError:
            return 0

    out_coords = updated_model(*in_coords)
    if len(in_coords) == 1:
        out_coords = (out_coords,)
    out_coords2 = tuple((coords - 0.5).astype(int) for coords in out_coords)
    result = sum(_element_if_in_bounds(landscape, coord[::-1])
                 for coord in zip(*out_coords2))
    ################################################################################
    # This stuff replaces the above 3 lines if speed doesn't hold up
    #    sum = np.sum(landscape[i] for i in out_coords if i>=0 and i<len(landscape))
    # elif len(in_coords) == 2:
    #    xt, yt = out_coords
    #    sum = np.sum(landscape[iy,ix] for ix,iy in zip((xt-0.5).astype(int),
    #                                                   (yt-0.5).astype(int))
    #                 if ix>=0 and iy>=0 and ix<landscape.shape[1]
    #                 and iy<landscape.shape[0])
    ################################################################################
    return -result
0205654ef8580a0d6731155d7d0c2b2c1a360e9c
4,517
def presence(label):
    """Higher-order function to test presence of a given label"""
    return lambda x, y: 1.0 * ((label in x) == (label in y))
49c7e0b4b7af69c808917af7ab4d6b56a7a4ef89
4,518
def select(population, to_retain):
    """Go through all of the warriors and check which ones are best fit to
    breed and move on."""
    # Sort the population, then split it in half using floor division so the
    # midpoint is always a whole number. Because the list is sorted, the lower
    # half (treated as females) holds the weaker members and the upper half
    # (treated as males) the stronger ones; the strongest members of each half
    # are kept for breeding and returned.
    sorted_pop = sorted(population)
    to_keep_by_sex = to_retain // 2
    members = len(sorted_pop) // 2
    females = sorted_pop[:members]
    males = sorted_pop[members:]
    strong_females = females[-to_keep_by_sex:]
    strong_males = males[-to_keep_by_sex:]
    return strong_males, strong_females
4dc1251f09e6bd976d170017bbd328563e9ef786
4,519
def remove_duplicates(l):
    """
    Remove any duplicates from the original list.
    Return a list without duplicates.
    """
    new_l = l[:]
    tmp_l = new_l[:]
    for e in l:
        tmp_l.remove(e)
        if e in tmp_l:
            new_l.remove(e)
    return new_l
81132e3b23592589c19ddb11f661e80be6984782
4,520
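A quick check of remove_duplicates above, with a made-up input: because earlier copies are removed, it is the last occurrence of each duplicated element that survives.

    remove_duplicates([1, 2, 1, 3, 2])  # -> [1, 3, 2]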
import re


def ResolveWikiLinks(html):
    """Given an html file, convert [[WikiLinks]] into links to the personal
    wiki: <a href="https://z3.ca/WikiLinks">WikiLinks</a>"""
    wikilink = re.compile(r'\[\[(?:[^|\]]*\|)?([^\]]+)\]\]')

    def linkify(match):
        wiki_root = 'https://z3.ca'
        wiki_name = match.group(1).replace('\n', ' ')
        wiki_slug = wiki_name.replace(' ', '_')
        return f'<a class="wiki" href="{wiki_root}/{wiki_slug}">{wiki_name}</a>'

    return wikilink.sub(linkify, html)
bef3e309aa2489e720a1742e327e9dd4edf6d720
4,523
import sys


def getArg(flag):
    """Return the argument following a given command-line flag."""
    try:
        a = sys.argv[sys.argv.index(flag) + 1]
    except (ValueError, IndexError):  # flag absent, or no value after it
        return ""
    else:
        return a
7400d0f449334350910bc5926b5fbf5333d3ea10
4,524
def get_band_params(meta, fmt='presto'):
    """
    Returns (fmin, fmax, nchans) given a metadata dictionary loaded from
    a specific file format.
    """
    if fmt == 'presto':
        fbot = meta['fbot']
        nchans = meta['nchan']
        ftop = fbot + nchans * meta['cbw']
        fmin = min(fbot, ftop)
        fmax = max(fbot, ftop)
    elif fmt == 'sigproc':
        raise ValueError(
            "Cannot parse observing band parameters from data in sigproc format")
    else:
        raise ValueError(f"Unknown format: {fmt}")
    return fmin, fmax, nchans
61e9b0781559de431e5189b89f69a0763b039d8f
4,525
import functools


def logging(f):
    """Decorate a function to log its calls."""
    @functools.wraps(f)
    def decorated(*args, **kwargs):
        sargs = map(str, args)
        skwargs = (f'{key}={value}' for key, value in kwargs.items())
        print(f'{f.__name__}({", ".join([*sargs, *skwargs])})...')
        try:
            value = f(*args, **kwargs)
        except Exception as cause:
            print(f'! {cause}')
            raise
        print(f'=> {value}')
        return value
    return decorated
25822434fe331c59ce64b6f9cd5ec89b70b2542a
4,526
import math


def calculate_distance(p1, p2):
    """
    Calculate distance between two points
    param p1: tuple (x,y) point1
    param p2: tuple (x,y) point2
    return: distance between two points
    """
    x1, y1 = p1
    x2, y2 = p2
    d = math.sqrt(pow(x2 - x1, 2) + pow(y2 - y1, 2))
    return d
756b609a91e17299eb879e27e83cd663800e46dd
4,528
from textwrap import dedent


def package_load_instructions(inst_distributions):
    """Load instructions, displayed in the package notes"""
    per_package_inst = ''

    for dist in inst_distributions:
        if dist.type == 'zip':
            per_package_inst += dedent(
                """
                # Loading the ZIP Package

                Zip packages are compressed, so large resources may load faster.

                    import metapack as mp
                    pkg = mp.open_package('{url}')
                """.format(url=dist.package_url.inner))
        elif dist.type == 'csv':
            per_package_inst += dedent(
                """
                # Loading the CSV Package

                CSV packages load resources individually, so small resources may load faster.

                    import metapack as mp
                    pkg = mp.open_package('{url}')
                """.format(url=dist.package_url.inner))

    if per_package_inst:
        return '\n---\n' + per_package_inst
    else:
        return ''
321a7486f27a3cb327ae7556e317bc53c24726ac
4,529
def deindented_source(src):
    """De-indent source if all lines indented.

    This is necessary before parsing with ast.parse to avoid "unexpected
    indent" syntax errors if the function is not module-scope in its original
    implementation (e.g., staticmethods encapsulated in classes).

    Parameters
    ----------
    src : str
        input source

    Returns
    -------
    str : de-indented source; the first character of at least one line is
        non-whitespace, and all other lines are deindented by the same
    """
    lines = src.splitlines()
    n_chars = float("inf")
    for line in lines:
        len_line = len(line)
        idx = 0
        # we're Python 3, so we assume you're not mixing tabs and spaces
        while idx < n_chars and idx < len_line and line[idx] in [" ", '\t']:
            idx += 1
        if len_line > idx:
            n_chars = min(idx, n_chars)
    lines = [line[n_chars:] for line in lines]
    src = "\n".join(lines)
    return src
227d5e8e35b251f02ce5e9237f8120d2dd9c7e4b
4,530
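A minimal usage sketch for deindented_source above, with a made-up snippet, showing the ast.parse use case from the docstring:

    import ast

    src = "    def f(x):\n        return x + 1"
    ast.parse(deindented_source(src))  # parses fine; ast.parse(src) would raise IndentationError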
from pathlib import Path


def maybe_start_with_home_prefix(p: Path) -> Path:
    """
    If the input path starts with the home directory path string, then return
    a path that starts with the home directory and points to the same
    location. Otherwise, return the path unchanged.
    """
    try:
        return Path("~", p.relative_to(Path.home()))
    except ValueError:
        return p
6ee4e49e8dfb9bc68a1c10f5ea792715fb5d5336
4,531
import requests
from datetime import datetime


def get_time_string(place: str = "Europe/Moscow"):
    """
    Get time data from worldtimeapi.org and return simple string

    Parameters
    ----------
    place : str
        Location, i.e. 'Europe/Moscow'.

    Returns
    -------
    string
        Time in format '%Y-%m-%d %H:%M:%S'

    Examples
    --------
    >>> get_time_string()
    2021-08-16 16:03:34
    """
    url = "http://worldtimeapi.org/api/timezone/" + place
    data = requests.get(url).json()
    date = datetime.fromisoformat(data["datetime"])
    string = date.strftime("%Y-%m-%d %H:%M:%S")
    return string
f15ef5a843317c55d3c60bf2ee8c029258e1cd78
4,533
def add_suffix(input_dict, suffix):
    """Add suffix to dict keys."""
    return dict((k + suffix, v) for k, v in input_dict.items())
7dbedd523d24bfdf194c999b8927a27b110aad3e
4,536
import json
from collections import OrderedDict


def build_list_of_dicts(val):
    """
    Converts a value that can be presented as a list of dict.

    In case top level item is not a list, it is wrapped with a list

    Valid values examples:
    - Valid dict: {"k": "v", "k2": "v2"}
    - List of dict: [{"k": "v"}, {"k2": "v2"}]
    - JSON decodable string: '{"k": "v"}', or '[{"k": "v"}]'
    - List of JSON decodable strings: ['{"k": "v"}', '{"k2": "v2"}']

    Invalid values examples:
    - ["not", "a", "dict"]
    - [123, None],
    - [["another", "list"]]

    :param val: Input value
    :type val: Union[list, dict, str]
    :return: Converted (or original) list of dict
    :raises: ValueError in case value cannot be converted to a list of dict
    """
    if val is None:
        return []
    if isinstance(val, str):
        # use OrderedDict to preserve order
        val = json.loads(val, object_pairs_hook=OrderedDict)
    if isinstance(val, dict):
        val = [val]
    for index, item in enumerate(val):
        if isinstance(item, str):
            # use OrderedDict to preserve order
            val[index] = json.loads(item, object_pairs_hook=OrderedDict)
        if not isinstance(val[index], dict):
            raise ValueError("Expected a list of dicts")
    return val
dfd92f619ff1ec3ca5cab737c74af45c86a263e0
4,537
def arg(prevs, newarg):
    """ Joins arguments to list """
    retval = prevs
    if not isinstance(retval, list):
        retval = [retval]
    return retval + [newarg]
8d591595add095542ad697b4bd54642a4a14a17c
4,538
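A hedged usage sketch for arg above: it folds values into a flat list, so it can serve as a reducer (the inputs are made up):

    from functools import reduce

    arg('a', 'b')                          # -> ['a', 'b']
    reduce(arg, ['-v', '-o', 'out.txt'])   # -> ['-v', '-o', 'out.txt']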
def hasTable(cur, table):
    """checks to make sure this sql database has a specific table"""
    # Parameterize the table name so the query matches the requested table
    cur.execute("SELECT name FROM sqlite_master WHERE type='table' AND name=?",
                (table,))
    rows = cur.fetchall()
    # fetchall() returns rows as tuples, so compare against the first column
    return any(row[0] == table for row in rows)
dfdb3db0901832330083da8b645ae90e28cfb26d
4,540
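A minimal sketch of hasTable above against an in-memory SQLite database (the table names are made up):

    import sqlite3

    cur = sqlite3.connect(':memory:').cursor()
    cur.execute('CREATE TABLE users (id INTEGER)')
    hasTable(cur, 'users')   # -> True
    hasTable(cur, 'orders')  # -> False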
import socket


def getipbyhost(hostname):
    """ return the IP address for a hostname """
    return socket.gethostbyname(hostname)
9556f537e16fd710a566a96a51d4262335967893
4,542
import sys


def reset_syspath():
    """
    Return a function to remove given path from sys.path.

    This is to use at the end (after all assertions) of tests which use
    ``setup_project.setup_project`` to add base directory to sys.path and
    avoid clash with next tests doing the same.
    """
    def reset_func(path):
        if path in sys.path:
            del sys.path[sys.path.index(path)]
    return reset_func
b169c51bb86d6e4e713309526ca402872d353766
4,543
import sys


def valid_capture_area(top_left, bottom_right):
    """Check the capture area extents for sanity.
    """
    tl_x = top_left['x']
    tl_y = top_left['y']
    br_x = bottom_right['x']
    br_y = bottom_right['y']
    if (br_x <= tl_x) or (br_y <= tl_y):
        print('The capture area ({},{}) ({},{}) '
              'is invalid.'.format(tl_x, tl_y, br_x, br_y),
              file=sys.stderr)
        return False
    print('Capture area: ({},{}) ({},{})'.format(tl_x, tl_y, br_x, br_y))
    return True
654cbb1123ed5536d0ae217d0d70e9eabd36d0b3
4,545
def IsMultiPanel(hcuts, vcuts) -> bool:
    """
    Check if the image is multi-panel or not.
    Could have more logic.
    """
    return bool(hcuts or vcuts)
fc62a31007445eac90b6f5ceb3a7c9c006dd2eef
4,546
def collapse_umi(cells):
    """
    Input set of genotypes for each read
    Return list with one entry for each UMI, per cell barcode
    """
    collapsed_data = {}
    for cell_barcode, umi_set in cells.items():
        for _, genotypes in umi_set.items():
            if len(set(genotypes)) > 1:
                pass
            else:
                try:
                    collapsed_data[cell_barcode]
                except KeyError:
                    collapsed_data[cell_barcode] = [genotypes[0]]
                else:
                    collapsed_data[cell_barcode].append(genotypes[0])
    # count total ref, total alt UMIs for each genotype
    for key, value in collapsed_data.items():
        collapsed_data[key] = [value.count("ref"), value.count("alt")]
        assert len(collapsed_data[key]) == 2
    return collapsed_data
e98b44193487691fb04e8e0f4ec25c3438175c65
4,548
def is_vertex_cover(G, vertex_cover):
    """Determines whether the given set of vertices is a vertex cover of graph G.

    A vertex cover is a set of vertices such that each edge of the graph
    is incident with at least one vertex in the set.

    Parameters
    ----------
    G : NetworkX graph
        The graph on which to check the vertex cover.

    vertex_cover :
        Iterable of nodes.

    Returns
    -------
    is_cover : bool
        True if the given iterable forms a vertex cover.

    Examples
    --------
    This example checks two covers for a graph, G, of a single Chimera
    unit cell. The first uses the set of the four horizontal qubits, which
    do constitute a cover; the second set removes one node.

    >>> import dwave_networkx as dnx
    >>> G = dnx.chimera_graph(1, 1, 4)
    >>> cover = [0, 1, 2, 3]
    >>> dnx.is_vertex_cover(G, cover)
    True
    >>> cover = [0, 1, 2]
    >>> dnx.is_vertex_cover(G, cover)
    False
    """
    cover = set(vertex_cover)
    return all(u in cover or v in cover for u, v in G.edges)
4213db1953ec976b1606c3756fa73ff0cae9f578
4,549
import os


def __get_out_file(in_file, out_dir):
    """
    Get the path of the output file.

    Parameters
    ----------
    in_file: str
        Path to input file.
    out_dir: str
        Path to output directory.

    Returns
    -------
    file_no_ext: str
        The file name without extension.
    out_dir: str
        The path to the output directory.
    out_file: str
        The path of the output file.
    """
    if out_dir == '':
        out_dir = os.path.dirname(in_file)
    file_no_ext = os.path.splitext(in_file)[0].split(os.sep)[-1]
    if out_dir == '':
        out_dir = '.'
    out_file = '/'.join([out_dir, file_no_ext])
    return file_no_ext, out_dir, out_file
e191d45becc7df330f359baf6e11d72d30343a4b
4,550
def get_embedding_tids(tids, mapping):
    """Obtain token IDs based on our own tokenization, through the mapping to
    BERT tokens."""
    mapped = []
    for t in tids:
        mapped += mapping[t]
    return mapped
a31c9b0cf5b791590d6e30d8238cf0eb6ae2272b
4,554
import requests


def extract_stream_url(ashx_url):
    """ Extract real stream url from tunein stream url """
    r = requests.get(ashx_url)
    for l in r.text.splitlines():
        if len(l) != 0:
            return l
679ca261510413f652d0953551b65db8e5c2a62e
4,555
def none_to_null(value):
    """ Returns the string "null" if the specified value is None, else returns the value """
    return "null" if value is None else value
394b1f9620cf69c862905171f4aec96838ffc631
4,556
def get_dsd_url():
    """Returns the remote URL to the global SDMX DSD for the SDGs."""
    return 'https://registry.sdmx.org/ws/public/sdmxapi/rest/datastructure/IAEG-SDGs/SDG/latest/?format=sdmx-2.1&detail=full&references=children'
996568a92825aa7a7bf1be1db8ac2cac0828360a
4,557
def cal_pivot(n_losses, network_block_num):
    """
    Calculate the inserted layer for additional loss
    """
    num_segments = n_losses + 1
    num_block_per_segment = (network_block_num // num_segments) + 1
    pivot_set = []
    for i in range(num_segments - 1):
        pivot_set.append(min(num_block_per_segment * (i + 1),
                             network_block_num - 1))
    return pivot_set
d23324fc39f2f1aeec807a4d65a51234a2b76cde
4,560
import numpy as np


def uniquePandasIndexMapping(inputColumn):
    """quickly maps the unique name entries back to input entries

    Keyword arguments:
    inputColumn -- a SINGLE column from a pandas dataframe, presumably with
    duplications. Will create a frequency table and a mapping back to the
    source entries.
    """
    inputColumn.sort_values(by=['company'], inplace=True)
    sortedInputColumn = inputColumn.reset_index()
    sortedInputColumn.rename(columns={"index": "userIndex"}, inplace=True)

    tableUniqueFullNameCounts = inputColumn.iloc[:, 0].value_counts()
    tableUniqueFullNameCounts = tableUniqueFullNameCounts.reset_index()
    tableUniqueFullNameCounts.rename(columns={"company": "count", "index": "company"}, inplace=True)
    tableUniqueFullNameCounts.sort_values(by=['company'], inplace=True)
    sortedTableUniqueFullNameCounts = tableUniqueFullNameCounts.reset_index()

    sortedTableUniqueFullNameCounts['inputIndexMapping'] = ''
    currentSum = 0
    for index, row in sortedTableUniqueFullNameCounts.iterrows():
        currentRange = np.arange(currentSum, currentSum + sortedTableUniqueFullNameCounts['count'].iloc[index])
        sortedTableUniqueFullNameCounts['inputIndexMapping'].iloc[index] = sortedInputColumn['userIndex'].iloc[currentRange].array
        currentSum = currentSum + sortedTableUniqueFullNameCounts['count'].iloc[index]

    return sortedInputColumn, sortedTableUniqueFullNameCounts
c26fce9b8617963737c4b8dd05c0e8429c92daa3
4,561
def is_text_serializer(serializer):
    """Checks whether a serializer generates text or binary."""
    return isinstance(serializer.dumps({}), str)
f08f40662da7fd34f5984028e601d664cac943df
4,562
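A quick check of is_text_serializer above: anything with a dumps method works, e.g. the stdlib modules.

    import json, pickle

    is_text_serializer(json)    # json.dumps({}) == '{}' (str)  -> True
    is_text_serializer(pickle)  # pickle.dumps({}) is bytes     -> False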
def Transition_rep(source_State_name, target_State_name):
    """Representation of a transition

    :param source_State_name: The sequence of "name" values of State objects
        referred to by attribute "source" in this Transition
    :type source_State_name: Array
    :param target_State_name: The sequence of "name" values of State objects
        referred to by attribute "target" in this Transition
    :type target_State_name: Array
    """
    return [f' {source_name}--{target_name}'
            for source_name, target_name in zip(source_State_name, target_State_name)]
2e5f7048722997e0931fd6ec3a2d9e880a160359
4,563
import glob
import os
import ast


def list_class_names(dir_path):
    """
    Return the mapping of class names in all files in dir_path to their file path.

    Args:
        dir_path (str): absolute path of the folder.

    Returns:
        dict: mapping from the class names in all python files in the folder
        to their file path.
    """
    py_files = glob.glob(os.path.join(dir_path, "*.py"))
    py_files = [f for f in py_files
                if os.path.isfile(f) and not f.endswith('__init__.py')]
    cls_name_to_path = dict()
    for py_file in py_files:
        with open(py_file) as f:
            node = ast.parse(f.read())
        classes_in_file = [n for n in node.body if isinstance(n, ast.ClassDef)]
        cls_names_in_file = [c.name for c in classes_in_file]
        for cls_name in cls_names_in_file:
            cls_name_to_path[cls_name] = py_file
    return cls_name_to_path
612f386330a494cfffcd4a7d2f296bf8020bae6f
4,564
def chunks(l, k):
    """
    Take a list, l, and create k sublists.
    """
    n = len(l)
    return [l[i * (n // k) + min(i, n % k):(i + 1) * (n // k) + min(i + 1, n % k)]
            for i in range(k)]
7cf0c39941ed8f358c576046154af6b3ee54b70a
4,566
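A quick check of chunks above with a made-up input: the remainder n % k is spread one item at a time over the leading sublists.

    chunks([1, 2, 3, 4, 5, 6, 7], 3)  # -> [[1, 2, 3], [4, 5], [6, 7]]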
import math


def floor(base):
    """Get the floor of a number"""
    return math.floor(float(base))
8b00ffccf30765f55ff024b35de364c617b4b20c
4,568
import random


def random_binary():
    """
    Test view for the `cached` view-caching decorator with a key set.
    :return:
    """
    return [random.randrange(0, 2) for i in range(500)]
3c30014d1222c136cb7d3d2fbe6e0d972decc776
4,569
def remove_from_end(string, text_to_remove):
    """
    Remove a String from the end of a string if it exists

    Args:
        string (str): string to edit
        text_to_remove (str): the text to remove

    Returns:
        the string with the text removed
    """
    if string is not None and string.endswith(text_to_remove):
        return string[:-len(text_to_remove)]
    return string
19cebd002fcf5aea5290a6998129427363342319
4,570
def _variable_map_by_name(variables):
    """
    Returns Dict, representing referenced variable fields mapped by name.

    Keyword Parameters:
    variables -- list of 'variable_python_type' Warehouse support DTOs

    >>> from pprint import pprint
    >>> var1 = { 'column':'frob_hz', 'title':'Frobniz Resonance (Hz)'
    ...         ,'python_type': 'float'
    ...         ,'table': 'foo_fact'}
    >>> list1 = [var1]
    >>> pprint(_variable_map_by_name(list1))
    {'frob_hz': {'column': 'frob_hz',
                 'python_type': 'float',
                 'table': 'foo_fact',
                 'title': 'Frobniz Resonance (Hz)'}}
    """
    variable_by_field = {}
    for var in variables:
        field_name = var['column']
        variable_by_field[field_name] = var
    return variable_by_field
91c27ceb84614313d036ec216ef4c4d567a68255
4,572
class ObjectNotFoundError(NDARError):
    """S3 object not found"""

    def __init__(self, object):
        self.object = object

    def __str__(self):
        return 'Object not found: %s' % self.object
3cc552f7074f8117ed18fd975bc5ac0b09f8016a
4,573
def read_length(file_obj):  # pragma: no cover
    """
    Numpy trick to get a 32-bit length from four bytes

    Equivalent to struct.unpack('<i'), but suitable for numba-jit
    """
    sub = file_obj.read(4)
    return sub[0] + sub[1]*256 + sub[2]*256*256 + sub[3]*256*256*256
82c311c3a8e2d2e277979c19aaae665b0227f9cd
4,574
from typing import List


def readOneLineFileWithCommas(filepath: str) -> List[str]:
    """
    Reads a file that is one line long, separated by commas
    """
    try:
        with open(filepath) as fp:
            s: str = fp.readline()
            return s.split(",")
    except:
        raise Exception(f"Failed to open {filepath}")
4c181523192fab0ea01ae5da0883c543565119c6
4,575
def check_canopy_height(region_info, regional_lookup):
    """
    Check the regional canopy height.
    """
    mean_canopy_height = region_info['mean_canopy_height']
    if mean_canopy_height == 'no data':
        mean_canopy_height = 0
    return mean_canopy_height
5f04ad71df7f0b1c9ef73e97bbe99bea1916ae5e
4,578
def build_dict_conforming_to_schema(schema, **kwargs):
    """
    Given a schema object (for example, TIMESTAMP_SCHEMA from this module) and
    a set of keyword arguments, create a dictionary that conforms to the given
    schema, using the keyword arguments to define the elements of the new dict.

    Checks the result to make sure that it conforms to the given schema,
    raising an error if not.

    Returns the new dict conforming to the schema if there are no problems.
    """
    # Check that schema supports a check_match call.
    # Duck typing version of this check:
    if not hasattr(schema, 'check_match'):
        raise ValueError(
            'The given "schema" does not seem to be a schema. It has no '
            '"check_match" method. Given schema: ' + repr(schema))

    # # Strict typing version of this check:
    # # Check that schema_name is a SCHEMA.Object.
    # if not isinstance(schema, schema.Schema):
    #     raise ValueError(
    #         'The first argument must be a schema.Schema object, but is not. '
    #         'Given schema: ' + repr(schema))

    # The return value.
    d = {}

    for key, value in kwargs.items():
        d[key] = value

    schema.check_match(d)

    return d
8971b7c6e1df8fd16a1b0e0946c9f21a3c601512
4,580
def make_09f9():
    """Warehouse inventory footer."""
    return ""
91d21aeb58fc004865db91846d73f978f48f9be4
4,582
def empty_call_false(*args, **kwargs) -> bool:
    """
    Do nothing and return False
    """
    return False
3b3964c859a47698f0000e1b26963953980fad51
4,583
def cookie_is_encoded(data):
    """
    Tests whether or not a cookie is encoded / HMAC signed

    -> #bool True if encoded
    ..
        from vital.security import cookie_is_encoded

        cookie_is_encoded(
            "!YuOoKwDp8GhrwwojdjTxSCj1c2Z+7yz7r6cC7E3hBWo=?IkhlbGxvLCB3b3JsZC4i")
        # -> True
    ..
    """
    return data.startswith('!') and '?' in data
baf2a05b516a23cacca4985944974112019abfda
4,584
def _id_to_box(id_, dim):
    """Convert id to box ID"""
    row = id_ // (dim ** 3)
    col = (id_ % (dim ** 2)) // dim
    return row * dim + col
8e6c4779872fff5cdc5a6ca6b4143a1519d8aaf2
4,586
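A hedged worked example for _id_to_box above, assuming a dim**2 x dim**2 grid (Sudoku-style with dim=3) where id = row * dim**2 + col:

    _id_to_box(0, 3)   # row 0, col 0 -> box 0
    _id_to_box(40, 3)  # row 4, col 4 -> the centre box, i.e. 4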
import os


def create_readme(top_dir, package_name, description="", docs=False):
    """
    README requires the name of the package and the directory in which to
    write the file in. Optionally, give a description and whether or not to
    create a 'docs' directory.
    """
    readme_str = """
# {package}

## Description
{description}

## Examples

## Repo Structure
{package}:<br/>
┣━ README.md<br/>
┣━ LICENSE<br/>
┣━ setup.py<br/>
┣━ {package}:<br/>
┃   ┗━ __init__.py<br/>
"""
    if docs:
        readme_str = readme_str + \
            """┣━ tests:<br/>
┃   ┗━ test_basic.py<br/>
┗━ docs:<br/>
    ┗━"""
    else:
        readme_str = readme_str + \
            """┗━ tests:
    ┗━ test_basic.py
"""
    readme_str = readme_str.format(package=package_name, description=description)

    # Write to file
    with open(os.path.join(top_dir, 'README.md'), 'w') as f:
        f.write(readme_str)

    return readme_str
70f7221536078a5d5c13eb97b28c394b12621941
4,587
def estimate_responsivity(mis_MU, norm_MU):
    """from the estimated base intensities, we return only users which have
    zero base intensity for misinformation and greater than zero base
    intensity for normal content.
    """
    no_bad_intentions_ids = []
    for id in range(len(mis_MU)):
        if mis_MU[id] == 0 and norm_MU[id] != 0:
            no_bad_intentions_ids.append(id)
    return no_bad_intentions_ids
4d944478694f1be1474eea963fad284079d5fe57
4,588
def compute_embeddings(image):
    """A mock function for a call to a deep learning model or a web service."""
    del image  # this is just a mock and doesn't do anything with the input
    return 42
31536d4a2371140e962aadb63b8645685328b3df
4,589
def text_to_string(filename):
    """Read a text file and return a string."""
    with open(filename) as infile:
        return infile.read()
dbd79e78c84c3374c0252544086885b909ae9bd9
4,590
def lgsvlToScenicElevation(pos):
    """Convert LGSVL positions to Scenic elevations."""
    return pos.y
d90f7509285b08c791eac56c1a119f91120cf556
4,591
def false_discovery(alpha, beta, rho):
    """The false discovery rate.

    The false discovery rate is the probability that an observed edge is
    incorrectly identified, namely that it doesn't exist in the 'true'
    network. This is one measure of how reliable the results are.

    Parameters
    ----------
    alpha : float
        The estimate of the true-positive rate.
    beta : float
        The estimate of the false-positive rate.
    rho : float
        The estimate of network density.

    Returns
    -------
    float
        The false discovery rate (probability).

    References
    ----------
    .. [1] Newman, M.E.J. 2018. "Network structure from rich but noisy data."
       Nature Physics 14 6 (June 1): 542-545. doi:10.1038/s41567-018-0076-1.
    """
    return (1 - rho) * beta / (rho * alpha + (1 - rho) * beta)
849c236157070c5d1becfec3e4e5f46a63d232d2
4,593
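A hedged numeric check of false_discovery above, with made-up rates: for alpha = 0.9, beta = 0.1 and density rho = 0.5,

    false_discovery(0.9, 0.1, 0.5)
    # (1 - 0.5) * 0.1 / (0.5 * 0.9 + 0.5 * 0.1) = 0.05 / 0.50, i.e. about 0.1

so roughly 10% of observed edges would be expected to be spurious.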
import re


def eval_formula(formula, assignment):
    """
    Evaluates a formula represented as a string.

    **Attention**: Be extremely careful about what to pass to this function.
    All parameters are plugged into the formula and evaluated using `eval()`
    which executes arbitrary python code.

    Parameters
    ----------
    formula : str
        String representation of the formula to be evaluated.
    assignment : dict
        Dictionary containing parameter names and values as keys and values,
        respectively.

    Returns
    -------
    float
        Evaluation result.

    Examples
    --------
    >>> eval_formula('a + (1 - b) * a', {'a': 0.1, 'b': 0.8})
    0.12
    """
    expression = formula
    for param, value in sorted(assignment.items(), reverse=True):
        expression = expression.replace(param, str(value))
    # remove leading 0's
    expression = re.sub(r'\d-0\d', lambda x: re.sub(r'-0', '-', x[0]), expression)
    # pylint: disable=eval-used
    return eval(expression)
    # pylint: enable=eval-used
c1f344fc0049e20e86feb2428a46d51f9eee5898
4,595
import json


def case_structure_generator(path):
    """Create test cases from reference data files."""
    with open(str(path), 'r') as in_f:
        case_data = json.load(in_f)
    system_dict = case_data['namelists']['SYSTEM']
    ibrav = system_dict['ibrav']
    if '-' in path.name:
        _, qe_version_with_suffix = path.name.split('-')
        qe_version, _ = qe_version_with_suffix.rsplit('.', 1)
    else:
        qe_version = None
    ins = {'ibrav': ibrav, 'cell': case_data['cell'], 'qe_version': qe_version}
    if ibrav == 0:
        return ins, None, ValueError
    outs = dict()
    for key in (['a', 'b', 'c', 'cosab', 'cosac', 'cosbc'] +
                ['celldm({})'.format(i) for i in range(1, 7)]):
        if key in system_dict:
            outs[key] = system_dict[key]
    return ins, outs, None
1c7249c207032ed623bbfe274ed117283cd6ef4d
4,596
import psutil


def available_memory():
    """
    Returns total system wide available memory in bytes
    """
    return psutil.virtual_memory().available
5071312f64aa37e1d777c8f20009fa38137381a4
4,597
import math


def ceil(base):
    """Get the ceil of a number"""
    return math.ceil(float(base))
ebe78a5eb8fa47e6cfba48327ebb1bdc469b970d
4,599
def get_attr_counts(datas, attr):
    """
    Count the occurrences of each value of an attribute.

    :param datas:
    :type datas: list[BaseDataSample]
    :param attr:
    :type attr: str
    :return:
    """
    results = {}
    for data in datas:
        value = data.get_value(attr)
        if isinstance(value, list):
            for v in value:
                results.setdefault(attr + "-" + v, 0)
                results[attr + "-" + v] += 1
        else:
            results.setdefault(value, 0)
            results[value] += 1
    return results
bea8e6e1c99efe1ad18894831006f0e218517c74
4,600
def split(string: str, separator: str = " ") -> list:
    """
    Will split the string up into all the values separated by the separator
    (defaults to spaces)

    >>> split("apple#banana#cherry#orange", separator='#')
    ['apple', 'banana', 'cherry', 'orange']

    >>> split("Hello there")
    ['Hello', 'there']

    >>> split("11/22/63", separator='/')
    ['11', '22', '63']

    >>> split("12:43:39", separator=":")
    ['12', '43', '39']
    """
    split_words = []
    last_index = 0
    for index, char in enumerate(string):
        if char == separator:
            split_words.append(string[last_index:index])
            last_index = index + 1
        elif index + 1 == len(string):
            split_words.append(string[last_index : index + 1])
    return split_words
73e01d7ff9111d949f31f37b36c3b0656d06e340
4,601
import torch


def _find_quantized_op_num(model, white_list, op_count=0):
    """This is a helper function for `_fallback_quantizable_ops_recursively`

    Args:
        model (object): input model
        white_list (list): list of quantizable op types in pytorch
        op_count (int, optional): count the quantizable op quantity in this module

    Returns:
        the quantizable op quantity in this module
    """
    quantize_op_num = op_count
    for name_tmp, child_tmp in model.named_children():
        if type(child_tmp) in white_list \
                and not (isinstance(child_tmp, torch.quantization.QuantStub)
                         or isinstance(child_tmp, torch.quantization.DeQuantStub)):
            quantize_op_num += 1
        else:
            quantize_op_num = _find_quantized_op_num(
                child_tmp, white_list, quantize_op_num)
    return quantize_op_num
c51b06e476ff4804d5bdfca5a187717536a0418f
4,602
def list_to_string(the_list):
    """Converts list into one comma-separated string (the result keeps a
    trailing ", ")."""
    strings_of_list_items = [str(i) + ", " for i in the_list]
    the_string = "".join(strings_of_list_items)
    return the_string
f580dd8646526e64bb50297608e8ad8e338d9197
4,604
def _split_header_params(s):
    """Split header parameters."""
    result = []
    while s[:1] == b';':
        s = s[1:]
        end = s.find(b';')
        while end > 0 and s.count(b'"', 0, end) % 2:
            end = s.find(b';', end + 1)
        if end < 0:
            end = len(s)
        f = s[:end]
        result.append(f.strip())
        s = s[end:]
    return result
fabbfb0959133e70019742c6661cb3bb443ca34d
4,605
def countDigits(string):
    """return number of digits in a string (Helper for countHaveTenDigits)"""
    count = 0
    for char in string:
        if char in '0123456789':
            count += 1
    return count
f8d2327e022efc7a117b744588dfe16a3a7ba75e
4,606
def jsonDateTimeHandler(obj):
    """Takes an object and tries to serialize it in JSON by using strftime
    or isoformat."""
    if hasattr(obj, "strftime"):
        # To avoid problems with the js date-time format
        return obj.strftime("%a %b %d, %Y %I:%M %p")
    elif hasattr(obj, 'isoformat'):
        return obj.isoformat()
    # elif isinstance(obj, ...):
    #     return ...
    else:
        raise TypeError(
            'Object of type %s with value of %s is not JSON serializable' %
            (type(obj), repr(obj)))
605f8a379575d185bc2a8b16810252511eec52af
4,608
def name(ndims=2, ndepth=2):
    """ encode ndims and ndepth into a standardized model-name string """
    # Model name, depth and version
    value = 'care_denoise_%dDdepth%d' % (ndims, ndepth)
    return value
1933ac0454eac4c860d70683e58c922074498b63
4,610
import re


def run_job(answer: str, job: dict, grade: float, feedback: str):
    """
    Match answer to regex inside job dictionary.
    Add weight to grade if successful, else add comment to feedback.

    :param answer: Answer.
    :param job: Dictionary with regex, weight, and comment.
    :param grade: Current grade for the answer.
    :param feedback: Current feedback for the answer.
    :return: Modified answer, grade, and feedback.
    """
    match = re.search(job["regex"], answer)
    if match:
        grade += job["weight"]
        answer = answer.replace(match[0], "", 1)
    else:
        feedback += job["comment"] + "\n"
    return answer, grade, feedback
487916da129b8958f8427b11f0118135268f9245
4,612
def timefstring(dtobj, tz_name=True):
    """Standardize the format used for timestamp string format.

    Include 3 letter string for timezone if set to True.
    """
    if tz_name:
        return f'{dtobj.strftime("%Y-%m-%d_%H:%M:%S%Z")}'
    else:
        return f'{dtobj.strftime("%Y-%m-%d_%H:%M:%S")}NTZ'
5bbf0454a76ed1418cbc9c44de909940065fb51f
4,613
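A minimal usage sketch for timefstring above (the timestamps are made up):

    from datetime import datetime, timezone

    timefstring(datetime(2021, 8, 16, 16, 3, 34, tzinfo=timezone.utc))
    # -> '2021-08-16_16:03:34UTC'
    timefstring(datetime(2021, 8, 16, 16, 3, 34), tz_name=False)
    # -> '2021-08-16_16:03:34NTZ'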
def supported_coins_balance(balance, tickers):
    """ Return the balance with non-supported coins removed """
    supported_coins_balance = {}
    for coin in balance.keys():
        if coin != "BTC":
            if f"{coin}/BTC" in tickers:
                supported_coins_balance[coin] = balance[coin]
        else:
            try:
                supported_coins_balance["BTC"] = balance[coin]
            except KeyError:
                print("BTC not in balance")
    return supported_coins_balance
aaea856c728d04f47f52c1b07c66be57ff17d8cf
4,615
def _identity_map(size):
    """Function returning list of lambdas mapping vector to itself."""
    return [lambda x, id: x[id] for _ in range(size)]
6236d42d359fdc9b006bffcc597fccbc161eb53d
4,616
def center_vertices(vertices, faces, flip_y=True):
    """
    Centroid-align vertices.

    Args:
        vertices (V x 3): Vertices.
        faces (F x 3): Faces.
        flip_y (bool): If True, flips y verts to keep with image coordinates convention.

    Returns:
        vertices, faces
    """
    vertices = vertices - vertices.mean(dim=0, keepdim=True)
    if flip_y:
        vertices[:, 1] *= -1
        faces = faces[:, [2, 1, 0]]
    return vertices, faces
85743c3b3e3838533e78c66b137cc9c8c7702519
4,617
def get_bridge_interfaces(yaml):
    """Returns a list of all interfaces that are bridgedomain members"""
    ret = []
    if "bridgedomains" not in yaml:
        return ret
    for _ifname, iface in yaml["bridgedomains"].items():
        if "interfaces" in iface:
            ret.extend(iface["interfaces"])
    return ret
dad9e634a1c5306289e73d465b08b7ea857518e4
4,618
def _gen_sieve_array(M, factor_base):
    """Sieve Stage of the Quadratic Sieve. For every prime in the factor_base
    that doesn't divide the coefficient `a` we add log_p over the sieve_array
    such that ``-M <= soln1 + i*p <= M`` and ``-M <= soln2 + i*p <= M`` where
    `i` is an integer. When p = 2 then log_p is only added using
    ``-M <= soln1 + i*p <= M``.

    Parameters
    ==========

    M : sieve interval
    factor_base : factor_base primes
    """
    sieve_array = [0]*(2*M + 1)
    for factor in factor_base:
        if factor.soln1 is None:  # The prime does not divide a
            continue
        for idx in range((M + factor.soln1) % factor.prime, 2*M, factor.prime):
            sieve_array[idx] += factor.log_p
        if factor.prime == 2:
            continue
        # if prime is 2 then sieve only with soln_1_p
        for idx in range((M + factor.soln2) % factor.prime, 2*M, factor.prime):
            sieve_array[idx] += factor.log_p
    return sieve_array
98a8e5bedaa56dbe53aa8a152c20a015d7b3556d
4,620
def getObjDetRoI(imgSize, imgPatchSize, objx1, objy1, objx2, objy2):
    """
    Get region of interest (ROI) for a given object detection with respect to
    image and image patch boundaries.

    :param imgSize: size of the image of interest (e.g., [1920x1080]).
    :param imgPatchSize: Patch size of the image patch of interest (e.g., 192).
    :param objx1: Upper left x coordinate of the object detection.
    :param objy1: Upper left y coordinate of the object detection.
    :param objx2: Lower right x coordinate of the object detection.
    :param objy2: Lower right y coordinate of the object detection.
    """
    # Cast to float values for calculations
    startX = float(objx1)
    startY = float(objy1)
    endX = float(objx2)
    endY = float(objy2)

    # Ensure image and image patch boundaries
    xRange = endX - startX
    yRange = endY - startY
    addX = imgPatchSize - (xRange % imgPatchSize)
    addY = imgPatchSize - (yRange % imgPatchSize)
    endX = endX + addX
    endY = endY + addY
    if endX > imgSize[1]:
        endX = imgSize[1]
    if endY > imgSize[0]:
        endY = imgSize[0]

    return startX, startY, endX, endY
2feedb9a5f79c24d0fda4eaa9b8db5bd6922b4ce
4,622
def get_solubility(molecular_weight, density):
    """
    Estimate the solubility of each oil pseudo-component

    Estimate the solubility (mol/L) of each oil pseudo-component using the
    method from Huibers and Lehr given in the huibers_lehr.py module of
    py_gnome in the directory gnome/utilities/weathering/. This method is
    from Huibers & Katrisky in a 2012 EPA report and was further modified by
    Lehr to better match measured values. The equation used here is adapted
    to return results in mol/L.

    Parameters
    ----------
    molecular_weight : np.array
        Molecular weights of each pseudo-component as recorded in the NOAA
        Oil Library (g/mol)
    density : np.array
        Density of each pseudo-component as recorded in the NOAA Oil Library
        (kg/m^3)

    Returns
    -------
    solubility : np.array
        Array of solubilities (mol/L) for each pseudo-component of the oil.
    """
    return 46.4 * 10. ** (-36.7 * molecular_weight / density)
64a951e8a6d9579cf934893fe5c9bc0a9181d4cc
4,625
import torch


def cal_head_bbox(kps, image_size):
    """
    Args:
        kps (torch.Tensor): (N, 19, 2)
        image_size (int):

    Returns:
        bbox (torch.Tensor): (N, 4)
    """
    NECK_IDS = 12  # in cocoplus

    kps = (kps + 1) / 2.0

    necks = kps[:, NECK_IDS, 0]
    zeros = torch.zeros_like(necks)
    ones = torch.ones_like(necks)

    # min_x = int(max(0.0, np.min(kps[HEAD_IDS:, 0]) - 0.1) * image_size)
    min_x, _ = torch.min(kps[:, NECK_IDS:, 0] - 0.05, dim=1)
    min_x = torch.max(min_x, zeros)

    max_x, _ = torch.max(kps[:, NECK_IDS:, 0] + 0.05, dim=1)
    max_x = torch.min(max_x, ones)

    # min_y = int(max(0.0, np.min(kps[HEAD_IDS:, 1]) - 0.1) * image_size)
    min_y, _ = torch.min(kps[:, NECK_IDS:, 1] - 0.05, dim=1)
    min_y = torch.max(min_y, zeros)

    max_y, _ = torch.max(kps[:, NECK_IDS:, 1], dim=1)
    max_y = torch.min(max_y, ones)

    min_x = (min_x * image_size).long()  # (T,)
    max_x = (max_x * image_size).long()  # (T,)
    min_y = (min_y * image_size).long()  # (T,)
    max_y = (max_y * image_size).long()  # (T,)

    rects = torch.stack((min_x, max_x, min_y, max_y), dim=1)
    return rects
546b4d4fcf756a75dd588c85ab467c21e9f45550
4,630
def _process_input(data, context):
    """ pre-process request input before it is sent to TensorFlow Serving REST API

    Args:
        data (obj): the request data, in format of dict or string
        context (Context): object containing request and configuration details

    Returns:
        (dict): a JSON-serializable dict that contains request body and headers
    """
    if context.request_content_type == 'application/json':
        data = data.read().decode("utf-8")
        return data if len(data) else ''

    raise ValueError('{{"error": "unsupported content type {}"}}'.format(
        context.request_content_type or "unknown"))
05d48d327613df156a5a3b6ec76e6e5023fa54ca
4,631
def remove_duplicates(iterable):
    """Removes duplicates of an iterable without meddling with the order"""
    seen = set()
    seen_add = seen.add  # bind locally: avoids an attribute lookup per element
    return [x for x in iterable if not (x in seen or seen_add(x))]
d98fdf8a4be281008fa51344610e5d052aa77cae
4,632
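A quick check of remove_duplicates above: unlike a plain set(), the order of first occurrences is preserved.

    remove_duplicates([3, 1, 3, 2, 1])  # -> [3, 1, 2]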
def _orbit_bbox(partitions):
    """ Takes a granule's partitions 'partitions' and returns the bounding
    box containing all of them. Bounding box is ll, ur format
    [[lon, lat], [lon, lat]]. """
    lon_min = partitions[0]['lon_min']
    lat_min = partitions[0]['lat_min']
    lon_max = partitions[0]['lon_max']
    lat_max = partitions[0]['lat_max']
    for p in partitions[1:]:
        if p['lon_min'] < lon_min:
            lon_min = p['lon_min']
        if p['lat_min'] < lat_min:
            lat_min = p['lat_min']
        if p['lon_max'] > lon_max:
            lon_max = p['lon_max']
        if p['lat_max'] > lat_max:
            lat_max = p['lat_max']
    return [[lon_min, lat_min], [lon_max, lat_max]]
8e040b549cbdf9587f08a285bd6f867ae580d584
4,633
def db_to_dict(s_str, i=0, d=None):
    """
    Converts a dotbracket string to a dictionary of indices and their pairs

    Args:
        s_str -- str: secondary_structure in dotbracket notation
    KWargs:
        i -- int: start index
        d -- dict<index1, index2>: the dictionary so far
    Returns:
        dictionary
    """
    if d is None:  # avoid a mutable default argument shared between calls
        d = {}
    j = i
    while j < len(s_str):
        c = s_str[j]
        if c == "(":
            d = db_to_dict(s_str, j + 1, d)
            j = d[j]
        elif c == ")":
            d[i - 1] = j
            d[j] = i - 1
            if i != 0:
                return d  # Don't return from the first iteration yet
        else:
            d[j] = None
        j = j + 1
    return d
5440bc318b0b5c8a137e0a3f739031603994e89c
4,634
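A hedged worked example for db_to_dict above, with a made-up structure:

    db_to_dict("(.)")  # -> {1: None, 0: 2, 2: 0}

i.e. positions 0 and 2 are paired with each other, and the unpaired position 1 maps to None.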
from typing import Any
from typing import List


def is_generic_list(annotation: Any):
    """Checks if ANNOTATION is List[...]."""
    # python<3.7 reports List in __origin__, while python>=3.7 reports list
    return getattr(annotation, '__origin__', None) in (List, list)
0ed718eed16e07c27fd5643c18a6e63dc9e38f69
4,636
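A quick check of is_generic_list above:

    from typing import List

    is_generic_list(List[int])  # __origin__ is list -> True
    is_generic_list(int)        # no __origin__, getattr default None -> False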
from pathlib import Path


def create_folder(base_path: Path, directory: str, rtn_path=False):
    """
    Recursive directory creation function. Like mkdir(), but makes all
    intermediate-level directories needed to contain the leaf directory

    Parameters
    ----------
    base_path : pathlib.PosixPath
        Global Path to be root of the created directory(s)
    directory : str
        Location in the Songbird-LFP-Paper the new directory is meant to be made
    rtn_path : bool, optional
        If True it returns a Path() object of the path to the Directory
        requested to be created

    Returns
    -------
    location_to_save : class, (Path() from pathlib)
        Path() object for the Directory requested to be created

    Example
    -------
    # Will typically input a path using the Global Paths from paths.py
    >>> create_folder('/data/')
    """
    location_to_save = base_path / directory
    # Recursive directory creation function
    location_to_save.mkdir(parents=True, exist_ok=True)
    if rtn_path:
        return location_to_save.resolve()
7c3724b009ef03fc6aa4fbc2bf9da2cbfa4c784d
4,637