Dataset columns: content (string, 39-14.9k chars), sha1 (string, 40 chars), id (int64, 0-710k)
def byte_list_to_nbit_le_list(data, bitwidth, pad=0x00):
    """! @brief Convert a list of bytes to a list of n-bit integers (little endian)

    If the length of the data list is not a multiple of `bitwidth` // 8, then the pad value is
    used for the additional required bytes.

    @param data List of bytes.
    @param bitwidth Width in bits of the resulting values.
    @param pad Optional value used to pad input data if not aligned to the bitwidth.
    @result List of integer values that are `bitwidth` bits wide.
    """
    bytewidth = bitwidth // 8
    datalen = len(data) // bytewidth * bytewidth
    res = [sum((data[offset + i] << (i * 8)) for i in range(bytewidth))
           for offset in range(0, datalen, bytewidth)]
    remainder = len(data) % bytewidth
    if remainder != 0:
        pad_count = bytewidth - remainder
        padded_data = list(data[-remainder:]) + [pad] * pad_count
        res.append(sum((padded_data[i] << (i * 8)) for i in range(bytewidth)))
    return res
b92bbc28cc2ffd59ae9ca2e459842d7f4b284d18
707,490
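A quick usage sketch for byte_list_to_nbit_le_list above (hypothetical values; assumes the function is in scope): six bytes packed into little-endian 32-bit words, with 0xFF padding for the ragged tail.

data = [0x11, 0x22, 0x33, 0x44, 0x55, 0x66]
words = byte_list_to_nbit_le_list(data, 32, pad=0xFF)
# First word packs bytes 0-3 little-endian; the two leftover bytes get two pad bytes.
assert words == [0x44332211, 0xFFFF6655]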
def is_catalogue_link(link):
    """check whether the specified link points to a catalogue"""
    return link['type'] == 'application/atom+xml' and 'rel' not in link
bc6e2e7f5c34f6ea198036cf1404fef8f7e7b214
707,495
def _ParseProjectNameMatch(project_name):
    """Process the passed project name and determine the best representation.

    Args:
        project_name: a string with the project name matched in a regex

    Returns:
        A minimal representation of the project name, None if no valid content.
    """
    if not project_name:
        return None
    return project_name.lstrip().rstrip('#: \t\n')
cb9f92a26c7157a5125fbdb5dd8badd7ffd23055
707,497
def _get_lspci_name(line):
    """Reads and returns a 'name' from a line of `lspci` output."""
    hush = line.split('[')
    return '['.join(hush[0:-1]).strip()
92910d0f4d9dce1689ed22a963932fb85d8e2677
707,499
def get_child_right_position(position: int) -> int:
    """
    Heap helper function: get the position of the right child of the current node.

    >>> get_child_right_position(0)
    2
    """
    return (2 * position) + 2
2a5128a89ac35fe846d296d6b92c608e50b80a45
707,500
def split_range(r, n):
    """
    Computes the indices of segments after splitting a range of r values into n segments.

    Parameters
    ----------
    r : int
        Size of the range vector.
    n : int
        The number of splits.

    Returns
    -------
    segments : list
        The list of lists of first and last indices of segments.

    Example
    -------
    >>> split_range(8, 2)
    [[0, 4], [4, 8]]
    """
    step = int(r / n)
    segments = []
    for i in range(n):
        new_segment = [step * i, step * (i + 1)]
        segments.append(new_segment)
    # correct the gap in the missing index due to the truncated step
    segments[-1][-1] = r
    return segments
34f570933a5eb8772dc4b2e80936887280ff47a4
707,504
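A short sketch of the uneven case for split_range above (assuming it is in scope): when n does not divide r, the truncated step makes the last segment absorb the remainder.

assert split_range(10, 3) == [[0, 3], [3, 6], [6, 10]]  # step = 3; last bound corrected to r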
def _fill_three_digit_hex_color_code(*, hex_color_code: str) -> str:
    """
    Fill a 3-digit hexadecimal color code until it becomes 6 digits.

    Parameters
    ----------
    hex_color_code : str
        Three-digit hexadecimal color code (not including '#').
        e.g., 'aaa', 'fff'

    Returns
    -------
    filled_color_code : str
        Result color code. e.g., 'aaaaaa', 'ffffff'
    """
    filled_color_code: str = ''
    for char in hex_color_code:
        filled_color_code += char * 2
    return filled_color_code
d91df947fcc5f0718bbd9b3b4f69f1ad68ebeff4
707,505
def namify(idx):
    """
    Helper function that pads a given file number and returns it as per the
    dataset image name format.
    """
    len_data = 6  # ILSVRC images are in the form of 000000.JPEG
    len_ = len(str(idx))
    need = len_data - len_
    assert len_data >= len_, "Error! Image idx being fetched is incorrect. Invalid value."
    pad = '0' * need
    return pad + str(idx)
069ff7a297f944e9e0e51e5e100276a54fa51618
707,508
def get_bedtools_coverage_cmd(bam_filename, gff_filename, output_filename,
                              require_paired=False):
    """
    Get bedtools command for getting the number of reads from the BAM filename
    that are strictly contained within each interval of the GFF.
    """
    args = {"bam_filename": bam_filename,
            "gff_filename": gff_filename}
    # Do not include strandedness flag since that doesn't handle
    # paired-end cases
    intersect_cmd = "bedtools intersect -abam %(bam_filename)s " \
                    "-b %(gff_filename)s -f 1 -ubam " % (args)
    coverage_cmd = "%s | bedtools coverage -abam - -b %s -counts > %s" \
                   % (intersect_cmd, gff_filename, output_filename)
    return coverage_cmd
e4d6da3e3e7fe611c3bc3023bea3a76a0003a1f2
707,510
def form_requires_input(form):
    """
    Returns True if the form has at least one question that requires input
    """
    for question in form.get_questions([]):
        if question["tag"] not in ("trigger", "label", "hidden"):
            return True
    return False
97072a9edc494afa731312aebd1f23dc15bf9f47
707,516
def list_extract(items, arg):
    """Extract items from a list of containers

    Uses Django template lookup rules: tries list index / dict key lookup
    first, then tries getattr. If the result is callable, calls with no
    arguments and uses the return value.

    Usage:
        {{ list_of_lists|list_extract:1 }}
        (gets element 1 from each item in list)

        {{ list_of_dicts|list_extract:'key' }}
        (gets value of 'key' from each dict in list)
    """
    def _extract(item):
        try:
            return item[arg]
        except (TypeError, KeyError, IndexError):
            # Not subscriptable with arg; fall through to attribute lookup.
            pass
        attr = getattr(item, arg, None)
        return attr() if callable(attr) else attr
    return [_extract(item) for item in items]
23fb863a7032f37d029e8b8a86b883dbfb4d5e7b
707,517
import re


def parse_query(query):
    """Parse the given query, returning a tuple of strings list (include, exclude)."""
    exclude = re.compile(r'(?<=-")[^"]+?(?=")|(?<=-)\w+').findall(query)
    for w in sorted(exclude, key=lambda i: len(i), reverse=True):
        query = query.replace(w, '')
    query = " " + query
    return re.compile(r'(?<=[+ ]")[^"]+?(?=")|(?<=[+ ])\w+').findall(query), exclude
4fe6aac76935af6e5acaa3aedad40d6bc635d4ff
707,520
import struct


def read_plain_byte_array(file_obj, count):
    """Read `count` byte arrays using the plain encoding."""
    return [file_obj.read(struct.unpack(b"<i", file_obj.read(4))[0])
            for i in range(count)]
f300d205fda9b1b92ebd505f676b1f76122f994d
707,522
def big_number(int_in):
    """Converts a potentially big number into a readable string.

    Example:
    - big_number(10000000) returns '10 000 000'.
    """
    s = str(int_in)
    position = len(s)
    counter = 0
    out = ''
    while position != 0:
        counter += 1
        position -= 1
        out = s[position] + out
        if counter % 3 == 0 and position != 0:
            out = " " + out
    return out
7db0dce8ffa1cbea736537efbf2fdd4d8a87c20d
707,523
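Two illustrative calls for big_number above (assuming it is in scope); the grouping also works when the digit count is not a multiple of three.

assert big_number(1234567) == '1 234 567'
assert big_number(42) == '42'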
def test_pandigital_9(*args):
    """
    Test if args together contain the digits 1 through 9 uniquely
    """
    digits = set()
    digit_count = 0
    for a in args:
        while a > 0:
            digits.add(a % 10)
            digit_count += 1
            a //= 10
    return digit_count == 9 and len(digits) == 9 and 0 not in digits
ad5a738400f7b8a9bea001a13a76798633b9ac61
707,525
def _get_all_scopes(blocks):
    """Get all block-local scopes from an IR."""
    all_scopes = []
    for label, block in blocks.items():
        if block.scope not in all_scopes:
            all_scopes.append(block.scope)
    return all_scopes
daa13a20629dd419d08c9c6026972f666c3f9291
707,526
import difflib


def getStringSimilarity(string1: str, string2: str):
    """
    This function will return a similarity of two strings.
    """
    return difflib.SequenceMatcher(None, string1, string2).quick_ratio()
292f552449569206ee83ce862c2fb49f6063dc9e
707,530
import torch


def flipud(tensor):
    """
    Flips a given tensor along the first dimension (up to down)

    Parameters
    ----------
    tensor
        a tensor at least two-dimensional

    Returns
    -------
    Tensor
        the flipped tensor
    """
    return torch.flip(tensor, dims=[0])
b0fd62172b0055d9539b554a8c967c058e46b397
707,531
def get_file_type(filepath):
    """Returns the extension of a given filepath or url."""
    return filepath.split(".")[-1]
070a1b22508eef7ff6e6778498ba764c1858cccb
707,532
import re
from typing import List, Match, Optional, Pattern


def _target_js_variable_is_used(*, var_name: str, exp_lines: List[str]) -> bool:
    """
    Get a boolean value whether target variable is used in js expression or not.

    Parameters
    ----------
    var_name : str
        Target variable name.
    exp_lines : list of str
        js expression lines.

    Returns
    -------
    result : bool
        If target variable is used in js expression, True will be returned.
    """
    var_pattern: Pattern = re.compile(pattern=rf'var ({var_name}) = ')
    used_pattern_1: Pattern = re.compile(pattern=rf'{var_name}[ ;\)\.}},\]\[]')
    used_pattern_2: Pattern = re.compile(pattern=rf'{var_name}$')
    for line in exp_lines:
        if '//' in line:
            continue
        if var_name not in line:
            continue
        match: Optional[Match] = var_pattern.search(string=line)
        if match is not None:
            continue
        match = used_pattern_1.search(string=line)
        if match is not None:
            return True
        match = used_pattern_2.search(string=line)
        if match is not None:
            return True
    return False
be07cb1628676717b2a02723ae7c01a7ba7364d6
707,537
def find_period(samples_second):
    """
    # Find Period

    Args:
        samples_second (int): number of samples per second

    Returns:
        float: samples per period divided by samples per second
    """
    samples_period = 4
    return samples_period / samples_second
c4a53e1d16be9e0724275034459639183d01eeb3
707,539
def sqrt(x: int) -> int:
    """
    Babylonian Square root implementation
    """
    z = (x + 1) // 2
    y = x
    while z < y:
        y = z
        z = ((x // z) + z) // 2
    return y
1a91d35e5783a4984f2aca5a9b2a164296803317
707,540
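A floor-correctness check for the integer Babylonian sqrt above (a sketch, assuming the function is in scope; note it shadows math.sqrt if both are imported).

for n in (0, 1, 15, 16, 17, 10**6):
    r = sqrt(n)
    assert r * r <= n < (r + 1) * (r + 1)  # r == floor of the true square root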
def mock_interface_settings_mismatch_protocol(mock_interface_settings,
                                              invalid_usb_device_protocol):
    """
    Fixture that yields mock USB interface settings that is an unsupported
    device protocol.
    """
    mock_interface_settings.getProtocol.return_value = invalid_usb_device_protocol
    return mock_interface_settings
61958439a2869d29532e50868efb39fe3da6c8b5
707,545
def MakeLocalSsds(messages, ssd_configs):
    """Constructs the repeated local_ssd message objects."""
    if ssd_configs is None:
        return []

    local_ssds = []
    disk_msg = (
        messages.
        AllocationSpecificSKUAllocationAllocatedInstancePropertiesAllocatedDisk)
    interface_msg = disk_msg.InterfaceValueValuesEnum

    for s in ssd_configs:
        if s['interface'].upper() == 'NVME':
            interface = interface_msg.NVME
        else:
            interface = interface_msg.SCSI
        m = disk_msg(
            diskSizeGb=s['size'],
            interface=interface)
        local_ssds.append(m)

    return local_ssds
128e7a0358221fe3d93da4726924a7a783c65796
707,547
import base64


def _b64urldec(input: str) -> bytes:
    """
    Decode data from base64 urlsafe with stripped padding (as specified in the JWS RFC7515).
    """
    # The input is stripped of padding '='. These are redundant when decoding (only relevant
    # for concatenated sequences of base64 encoded data) but the decoder checks for them.
    # Appending two (the maximum number) of padding '=' is the easiest way to ensure it won't
    # choke on too little padding.
    return base64.urlsafe_b64decode(input + '==')
fb535072b560b8565916ae8ec3f32c61c41115d8
707,548
def text(el):
    """
    Helper to get the text content of a BeautifulSoup item
    """
    return el.get_text().strip()
7b34c77c79677a73cc66532fe6305635b1bdac43
707,554
def get_sha512_manifest(zfile):
    """
    Get MANIFEST.MF from a bar file.

    :param zfile: Open (!!!) ZipFile instance.
    :type zfile: zipfile.ZipFile
    """
    names = zfile.namelist()
    manifest = None
    for name in names:
        if name.endswith("MANIFEST.MF"):
            manifest = name
            break
    if manifest is None:
        raise SystemExit
    return manifest
7ef150bb3e89f8723649ee983085a413ec8a31df
707,555
def time_human(x):
    """
    Gets time as human readable
    """
    # Round time
    x = round(x, 2)
    for number, unit in [(60, "s"), (60, "min"), (24, "h"), (365, "days")]:
        if abs(x) < number:
            return f"{x:.2f} {unit}"
        x /= number
    return f"{x:.2f} years"
3f7f51ac7454e429fc30da64eed075aaf1f10b5b
707,560
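A few illustrative calls for time_human above (assuming the input is in seconds):

print(time_human(42))     # '42.00 s'
print(time_human(3700))   # '1.03 h'  (3700 s -> 61.67 min -> 1.03 h)
print(time_human(90000))  # '1.04 days'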
def evaluate_fN(model, NHI):
    """Evaluate an f(N,X) model at a set of NHI values

    Parameters
    ----------
    model
        The f(N,X) model to evaluate.
    NHI : array
        log NHI values

    Returns
    -------
    log_fN : array
        f(NHI,X) values
    """
    # Evaluate without z dependence
    log_fNX = model.__call__(NHI)
    return log_fNX
e952a29fdf5864b26dc534140b2ccfb0b59fe24b
707,564
def tag(dicts, key, value):
    """Adds the key value to each dict in the sequence"""
    for d in dicts:
        d[key] = value
    return dicts
ffcfda13845fb8b522e50211184104a11da50398
707,567
def lambda_handler(event, context):
    """
    Finds and replaces the following words and outputs the result.
    Oracle -> Oracle©
    Google -> Google©
    Microsoft -> Microsoft©
    Amazon -> Amazon©
    Deloitte -> Deloitte©
    Example input: “We really like the new security features of Google Cloud”.
    Expected output: “We really like the new security features of Google© Cloud”.
    """
    # Return 400 if event is none or strToReplace is blank or missing
    if not event or not event.get('strToReplace'):
        return {
            'statusCode': 400,
            'body': "Input string not provided."
        }

    # Input string
    replacementString = event['strToReplace']
    # Dictionary of words with replacement words
    wordsToReplaceDict = {'Oracle': 'Oracle©', 'Google': 'Google©',
                          'Microsoft': 'Microsoft©', 'Amazon': 'Amazon©',
                          'Deloitte': 'Deloitte©'}

    # Iterate over all key-value pairs in dictionary
    for key, value in wordsToReplaceDict.items():
        # Replace words in string
        replacementString = replacementString.replace(key, value)

    return {
        'statusCode': 200,
        'body': replacementString
    }
66dc2914dd04a2e265ed21542bd462b61344d040
707,575
import csv


def read_barcode_lineno_map(stream):
    """Build a map of barcodes to line number from a stream

    This builds a one based dictionary of barcode to line numbers.
    """
    barcodes = {}
    reader = csv.reader(stream, delimiter="\t")
    for i, line in enumerate(reader):
        barcodes[line[0]] = i + 1
    return barcodes
545a0d02dd76e774ba0de86431113ad9f36a098e
707,576
def _extract_dialog_node_name(dialog_nodes):
    """
    For each dialog_node (node_id) of type *standard*, check if *title exists*.
    If it exists, use the title for the node_name; otherwise, use the dialog_node.
    For all other cases, use the dialog_node.

    dialog_node: (dialog_node_title, dialog_node_type)

    In the case of Login Issues,
        "title": "Login Issue",
        "dialog_node": "Login Issues",
    the record will be created as:
        "Login Issues": ("Login Issue", "standard")
    """
    nodes_dict = {}
    for obj in dialog_nodes:
        if (obj['type'] == 'standard') and ('title' in obj) and (obj['title'] is not None):
            nodes_dict[obj['dialog_node']] = (obj['title'], obj['type'])
        else:
            nodes_dict[obj['dialog_node']] = (obj['dialog_node'], obj['type'])
    return nodes_dict
23121efa486c2da16a54b2441bb1435eec5b8b49
707,579
import types


def is_iterator(obj):
    """
    Predicate that returns whether an object is an iterator.
    """
    # The Python 3 iterator protocol uses '__next__' ('next' was Python 2).
    return type(obj) == types.GeneratorType or (
        '__iter__' in dir(obj) and '__next__' in dir(obj))
db57a2a1f171a48cc43ba4c248387191780dfd04
707,583
def table_to_dict(table):
    """Convert Astropy Table to Python dict.

    Numpy arrays are converted to lists. This can work with multi-dimensional
    array columns, by representing them as list of list.

    e.g. This is useful in the following situation.

    foo = Table.read('foo.fits')
    foo.to_pandas()  <- This will not work if columns are multi-dimensional.

    The alternative is:

    foo = Table.read('foo.fits')
    bar = table_to_dict(foo)
    df = pd.DataFrame(bar, columns=bar.keys())  <- The desired result.

    Returns the dict of column data and the list of names of the
    multi-dimensional columns.
    """
    total_data = {}
    multi_cols = []
    for i, _ in enumerate(table.columns):
        # This looks unusual, but it is the only way to iterate over columns.
        col = table.columns[i]
        data = table[col.name].tolist()
        total_data[col.name] = data
        if len(col.shape) == 2:
            multi_cols.append(col.name)
    return total_data, multi_cols
8ad9206222101bbd4d40913e3b43c8ffee9dd6ad
707,584
def application(environ, start_response):
    """Serve the button HTML."""
    with open('wsgi/button.html') as f:
        response_body = f.read()
    status = '200 OK'
    response_headers = [
        ('Content-Type', 'text/html'),
        ('Content-Length', str(len(response_body))),
    ]
    start_response(status, response_headers)
    return [response_body.encode('utf-8')]
97f1f793f234dbd3c29e9c4a791a224ba32c984b
707,586
def build_dataset_values(claim_object, data_value):
    """
    Build results with different datasets.

    Parameters:
        claim_object (obj): Object to modify and add to rows.
        data_value (obj): result object

    Returns:
        Modified claim_object according to data_value.type
    """
    if data_value["type"] == "globecoordinate":
        claim_object["str"] = str(data_value["value"]["latitude"]) + "," + str(data_value["value"]["longitude"])
    elif data_value["type"] == "time":
        claim_object["date"] = data_value["value"]["time"].split("T")[0].split("+")[1]
    elif data_value["type"] == "string":
        claim_object["str"] = data_value["value"]
    else:
        pass
    return claim_object
f3d267a4e9ac099f6d2313deffb2f45d35b90217
707,591
def deactivate_text(shell: dict, env_vars: dict) -> str:
    """Returns the formatted text to write to the deactivation script
    based on the passed dictionaries."""
    lines = [shell["shebang"]]
    for k in env_vars.keys():
        lines.append(shell["deactivate"].format(k))
    return "\n".join(lines)
0a75134a55bf9cd8eceb311c48a5547ad373593d
707,595
from typing import get_origin


def is_dict(etype) -> bool:
    """Determine whether etype is a Dict"""
    return get_origin(etype) is dict or etype is dict
a65af54bf6b24c94906765c895c899b18bf5c1eb
707,596
import requests


def get_data(stock, start_date):
    """Fetch a maximum of the 100 most recent records for a given stock
    starting at the start_date.

    Args:
        stock (string): Stock Ticker
        start_date (int): UNIX date time
    """
    # Build the query string
    request_url = f"https://api.pushshift.io/reddit/search/comment/?q={stock}&sort=asc&size=100&after={start_date}"
    # Get the query and convert to json
    result_json = requests.get(request_url).json()
    return result_json
aafdc913d80346e82a21767cdb7b5e40f2376857
707,597
def check_output(file_path: str) -> bool:
    """
    This function checks an output file, either from geomeTRIC or from Psi4,
    for a successful completion keyword. Returns True if the calculation
    finished successfully, otherwise False.
    """
    with open(file_path, "r") as read_file:
        text = read_file.read()
    checks = ["Converged! =D", "Psi4 exiting successfully"]
    return any(check in text for check in checks)
2f0dea67216aff945b1b0db74e0131022acc3019
707,603
def compute_recall(true_positives, false_negatives):
    """Compute recall

    >>> compute_recall(0, 10)
    0.0
    >>> compute_recall(446579, 48621)
    0.901815
    """
    return true_positives / (true_positives + false_negatives)
876bee73150d811e6b7c1a5de8d8e4349105c59b
707,605
def _insert_text_func(s, readline):
    """Creates a function to insert text via readline."""
    def inserter():
        readline.insert_text(s)
        readline.redisplay()
    return inserter
06532be051cb69b92fa79ef339edb733b8f31c15
707,612
def svo_filter_url(telescope, photofilter, zeropoint='AB'):
    """
    Returns the URL where the filter transmission curve is hiding.

    Requires arguments:
        telescope: SVO-like name of Telescope/Source of photometric system.
        photofilter: SVO-like name of photometric filter.

    Optional:
        zeropoint: String. Either 'AB', 'Vega', or 'ST'.

    Output:
        url: URL of the relevant file.
    """
    url = 'http://svo2.cab.inta-csic.es/theory/fps3/fps.php?' + \
          'PhotCalID=' + telescope + '/' + photofilter + '/' + zeropoint
    return url
e3cbe6a3192fcc890fb15df8fc3c02620a7c69fb
707,614
def setup_output_vcf(outname, t_vcf):
    """
    Create an output vcf.Writer given the input vcf file as a template,
    writes the full header and adds info fields: sizeCat MEF

    Returns a file handler and a dict with {individual_id: column in vcf}
    """
    out = open(outname, 'w')
    line = t_vcf.readline()
    samp_columns = {}
    while not line.startswith("#CHROM"):
        out.write(line)
        line = t_vcf.readline()
    # edit the header
    out.write('##INFO=<ID=sizeCat,Number=A,Type=String,Description="Size category of variant">\n')
    out.write('##INFO=<ID=MEF,Number=.,Type=String,Description="Names of families that contain mendelian error">\n')
    out.write(line)
    for pos, iid in enumerate(line.strip().split('\t')[9:]):
        samp_columns[iid] = pos + 9
    return out, samp_columns
82870c9c8d46dbe3161c434a87fac9108ed644b2
707,617
from typing import Any, Dict


def format_dict(body: Dict[Any, Any]) -> str:
    """
    Formats a dictionary into a multi-line bulleted string of key-value pairs.
    """
    return "\n".join(
        [f" - {k} = {getattr(v, 'value', v)}" for k, v in body.items()]
    )
b3f66d086284772e6783b8281f4d46c3dd6c237d
707,619
def rectify(link: str, parent: str, path: str):
    """A function to check a link and verify that it should be captured or not.
    For e.g. any external URL would be blocked. It would also take care that
    all the urls are properly formatted.

    Args:
        **link (str)**: the link to rectify.
        **parent (str)**: the complete url of the page from which the link was found.
        **path (str)**: the path (after the domain) of the page from which the link was found.

    Returns:
        **str**: the properly formatted link.
    """
    if (link.startswith("#")) or (":" in link) or ("../" in link):
        return path
    if not link.startswith("/"):
        if parent.endswith("/"):
            if not path.endswith("/"):
                path += "/"
            return path + link
        else:
            path = "/".join(path.split("/")[:-1]) + "/"
            return path + link
    return link
6ca5771fcbbb35fe6d99bab65082d447299bb93a
707,621
def indentsize(line):
    """Return the indent size, in spaces, at the start of a line of text."""
    expline = line.expandtabs()
    return len(expline) - len(expline.lstrip())
c1f307adfeb2c1ec51c5e926a0b87dd3841e1aff
707,622
import struct


def guid2bytes(s):
    """Converts a GUID to the serialized bytes representation"""
    assert isinstance(s, str)
    assert len(s) == 36

    p = struct.pack
    return b"".join([
        p("<IHH", int(s[:8], 16), int(s[9:13], 16), int(s[14:18], 16)),
        p(">H", int(s[19:23], 16)),
        p(">Q", int(s[24:], 16))[2:],
    ])
f298497173f9011392b671267cb47f081d25a9da
707,624
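A round-trip sanity check for guid2bytes above (the GUID value is arbitrary): the first three fields are packed little-endian and the last two big-endian, matching the mixed-endian GUID wire format.

raw = guid2bytes("12345678-9abc-def0-1234-56789abcdef0")
assert len(raw) == 16
assert raw[:4] == bytes.fromhex("78563412")  # first field is byte-swapped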
def grad(values: list[int], /) -> list[int]:
    """Compute the gradient of a sequence of values."""
    return [v2 - v1 for v1, v2 in zip(values, values[1:])]
a1df1dffb27028dc408b00ec8ac26b6f68d9c923
707,626
def hex2int(hex_str):
    """
    Convert 2 hex characters (e.g. "23") to int (35)

    :param hex_str: hex character string
    :return: int integer
    """
    return int(hex_str, 16)
0640cffd6f7558f4dfd1bc74e20510e7d2051ca3
707,631
def mock_sync_cavatica_account(mocker):
    """
    Mocks out sync Cavatica account functions
    """
    sync_cavatica_account = mocker.patch(
        "creator.projects.cavatica.sync_cavatica_account"
    )
    sync_cavatica_account.return_value = [], [], []
    return sync_cavatica_account
27a0a8abee2c025fe17ba4fa4a939bcf04fc9c63
707,632
def build_url(station, d1, d2):
    """
    Return the URL to fetch the response record for USArray MT station
    identifier *station* for the time range *d1* to *d2*.
    """
    return ('http://service.iris.edu/irisws/resp/1/query?net=EM&sta={}&loc=--'
            '&cha=*&starttime={:%Y-%m-%dT%H:%M:%S}'
            '&endtime={:%Y-%m-%dT%H:%M:%S}').format(station, d1, d2)
221d5f7a321d0e9337dbbe75e419298bcd3ab5c0
707,636
def read_float_with_comma(num):
    """Helper method to parse a float string representation that has a
    comma as decimal separator.

    Can't use locale as the page being parsed could not be in the same
    locale as the python running environment.

    Args:
        num (str): the float string to parse

    Returns:
        float: the parsed float
    """
    return float(num.replace(",", "."))
ff2e65ef35ba1fded06d8abb5ed252a6bffdceaa
707,641
def remote_repr(arg):
    """Return the `repr()` rendering of the supplied `arg`."""
    return arg
d284a0f3a6d08ceae198aacf68554da9cc264b1b
707,642
import importlib
from typing import Type


def get_class_for_name(name: str, module_name: str = __name__) -> Type:
    """Gets a class from a module based on its name.

    Tread carefully with this. Personally I feel like it's only safe to use
    with dataclasses with known interfaces.

    Parameters
    ----------
    name : str
        Name of the class we're trying to get the class object for.
    module_name : str, optional
        Which module to get a class from, by default __name__.

    Returns
    -------
    Type
        The class object for the given name.
    """
    this_module = importlib.import_module(module_name)
    this_class = getattr(this_module, name)
    return this_class
73058c179187aac277221b33f4e1e65934a49a6a
707,643
def get_uleb128(byte_str):
    """
    Gets an unsigned leb128 number from a byte string

    :param byte_str: byte string
    :return: byte string, integer
    """
    uleb_parts = []
    while byte_str[0] >= 0x80:
        uleb_parts.append(byte_str[0] - 0x80)
        byte_str = byte_str[1:]
    uleb_parts.append(byte_str[0])
    byte_str = byte_str[1:]
    uleb_parts = uleb_parts[::-1]
    integer = 0
    for i in range(len(uleb_parts) - 1):
        integer = (integer + uleb_parts[i]) << 7
    integer += uleb_parts[-1]
    return byte_str, integer
1e9c02dc7c191686e7d7a19d8b8c82f95044c845
707,646
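A decode check for get_uleb128 above, using the classic LEB128 example: 624485 encodes as E5 8E 26.

rest, value = get_uleb128(b"\xe5\x8e\x26\xff")
assert value == 624485
assert rest == b"\xff"  # trailing bytes are returned untouched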
def check_shots_vs_bounds(shot_dict, mosaic_bounds, max_out_of_bounds=3):
    """Checks whether all but *max_out_of_bounds* shots are within mosaic bounds

    Parameters
    ----------
    shot_dict : dict
        A dictionary (see czd_utils.scancsv_to_dict()) with coordinates of all
        shots in a .scancsv file: {shot: [x_coords, y_coords], ...}
    mosaic_bounds : list
        A list of bounds to a .Align file (see get_mos_bounds()):
        [min_x, max_x, min_y, max_y]
    max_out_of_bounds : int, optional
        Max number of out-of-bounds shots allowed for a 'match' between
        mosaic and .scancsv. The default is 3.

    Returns
    -------
    Boolean
        True or False, depending on whether all but *max_out_of_bounds*
        shots are within mosaic bounds
    """
    total_out_of_bounds = 0
    min_x, max_x, min_y, max_y = mosaic_bounds
    for eachcoords in shot_dict.values():
        if not min_x <= eachcoords[0] <= max_x or not min_y <= eachcoords[1] <= max_y:
            total_out_of_bounds += 1
    return total_out_of_bounds <= max_out_of_bounds
de36f7f2a32a2a7120236d0bd5e43520de0c7ea5
707,647
import dill


def deserializer(serialized):
    """Example deserializer function with extra sanity checking.

    :param serialized: Serialized byte string.
    :type serialized: bytes
    :return: Deserialized job object.
    :rtype: kq.Job
    """
    assert isinstance(serialized, bytes), "Expecting bytes"
    return dill.loads(serialized)
8895a1c40eaf5e30dd10015b87a0b94da0edf9ac
707,648
def _read_byte(stream):
    """Read byte from stream"""
    read_byte = stream.read(1)
    if not read_byte:
        raise Exception('No more bytes!')
    return ord(read_byte)
767766ef0d7a52c41b7686f994a503bc8cc7fe8d
707,649
def get_labels_from_sample(sample):
    """
    Each label of Chinese words having at most N-1 elements, assuming that it
    contains N characters that may be grouped.

    Parameters
    ----------
    sample : list of N characters

    Returns
    -------
    list of N-1 float on [0,1] (0 represents no split)
    """
    labels = []
    for word in sample:
        if len(word) > 1:
            for _ in range(len(word) - 1):
                labels.append(0)  # within a word, append a '0' for each interstice
            labels.append(1)  # at the end of a word, append a '1'
        else:
            labels.append(1)
    labels = labels[:-1]  # Throw away the last value, it doesn't represent an interstice
    return labels
4b21b878d1ae23b08569bda1f3c3b91e7a6c48b9
707,651
import math
import re


def number_to_block(number, block_number=0):
    """
    Given an address number, normalizes it to the block number.

    >>> number_to_block(1)
    '0'
    >>> number_to_block(10)
    '0'
    >>> number_to_block(100)
    '100'
    >>> number_to_block(5)
    '0'
    >>> number_to_block(53)
    '0'
    >>> number_to_block(153)
    '100'
    >>> number_to_block(1000)
    '1000'
    >>> number_to_block(1030)
    '1000'
    >>> number_to_block(1359)
    '1300'
    >>> number_to_block(13593)
    '13500'
    >>> number_to_block('01')
    '0'
    >>> number_to_block('00')
    '0'
    >>> number_to_block('foo')
    'foo'
    >>> number_to_block('3xx')
    '300'
    >>> number_to_block('3XX')
    '300'
    >>> number_to_block('3pp')
    '3pp'
    >>> number_to_block('XX')
    '0'
    >>> number_to_block('X')
    'X'

    block_number lets you customize the "XX" of "3XX block".

    >>> number_to_block(234, 99)
    '299'
    >>> number_to_block(12345, 99)
    '12399'
    """
    number = re.sub('(?i)xx', '00', str(number))
    try:
        number = int(number)
    except (TypeError, ValueError):
        return number
    return str(int(math.floor(number / 100.0)) * 100 + block_number)
1504d79469dccc06e867fbf5a80507566efb5019
707,652
def add(vec_1, vec_2):
    """
    This function performs vector addition. This is a good place to play
    around with different collection types (list, tuple, set...).

    :param vec_1: a subscriptable collection of length 3
    :param vec_2: a subscriptable collection of length 3
    :return vec_3: a subscriptable collection of length 3
    """
    # add two vectors
    vec_3 = [float(vec_1[0]) + float(vec_2[0]),
             float(vec_1[1]) + float(vec_2[1]),
             float(vec_1[2]) + float(vec_2[2])]
    return vec_3
4a17a82422cef472decb37c376e8bf5259ade60a
707,653
def ms_to_samples(ms, sampling_rate):
    """
    Convert a duration in milliseconds into samples.

    Arguments:
        ms (float): Duration in ms.
        sampling_rate (int): Sampling rate of the signal.

    Returns:
        int: Duration in samples.
    """
    return int((ms / 1000) * sampling_rate)
a2bf63ad8cca580ae3307c33daa82bb1382d742c
707,654
def flatten(L):
    """Flatten a list recursively

    Inspired by this fun discussion:
    https://stackoverflow.com/questions/12472338/flattening-a-list-recursively

    np.array.flatten did not work for irregular arrays and
    itertools.chain.from_iterable cannot handle arbitrarily nested lists

    :param L: A list to flatten
    :return: the flattened list
    """
    if L == []:
        return L
    if isinstance(L[0], list):
        return flatten(L[0]) + flatten(L[1:])
    return L[:1] + flatten(L[1:])
c554a01a8308341d1c9620edc0783689e75fb526
707,655
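A quick demonstration of flatten above on an irregular nesting:

assert flatten([1, [2, [3, 4]], [], [5]]) == [1, 2, 3, 4, 5]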
def chi2(observed, expected):
    """
    Return the chi2 sum of the provided observed and expected values.

    :param observed: list of floats.
    :param expected: list of floats.
    :return: chi2 (float).
    """
    if 0 in expected:
        return 0.0
    return sum((_o - _e) ** 2 / _e ** 2 for _o, _e in zip(observed, expected))
6050e98a823671de4a518d584a6e39bc519fa610
707,656
def feedback(olsys, H=1):
    """Calculate the closed-loop transfer function

                  olsys
    cltf = ----------------
            1 + H * olsys

    where olsys is the transfer function of the open loop
    system (Gc*Gp) and H is the transfer function in the feedback
    loop (H=1 for unity feedback)."""
    clsys = olsys / (1.0 + H * olsys)
    return clsys
ca78d05196068746a225038c0f401faad24c5f65
707,660
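A numeric check of the unity-feedback formula above (plain floats here; transfer-function objects that overload + and / would behave the same way):

assert feedback(4.0) == 4.0 / 5.0          # olsys / (1 + olsys) with H = 1
assert feedback(4.0, H=0.5) == 4.0 / 3.0   # olsys / (1 + 0.5*olsys)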
def comment(strng, indent=''):
    """return an input string, commented out"""
    template = indent + '# %s'
    lines = [template % s for s in strng.splitlines(True)]
    return ''.join(lines)
42386b7ed8de9127d7224481a5f5315d39b6ae97
707,661
def _traverse_tree_and_group_all_objects_by_oclass(root_obj, result=None):
    """Traverses the tree once and groups all objects by oclass

    :param root_obj: The root object where to start the traversion
    :type root_obj: CUDS
    :param result: The current results of the recursion, defaults to None
    :type result: dict, optional
    :return: All CUDS objects in the tree, grouped by oclass.
    :rtype: dict
    """
    if result is None:
        result = {str(root_obj.oclass): [root_obj]}
    for neighbour in root_obj.iter():
        # Keys are stored as str(oclass), so compare against that form.
        if str(neighbour.oclass) not in result:
            result[str(neighbour.oclass)] = [neighbour]
        else:
            result[str(neighbour.oclass)].append(neighbour)
        _traverse_tree_and_group_all_objects_by_oclass(neighbour, result)
    return result
3ae139313ea7b5e92f0d9231a4e64efc87acc5ac
707,663
def check_measurement(m_info, filters):
    """
    Determine whether a given measurement should be included based on the filters.

    Inputs:
        m_info - A dictionary containing the configuration parameters for an
                 individual measurement.
        filters - A dictionary containing a set of configuration parameter
                  values that should be included

    Output:
        include - Boolean indicating whether to include the given measurement
    """
    include = True
    for filter_field, filter_values in filters.items():
        try:
            iter(filter_values)
        except TypeError:
            # A scalar filter value; wrap it so the membership test works.
            filter_values = [filter_values]
        if not m_info[filter_field] in filter_values:
            include = False
    return include
374be08c315a63d09faadc9c963a49a89b04b3ed
707,664
def _normalize_sql(sql, maxlen=150):
    """Collapse whitespace and middle-truncate if needed."""
    out = ' '.join(sql.split())
    if len(out) > maxlen:
        i = int(maxlen / 2 - 4)
        out = out[0:i] + ' . . . ' + out[-i:]
    return out
f85efb0c367b448d2e363d9c1f8bf62a2bdb600e
707,668
import re


def validate_regex(regex_str):
    """
    Checks if a given string is valid regex

    :param str regex_str: a suspicious string that may or may not be valid regex
    :rtype: bool
    :return: True if valid regex was given, False in case of TypeError or re.error
    """
    # another of those super basic functions where I am not sure if there isn't an easier way
    try:
        re.compile(regex_str)
        return True
    except re.error:
        return False
    except TypeError:  # for the string not being one
        return False
97c6e2338eb67c2d4be74e3a18a4393a1eb36242
707,669
import base64


def _b64(b):
    """Helper function base64 encode for jose spec."""
    return base64.urlsafe_b64encode(b).decode('utf8').replace("=", "")
4777d4f47de2c72b8dd95b765fc54d1abc6763f0
707,671
import torch


def reparameterize(mu, logvar, n_samples=1):
    """Reparameterization trick.

    Args:
        mu (torch.Tensor): Mean.
        logvar (torch.Tensor): Logarithm of variation.
        n_samples (int): The number of samples.

    Returns:
        torch.Tensor: Samples drawn from the given Gaussian distribution.
            The shape is equal to mu if n_samples is 1,
            and (n_samples, *mu.shape) if n_samples is larger than 1.
    """
    std = torch.exp(0.5 * logvar)
    eps = torch.randn(n_samples, *std.size(), device=std.device)
    z = mu + eps * std
    return z.squeeze(0)
726473147ee28f470ad7d543e2b36bc512ffd0ae
707,672
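A small shape check for reparameterize above (requires torch; a sketch, not part of the original record):

import torch

mu = torch.zeros(8, 4)
logvar = torch.zeros(8, 4)  # unit variance
z1 = reparameterize(mu, logvar)                # squeeze(0) drops the sample dim
z5 = reparameterize(mu, logvar, n_samples=5)
assert z1.shape == (8, 4) and z5.shape == (5, 8, 4)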
def boolean_fn2(a, b, c):
    """
    Return the truth value of (a ∧ b) ∨ (¬a ∧ ¬b)
    """
    return a and b or not a and not b
c1ef37b3503866e9460fb95c4ab609278c6cff52
707,673
def apply_move(board_state, move, side):
    """Returns a copy of the given board_state with the desired move applied.

    Args:
        board_state (3x3 tuple of int): The given board_state we want to apply the move to.
        move (int, int): The position we want to make the move in.
        side (int): The side we are making this move for, 1 for the first player, -1 for the second player.

    Returns:
        (3x3 tuple of int): A copy of the board_state with the given move applied for the given side.
    """
    move_x, move_y = move

    def get_tuples():
        for x in range(3):
            if move_x == x:
                temp = list(board_state[x])
                temp[move_y] = side
                yield tuple(temp)
            else:
                yield board_state[x]

    return tuple(get_tuples())
b47da6ddab3bd1abf99ee558471a3696e46b8352
707,681
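A usage sketch for apply_move above: placing a move for player 1 on an empty 3x3 board returns a new board and leaves the original untouched.

empty = ((0, 0, 0), (0, 0, 0), (0, 0, 0))
board = apply_move(empty, (1, 2), 1)
assert board == ((0, 0, 0), (0, 0, 1), (0, 0, 0))
assert empty == ((0, 0, 0), (0, 0, 0), (0, 0, 0))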
def _el_orb(string):
    """Parse the element and orbital argument strings.

    The presence of an element without any orbitals means that we want to
    plot all of its orbitals.

    Args:
        string (str): The element and orbitals as a string, in the form
            ``"C.s.p,O"``.

    Returns:
        dict: The elements and orbitals as a :obj:`dict`. For example::

            {'Bi': ['s', 'px', 'py', 'd']}

        If an element symbol is included with an empty list, then all
        orbitals for that species are considered.
    """
    el_orbs = {}
    for split in string.split(','):
        orbs = split.split('.')
        orbs = [orbs[0], 's', 'p', 'd', 'f'] if len(orbs) == 1 else orbs
        el_orbs[orbs.pop(0)] = orbs
    return el_orbs
654d085347913bca2fd2834816b988ea81ab7164
707,686
def fmla_for_filt(filt):
    """
    Transform a set of column filters from a dictionary like
    {'varX': ['lvl1', 'lvl2'], ...} into an R selector expression like
    'varX %in% c("lvl1","lvl2")' & ...
    """
    return ' & '.join([
        '{var} %in% c({lvls})'.format(
            var=k,
            lvls=','.join(map(lambda x: '"%s"' % x, v)) if type(v) == list else '"%s"' % v
        )
        for k, v in filt.items()
    ])
149d23822a408ad0d96d7cefd393b489b4b7ecfa
707,687
def gaussian_ll_pdf(x, mu, sigma):
    """Evaluates the (unnormalized) log of the normal PDF at point x

    Parameters
    ----------
    x : float or array-like
        point at which to evaluate the log pdf
    mu : float or array-like
        mean of the normal on a linear scale
    sigma : float or array-like
        standard deviation of the normal on a linear scale
    """
    log_pdf = -0.5 * (x - mu) ** 2.0 / sigma ** 2.0  # - np.log(sigma) - 0.5*np.log(2.0*np.pi)
    return log_pdf
dbf1e389ad8349093c6262b2c595a2e511f2cb28
707,688
from typing import Tuple


def ordered_pair(x: complex) -> Tuple[float, float]:
    """
    Returns the tuple (a, b), like the ordered pair in the complex plane
    """
    return (x.real, x.imag)
c67e43cf80194f7a5c7c5fd20f2e52464816d056
707,689
def cli(ctx, user_id):
    """Create a new API key for a given user.

    Output:
        the API key for the user
    """
    return ctx.gi.users.create_user_apikey(user_id)
d7dafd77ef983286184b6f5aa2362bb734389696
707,698
from io import BytesIO


def bytes_to_bytesio(bytestream):
    """Convert a bytestring to a BytesIO ready to be decoded."""
    fp = BytesIO()
    fp.write(bytestream)
    fp.seek(0)
    return fp
d59e4f5ccc581898da20bf5d3f6e70f8e8712aa6
707,700
def _identity_error_message(msg_type, message, status_code, request):
    """
    Set the response code on the request, and return a JSON blob representing
    an Identity error body, in the format Identity returns error messages.

    :param str msg_type: What type of error this is - something like
        "badRequest" or "itemNotFound" for Identity.
    :param str message: The message to include in the body.
    :param int status_code: The status code to set
    :param request: the request to set the status code on

    :return: dictionary representing the error body
    """
    request.setResponseCode(status_code)
    return {
        msg_type: {
            "message": message,
            "code": status_code
        }
    }
d73e182fc794f01c3415069ffeb37e76a01df7af
707,704
def repeat_batch(t, K, dim=0):
    """Repeat a tensor while keeping the concept of a batch.

    :param t: `torch.Tensor`: The tensor to repeat.
    :param K: `int`: The number of times to repeat the tensor.
    :param dim: `int`: The dimension to repeat in. This should be the
        batch dimension.

    :returns: `torch.Tensor`: The repeated tensor. The new shape will be
        batch size * K at dim, the rest of the shapes will be the same.

    Example::

        >>> a = torch.arange(10).view(2, -1)
        >>> a
        tensor([[0, 1, 2, 3, 4],
                [5, 6, 7, 8, 9]])
        >>> a.repeat(2, 1)
        tensor([[0, 1, 2, 3, 4],
                [5, 6, 7, 8, 9],
                [0, 1, 2, 3, 4],
                [5, 6, 7, 8, 9]])
        >>> repeat_batch(a, 2)
        tensor([[0, 1, 2, 3, 4],
                [0, 1, 2, 3, 4],
                [5, 6, 7, 8, 9],
                [5, 6, 7, 8, 9]])
    """
    shape = t.shape
    tiling = [1] * (len(shape) + 1)
    tiling[dim + 1] = K
    tiled = t.unsqueeze(dim + 1).repeat(tiling)
    old_bsz = shape[dim]
    new_bsz = old_bsz * K
    new_shape = list(shape[:dim]) + [new_bsz] + list(shape[dim + 1:])
    return tiled.view(new_shape)
31ae6e02bd23c56049a4f8e5ea9f36e5b6186678
707,709
def prettify_seconds(seconds):
    """
    Prettifies seconds.

    Takes number of seconds (int) as input and returns a prettified string.

    Example:
    >>> prettify_seconds(342543)
    '3 days, 23 hours, 9 minutes and 3 seconds'
    """
    if seconds < 0:
        raise ValueError("negative input not allowed")
    signs = {"s": {"singular": "second", "plural": "seconds"},
             "h": {"singular": "hour", "plural": "hours"},
             "min": {"singular": "minute", "plural": "minutes"},
             "d": {"singular": "day", "plural": "days"}}
    separator = ", "
    last_separator = " and "

    def get_sign(unit, value):
        if value == 1 or value == -1:
            return signs[unit]["singular"]
        else:
            return signs[unit]["plural"]

    days, remainder = divmod(seconds, 86400)
    hours, remainder = divmod(remainder, 3600)
    minutes, seconds = divmod(remainder, 60)
    daystext = "{} {}".format(days, get_sign("d", days)) if days else ""
    hourstext = "{} {}".format(hours, get_sign("h", hours)) if hours else ""
    minutestext = "{} {}".format(minutes, get_sign("min", minutes)) if minutes else ""
    if (not seconds) and (days or hours or minutes):
        secondstext = ""
    else:
        secondstext = "{} {}".format(seconds, get_sign("s", seconds))
    output_list = [daystext, hourstext, minutestext, secondstext]
    filtered = [item for item in output_list if item]
    if len(filtered) <= 2:
        output = last_separator.join(filtered)
    else:
        output = separator.join(filtered[:-1]) + last_separator + filtered[-1]
    return output
4b77f9ed3d2085895ef15c6be30b7bfe83d1f49d
707,713
def default_preprocessing(df):
    """Perform the same preprocessing as the original analysis:
    https://github.com/propublica/compas-analysis/blob/master/Compas%20Analysis.ipynb
    """
    return df[(df.days_b_screening_arrest <= 30)
              & (df.days_b_screening_arrest >= -30)
              & (df.is_recid != -1)
              & (df.c_charge_degree != 'O')
              & (df.score_text != 'N/A')]
e6f4d8ceaa09fe71657e7936db886c3eabfb7aa0
707,714
def nullColumns(fileHeaders, allKeys):
    """
    Return a set of column names that don't exist in the file.
    """
    s1 = set(fileHeaders)
    s2 = set(allKeys)
    return s2.difference(s1)
17a0bb80414fe88f213399958b217ccf6fb5d1e9
707,720
def _flip(r, u):
    """Negate `r` if `u` is negated, else identity."""
    return ~r if u.negated else r
18ddcf5132867f5646c729bdadcb2c5077df8c03
707,722
def is_igb(request):
    """
    Checks the headers for IGB headers.
    """
    return 'HTTP_EVE_TRUSTED' in request.META
1e6485614063a9f4eec36407b60154300d38db76
707,725
def simulate_until_target_substate_or_max_t(
        _simulate_until_attractor_or_target_substate_or_max_t,
        initial_state, perturbed_nodes_by_t, predecessor_node_lists, truth_tables):
    """
    Perform simulation to figure whether it reaches target substate. Does not
    return states of simulations that don't reach target substate. Target
    substate is not considered as reached until all the perturbations are
    carried out. Initial state can be considered as reached target substate
    if no perturbations are present.

    :param _simulate_until_attractor_or_target_substate_or_max_t: [function] to perform simulation
    :param initial_state: initial state of the network
    :param perturbed_nodes_by_t: dict (by time steps) of dicts (by nodes) of node states
    :param predecessor_node_lists: list of predecessor node lists
    :param truth_tables: list of dicts (key: tuple of predecessor node states,
        value: resulting node state)
    :return: list of states where last state contains target substate, or None
        if target substate was not reached
    """
    states, *_, target_substate_is_reached, _ = _simulate_until_attractor_or_target_substate_or_max_t(
        initial_state, perturbed_nodes_by_t, predecessor_node_lists, truth_tables)

    return states if target_substate_is_reached else None
526ef8085dcbe4bcbc112c3bd4626ec5247e2f97
707,729
import logging


def _get_filehandler_with_formatter(logname, formatter=None):
    """
    Return a logging FileHandler for given logname using a given logging formatter

    :param logname: Name of the file where logs will be stored, ".log"
        extension will be added
    :param formatter: An instance of logging.Formatter or None if the default
        should be used
    :return:
    """
    handler = logging.FileHandler(logname)
    if formatter is not None:
        handler.setFormatter(formatter)
    return handler
1cc6f83480e691c4c54c359deabd6364da65f320
707,733
from typing import Tuple


def to_int(s: str) -> Tuple[bool, int]:
    """Convert a string s to an int, if possible."""
    try:
        n = int(s)
        return True, n
    except Exception:
        return False, 0
27d24b881f5987037f750a1cee022f7b1daa7c33
707,734
def getTrackIds(sp, username, playlist, offset=0):
    """
    Returns the ids of the tracks contained in a playlist.

    :param sp: A spotipy.Spotify object to be used for the request.
    :param username: The username of the user whose playlists you want to retrieve.
    :param playlist: Name of the playlist from which the tracks are retrieved.
    :param offset: Do not worry about this parameter, it is used for recursion.
    :returns: A list containing all the ids of the tracks that are in the playlist.
    """
    limit = 100
    fields = "items(track(id)), total"
    api_response = sp.user_playlist_tracks(username, playlist["id"], fields,
                                           limit=limit, offset=offset)
    track_ids = [x["track"]["id"] for x in api_response["items"]]

    if api_response["total"] > limit + offset:
        next_page = getTrackIds(sp, username, playlist, offset + limit)
        for item in next_page:
            track_ids.append(item)

    return track_ids
5b4e621022f49137b7fd4547bf5ab4efe92b4515
707,737
import pathlib


def path_to_filename(path, with_suffix=True):
    """Get filename from path.

    Parameters
    ==========
    path : str
        Path to retrieve file name from e.g. '/path/to/image.png'.
    with_suffix : bool
        Whether to include the suffix of file path in file name.

    Returns
    =======
    str
        The file name of the path e.g. 'image.png' or 'image' if
        `with_suffix` is false.
    """
    p = pathlib.Path(path)
    if with_suffix:
        return str(p.name)
    else:
        return str(p.with_suffix("").name)
45ecfb6e263e65de7165a69eda99bc8de2a157f4
707,740
def get_blueprint_docs(blueprints, blueprint):
    """Returns doc string for blueprint."""
    doc_string = blueprints[blueprint].__doc__
    return doc_string
8a334a9ddd1ff5fe844821152f4312b2db0e9da5
707,742
def is_binary(file_path):
    """
    Returns True if the file is binary
    """
    with open(file_path, 'rb') as fp:
        data = fp.read(1024)
        if not data:
            return False
        if b'\0' in data:
            return True
        return False
2df56f93d4e31220a580bf1e659c3c51b96260d2
707,743
def resolve(match, *objects):
    """Given an array of objects and a regex match, this function returns the
    first matched group if it exists in one of the objects, otherwise returns
    the original string fully matched by the regex.

    Example: if regex = \\\.([a-z]) and string = test\.abc, then the
    match = {group0: \.abc, group1: abc}. Assuming one object:
    - obj = {abc: def}, then we return 'def'
    - obj = {test: value}, then we return \.abc

    Args:
        objects (array[dict]): the array of objects we use to look up the key
            in match.group(1)
        match: the regex match object

    Returns:
        str: the value of the matched group(1) in the first object found if
        exists, otherwise returns the fully matched string.
    """
    for obj in objects:
        if obj is not None and match.group(1) in obj:
            return str(obj[match.group(1)])
    return match.group(0)
52f59fb5248ba635866fcd59a549067c3984e460
707,749
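A concrete call mirroring the docstring's example for resolve above (a sketch; the pattern uses [a-z]+ so that group(1) captures the whole word):

import re

m = re.search(r'\\\.([a-z]+)', r'test\.abc')
assert resolve(m, {'abc': 'def'}) == 'def'        # group(1) found in an object
assert resolve(m, {'test': 'value'}) == r'\.abc'  # falls back to group(0)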
from typing import Dict


def binary_to_single(param_dict: Dict[str, float], star_index: int) -> Dict[str, float]:
    """
    Function for converting a dictionary with atmospheric parameters of a
    binary system to a dictionary of parameters for one of the two stars.

    Parameters
    ----------
    param_dict : dict
        Dictionary with the atmospheric parameters of both stars. The keywords
        end either with ``_0`` or ``_1`` that correspond with ``star_index=0``
        or ``star_index=1``.
    star_index : int
        Star index (0 or 1) that is used for the parameters in ``param_dict``.

    Returns
    -------
    dict
        Dictionary with the parameters of the selected star.
    """
    new_dict = {}

    for key, value in param_dict.items():
        if star_index == 0 and key[-1] == "0":
            new_dict[key[:-2]] = value
        elif star_index == 1 and key[-1] == "1":
            new_dict[key[:-2]] = value
        elif key in ["teff", "logg", "feh", "c_o_ratio", "fsed", "radius", "distance"]:
            new_dict[key] = value

    return new_dict
21099162ffe83715892abf82660e35ee98e02930
707,755
import time


def convert_epoch_to_mysql_timestamp(epoch_timestamp):
    """
    Converts a given epoch timestamp in seconds to the MySQL datetime format.

    :param epoch_timestamp: The timestamp as seconds since epoch time
    :return: The MySQL timestamp string in the format 'Y-m-d HH:MM:SS'
    :rtype: str
    """
    try:
        epoch = time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime(epoch_timestamp))
        return epoch
    except Exception as e:
        print(e)
        return None
15647a816e638e7668e2e830ebc4f1c6fdb2f030
707,759
def check_public_key(pk):
    """
    Checks if a given string is a public key (or at least if it is formatted as if it is).

    :param pk: ECDSA public key to be checked.
    :type pk: hex str
    :return: True if the key matches the format, raise exception otherwise.
    :rtype: bool
    """
    prefix = pk[0:2]
    l = len(pk)

    if prefix not in ["02", "03", "04"]:
        raise Exception("Wrong public key format.")
    if prefix == "04" and l != 130:
        raise Exception(
            "Wrong length for an uncompressed public key: " + str(l))
    elif prefix in ["02", "03"] and l != 66:
        raise Exception("Wrong length for a compressed public key: " + str(l))
    else:
        return True
120b3e88a96db45e5e4df0996414448da8b84462
707,760
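A usage sketch for check_public_key above (hypothetical hex strings of the right length, not real curve points; the function checks only prefix and length):

compressed = "02" + "ab" * 32    # 66 hex chars
uncompressed = "04" + "ab" * 64  # 130 hex chars
assert check_public_key(compressed)
assert check_public_key(uncompressed)
# check_public_key("05" + "ab" * 32) would raise: wrong prefix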