content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
import re

def colorize_output(output):
    """Wrap Ansible-style status markers in colored HTML ``<font>`` tags."""
    # Per-task status lines.
    colored = re.sub(r'(ok: [-\w\d\[\]]+)', r'<font color="green">\g<1></font>', output)
    colored = re.sub(r'(changed: [-\w\d\[\]]+)', r'<font color="orange">\g<1></font>', colored)
    # Only highlight failures when the run actually contains failures.
    if re.search(r'failed: 0', colored) is None:
        colored = re.sub(r'(failed: [-\w\d\[\]]+)', r'<font color="red">\g<1></font>', colored)
        colored = re.sub(r'(fatal: [-\w\d\[\]]+):', r'<font color="red">\g<1></font>', colored)
    # Play recap counters.
    colored = re.sub(r'(ok=[\d]+)', r'<font color="green">\g<1></font>', colored)
    colored = re.sub(r'(changed=[\d]+)', r'<font color="orange">\g<1></font>', colored)
    colored = re.sub(r'(failed=[1-9][0-9]*)', r'<font color="red">\g<1></font>', colored)
    return colored
80759da16262d850b45278faede4b60b7aa4a7c6
707,558
import argparse

def parse_args():
    """Parse command-line arguments for stopping a subjective evaluation."""
    parser = argparse.ArgumentParser(
        description='Stop a subjective evaluation without ' +
                    'destroying resources')
    # All credentials are optional flags; help text is unchanged.
    option_specs = [
        ('--aws_api_key', 'The public API key for AWS'),
        ('--aws_api_secret_key', 'The private API key for AWS'),
        ('--heroku_api_key', 'The API key for Heroku'),
        ('--mysql_local_user', 'The username of the local MySQL database'),
        ('--mysql_local_password',
         'The corresponding password of the local MySQL database'),
    ]
    for flag, help_text in option_specs:
        parser.add_argument(flag, help=help_text)
    return parser.parse_args()
661a9bdec94b88c06f6d4080ef20cc31f81901ff
707,559
def time_human(x):
    """Render a duration in seconds as a human-readable string."""
    x = round(x, 2)
    # Successive unit sizes: seconds -> minutes -> hours -> days -> years.
    for limit, label in ((60, "s"), (60, "min"), (24, "h"), (365, "days")):
        if abs(x) < limit:
            return f"{x:.2f} {label}"
        x /= limit
    return f"{x:.2f} years"
3f7f51ac7454e429fc30da64eed075aaf1f10b5b
707,560
def limit_sub_bbox(bbox, sub_bbox):
    """Clip *sub_bbox* so it never extends beyond *bbox*.

    >>> limit_sub_bbox((0, 1, 10, 11), (-1, -1, 9, 8))
    (0, 1, 9, 8)
    >>> limit_sub_bbox((0, 0, 10, 10), (5, 2, 18, 18))
    (5, 2, 10, 10)
    """
    # Lower-left corner: take the larger of each minimum.
    left = max(bbox[0], sub_bbox[0])
    bottom = max(bbox[1], sub_bbox[1])
    # Upper-right corner: take the smaller of each maximum.
    right = min(bbox[2], sub_bbox[2])
    top = min(bbox[3], sub_bbox[3])
    return left, bottom, right, top
fa5b7763b30442fba137814ac7b0336528c4540b
707,561
def slide_number_from_xml_file(filename):
    """Integer slide number parsed out of a filename.

    Assumes a path shaped like /path/to/Slidefile/somekindofSlide36.something
    """
    start = filename.rfind("Slide") + len("Slide")
    stop = filename.rfind(".")
    return int(filename[start:stop])
dcfbc322b30a39041ab15b8496f097a5a5329865
707,562
import re

def is_strong_pass(password):
    """
    Verify the strength of ``password``.

    Returns True when all criteria below hold, otherwise False.
    (The previous docstring incorrectly claimed a dict was returned;
    the function has always returned a bool.)

    A password is considered strong if it has:
        * 8 characters or more
        * 1 digit or more
        * 1 symbol or more
        * 1 uppercase letter or more
        * 1 lowercase letter or more
    """
    length_error = len(password) < 8
    digit_error = re.search(r"\d", password) is None
    uppercase_error = re.search(r"[A-Z]", password) is None
    lowercase_error = re.search(r"[a-z]", password) is None
    # Symbol class covers common punctuation plus space and double quote.
    symbol_error = re.search(r"[ !#$@%&'()*+,-./[\\\]^_`{|}~" + r'"]', password) is None
    return not (length_error or digit_error or uppercase_error or
                lowercase_error or symbol_error)
bfd1832951ba3059d8c542fa0b9d708a2416a4d4
707,563
def evaluate_fN(model, NHI):
    """ Evaluate an f(N,X) model at a set of NHI values

    Parameters
    ----------
    model : callable
        The f(N,X) model to evaluate.
    NHI : array
        log NHI values

    Returns
    -------
    log_fN : array
        f(NHI,X) values
    """
    # Evaluate without z dependence; call the model directly instead of
    # spelling out model.__call__().
    return model(NHI)
e952a29fdf5864b26dc534140b2ccfb0b59fe24b
707,564
import os

def is_file_type(fpath, filename, ext_list):
    """Returns true if file is valid, not hidden, and has extension of given type"""
    parts = filename.split('.')
    if not os.path.isfile(os.path.join(fpath, filename)):
        return False          # not an existing regular file
    if filename.startswith('.'):
        return False          # hidden file
    if len(parts) < 2:
        return False          # no extension at all
    return parts[-1].lower() in ext_list
52213f023313e4edb5628fcebf47cb94bc2cfcbe
707,565
def index():
    """Root view: report the API name and version string."""
    return "<i>API RestFull PARCES Version 0.1</i>"
8b8b963f75395df665bcf0283528c9641b3ea20e
707,566
def tag(dicts, key, value):
    """Set ``key`` to ``value`` on every dict in the sequence, in place."""
    for entry in dicts:
        entry[key] = value
    return dicts
ffcfda13845fb8b522e50211184104a11da50398
707,567
def update_max_braking_decel(vehicle, mbd):
    """Return *vehicle* with its max braking deceleration set to *mbd*.

    :param vehicle: vehicle profile to update
    :type vehicle: VehicleProfile
    :param mbd: new max braking decel
    :return: the updated vehicle
    """
    # Delegate to the profile's own update method.
    updated = vehicle.update_max_braking_decel(mbd)
    return updated
dea3bf14ca14363246539fd81cf853cd2c0ad980
707,568
import click

def cli(ctx: click.Context) -> int:
    """Root CLI command; subcommands are declared via decorators elsewhere.

    Always succeeds: returns exit status 0.
    """
    return 0
be5016c5c38f435b8a213a6ce39b5571aee809f1
707,570
def get_close_icon(x1, y1, height, width):
    """Return the bounding box (x1, y1, x2, y2) of a fixed 15x15 close icon
    anchored at the top-left corner (x1, y1).

    ``height`` and ``width`` are accepted for interface compatibility with
    callers but are currently unused.
    """
    # An earlier, size-proportional implementation was left here as
    # commented-out code; it has been removed in favor of documenting the
    # actual fixed-size behavior.
    return x1, y1, x1 + 15, y1 + 15
78b65cdeeb4f6b3a526fd5dd41b34f35545f1e9d
707,572
def train_model(network, data, labels, batch_size, epochs,
                validation_data=None, verbose=True, shuffle=False):
    """Fit *network* on (*data*, *labels*) and return the fit result."""
    fit_options = dict(
        batch_size=batch_size,
        epochs=epochs,
        validation_data=validation_data,
        shuffle=shuffle,
        verbose=verbose,
    )
    return network.fit(data, labels, **fit_options)
a2b093aef1b607cd34dd30e8c5f126e1efb3d409
707,573
def lambda_handler(event, context):
    """
    Find and replace the following words and output the result.
        Oracle -> Oracle©   Google -> Google©   Microsoft -> Microsoft©
        Amazon -> Amazon©   Deloitte -> Deloitte©

    Example input:  "We really like the new security features of Google Cloud".
    Expected output: "We really like the new security features of Google© Cloud".

    Returns a dict with 'statusCode' (200 or 400) and 'body'.
    """
    # Use .get() so a missing 'strToReplace' key yields a 400 response
    # instead of raising KeyError.
    if not event or not event.get('strToReplace'):
        return {
            'statusCode': 400,
            'body': "Input string not provided."
        }
    replacement_string = event['strToReplace']
    # Words mapped to their trademarked replacements.
    words_to_replace = {'Oracle': 'Oracle©', 'Google': 'Google©',
                        'Microsoft': 'Microsoft©', 'Amazon': 'Amazon©',
                        'Deloitte': 'Deloitte©'}
    for word, substitute in words_to_replace.items():
        replacement_string = replacement_string.replace(word, substitute)
    return {
        'statusCode': 200,
        'body': replacement_string
    }
66dc2914dd04a2e265ed21542bd462b61344d040
707,575
import csv

def read_barcode_lineno_map(stream):
    """Build a map of barcodes to line number from a stream.

    Line numbers are one-based; the barcode is the first tab-separated
    field on each line.
    """
    reader = csv.reader(stream, delimiter="\t")
    return {row[0]: lineno for lineno, row in enumerate(reader, start=1)}
545a0d02dd76e774ba0de86431113ad9f36a098e
707,576
def search4vowels(pharse: str) -> set:
    """Return any vowels found in a supplied word."""
    # NOTE: the original docstring opened with four quotes (""""), leaving a
    # stray quote character inside the doc text; fixed here.  The parameter
    # name 'pharse' (sic) is kept for keyword-argument compatibility.
    vowels = set('aeiou')
    return vowels.intersection(set(pharse))
8a45c50828b6ba8d173572ac771eb8fe5ddc5a42
707,577
import os

def xdg_data_home():
    """Base directory where user specific data files should be stored."""
    # Fall back to the XDG default when the variable is unset or empty.
    configured = os.getenv('XDG_DATA_HOME')
    if not configured:
        configured = '$HOME/.local/share/'
    return os.path.expandvars(configured)
db4212def5e4760bbe1da762a74cf09a9ee40d78
707,578
def _extract_dialog_node_name(dialog_nodes): """ For each dialog_node (node_id) of type *standard*, check if *title exists*. If exists, use the title for the node_name. otherwise, use the dialog_node For all other cases, use the dialog_node dialog_node: (dialog_node_title, dialog_node_type) In the case of Login Issues, "title": "Login Issue", "dialog_node": "Login Issues", the record will be created as: "Login Issues": ("Login Issue", "standard") """ nodes_dict = {} nodes_type = {} for obj in dialog_nodes: if (obj['type']=='standard') and ('title' in obj): if (obj['title'] is not None): nodes_dict[obj['dialog_node']] = (obj['title'],obj['type']) else: nodes_dict[obj['dialog_node']] = (obj['dialog_node'],obj['type']) else: nodes_dict[obj['dialog_node']] = (obj['dialog_node'],obj['type']) return nodes_dict
23121efa486c2da16a54b2441bb1435eec5b8b49
707,579
import re def is_probably_beginning_of_sentence(line): """Return True if this line begins a new sentence.""" # Check heuristically for a parameter list. for token in ['@', '-', r'\*']: if re.search(r'\s' + token + r'\s', line): return True stripped_line = line.strip() is_beginning_of_sentence = re.match(r'[^\w"\'`\(\)]', stripped_line) is_pydoc_ref = re.match(r'^:\w+:', stripped_line) return is_beginning_of_sentence and not is_pydoc_ref
68a6a2151b4559f0b95e0ac82a8a16bd06d9d1ff
707,580
import os

def progress_enabled():
    """
    Checks if progress is enabled. To disable:
    export O4_PROGRESS=false
    """
    flag = os.environ.get('O4_PROGRESS', 'true')
    return flag == 'true'
f04b3375eb4150f23f377472eea659a8d03433ab
707,581
from typing import Any

def is_valid_dim(x: Any) -> bool:
    """determine if the argument will be valid dim when included in
    torch.Size.
    """
    if not isinstance(x, int):
        return False
    return x > 0
09b8dd41b20a835583cd051868f13756e8383342
707,582
import types

def is_iterator(obj):
    """
    Predicate that returns whether an object is an iterator.
    """
    # The original tested for a 'next' method, which is the *Python 2*
    # iterator protocol; under Python 3 iterators define '__next__', so
    # e.g. iter([]) was misclassified as not-an-iterator.
    if isinstance(obj, types.GeneratorType):
        return True
    return '__iter__' in dir(obj) and '__next__' in dir(obj)
db57a2a1f171a48cc43ba4c248387191780dfd04
707,583
def table_to_dict(table):
    """Convert an Astropy Table to a plain Python dict of lists.

    Numpy arrays are converted to lists, which also works for
    multi-dimensional array columns (represented as lists of lists).
    This is useful in the following situation::

        foo = Table.read('foo.fits')
        foo.to_pandas()   # fails if columns are multi-dimensional

    The alternative::

        foo = Table.read('foo.fits')
        bar, _ = table_to_dict(foo)
        df = pd.DataFrame(bar, columns=bar.keys())  # the desired result

    Returns
    -------
    tuple
        ``(total_data, multi_cols)`` where ``total_data`` maps each column
        name to its values as a Python list, and ``multi_cols`` is the list
        of column names whose data is two-dimensional.
    """
    total_data = {}
    multi_cols = []
    for i, _ in enumerate(table.columns):
        # This looks unusual, but it is the only way to iterate over columns.
        col = table.columns[i]
        # .tolist() converts the numpy column (any dimensionality) to
        # native Python values.
        data = table[col.name].tolist()
        total_data[col.name] = data
        # Track 2-D columns so callers can handle them specially.
        if len(col.shape) == 2:
            multi_cols.append(col.name)
    return total_data, multi_cols
8ad9206222101bbd4d40913e3b43c8ffee9dd6ad
707,584
def strip_quotes(string):
    """Remove a matching pair of quotes from the ends of a string.

    >>> strip_quotes('"fred"') == 'fred'
    True
    """
    if not string:
        return string
    # Strip only when the first and last characters are the same quote.
    has_matched_pair = string[0] == string[-1] and string[0] in ('"', "'")
    return string[1:-1] if has_matched_pair else string
7e10d37e5b5bb4569c88b4de17ffde31a4456e15
707,585
def application(environ, start_response):
    """Serve the button HTML (WSGI entry point)."""
    with open('wsgi/button.html') as f:
        response_body = f.read()
    # Encode first so Content-Length reflects the UTF-8 *byte* length;
    # the original used len() of the str, which differs for non-ASCII.
    body_bytes = response_body.encode('utf-8')
    status = '200 OK'
    response_headers = [
        ('Content-Type', 'text/html'),
        ('Content-Length', str(len(body_bytes))),
    ]
    start_response(status, response_headers)
    return [body_bytes]
97f1f793f234dbd3c29e9c4a791a224ba32c984b
707,586
def _fix_server_adress(raw_server): """ Prepend http:// there. """ if not raw_server.startswith("http://"): raw_server = "http://" + raw_server return raw_server
64171be5033930fd5ecb3cd275cc0d859b7e6ca0
707,587
def _parse_output_keys(val): """Parse expected output keys from string, handling records. """ out = {} for k in val.split(","): # record output if ":" in k: name, attrs = k.split(":") out[name] = attrs.split(";") else: out[k] = None return out
abd739026574b1a3fa87c42d2719d172e36a1c4a
707,588
def encode_board(board):
    """ Encode the 2D board list to a 64-bit integer """
    encoded = 0
    for row in board.board:
        for tile in row:
            # Each cell occupies 4 bits; empty cells contribute 0.
            encoded = (encoded << 4) + (tile.val if tile is not None else 0)
    return encoded
2c85964902dc3b2d097e30b71f11e7c17b80297a
707,589
def longest_common_substring(string1, string2):
    """Return the longest contiguous substring shared by both strings."""
    rows, cols = len(string1), len(string2)
    # table[i][j] = length of the common suffix of string1[:i], string2[:j].
    table = [[0] * (cols + 1) for _ in range(rows + 1)]
    best_len = 0
    best_end = 0  # index into string1 just past the best match
    for i in range(1, rows + 1):
        for j in range(1, cols + 1):
            if string1[i - 1] != string2[j - 1]:
                table[i][j] = 0
                continue
            table[i][j] = table[i - 1][j - 1] + 1
            if table[i][j] > best_len:
                best_len = table[i][j]
                best_end = i
    return string1[best_end - best_len: best_end]
f567c629f5bd02143f0ed6bbbdc11f0e59e5f4bd
707,590
def build_dataset_values(claim_object, data_value):
    """ Build results with different datasets.

    Parameters:
        claim_object (obj): Object to modify and add to rows.
        data_value (obj): result object

    Returns:
        Modified claim_object according to data_value["type"].
    """
    kind = data_value["type"]
    if kind == "globecoordinate":
        coords = data_value["value"]
        claim_object["str"] = str(coords["latitude"]) + "," + str(coords["longitude"])
    elif kind == "time":
        # "+YYYY-MM-DDT..." -> "YYYY-MM-DD"
        claim_object["date"] = data_value["value"]["time"].split("T")[0].split("+")[1]
    elif kind == "string":
        claim_object["str"] = data_value["value"]
    # Any other type leaves the claim object untouched.
    return claim_object
f3d267a4e9ac099f6d2313deffb2f45d35b90217
707,591
import os

def get_yolk_dir():
    """Return location we store config files and data."""
    home = os.path.expanduser('~')
    return os.path.abspath(os.path.join(home, '.yolk'))
788e44d2f95ce720d10154465198a3f86625453b
707,592
def _expectedValues(): """ These values are expected for well exposed spot data. The dictionary has a tuple for each wavelength. Note that for example focus is data set dependent and should be used only as an indicator of a possible value. keys: l600, l700, l800, l890 tuple = [radius, focus, widthx, widthy] """ out = dict(l600=(0.45, 0.40, 0.34, 0.32), l700=(0.47, 0.40, 0.32, 0.31), l800=(0.49, 0.41, 0.30, 0.30), l800l=(0.49, 0.41, 0.27, 0.27), l800m=(0.49, 0.41, 0.30, 0.30), l800h=(0.49, 0.41, 0.31, 0.31), l890=(0.54, 0.38, 0.29, 0.29)) return out
7ddd7031313ac5c90f022a6a60c81ad12b4d5dac
707,593
def deactivate_text(shell: dict, env_vars: dict) -> str:
    """Returns the formatted text to write to the deactivation script
    based on the passed dictionaries.
    """
    # Iterate the dict directly (no .keys()) and extend with a generator
    # instead of appending in a loop.
    lines = [shell["shebang"]]
    lines.extend(shell["deactivate"].format(name) for name in env_vars)
    return "\n".join(lines)
0a75134a55bf9cd8eceb311c48a5547ad373593d
707,595
from typing import get_origin

def is_dict(etype) -> bool:
    """ Determine whether etype is a Dict """
    if etype is dict:
        return True
    # Generic aliases such as Dict[str, int] report dict as their origin.
    return get_origin(etype) is dict
a65af54bf6b24c94906765c895c899b18bf5c1eb
707,596
import requests

def get_data(stock, start_date):
    """Fetch a maximum of the 100 most recent records for a given stock
    starting at the start_date.

    Args:
        stock (string): Stock Ticker
        start_date (int): UNIX date time
    """
    request_url = (
        "https://api.pushshift.io/reddit/search/comment/"
        f"?q={stock}&sort=asc&size=100&after={start_date}"
    )
    # Fetch and decode the JSON payload in one step.
    return requests.get(request_url).json()
aafdc913d80346e82a21767cdb7b5e40f2376857
707,597
def depart_people(state, goal):
    """Departs all passengers that can depart on this floor"""
    actions = []
    for passenger, floor in state.destin.items():
        # Only passengers the goal wants served are candidates.
        if passenger not in goal.served or not goal.served[passenger]:
            continue
        # Depart when the lift is at their floor, they are on board,
        # and they have not already been served.
        if state.lift_at == floor and state.boarded[passenger] and not state.served[passenger]:
            actions.append(('depart', passenger, state.lift_at))
    return actions
f3a18ad9a6f884a57d0be1d0e27b3dfeeb95d736
707,598
def inventory_update(arr1, arr2):
    """Add the inventory from arr2 to arr1.

    If an item exists in both arr1 and arr2 its quantities are summed;
    items present in only one input are carried over unchanged.  Items
    are ``[qty, name]`` pairs and the result is sorted by name.

    Unlike the original implementation — whose shallow copy mutated the
    nested item lists of one of the inputs — neither input list nor any
    of its items is modified; a new list is returned.

    Arguments:
        arr1: the destination inventory
        arr2: the inventory to add to the destination inventory

    Returns:
        a new combined inventory, sorted by item name
    """
    # Tally quantities by item name across both inventories.
    totals = {}
    for qty, name in arr1:
        totals[name] = totals.get(name, 0) + qty
    for qty, name in arr2:
        totals[name] = totals.get(name, 0) + qty
    # sorted() on the dict items orders by name, matching the original's
    # final sort on the item description.
    return [[qty, name] for name, qty in sorted(totals.items())]
febba1d2dac6c79fabf4e8aaad8c0fd482478b50
707,601
from typing import Tuple
from typing import List
import argparse
import sys

def _parse_cli_args() -> Tuple[str, List[str]]:
    """Parses CLI args to return device name and args for unittest runner."""
    parser = argparse.ArgumentParser(
        description="Runs a GDM + unittest reboot test on a device. All "
                    "arguments other than the device name are passed through to "
                    "the unittest runner.")
    parser.add_argument(
        "-d", "--device", required=True,
        help="GDM device name to run the test on. For example, 'device-1234'. "
             "The device must be shown as 'available' or 'connected' in the "
             "output of 'gdm devices'.")
    parsed, passthrough = parser.parse_known_args()
    # Re-prepend the program name so the remainder forms a valid argv for
    # the unittest runner.
    return parsed.device, [sys.argv[0]] + passthrough
07b2b8c8223f789fca2099f432afede7aee3ef78
707,602
def check_output(file_path: str) -> bool:
    """
    Check an output file, either from geomeTRIC or from Psi4, for a
    successful completion keyword.  Returns True if the calculation
    finished successfully, otherwise False.
    """
    success_markers = ("Converged! =D", "Psi4 exiting successfully")
    with open(file_path, "r") as handle:
        contents = handle.read()
    return any(marker in contents for marker in success_markers)
2f0dea67216aff945b1b0db74e0131022acc3019
707,603
def scsilun_to_int(lun):
    """
    There are two styles of LUN number: one whose decimal value is < 256,
    and the full 16-hex-digit form.  According to T10 SAM the full form
    should have its two 16-bit halves swapped before conversion to
    decimal: e.g. '40294018' -> '40184029' -> 1075331113, while '0c'
    (decimal < 256) converts directly to 12.

    https://github.com/kubernetes/kubernetes/issues/45024
    """
    value = int(lun, 16)
    if value < 256:
        return value
    # Swap the two 16-bit halves of the low 32 bits.
    high_half = (value >> 16) & 0xFFFF
    low_half = value & 0xFFFF
    return high_half | (low_half << 16)
2022938ccb5abbc89d5fb6f5f109d629e980c0ba
707,604
def compute_recall(true_positives, false_negatives):
    """Compute recall = TP / (TP + FN).

    Returns 0.0 when there are no actual positives at all, instead of
    raising ZeroDivisionError.  (The original also carried a doctest
    expecting 0.901815, which did not match the full float repr and
    would have failed under doctest; it has been removed.)

    >>> compute_recall(0, 10)
    0.0
    """
    total_positives = true_positives + false_negatives
    if total_positives == 0:
        # No positives to recall: define recall as 0.0.
        return 0.0
    return true_positives / total_positives
876bee73150d811e6b7c1a5de8d8e4349105c59b
707,605
def get_tipo_aqnext(tipo) -> int:
    """Solve the type of data used by DJANGO."""
    # Django type code -> source type names that map to it.
    code_to_names = {
        16: ("int", "uint", "serial"),
        3: ("string", "stringlist", "pixmap", "counter"),
        19: ("double",),
        18: ("bool", "unlock"),
        26: ("date",),
        27: ("time",),
    }
    for code, names in code_to_names.items():
        if tipo in names:
            return code
    return 3  # default: unknown types behave like strings
d5a066b98aa56785c4953a7ec8d7052e572e5630
707,606
def _is_empty(str_: str) -> bool: """文字列が空か 文字列が空であるかを判別する Args: str_ (str): 文字列 Returns: bool: 文字列が空のときはTrue, 空でないときはFalseを返す. """ if str_: return False return True
f0eff540767028a80a3042e2d5bc6951ad28fe24
707,607
import random def energy_generate_random_range_dim2(filepath,dim_1_low,dim_1_high,dim_2_low,dim_2_high,num=500): """ 6, 8 and 10 """ queryPool=[] query=[] for _ in range(num): left1 = random.randint(dim_1_low, dim_1_high) right1 = random.randint(left1, dim_1_high) query.append((left1, right1)) left2 = random.randint(dim_2_low, dim_2_high) # right2 = random.randint(left2, dim_2_high) query.append((left2, left2)) queryPool.append(query[:]) query.clear() with open(filepath,"w+") as f: f.write(str(queryPool)) return queryPool
cdcafba427dbbab9b9e318f58f54a3a3c834bbd3
707,608
def find_left(char_locs, pt):
    """Finds the 'left' coord of a word that a character belongs to.
    Similar to find_top()
    """
    if pt not in char_locs:
        return []
    x, y = pt
    # Walk left while the neighbouring cell also holds a character.
    while (x - 1, y) in char_locs:
        x -= 1
    return [x, y]
8e924f301203bcad2936d4cf4d82c6e21cbebb16
707,609
def int_to_charset(val, charset):
    """ Turn a non-negative integer into a string. """
    if val < 0:
        raise ValueError('"val" must be a non-negative integer.')
    if val == 0:
        return charset[0]
    base = len(charset)
    digits = []
    # Collect least-significant digits first, then reverse.
    while val > 0:
        val, remainder = divmod(val, base)
        digits.append(charset[remainder])
    return "".join(reversed(digits))
ec30e014aaf42b6cc3904f13776b4226b0b75a5b
707,611
def _insert_text_func(s, readline): """Creates a function to insert text via readline.""" def inserter(): readline.insert_text(s) readline.redisplay() return inserter
06532be051cb69b92fa79ef339edb733b8f31c15
707,612
def tb_filename(tb):
    """Helper to get filename from traceback"""
    frame = tb.tb_frame
    return frame.f_code.co_filename
75ac527b928d605f1dfc2b5034da6ab7e193fb82
707,613
def svo_filter_url(telescope, photofilter, zeropoint='AB'):
    """ Returns the URL where the filter transmission curve is hiding.

    Requires arguments:
        telescope:   SVO-like name of Telescope/Source of photometric system.
        photofilter: SVO-like name of photometric filter.
    Optional:
        zeropoint: String. Either 'AB', 'Vega', or 'ST'.
    Output:
        url: URL of the relevant file.
    """
    base = 'http://svo2.cab.inta-csic.es/theory/fps3/fps.php?'
    phot_cal_id = telescope + '/' + photofilter + '/' + zeropoint
    return base + 'PhotCalID=' + phot_cal_id
e3cbe6a3192fcc890fb15df8fc3c02620a7c69fb
707,614
import torch

def coord_sampler(img, coords):
    """
    Sample img batch at integer (x,y) coords
    img: [B,C,H,W], coords: [B,2,N]
    returns: [B,C,N] points
    """
    batch_size = img.shape[0]
    num_points = coords.shape[2]
    # [B,N] index tensor whose entry (b, n) is b: pairs every sampled
    # point with its own batch element under advanced indexing.
    batch_idx = torch.meshgrid(torch.arange(batch_size),
                               torch.arange(num_points))[0]
    xs = coords[:, 0, :]
    ys = coords[:, 1, :]
    sampled = img[batch_idx, :, ys, xs]   # [B, N, C]
    return sampled.permute(0, 2, 1)       # [B, C, N]
d4a1ac6125d11381933d59190074f33bd9a7e774
707,615
import os

def directory(name, *args):
    """
    Returns the directory with the specified name, as an absolute path.

    :param name: The name of the directory (see the mapping below), or
        'defaults' to resolve the following argument against the default
        resource pack instead of core.
    :args Elements that will be appended to the named directory.
    :return: The full path of the named directory.
    """
    here = os.path.realpath(__file__)
    top = os.path.dirname(os.path.dirname(os.path.dirname(here)))
    dirs = {
        'bin': 'bin',
        'config': 'bin',
        'top': '',
        'textures': 'core/assets/minecraft/textures',
        'blockstates': 'core/assets/minecraft/blockstates',
        'models': "core/assets/minecraft/models",
        'minecraft': "core/assets/minecraft",
        'site': 'site',
        'core': 'core',
    }
    is_defaults = name == 'defaults'
    if is_defaults:
        # 'defaults' shifts the real name into the first positional arg.
        name, args = args[0], args[1:]
    rel = dirs[name]
    if is_defaults:
        rel = rel.replace('core/', 'default_resourcepack/')
    # Split every remaining arg on '/' so callers may pass sub-paths.
    parts = []
    for arg in args:
        parts.extend(arg.split('/'))
    return os.path.join(top, rel, *parts)
61a8b4014f11fa449d455b941f032b415c393186
707,616
def setup_output_vcf(outname, t_vcf):
    """
    Create an output vcf.Writer given the input vcf file as a template.
    Writes the full header and adds INFO fields sizeCat and MEF.

    Returns a file handle and a dict mapping {individual_id: column in vcf}.
    """
    out = open(outname, 'w')
    line = t_vcf.readline()
    # Copy header lines verbatim until the #CHROM column line.
    while not line.startswith("#CHROM"):
        out.write(line)
        line = t_vcf.readline()
    # Inject the two new INFO definitions just before the column header.
    out.write('##INFO=<ID=sizeCat,Number=A,Type=String,Description="Size category of variant">\n')
    out.write('##INFO=<ID=MEF,Number=.,Type=String,Description="Names of families that contain mendelian error">\n')
    out.write(line)
    # Sample columns start at index 9 of the #CHROM line.
    samp_columns = {
        iid: offset + 9
        for offset, iid in enumerate(line.strip().split('\t')[9:])
    }
    return out, samp_columns
82870c9c8d46dbe3161c434a87fac9108ed644b2
707,617
import os
import json

def load_json_file():
    """Load rps101_data.json from the directory containing this module."""
    base_dir = os.path.dirname(os.path.abspath(__file__))
    data_path = os.path.join(base_dir, "rps101_data.json")
    with open(data_path) as handle:
        return json.load(handle)
9bd4b77167af6fc84d05b1766470a165d3ee6bd1
707,618
from typing import Dict
from typing import Any

def format_dict(body: Dict[Any, Any]) -> str:
    """
    Formats a dictionary into a multi-line bulleted string of key-value
    pairs.
    """
    bullets = []
    for key, val in body.items():
        # Objects exposing a .value attribute (e.g. Enum members) are
        # rendered via that attribute.
        rendered = getattr(val, 'value', val)
        bullets.append(f"  - {key} = {rendered}")
    return "\n".join(bullets)
b3f66d086284772e6783b8281f4d46c3dd6c237d
707,619
import time

def random_sleep(n):
    """io"""
    # Despite the name, the pause is a fixed two seconds; the argument is
    # returned unchanged.
    time.sleep(2)
    return n
f626a2e64a266084f33a78dd9be4e4528ff88934
707,620
def rectify(link: str, parent: str, path: str):
    """Check a link and verify whether it should be captured.

    External URLs, fragments and parent-relative links are rejected
    (the current *path* is returned instead); everything else is
    normalised to a properly formatted site-absolute link.

    Args:
        **link (str)**: the link to rectify.
        **parent (str)**: the complete url of the page from which the
            link was found.
        **path (str)**: the path (after the domain) of the page from
            which the link was found.

    Returns:
        **str**: the properly formatted link.
    """
    # Fragments, scheme/port colons and '../' traversals are not captured.
    if link.startswith("#") or ":" in link or "../" in link:
        return path
    if link.startswith("/"):
        return link
    # Relative link: resolve against the parent page's directory.
    if parent.endswith("/"):
        base = path if path.endswith("/") else path + "/"
    else:
        base = "/".join(path.split("/")[:-1]) + "/"
    return base + link
6ca5771fcbbb35fe6d99bab65082d447299bb93a
707,621
def indentsize(line):
    """Return the indent size, in spaces, at the start of a line of text."""
    expanded = line.expandtabs()
    return len(expanded) - len(expanded.lstrip())
c1f307adfeb2c1ec51c5e926a0b87dd3841e1aff
707,622
import struct

def guid2bytes(s):
    """Converts a GUID to the serialized bytes representation"""
    assert isinstance(s, str)
    assert len(s) == 36
    # First three groups are little-endian; the rest are big-endian.
    time_fields = struct.pack("<IHH", int(s[:8], 16), int(s[9:13], 16),
                              int(s[14:18], 16))
    clock_seq = struct.pack(">H", int(s[19:23], 16))
    # The 48-bit node field is packed as a 64-bit int, top 2 bytes dropped.
    node = struct.pack(">Q", int(s[24:], 16))[2:]
    return time_fields + clock_seq + node
f298497173f9011392b671267cb47f081d25a9da
707,624
def truncate_words(text, max_chars, break_words=False, padding=0):
    """Truncate a string to at most ``max_chars - padding`` characters.

    With ``break_words=True`` the text is cut mid-word; otherwise whole
    words are kept while they fit within the budget.
    """
    if break_words:
        # Slice directly to the character budget.  The original computed
        # text[:-abs(max_chars - len(text)) - padding], which returned ''
        # whenever the text was already shorter than max_chars.
        limit = max(0, max_chars - padding)
        return text[:limit]
    words = []
    for word in text.split():
        # Running length including separating spaces and padding.
        length = sum(map(len, words)) + len(word) + len(words) - 1 + padding
        if length >= max_chars:
            break
        words.append(word)
    return ' '.join(words)
b239822fd1cef6c2e3f7425a0ad5cbc32b8b1325
707,625
def grad(values: list[int], /) -> list[int]:
    """Return the discrete first difference of *values*.

    Each element is ``values[i+1] - values[i]``, so the result is one
    element shorter than the input.
    """
    consecutive = zip(values, values[1:])
    return [after - before for before, after in consecutive]
a1df1dffb27028dc408b00ec8ac26b6f68d9c923
707,626
from typing import Union

def subtract(
        num1: Union[int, float], num2: Union[int, float], *args
) -> Union[int, float]:
    """Subtract *num2* and any further numbers from *num1*."""
    result: Union[int, float] = num1 - num2
    for extra in args:
        result -= extra
    return result
55db772fdcdc9aa24fd61069a3adcd6bc7abe468
707,627
def _join_ljust(words, width=9): """join list of str to fixed width, left just""" return ' '.join(map(lambda s: s.ljust(width), words)).strip()
6096f5be960fb0ae2fe942c9b55924802094db16
707,628
def construct_dictionaries(color, marker, size, scatter_ecolor='k',
                           alpha=1.0, fill_scatter=False,
                           elinewidth=1, capsize=0):
    """
    Build the keyword dictionaries used by the plotting helpers.

    Example usage:
        halo_kws = construct_dictionaries('k','o', 20, alpha=.3)
        pltabund.plot_XFe_XFe(ax, 'K', 'Mg', roed, plot_xlimit=True,
                              plot_ylimit=True, label="Halo", **halo_kws)
    """
    error_kws = {'ecolor': color, 'elinewidth': elinewidth, 'capsize': capsize}
    arrow_kws = {'color': color, 'head_length': 0.15, 'head_width': 0.05}
    if fill_scatter:
        scatter_kws = {'marker': marker, 's': size, 'facecolor': color,
                       'alpha': alpha}
    else:
        # Unfilled markers: outline only, drawn in the main color.
        scatter_kws = {'marker': marker, 's': size, 'facecolor': 'none',
                       'linewidths': 1, 'edgecolors': color, 'alpha': alpha}
    upper_limit_kws = {'arrow_length': 0.25,
                       'scatter_kws': scatter_kws,
                       'arrow_kws': arrow_kws}
    return {'color': color, 'edgecolors': scatter_ecolor, 'marker': marker,
            's': size, 'alpha': alpha,
            'e_kws': error_kws, 'ulkws': upper_limit_kws}
6741dd1ffacc3a10953f86ccdaf53d1d3504a77c
707,629
def arrows(m) -> str:
    """One or more arrows separate by a space"""
    # Delegates to the capture object's arrow_list attribute.
    # NOTE(review): the annotation says str, but 'arrow_list' may be a
    # list depending on the capture framework — confirm against callers.
    return m.arrow_list
3fb43d9d753f667148bb9bb18eb026f7385d6264
707,630
def hex2int(hex_str):
    """ Convert 2 hex characters (e.g. "23") to int (35)

    :param hex_str: hex character string
    :return: int integer
    """
    return int(hex_str, base=16)
0640cffd6f7558f4dfd1bc74e20510e7d2051ca3
707,631
def mock_sync_cavatica_account(mocker):
    """ Mocks out sync Cavatica account functions """
    patched = mocker.patch(
        "creator.projects.cavatica.sync_cavatica_account"
    )
    # The real function returns three lists; mimic an empty result.
    patched.return_value = [], [], []
    return patched
27a0a8abee2c025fe17ba4fa4a939bcf04fc9c63
707,632
def check_illegal(s):
    """
    :param s: (String) user input
    :return: (Bool) check user input is illegal or not
    """
    # Flag the input when any element is longer than one character.
    has_long_element = False
    for element in s:
        if len(element) > 1:
            has_long_element = True
    if not has_long_element:
        return True
    print("Illegal input")
    return False
6c028f03ae6f317e7fea020e2da1f35b93d3bcd7
707,633
from typing import List

def kadane_algorithm(sequence: List[int]):
    """Greedy algorithm to track max sum so far - O(n) time and O(1) space"""
    if not sequence:
        return 0
    best = running = sequence[0]
    for value in sequence[1:]:
        # Either extend the current run or restart at this element.
        running = max(value, running + value)
        best = max(best, running)
    return best
f6096309055e52538f9a5f9b5b769b269688b068
707,634
def build_url(station, d1, d2):
    """
    Return the URL to fetch the response record for USArray MT station
    identifier *station* for the time range *d1* to *d2*.
    """
    base = 'http://service.iris.edu/irisws/resp/1/query'
    return ('{}?net=EM&sta={}&loc=--&cha=*'
            '&starttime={:%Y-%m-%dT%H:%M:%S}'
            '&endtime={:%Y-%m-%dT%H:%M:%S}').format(base, station, d1, d2)
221d5f7a321d0e9337dbbe75e419298bcd3ab5c0
707,636
import csv

def load_LAC_geocodes_info(path_to_csv):
    """Import local area unit district codes.

    Reads the csv file and creates a dictionary keyed by 'geo_code'
    (first column); each value is a dict mapping the remaining column
    headings to their cell values (float where possible, str otherwise).

    Note
    -----
    - no LAD without population must be included
    """
    with open(path_to_csv, 'r') as csvfile:
        rows = csv.reader(csvfile, delimiter=',')
        headings = next(rows)  # header row supplies the per-column keys
        data = {}
        for row in rows:
            values_line = {}
            for idx, value in enumerate(row[1:], 1):
                try:
                    values_line[headings[idx]] = float(value)
                except ValueError:
                    # Narrowed from a bare `except:` — only non-numeric
                    # cells fall back to strings; other errors propagate.
                    values_line[headings[idx]] = str(value)
            # Add entry keyed by geo_code.
            data[row[0]] = values_line
    return data
bd97d888ddb58469b111b41a7ea0a5a9e0be88fd
707,637
def assign_id_priority(handle):
    """
    Assign priority according to agent id (lower id means higher priority).

    :param handle: the agent id
    :return: the priority (the id itself)
    """
    priority = handle
    return priority
8e1b22748d263fc12749790e601a4197b5d6370e
707,639
def read_float_with_comma(num):
    """Parse a float string that uses a comma as decimal separator.

    Can't use locale, as the page being parsed may not be in the same
    locale as the running Python environment.

    Args:
        num (str): the float string to parse

    Returns:
        float: the parsed float
    """
    normalized = num.replace(",", ".")
    return float(normalized)
ff2e65ef35ba1fded06d8abb5ed252a6bffdceaa
707,641
def remote_repr(arg):
    """Return the `repr()` rendering of the supplied `arg`."""
    # NOTE(review): despite the docstring, the body returns *arg*
    # unchanged — presumably a remote-execution layer applies repr() on
    # the far side of the call; confirm against the calling framework.
    return arg
d284a0f3a6d08ceae198aacf68554da9cc264b1b
707,642
import importlib
from typing import Type

def get_class_for_name(name: str, module_name: str = __name__) -> Type:
    """Gets a class from a module based on its name.

    Tread carefully with this.  Personally I feel like it's only safe to
    use with dataclasses with known interfaces.

    Parameters
    ----------
    name : str
        Name of the class we're trying to get the class object for.
    module_name : str, optional
        Which module to get a class from, by default __name__.

    Returns
    -------
    Type
        The resolved class object.
    """
    module = importlib.import_module(module_name)
    return getattr(module, name)
73058c179187aac277221b33f4e1e65934a49a6a
707,643
def get_cache_file_static():
    """Path of the hand-maintained VCR cassette for limited-access servers.

    Used for requests that must be updated by hand, i.e. when access to
    the real server is limited.  To update this server recording:
    1) delete the existing recording
    2) re-run all tests (with API keys for telescopes in place)
    3) replace any secret information (such as API keys) with dummy values
    4) commit recording
    """
    static_recording = "data/tests/test_server_recordings_static.yaml"
    return static_recording
44649f243322230a1a750e038d66cef725fbbc9b
707,644
def hamming_distance(lhs, rhs):
    """Count the positions at which the two sequences differ.

    Because ``zip`` truncates, only the overlapping prefix of
    unequal-length inputs is compared.

    Usage
    >>> hamming_distance('abc', 'axc')
    1

    :param lhs: first sequence
    :param rhs: second sequence
    :return: number of mismatching positions (int)
    """
    # sum over a generator: no throwaway list of pairs is materialised.
    return sum(1 for x, y in zip(lhs, rhs) if x != y)
8bf24f47c829169cfaa89af755b7722eb26155d9
707,645
def get_uleb128(byte_str):
    """Decode one unsigned LEB128 number from the front of a byte string.

    :param byte_str: byte string beginning with a ULEB128-encoded value
    :return: (remaining byte string, decoded integer)
    """
    groups = []
    pos = 0
    # A set continuation bit (0x80) means more 7-bit groups follow.
    while byte_str[pos] >= 0x80:
        groups.append(byte_str[pos] & 0x7F)
        pos += 1
    groups.append(byte_str[pos])
    pos += 1

    # Groups arrive least-significant first; shift each into place.
    integer = 0
    for shift, part in enumerate(groups):
        integer |= part << (7 * shift)
    return byte_str[pos:], integer
1e9c02dc7c191686e7d7a19d8b8c82f95044c845
707,646
def check_shots_vs_bounds(shot_dict, mosaic_bounds, max_out_of_bounds=3):
    """Checks whether all but *max_out_of_bounds* shots are within mosaic bounds.

    Parameters
    ----------
    shot_dict : dict
        {shot: [x_coords, y_coords], ...} coordinates of all shots in a
        .scancsv file (see czd_utils.scancsv_to_dict()).
    mosaic_bounds : list
        [min_x, max_x, min_y, max_y] bounds of a .Align file
        (see get_mos_bounds()).
    max_out_of_bounds : int, optional
        Max number of out-of-bounds shots allowed for a 'match' between
        mosaic and .scancsv.  The default is 3.

    Returns
    -------
    Boolean
        True when at most *max_out_of_bounds* shots fall outside bounds.
    """
    min_x, max_x, min_y, max_y = mosaic_bounds
    outside = sum(
        1 for x, y in shot_dict.values()
        if not (min_x <= x <= max_x and min_y <= y <= max_y)
    )
    return outside <= max_out_of_bounds
de36f7f2a32a2a7120236d0bd5e43520de0c7ea5
707,647
import dill


def deserializer(serialized):
    """Example deserializer function with extra sanity checking.

    :param serialized: Serialized byte string.
    :type serialized: bytes
    :return: Deserialized job object.
    :rtype: kq.Job
    """
    assert isinstance(serialized, bytes), "Expecting a bytes"
    job = dill.loads(serialized)
    return job
8895a1c40eaf5e30dd10015b87a0b94da0edf9ac
707,648
def _read_byte(stream): """Read byte from stream""" read_byte = stream.read(1) if not read_byte: raise Exception('No more bytes!') return ord(read_byte)
767766ef0d7a52c41b7686f994a503bc8cc7fe8d
707,649
import os


def populate_labels(model_name: str, paths: dict) -> list:
    """Report the full list of object labels for the chosen detection model.

    Args:
        model_name: name of the model to use
        paths: dictionary of paths from the yml file; must contain
            'local_detection_model'

    Returns:
        labels (list(str)): one label string per line of the model's
        ``coco.names`` file
    """
    model_file_path = paths['local_detection_model']
    labels_file_path = os.path.join(model_file_path, model_name, 'coco.names')
    # Context manager guarantees the handle is closed (the original leaked
    # the open file object).
    with open(labels_file_path, 'r') as f:
        labels = [line.strip() for line in f]
    return labels
e225afc71567c1d3fac07aff9f76d3333dba2cf2
707,650
def get_labels_from_sample(sample):
    """Build interstice labels for a sample of Chinese words.

    A sample of N grouped characters yields at most N-1 labels.

    Parameters
    ----------
    sample : list of words (N characters in total)

    Returns
    -------
    list of N-1 values in {0, 1}: 0 marks an interstice inside a word,
    1 marks a word boundary.
    """
    labels = []
    for word in sample:
        # Within a word, every interstice gets a 0 ...
        labels.extend([0] * (len(word) - 1))
        # ... and the end of the word gets a 1.
        labels.append(1)
    # The trailing value does not correspond to an interstice; drop it.
    return labels[:-1]
4b21b878d1ae23b08569bda1f3c3b91e7a6c48b9
707,651
import re
import math


def number_to_block(number, block_number=0):
    """Normalize an address number to its block number (as a string).

    Trailing "xx"/"XX" characters count as zeros; values that still do
    not parse as an integer are returned (after the xx substitution).

    >>> number_to_block(1359)
    '1300'
    >>> number_to_block('3xx')
    '300'
    >>> number_to_block('foo')
    'foo'
    >>> number_to_block(10)
    '0'

    block_number lets you customize the "XX" of "3XX block".

    >>> number_to_block(234, 99)
    '299'
    """
    candidate = re.sub('(?i)xx', '00', str(number))
    try:
        parsed = int(candidate)
    except (TypeError, ValueError):
        return candidate
    # Floor to the hundred below, then add the customizable suffix.
    return str(int(math.floor(parsed / 100.0)) * 100 + block_number)
1504d79469dccc06e867fbf5a80507566efb5019
707,652
def add(vec_1, vec_2):
    """Component-wise addition of two 3-vectors.

    This is a good place to play around with different collection types
    (list, tuple, set...).

    :param vec_1: a subscriptable collection of length 3
    :param vec_2: a subscriptable collection of length 3
    :return: a list of 3 floats, the component-wise sum
    """
    # Only the first three components are used, matching the contract.
    return [float(vec_1[i]) + float(vec_2[i]) for i in range(3)]
4a17a82422cef472decb37c376e8bf5259ade60a
707,653
def ms_to_samples(ms, sampling_rate):
    """Convert a duration in milliseconds into a sample count.

    Arguments:
        ms (float): Duration in ms.
        sampling_rate (int): Sampling rate of the signal.

    Returns:
        int: Duration in samples (fraction truncated).
    """
    seconds = ms / 1000
    return int(seconds * sampling_rate)
a2bf63ad8cca580ae3307c33daa82bb1382d742c
707,654
def flatten(L):
    """Flatten an arbitrarily nested list recursively.

    np.array.flatten does not handle irregular arrays and
    itertools.chain.from_iterable cannot handle arbitrarily nested lists.

    :param L: a (possibly nested) list to flatten
    :return: the flattened list
    """
    if L == []:
        return L
    head, tail = L[0], L[1:]
    if isinstance(head, list):
        # Flatten the nested head before the rest.
        return flatten(head) + flatten(tail)
    return [head] + flatten(tail)
c554a01a8308341d1c9620edc0783689e75fb526
707,655
def chi2(observed, expected):
    """Return the chi2-style sum of the provided observed/expected values.

    NOTE(review): each term is divided by ``_e ** 2`` rather than the
    textbook ``_e``; this scaling is preserved as-is since callers rely
    on it.  Returns 0.0 immediately if any expected value is 0.

    :param observed: list of floats.
    :param expected: list of floats.
    :return: chi2 (float).
    """
    if 0 in expected:
        return 0.0
    total = 0
    for obs, exp in zip(observed, expected):
        total += (obs - exp) ** 2 / exp ** 2
    return total
6050e98a823671de4a518d584a6e39bc519fa610
707,656
def extract_columns(data):
    """EXTRACTS COLUMNS TO USE IN `DictWriter()`.

    :param data: non-empty sequence of row dicts; the first dict's keys
        define the columns (insertion order preserved)
    :return: list of column-name keys
    """
    # list() over a dict yields its keys in order -- no manual loop needed.
    return list(data[0])
6df143107612d311ab3c8870b9eccd3528ac3802
707,657
import ast
import numpy


def interp(specStr, t):
    """Return the current value of t using linear interpolation.

    <specStr> is a string containing a list of pairs,
    e.g. '[[0,20],[30,65],[60,50],[90,75]]'.  The first element of each
    pair is DAYS.  The second is a NUMBER.  <t> is time in seconds.
    """
    pairs = ast.literal_eval(specStr)
    days = [pair[0] for pair in pairs]
    values = [pair[1] for pair in pairs]
    seconds_per_day = 60 * 60 * 24.0
    return numpy.interp(t / seconds_per_day, days, values)
bc60affe122f2d17044e01a01509231e71eda47d
707,658
def apply_cst(im, cst):
    """Apply a 3x3 color space transform (CST) matrix to an image.

    Args:
        im: input ndarray image, height x width x 3 channels.
        cst: a 3x3 CST matrix.

    Returns:
        transformed image (a new array; the input is left untouched).
    """
    # Write into a copy: the original aliased ``result = im``, so channel 0
    # was overwritten before channels 1 and 2 read it -- corrupting the
    # output and mutating the caller's array in place.
    result = im.copy()
    for c in range(3):
        result[:, :, c] = (cst[c, 0] * im[:, :, 0] +
                           cst[c, 1] * im[:, :, 1] +
                           cst[c, 2] * im[:, :, 2])
    return result
7c63d07413bad5fcebf2dfe5f83f205d16280957
707,659
def feedback(olsys, H=1):
    """Calculate the closed-loop transfer function

                 olsys
    cltf = --------------
             1 + H*olsys

    where olsys is the transfer function of the open loop system (Gc*Gp)
    and H is the transfer function in the feedback loop (H=1 for unity
    feedback).
    """
    denominator = 1.0 + H * olsys
    return olsys / denominator
ca78d05196068746a225038c0f401faad24c5f65
707,660
def comment(strng, indent=''):
    """Return the input string with every line commented out."""
    prefix = indent + '# '
    # splitlines(True) keeps each line's newline, so joining preserves
    # the original line structure.
    return ''.join(prefix + line for line in strng.splitlines(True))
42386b7ed8de9127d7224481a5f5315d39b6ae97
707,661
def filter_nans(data, threshold=3, threshold_type="data"):
    """Filter NaN values out of a list of numbers.

    Arguments:
        data           -> list of data points (assumed numeric).
        threshold      -> integer minimum-value requirement.
        threshold_type -> how the threshold applies: relative to the
                          remaining data ("data"/"on_data"/"on data") or
                          to the NaN count ("nan"/"on_nan"/"on nan").

    Returns:
        The filtered list, or an empty list when the threshold
        requirement is not met.
    """
    data_keys = ["data", "on_data", "on data"]
    nan_keys = ["nan", "on_nan", "on nan"]
    kind = threshold_type.lower()
    # Same validation as before: thresholding is on data or on NaNs.
    assert kind in data_keys + nan_keys, "Threshold is either relative to NaN or data."
    assert type(data) == list, "The data should be in a list"

    # NaN is the only value that does not equal itself.
    kept = [value for value in data if value == value]

    if kind in data_keys:
        # Keep data only if at least <threshold> points survived.
        return kept if len(kept) >= threshold else []
    # Keep data only if no more than <threshold> NaNs were removed.
    return kept if len(data) - len(kept) <= threshold else []
fe84ae2d638102e05db68f0c0062ee036be1a63b
707,662
def _traverse_tree_and_group_all_objects_by_oclass(root_obj, result=None): """Traverses the tree once and groups all objects by oclass :param root_obj: The root object where to start the traversion :type root_obj: CUDS :param result: The current results of the recursion, defaults to None :type result: dict, optional :return: All CUDS objects in the tree, grouped by oclass. :rtype: dict """ if result is None: result = {str(root_obj.oclass): [root_obj]} for neighbour in root_obj.iter(): if neighbour.oclass not in result.keys(): result[str(neighbour.oclass)] = [neighbour] else: result[str(neighbour.oclass)].append(neighbour) _traverse_tree_and_group_all_objects_by_oclass(neighbour, result) return result
3ae139313ea7b5e92f0d9231a4e64efc87acc5ac
707,663
def check_measurement(m_info, filters):
    """
    Determine whether a given measurement should be included based on the
    filters.

    Inputs:
        m_info - A dictionary containing the configuration parameters for
                 an individual measurement.
        filters - A dictionary containing a set of configuration parameter
                  values that should be included.

    Output:
        include - Boolean indicating whether to include the measurement.
    """
    # .items(): the original used .iteritems(), which is Python 2 only
    # and raises AttributeError on Python 3.
    for filter_field, filter_values in filters.items():
        try:
            iter(filter_values)
        except TypeError:
            # Scalar filter value: wrap it so membership works uniformly.
            # NOTE(review): strings are iterable, so a string filter
            # matches character-wise -- preserved from the original;
            # confirm that is intended.
            filter_values = [filter_values]
        if m_info[filter_field] not in filter_values:
            return False
    return True
374be08c315a63d09faadc9c963a49a89b04b3ed
707,664
def embargo(cand_times, test_times, embargo_table):
    """
    "Embargo" observations from the training set.

    Args:
        cand_times(Series): times of candidates to be the "embargoed set";
            index: t0 (start time), value: t1 (end time)
        test_times(Series): times of the test set;
            index: t0 (start time), value: t1 (end time)
        embargo_table(Series): embargo times table returned by
            get_embargo_table()

    Returns:
        embargoed_times(Series): times of the embargoed training set;
            index: t0 (start time), value: t1 (end time)
    """
    test_start_first = test_times.index[0]
    test_start_last = test_times.index[-1]
    # End time of the embargo window following the final test period.
    embargo_end = embargo_table[test_start_last]
    dropped_idx = cand_times.loc[test_start_first:embargo_end].index
    return cand_times.drop(dropped_idx)
6fb97816c32fc73661905af27613bef0c6ac0726
707,665
import os
import pandas


def handle_uploaded_file(file, filename):
    """Save an uploaded csv (scraped from online.edu.ru) and load it.

    The upload is written chunk-by-chunk to ``upload/<filename>``, read
    with pandas, rows lacking 'Направления подготовки' are dropped, and
    the 'Unnamed: 0' index column is removed.
    """
    upload_dir = 'upload/'
    if not os.path.exists(upload_dir):
        os.mkdir(upload_dir)

    path = upload_dir + filename
    with open(path, 'wb+') as destination:
        for chunk in file.chunks():
            destination.write(chunk)

    frame = pandas.read_csv(path, sep=',', encoding='utf-8')
    frame.dropna(subset=['Направления подготовки'], inplace=True)
    frame = frame.drop(['Unnamed: 0'], axis=1)
    return frame
2b24081bf7b4d42c60ff17f500c7da0d81e11ceb
707,666
def init_group_prams(net):
    """Initialize group_prams: split trainable params by weight-decay use.

    Params whose names contain 'beta', 'gamma' or 'bias' get no weight
    decay; every other param decays at 0.0001.
    """
    no_decay_markers = ('beta', 'gamma', 'bias')
    decayed_params = []
    no_decayed_params = []
    for param in net.trainable_params():
        if any(marker in param.name for marker in no_decay_markers):
            no_decayed_params.append(param)
        else:
            decayed_params.append(param)
    return [{'params': decayed_params, 'weight_decay': 0.0001},
            {'params': no_decayed_params},
            {'order_params': net.trainable_params()}]
be078603c4ae42163f66668dcc16a0a77d899805
707,667
def _normalize_sql(sql, maxlen=150): """Collapse whitespace and middle-truncate if needed.""" out = ' '.join(sql.split()) if len(out) > maxlen: i = int(maxlen / 2 - 4) out = (out[0:i] + ' . . . ' + out[-i:None]) return out
f85efb0c367b448d2e363d9c1f8bf62a2bdb600e
707,668