Dataset schema: content — string (35 to 416k characters); sha1 — string (40 characters); id — int64 (0 to 710k). Each record below is a content entry followed by its sha1 and id.
def check_in_image(paste_image_location, paste_image_size, canvas_image_size): """Checks whether the location for the pasted image is within the canvas. Args: paste_image_location: a namedtuple of utils.XY, with 'x' and 'y' coordinates of the center of the image we want to paste. paste_image_size: a namedtuple of utils.XY, with 'x' and 'y' coordinates corresponding to the size of the image we are pasting. canvas_image_size: the size of the canvas that we are pasting the image to. Returns: True if the pasted image would lie within the canvas, False otherwise. """ offset_x = int(paste_image_size.x / 2) + 1 offset_y = int(paste_image_size.y / 2) + 1 if (paste_image_location.x + offset_x > canvas_image_size or paste_image_location.x - offset_x < 1 or paste_image_location.y + offset_y > canvas_image_size or paste_image_location.y - offset_y < 1): return False return True
173ff3ca7961bff34237512990fb2f103dd7ddc9
1,901
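A brief usage sketch for check_in_image above; it assumes a plain namedtuple as a stand-in for utils.XY, and the values are illustrative only.
from collections import namedtuple
XY = namedtuple('XY', ['x', 'y'])  # hypothetical stand-in for utils.XY
print(check_in_image(XY(64, 64), XY(100, 100), 128))  # True: a 100x100 paste centred at (64, 64) fits a 128-pixel canvas
print(check_in_image(XY(10, 10), XY(100, 100), 128))  # False: the same paste centred near the corner spills past the edge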
import os def templates(): """ .. versionadded:: 2015.5.0 List the available LXC template scripts installed on the minion CLI Examples: .. code-block:: bash salt myminion lxc.templates """ try: template_scripts = os.listdir("/usr/share/lxc/templates") except OSError: return [] else: return [x[4:] for x in template_scripts if x.startswith("lxc-")]
4f79b1baaf2e6434a221bd3ea449d71ce2fad8b5
1,903
def has_valid_chars(token: str) -> bool: """ decides whether this token consists of a reasonable character mix. :param token: the token to inspect :return: True, iff the character mix is considered "reasonable" """ hits = 0 # everything that is not alphanum or '-' or '.' limit = int(len(token) / 10) for c in token: if not (c.isalnum() or c == '.' or c == '-' or c == ' '): hits += 1 if hits > limit: return False return True
b9b65f1bfd3529275847f1d6e227d57dfebea8a8
1,904
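An illustrative check of has_valid_chars above: a token is rejected once more than roughly 10% of its characters fall outside alphanumerics, '.', '-', and spaces.
print(has_valid_chars("well-formed token v1.2"))  # True: no unusual characters at all
print(has_valid_chars("@@##$$%%"))                # False: every character counts as a "hit"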
def event_message(iden, event): """Return an event message.""" return {"id": iden, "type": "event", "event": event}
bfc3fca17a9ad8d3767853c82c5453328d4c07e3
1,905
def match(command): """Match function copied from cd_mkdir.py""" return ( command.script.startswith('cd ') and any(( 'no such file or directory' in command.output.lower(), 'cd: can\'t cd to' in command.output.lower(), 'does not exist' in command.output.lower() )))
e49540995f26b40b4c52879814fe905f35b1c8fd
1,906
def format_trace_id(trace_id: int) -> str: """Format the trace id according to b3 specification.""" return format(trace_id, "032x")
2c0541b4a25d85ae990e68e00dd75012aa1ced60
1,909
def get_count_name(df): """Add a 'Words_Count' feature giving the number of words in a person's 'Name' Parameters ---------- df : pandas dataframe Returns ------- pandas dataframe with the added 'Words_Count' column """ # Feature that counts how many words the passenger's name contains df['Words_Count'] = df['Name'].apply(lambda x: len(x.split())).astype(int) return df
c51dfbcc025908243f20d10f4faa498fa068d4f7
1,910
def nmatches_mem(txt, pat, t, p, mem): """Find number of matches with recursion + memoization using a dictionary (this solution will also crash when recursion limit is reached) nmatches_mem(text, pattern, len(text), len(pattern), {}) """ if (t,p) in mem: return mem[t, p] if p==0: return 1 if t==0: return 0 matches = 0 for i in range(t, 0, -1): if txt[t-i] == pat[p-1]: matches += nmatches_mem(txt, pat, t-i, p-1, mem) mem[t, p] = matches return matches
5b6a10328ca876481fb9b8425bde2442f603d7e1
1,912
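A quick sanity check of nmatches_mem above, which counts how many times the pattern occurs as a subsequence of the text; the example strings are illustrative only.
print(nmatches_mem("babgbag", "bag", 7, 3, {}))  # 5 distinct ways "bag" appears as a subsequence of "babgbag"
print(nmatches_mem("abc", "abcd", 3, 4, {}))     # 0: the pattern is longer than the text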
def scale(data, new_min, new_max): """Scales a normalised data series :param data: The norrmalised data series to be scaled :type data: List of numeric values :param new_min: The minimum value of the scaled data series :type new_min: numeric :param new_max: The new maximum of the scaled data series :type new_max: numeric :return: A scaled data series :rtype: list """ return [(x*(new_max-new_min))+new_min for x in data]
3e7720ae90cfdbef1253dbfa39b3e4a10fc118bb
1,913
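An illustrative call to scale above, mapping a normalised series onto a new range.
print(scale([0.0, 0.5, 1.0], 10, 20))  # [10.0, 15.0, 20.0]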
def get_same_padding(kernel_size: int, stride: int, dilation: int) -> int: """Calculates the padding size to obtain same padding. Same padding means that the output will have the shape input_shape / stride. That means, for stride = 1 the output shape is the same as the input, and stride = 2 gives an output that is half of the input shape. Args: kernel_size : convolution kernel size. Only tested to be correct with odd values. stride : convolution stride dilation : convolution dilation Raises: ValueError: Only stride or dilation may be greater than 1 Returns: padding value to obtain same padding. """ if stride > 1 and dilation > 1: raise ValueError("Only stride OR dilation may be greater than 1") if dilation > 1: return (dilation * (kernel_size - 1) + 1) // 2 return kernel_size // 2
12548482e855dcfc627c5b0a6ccf69ad4a74b39b
1,914
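Illustrative values for get_same_padding above, showing the plain and dilated cases.
print(get_same_padding(kernel_size=3, stride=1, dilation=1))  # 1, i.e. kernel_size // 2
print(get_same_padding(kernel_size=3, stride=1, dilation=2))  # 2, i.e. (dilation * (kernel_size - 1) + 1) // 2
# get_same_padding(3, 2, 2) would raise ValueError: only stride OR dilation may exceed 1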
def move_child_position(context, request): """ Move the child from one position to another. :param context: "Container" node in which the child changes its position. :type context: :class:kotti.resources.Node or descendant :param request: Current request (of method POST). Must contain either "from" and "to" params or a json_body that contain(s) the 0-based old (i.e. the current index of the child to be moved) and new position (its new index) values. :type request: :result: JSON serializable object with a single attribute ("result") that is either "success" or "error". :rtype: dict """ data = request.POST or request.json_body if ("from" in data) and ("to" in data): max_pos = len(context.children) - 1 try: old_position = int(data["from"]) new_position = int(data["to"]) if not ((0 <= old_position <= max_pos) and (0 <= new_position <= max_pos)): raise ValueError except ValueError: return {"result": "error"} # sqlalchemy.ext.orderinglist takes care of the "right" sequence # numbers (immediately consecutive, starting with 0) for us. context.children.insert(new_position, context.children.pop(old_position)) result = "success" else: result = "error" return {"result": result}
082aef1169de6dab4881593ef8abf85e5076f190
1,915
def get_rgba_from_color(rgba): """Return tuple of R, G, B, A components from the given color. Arguments: rgba - color """ r = (rgba & 0xFF000000) >> 24 g = (rgba & 0x00FF0000) >> 16 b = (rgba & 0x0000FF00) >> 8 a = (rgba & 0x000000FF) return r, g, b, a
56d3e0dce01cfc4348ae115de81abb55ec85eb56
1,916
def beauty_factor(G): """Return the "beauty factor" of an arbitrary graph, the minimum distance between a vertex and a non-incident edge.""" V, E = G[0], G[1] dists = [] for (i, u) in enumerate(V): for (j, k) in E: if i == j or i == k: continue v, w = V[j], V[k] a, b = u-v, w-v proj = (a.real*b.real+a.imag*b.imag) / abs(b) # scalar projection if 0 <= proj <= abs(b): dists.append(abs(a - b * proj / abs(b))) else: dists.extend((abs(a), abs(u-w))) return min(dists)
9267a534d8453a17561b2c8e1f67e40942069ffe
1,917
def legislature_to_number(leg): """ Takes a full session and splits it down to the values for FormatDocument.asp. session = '49th-1st-regular' legislature_to_number(session) --> '49Leg/1r' """ l = leg.lower().split('-') return '%sLeg/%s%s' % (l[0][0:2], l[1][0], l[2][0])
cffeeea2bad17d9dadcfd75d70417824c7fe3396
1,918
def reverse_int_bits(n: int, n_bits: int = 10) -> int: """Reverses the bits of *n*, considering it is padded by *n_bits* first""" return int(format(n, '0' + str(n_bits) + 'b')[::-1], 2)
3c76db59296863161b0bb543e057a82383a780a2
1,919
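Illustrative calls to reverse_int_bits above, using the default 10-bit width and an explicit 4-bit width.
print(reverse_int_bits(1))          # 512: '0000000001' reversed is '1000000000'
print(reverse_int_bits(0b1100, 4))  # 3:   '1100' reversed is '0011'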
import math def n_permutations(n, r=None): """Number of permutations (unique by position) :param n: population length :param r: sample length :return: int """ if r is None: r = n if n < 0 or r < 0: raise ValueError("n and r must be non-negative") if n == 0 or r > n: return 0 return math.factorial(n) // math.factorial(n - r)
441081c534c07bb98b6a32cce4c87d64b030a5a7
1,920
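Illustrative values for n_permutations above.
print(n_permutations(5, 2))  # 20 ordered pairs drawn from 5 items (5! / 3!)
print(n_permutations(4))     # 24, i.e. 4! when r defaults to n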
import re def _(pattern, key_path: str, case_ignored=False) -> bool: """Called when the concerned Key is defined as a re.Pattern, and case_ignored flag is neglected.""" return re.fullmatch(pattern, key_path) is not None
c5759a7940dcb9babc791322cac1397a640dc94d
1,921
def transition_with_random_block(block_randomizer): """ Build a block transition with randomized data. Provide optional sub-transitions to advance some number of epochs or slots before applying the random block. """ return { "block_producer": block_randomizer, }
acf0d285a7633b40ffb46853831412dafa6617e5
1,922
def get_wl_band(radar_frequency): """Returns integer corresponding to radar frequency. Args: radar_frequency (float): Radar frequency (GHz). Returns: int: 0=35 GHz radar, 1=94 GHz radar. """ return 0 if (30 < radar_frequency < 40) else 1
cf2eaa12f111f7ad6751fb31f58e0bc01666494a
1,923
import hashlib def md5hash(string): """ Return the MD5 hex digest of the given string. """ return hashlib.md5(string).hexdigest()
cfc0d44c3c84fb08d277d7b397a5aca453025d96
1,924
import base64 import requests def main(dict): """ Function that allows to send a get request to twitter API and retrieve the last 3 tweets of a specific account name. The parameter of the account is passed by Watson Assistant throught a context variable. Args: dict (dict): containing the parameter - in our case only one is used : "account" (e.g. @blackmirror) Return: list_tweets (list) : list containing text (and image) of the last three tweets. """ account_name = dict.get("account")[1:] client_key = '// your twitter dev account client_key //' client_secret = '// your twitter dev account client_secret //' key_secret = '{}:{}'.format(client_key, client_secret).encode('ascii') b64_encoded_key = base64.b64encode(key_secret) b64_encoded_key = b64_encoded_key.decode('ascii') base_url = 'https://api.twitter.com/' auth_url = '{}oauth2/token'.format(base_url) auth_headers = { 'Authorization': 'Basic {}'.format(b64_encoded_key), 'Content-Type': 'application/x-www-form-urlencoded;charset=UTF-8' } auth_data = { 'grant_type': 'client_credentials' } auth_resp = requests.post(auth_url, headers=auth_headers, data=auth_data) access_token = auth_resp.json()['access_token'] search_headers = { 'Authorization': 'Bearer {}'.format(access_token) } search_url = '{}1.1/statuses/user_timeline.json?screen_name={}&count=3'.format(base_url, account_name) search_resp = requests.get(search_url, headers=search_headers) tweet_data = search_resp.json() list_tweets =[] for i in range(len(tweet_data)): # store the text of the tweet text = tweet_data[i].get("text") # if the tweet contains an image add this to the tweet text if(tweet_data[i].get("entities").get("media")): image = tweet_data[i].get("entities").get("media")[0].get("media_url_https") width = tweet_data[i].get("entities").get("media")[0].get("sizes").get("small").get("w") height = tweet_data[i].get("entities").get("media")[0].get("sizes").get("small").get("h") url = tweet_data[i].get("entities").get("media")[0].get("url") final = text + "<a href = '" + url + "'>" + "<img src = '" +image + "' height =" + str(height) + " width = "+ str(width) + ">" + "</a>" list_tweets.append(final) # if there is no image, then just save the text of the tweet else: list_tweets.append(text) return {"result": list_tweets}
5dddf4ad7c4ee45d1bf3a61f308989dffc451cc2
1,925
def headers(): """ default HTTP headers for all API calls """ return {"Content-type": "application/json"}
e601410c7ba22f28a88e47742349087744495c6b
1,926
def get_mode(input_list: list): """ Gets the mode of a list. If there is more than one mode, the function returns False. This is a very slow way to accomplish this, but it gets a mode, which can only be 4 things, so it should be OK """ if len(input_list) == 0: return False distinguished_elements = {} for element in input_list: if element not in distinguished_elements: distinguished_elements[element] = 0 # Count all of the elements and save them in a dictionary for key, value in distinguished_elements.items(): distinguished_elements[key] = input_list.count(key) # Get the mode max_key = None max_value = 0 for key, value in distinguished_elements.items(): if value > max_value: max_key = key max_value = value # If there's a second mode, return False for key, value in distinguished_elements.items(): if value == max_value and key != max_key: return False return max_key
552620bb68e3922dff7b19f52f5da9dbee813ca3
1,927
import time def sum_of_n_2(n): """ Iterative summation, using addition only. :param n: sum the integers from 1 to n :return: tuple, whose first element is the computed sum and whose second element is the elapsed time """ start = time.time() the_sum = 0 for i in range(1, n + 1): the_sum = the_sum + i end = time.time() return the_sum, end - start
45a07d5a8d02f515b8d0730a9a10bf71398092e8
1,929
def TSA_t_g( temperature, temperature_vegetation, vegetation_fraction): """ //Temperature of ground from Tvegetation //Based on two sources pixel split //Chen et al., 2005. IJRS 26(8):1755-1762. //Estimation of daily evapotranspiration using a two-layer remote sensing model. Ground temperature, bare soil TSA_t_g( temperature, temperature_vegetation, vegetation_fraction) """ result = (temperature - (vegetation_fraction*temperature_vegetation)) / (1 - vegetation_fraction) return result
c1189f2554e3f7dba13a2c8fd8698f906d343611
1,930
import math def _fcn_mg_joint_pos(t, q_init, q_end, t_strike_end): """Helper function for `create_mg_joint_pos_policy()` to fit the `TimePolicy` scheme""" return ((q_end - q_init) * min(t / t_strike_end, 1) + q_init) / 180 * math.pi
892a494ea5ee2033d2f29efe7400bce8aab30c1c
1,932
import re def rex_coverage(patterns, example_freqs, dedup=False): """ Given a list of regular expressions and a dictionary of examples and their frequencies, this counts the number of times each pattern matches an example. If ``dedup`` is set to ``True``, the frequencies are ignored, so that only the number of keys is returned. """ results = [] for p in patterns: p = '%s%s%s' % ('' if p.startswith('^') else '^', p, '' if p.endswith('$') else '$') r = re.compile(p, re.U) if dedup: results.append(sum(1 if re.match(r, k) else 0 for k in example_freqs)) else: results.append(sum(n if re.match(r, k) else 0 for (k, n) in example_freqs.items())) return results
a9ac988348d1fa037508b0a2b6c71e077ca41627
1,933
def build_diamond(validated_letter): """ >:param str validated_letter: A capital letter, that will be used to generate the list of strings needed to print out the diamond. >**Returns:** A list a strings that contains the correct spacing for printing the diamond. build_diamond is used to generate the list of strings needed to print the diamond structure. It takes a single argument of a letter (in string format), and returns a list of strings. This list of strings can then be printed with newline characters (using join) to output the diamond structure. """ a_ascii = ord('A') rows = ord(validated_letter) - a_ascii + 1 diamond = [] for row in list(range(rows)) + list(reversed(range(rows-1))): if row == 0: diamond.append('{: <{w1}}{current_letter}'.format('', w1=rows-1, current_letter=chr(a_ascii+row))) else: diamond.append('{: <{w1}}{current_letter}{: <{w2}}{current_letter}'.format('', '', w1=rows-row-1, current_letter=chr(a_ascii+row), w2=row*2-1)) return diamond
bd55281ee275d402d4f35701daacb3be0246812e
1,934
import importlib def import_module_attribute(function_path): """Import and return a module attribute given a full path.""" module, attribute = function_path.rsplit(".", 1) app_module = importlib.import_module(module) return getattr(app_module, attribute)
ce2647bb193c2a6c07949073f7c0d142ee8cd1b5
1,936
def rgb_to_hex(red, green, blue): """Give three color arrays, return a list of hex RGB strings""" pat = "#{0:02X}{1:02X}{2:02X}" return [pat.format(r & 0xff, g & 0xff, b & 0xff) for r, g, b in zip(red, green, blue)]
9126ae9e05d4a005d397f13fd4f0d9400efe5a65
1,937
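An illustrative call to rgb_to_hex above, with the three channel arrays zipped position by position.
print(rgb_to_hex([255, 0], [0, 128], [128, 255]))  # ['#FF0080', '#0080FF']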
def make_webhdfs_url(host, user, hdfs_path, op, port=50070): """ Forms the URL for httpfs requests. INPUT ----- host : str The host to connect to for httpfs access to HDFS. (Can be 'localhost'.) user : str The user to use for httpfs connections. hdfs_path : str The full path of the file or directory being checked. op : str The httpfs operation string. E.g., 'GETFILESTATUS'. port : int The port to use for httpfs connections. OUTPUT ------ str : The string to use for an HTTP request to httpfs. """ url = 'http://' + host + ':' + str(port) + '/webhdfs/v1' url += hdfs_path + '?user.name=' + user + '&op=' + op return url
c4899d75fd54558c6216889cbc749f5d0fe403df
1,938
def chunkify(arr, n): """Breaks a list into chunks of size n. The last chunk may be shorter than the other chunks """ return [arr[i : i + n] for i in range(0, len(arr), n)]
10df800440e8c1d5e4070dc48dd8c7ecc12f3c83
1,939
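An illustrative call to chunkify above, showing that n is the chunk size rather than the number of chunks.
print(chunkify([1, 2, 3, 4, 5], 2))  # [[1, 2], [3, 4], [5]] -- the last chunk is shorter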
def _scale_value_to_rpm(value, total): """Scale value to reads per million""" return value * 1 / (total / 1e6)
c3a49c8df8cbb22bd055a2f8076065463041bb72
1,940
import os def file_exists_not_empty(filename,): """ Tests if file exists and is not empty :param filename: full path of file to be checked :type filename: str """ if os.path.isfile(filename): if os.stat(filename).st_size == 0: return False else: return False return True
a00a9b5afad47263899de10b26ce8c1225c0eb7c
1,941
import requests import json def slack(channel, message, subject=''): """ Sends a notification to meerkat slack server. Channel is '#deploy' only if in live deployment, otherwise sent privately to the developer via slackbot. Args: channel (str): Required. The channel or username to which the message should be posted. message (str): Required. The message to post to slack. subject (str): Optional. Placed in bold and seperated by a pipe. return "sent" """ # Assemble the message text string text = str(message) if subject: text = "*_{}_* | {}".format(subject, message) # Send the slack message message = {'text': text, 'channel': channel, 'username': 'Meerkat'} url = ('https://hooks.slack.com/services/T050E3XPP/' 'B0G7UKUCA/EtXIFB3CRGyey2L7x5WbT32B') headers = {'Content-Type': 'application/json'} r = requests.post(url, data=json.dumps(message), headers=headers) # Return the slack response return r
274111901fd3b7545c8e2128ce3e98716eca8406
1,942
def get_results_from_firebase(firebase): """ The function to download all results from firebase Parameters ---------- firebase : pyrebase firebase object initialized firebase app with admin authentication Returns ------- results : dict The results in a dictionary with the following format: { "task_id" { "user1_id": { "data": {...} }, "user2_id": { "data": {...} }, } } """ fb_db = firebase.database() results = fb_db.child("results").get().val() return results
ca06c24367d778d4b601eab6fa31009fe6ecb372
1,943
def GetSourceFile(file, sourcepath): """Return a relative file if it is embedded in a path.""" for root in sourcepath: if file.find(root) == 0: prefix_length = len(root) if not root.endswith('/'): prefix_length += 1 relative_file = file[prefix_length:] return relative_file return None
b241497131c3595f78ebf9d1481c8d9d50887e5a
1,944
import os import subprocess def trace_cmd_installed(): """Return true if trace-cmd is installed, false otherwise""" with open(os.devnull) as devnull: try: subprocess.check_call(["trace-cmd", "options"], stdout=devnull) except OSError: return False return True
7135c0b2ebfa46b69df5b692178e6b569d3014dd
1,945
import argparse def get_args(): """ Supports the command-line arguments listed below. """ parser = argparse.ArgumentParser( description='Test Runner script') parser.add_argument('-c', '--controller', type=str, required=True, help='Controller host name') parser.add_argument('-s', '--server', type=str, required=True, help='Cluster Server hostname') parser.add_argument('-e', '--export', type=str, help='NFS Export Name', default="/") parser.add_argument('-n', '--nodes', type=int, help='Number of active nodes', default=0) parser.add_argument('-d', '--domains', type=int, help='Number of fs domains', default=0) parser.add_argument('-m', '--mtype', type=str, help='Mount Type', choices=['nfs3', 'nfs4', 'nfs4.1', 'smb1', 'smb2', 'smb3'], default="nfs3") parser.add_argument('--start_vip', type=str, help="Start VIP address range") parser.add_argument('--end_vip', type=str, help="End VIP address range") parser.add_argument('-l', '--locking', type=str, help='Locking Type', choices=['native', 'application', 'off'], default="native") args = parser.parse_args() return args
8bb488e026c13ddc008ea2877b1d0ae9f904f970
1,946
import os import base64 def img_to_b64(img_path): """Read an image file and return its base64-encoded contents""" assert os.path.isfile(img_path) with open(img_path, 'rb') as f: img = f.read() b64 = base64.b64encode(img) return b64
6ab67dc503c7bf077fb8772b1c5708eb10efe7e7
1,948
def filter_by_is_awesome(resources): """Return only the resources whose is_awesome attribute is truthy Arguments: resources {[type]} -- A list of resources """ return [resource for resource in resources if resource.is_awesome]
46717a93e75dfed53bba03b5b7f8a5e8b8315876
1,950
def header_lines(filename): """Read the first five lines of a file and return them as a list of strings.""" with open(filename, mode='rb') as f: return [f.readline().decode().rstrip() for _ in range(5)]
35056152c1566ea2d14452308f00d6903b6e4dff
1,952
import re def word_detokenize(tokens): """ A heuristic attempt to undo the Penn Treebank tokenization above. Pass the --pristine-output flag if no attempt at detokenizing is desired. """ regexes = [ # Newlines (re.compile(r'[ ]?\\n[ ]?'), r'\n'), # Contractions (re.compile(r"\b(can)\s(not)\b"), r'\1\2'), (re.compile(r"\b(d)\s('ye)\b"), r'\1\2'), (re.compile(r"\b(gim)\s(me)\b"), r'\1\2'), (re.compile(r"\b(gon)\s(na)\b"), r'\1\2'), (re.compile(r"\b(got)\s(ta)\b"), r'\1\2'), (re.compile(r"\b(lem)\s(me)\b"), r'\1\2'), (re.compile(r"\b(mor)\s('n)\b"), r'\1\2'), (re.compile(r"\b(wan)\s(na)\b"), r'\1\2'), # Ending quotes (re.compile(r"([^' ]) ('ll|'re|'ve|n't)\b"), r"\1\2"), (re.compile(r"([^' ]) ('s|'m|'d)\b"), r"\1\2"), (re.compile(r'[ ]?”'), r'"'), # Double dashes (re.compile(r'[ ]?--[ ]?'), r'--'), # Parens and brackets (re.compile(r'([\[\(\{\<]) '), r'\1'), (re.compile(r' ([\]\)\}\>])'), r'\1'), (re.compile(r'([\]\)\}\>]) ([:;,.])'), r'\1\2'), # Punctuation (re.compile(r"([^']) ' "), r"\1' "), (re.compile(r' ([?!\.])'), r'\1'), (re.compile(r'([^\.])\s(\.)([\]\)}>"\']*)\s*$'), r'\1\2\3'), (re.compile(r'([#$]) '), r'\1'), (re.compile(r' ([;%:,])'), r'\1'), # Starting quotes (re.compile(r'(“)[ ]?'), r'"') ] text = ' '.join(tokens) for regexp, substitution in regexes: text = regexp.sub(substitution, text) return text.strip()
577c2ed235aaf889699efc291d2b206a922f1f4a
1,953
def is_regex(obj): """Cannot do type check against SRE_Pattern, so we use duck typing.""" return hasattr(obj, 'match') and hasattr(obj, 'pattern')
cfd4fc702fb121735f49d4ba61395ce8f6508b1a
1,955
import threading def run_with_timeout(proc, timeout, input=None): """ Run Popen process with given timeout. Kills the process if it does not finish in time. You need to set stdout and/or stderr to subprocess.PIPE in Popen, otherwise the output will be None. The returncode is 999 if the process was killed. :returns: (returncode, stdout string, stderr string) """ output = [] def target(): output.extend(proc.communicate(input)) thread = threading.Thread(target=target) thread.daemon = True thread.start() killed = False thread.join(timeout) if thread.is_alive(): proc.terminate() killed = True thread.join() returncode = proc.returncode if killed: returncode = 999 return returncode, output[0], output[1]
414e18dae8f31b20c472f7da14475f8da5761781
1,956
import json def load_dataset(path): """Load json file and store fields separately.""" with open(path) as f: data = json.load(f)['data'] output = {'qids': [], 'questions': [], 'answers': [], 'contexts': [], 'qid2cid': []} for article in data: for paragraph in article['paragraphs']: output['contexts'].append(paragraph['context']) for qa in paragraph['qas']: output['qids'].append(qa['id']) output['questions'].append(qa['question']) output['qid2cid'].append(len(output['contexts']) - 1) if 'answers' in qa: output['answers'].append(qa['answers']) return output
4ba01f49d6a0aa3329b076fc0de9dd38fb99f2f8
1,957
import inspect def requires_request_arg(method): """ Helper function to handle deprecation of old ActionMenuItem API where get_url, is_show, get_context and render_html all accepted both 'request' and 'parent_context' as arguments """ try: # see if this is a pre-2.15 get_url method that takes both request and context kwargs inspect.signature(method).bind({}) except TypeError: return True else: return False
0ec09e34c04d4d54762051b01af8c80754d47125
1,958
from typing import List def on_deck(elements: List[int], all_vars): """all of the elements must be within the deck""" rules = [] for element in elements: var = all_vars[element - 1] rules.append(var >= 1) rules.append(var <= 52) return rules
2e90dfa45bd90a7c3b834000e070631af5952f36
1,960
def parse_api_error(response): """ Parse the error-message from the API Response. Assumes, that a check if there is an error present was done beforehand. :param response: Dict of the request response ([imdata][0][....]) :type response: ``dict`` :returns: Parsed Error-Text :rtype: ``str`` """ if "error" in response["imdata"][0]: return ( "API-Errorcode " + str(response["imdata"][0]["error"]["attributes"]["code"]) + ": " + str(response["imdata"][0]["error"]["attributes"]["text"]) ) else: return "Unparseable: " + str(response)
acc4256b3245e3e2c10e3ba998bf577e0f51a33e
1,961
def sanitise_utf8(s): """Ensure an 8-bit string is utf-8. s -- 8-bit string (or None) Returns the sanitised string. If the string was already valid utf-8, returns the same object. This replaces bad characters with ascii question marks (I don't want to use a unicode replacement character, because if this function is doing anything then it's likely that there's a non-unicode setup involved somewhere, so it probably wouldn't be helpful). """ if s is None: return None try: s.decode("utf-8") except UnicodeDecodeError: return (s.decode("utf-8", 'replace') .replace(u"\ufffd", u"?") .encode("utf-8")) else: return s
11b864ade1c36e2b42ffbdd76ee2851f01ca7803
1,962
def _in_delta(value, target_value, delta) -> bool: """ Check if value is equal to target value within delta """ return abs(value - target_value) < delta
92ab62a381fc1cfc6bbb82635f196ec4498babf4
1,963
import argparse def parse_arguments(): """ Parse input arguments and store them in a global variable. Returns: Parsed arguments. """ parser = argparse.ArgumentParser(description='Generates a lexicon for gender recognition.') parser.add_argument('dataset', help='file with JSON objects to be processed') parser.add_argument('--faces', action='store_true', help='apply facial recognition over profile images') parser.add_argument('--confidence', metavar='N', type=float, default=0.75, help="minimal confidence for a valid recognition (default=0.75)") parser.add_argument('--lexicon-percentage', metavar='N', type=float, default=0.5, help="Percentage of words to get from the generated lexicon") parser.add_argument('--surnames', action='store_true', help='require fullnames (at least one surname)') parser.add_argument('--remove-outliers', action='store_true', help='remove outliers before generate training and test datasets') return parser.parse_args()
bad4bc4943dc18a63a676bd44b4babf210846085
1,964
import secrets def do_roll(dice: int, sides: int, _: int): """Given an amount of dice and the number of sides per die, simulate a dice roll and return a list of ints representing the outcome values. Modifier is ignored. """ dice = dice or 1 sides = sides or 1 values = sorted(((secrets.randbelow(sides) + 1) for _ in range(0, dice)), reverse=True) return values
2073a37e5b76a85182e8cf786707ed18ca3f2474
1,965
def unorm_to_byte(x): """float x in [0, 1] to an integer [0, 255]""" return min(int(256 * x), 255)
a6870a339b9b0d5466962a9129c717876d8d0a50
1,966
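Illustrative values for unorm_to_byte above, including the clamp at the upper end.
print(unorm_to_byte(0.0))  # 0
print(unorm_to_byte(0.5))  # 128
print(unorm_to_byte(1.0))  # 255 -- clamped, since int(256 * 1.0) would be 256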
def argunique(a, b): """ Find the unique correspondences among the a--b pairs, i.e. guarantee that the final output aa--bb contains no duplicate elements and no one-to-many mappings :param a: :param b: :return: aaa, bbb such that aaa-bbb are unique pairs """ # First check the elements of a one by one: on a first occurrence, record the pairing in seta; on a repeat occurrence, check consistency and set the entry to -1 if it conflicts # A value of -1 means the current element i of a has a one-to-many record and is dropped; -1 will also never be matched again seta = {} for i, j in zip(a, b): if i not in seta: seta[i] = j elif seta[i] != j: seta[i] = -1 aa = [i for i in seta if seta[i] != -1] bb = [seta[i] for i in seta if seta[i] != -1] # Do the same pass in reverse, indexed by b, to drop duplicates on that side as well setb = {} for i, j in zip(aa, bb): if j not in setb: setb[j] = i elif setb[j] != i: setb[j] = -1 aaa = [setb[j] for j in setb if setb[j] != -1] bbb = [j for j in setb if setb[j] != -1] return aaa, bbb
e804436203496d5f3109511967a0d75eaca330da
1,969
def _truncate_and_pad_token_ids(token_ids, max_length): """Truncates or pads the token id list to max length.""" token_ids = token_ids[:max_length] padding_size = max_length - len(token_ids) if padding_size > 0: token_ids += [0] * padding_size return token_ids
a8f29fdbc99c3dcac42b9275037d3a3c39c22e12
1,970
def castep_geom_count(dot_castep): """Count the number of geom cycles""" count = 0 with open(dot_castep) as fhandle: for line in fhandle: if 'starting iteration' in line: count += 1 return count
6a619b5853a02a8c118af1fc19da0d803941c84f
1,971
def export_phones(ucm_axl):
    """
    Export Phones
    """
    try:
        phone_list = ucm_axl.get_phones(
            tagfilter={
                "name": "", "description": "", "product": "", "model": "",
                "class": "", "protocol": "", "protocolSide": "",
                "callingSearchSpaceName": "", "devicePoolName": "",
                "commonDeviceConfigName": "", "commonPhoneConfigName": "",
                "networkLocation": "", "locationName": "",
                "mediaResourceListName": "", "networkHoldMohAudioSourceId": "",
                "userHoldMohAudioSourceId": "", "loadInformation": "",
                "securityProfileName": "", "sipProfileName": "",
                "cgpnTransformationCssName": "", "useDevicePoolCgpnTransformCss": "",
                "numberOfButtons": "", "phoneTemplateName": "",
                "primaryPhoneName": "", "loginUserId": "",
                "defaultProfileName": "", "enableExtensionMobility": "",
                "currentProfileName": "", "loginTime": "", "loginDuration": "",
                # "currentConfig": "",
                "ownerUserName": "", "subscribeCallingSearchSpaceName": "",
                "rerouteCallingSearchSpaceName": "", "allowCtiControlFlag": "",
                "alwaysUsePrimeLine": "", "alwaysUsePrimeLineForVoiceMessage": "",
            }
        )
        all_phones = []
        for phone in phone_list:
            # print(phone)
            phone_details = {
                "name": phone.name,
                "description": phone.description,
                "product": phone.product,
                "model": phone.model,
                "protocol": phone.protocol,
                "protocolSide": phone.protocolSide,
                "callingSearchSpaceName": phone.callingSearchSpaceName._value_1,
                "devicePoolName": phone.defaultProfileName._value_1,
                "commonDeviceConfigName": phone.commonDeviceConfigName._value_1,
                "commonPhoneConfigName": phone.commonPhoneConfigName._value_1,
                "networkLocation": phone.networkLocation,
                "locationName": phone.locationName._value_1,
                "mediaResourceListName": phone.mediaResourceListName._value_1,
                "networkHoldMohAudioSourceId": phone.networkHoldMohAudioSourceId,
                "userHoldMohAudioSourceId": phone.userHoldMohAudioSourceId,
                "loadInformation": phone.loadInformation,
                "securityProfileName": phone.securityProfileName._value_1,
                "sipProfileName": phone.sipProfileName._value_1,
                "cgpnTransformationCssName": phone.cgpnTransformationCssName._value_1,
                "useDevicePoolCgpnTransformCss": phone.useDevicePoolCgpnTransformCss,
                "numberOfButtons": phone.numberOfButtons,
                "phoneTemplateName": phone.phoneTemplateName._value_1,
                "primaryPhoneName": phone.primaryPhoneName._value_1,
                "loginUserId": phone.loginUserId,
                "defaultProfileName": phone.defaultProfileName._value_1,
                "enableExtensionMobility": phone.enableExtensionMobility,
                "currentProfileName": phone.currentProfileName._value_1,
                "loginTime": phone.loginTime,
                "loginDuration": phone.loginDuration,
                # "currentConfig": phone.currentConfig,
                "ownerUserName": phone.ownerUserName._value_1,
                "subscribeCallingSearchSpaceName": phone.subscribeCallingSearchSpaceName._value_1,
                "rerouteCallingSearchSpaceName": phone.rerouteCallingSearchSpaceName._value_1,
                "allowCtiControlFlag": phone.allowCtiControlFlag,
                "alwaysUsePrimeLine": phone.alwaysUsePrimeLine,
                "alwaysUsePrimeLineForVoiceMessage": phone.alwaysUsePrimeLineForVoiceMessage,
            }
            line_details = ucm_axl.get_phone(name=phone.name)
            # print(line_details.lines.line)
            try:
                for line in line_details.lines.line:
                    # print(line)
                    phone_details[f"line_{line.index}_dirn"] = line.dirn.pattern
                    phone_details[f"line_{line.index}_routePartitionName"] = line.dirn.routePartitionName._value_1
                    phone_details[f"line_{line.index}_display"] = line.display
                    phone_details[f"line_{line.index}_e164Mask"] = line.e164Mask
            except Exception as e:
                print(e)
            all_phones.append(phone_details)
            print(f"exporting: {phone.name}: {phone.model} - {phone.description}")
        print("-" * 35)
        print(f"number of phones: {len(all_phones)}")
        return all_phones
    except Exception as e:
        print(e)
        return []
1487cef48c5666224da57173b968e9988f587a57
1,972
import inspect def _get_init_arguments(cls, *args, **kwargs): """Returns an OrderedDict of args passed to cls.__init__ given [kw]args.""" init_args = inspect.signature(cls.__init__) bound_args = init_args.bind(None, *args, **kwargs) bound_args.apply_defaults() arg_dict = bound_args.arguments del arg_dict['self'] return arg_dict
116c01f9edb838e4b392fa624a454fdf4c455f1a
1,974
def build_audit_stub(obj): """Returns a stub of audit model to which assessment is related to.""" audit_id = obj.audit_id if audit_id is None: return None return { 'type': 'Audit', 'id': audit_id, 'context_id': obj.context_id, 'href': '/api/audits/%d' % audit_id, 'issue_tracker': obj.audit.issue_tracker, }
705f066975bf9dae8704944c71eeb3e313cf445f
1,976
def render_cells(cells, width=80, col_spacing=2): """Given a list of short (~10 char) strings, display these aligned in columns. Example output:: Something like this can be used to neatly arrange long sequences of values in a compact format. Parameters ---------- cells : [(strlen, str), ...] Gives the cells to print as tuples giving the strings length in visible characters and the string to display. width : int The width of the terminal. col_spacing : int Size of the gap to leave between columns. """ # Special case (since max below will fail) if len(cells) == 0: return "" # Columns should be at least as large as the largest cell with padding # between columns col_width = max(strlen for strlen, s in cells) + col_spacing lines = [""] cur_length = 0 for strlen, s in cells: # Once line is full, move to the next if cur_length + strlen > width: lines.append("") cur_length = 0 # Add the current cell (with spacing) lines[-1] += s + (" "*(col_width - strlen)) cur_length += col_width return "\n".join(map(str.rstrip, lines))
714b915430be84980c3a9b74f3c5b2cb89b6acba
1,977
def separate_types(data): """Separate out the points from the linestrings.""" if data['type'] != 'FeatureCollection': raise TypeError('expected a FeatureCollection, not ' + data['type']) points = [] linestrings = [] for thing in data['features']: if thing['type'] != 'Feature': raise TypeError('expected Feature, not ' + thing['type']) geometry_type = thing['geometry']['type'] if geometry_type == 'Point': points.append(thing) elif geometry_type == 'LineString': linestrings.append(thing) else: raise TypeError('expected Point or LineString, not ' + geometry_type) return points, linestrings
28ab8eb7e2cdf1206f4908a15506a9b9af1aa428
1,978
import os def get_index_path(bam_path: str): """ Obtain path to bam index Returns: path_to_index(str) : path to the index file, None if not available """ for p in [bam_path+'.bai', bam_path.replace('.bam','.bai')]: if os.path.exists(p): return p return None
b8ccf66a89d865f49fdb311f2fcf0c371fe5c488
1,979
import os def create_output_folder(ProjectDir): """Create the output folders starting from the project directory. Parameters ---------- ProjectDir : str Name of the project directory. Returns ------- type PicturePath, ResultsPath """ npath = os.path.normpath(ProjectDir) # set pathname for the Output OutputPath = os.path.join(npath, os.path.basename(npath)) # set pathname for the images PicturePath = os.path.join(npath, os.path.basename(npath), "Pictures") # set pathname for the files ResultsPath = os.path.join(npath, os.path.basename(npath), "Results") # Add folders for outputs if not os.path.exists(OutputPath): os.mkdir(OutputPath) if not os.path.exists(PicturePath): os.mkdir(PicturePath) if not os.path.exists(ResultsPath): os.mkdir(ResultsPath) return PicturePath, ResultsPath
d1f9abf35bf5342707e7928aaa23c699063a3b70
1,980
import six def range_join(numbers, to_str=False, sep=",", range_sep=":"): """ Takes a sequence of positive integer numbers given either as integer or string types, and returns a sequence 1- and 2-tuples, denoting either single numbers or inclusive start and stop values of possible ranges. When *to_str* is *True*, a string is returned in a format consistent to :py:func:`range_expand` with ranges constructed by *range_sep* and merged with *sep*. Example: .. code-block:: python range_join([1, 2, 3, 5]) # -> [(1, 3), (5,)] range_join([1, 2, 3, 5, 7, 8, 9]) # -> [(1, 3), (5,), (7, 9)] range_join([1, 2, 3, 5, 7, 8, 9], to_str=True) # -> "1:3,5,7:9" """ if not numbers: return "" if to_str else [] # check type, convert, make unique and sort _numbers = [] for n in numbers: if isinstance(n, six.string_types): try: n = int(n) except ValueError: raise ValueError("invalid number format '{}'".format(n)) if isinstance(n, six.integer_types): _numbers.append(n) else: raise TypeError("cannot handle non-integer value '{}' in numbers to join".format(n)) numbers = sorted(set(_numbers)) # iterate through numbers, keep track of last starts and stops and fill a list of range tuples ranges = [] start = stop = numbers[0] for n in numbers[1:]: if n == stop + 1: stop += 1 else: ranges.append((start,) if start == stop else (start, stop)) start = stop = n ranges.append((start,) if start == stop else (start, stop)) # convert to string representation if to_str: ranges = sep.join( (str(r[0]) if len(r) == 1 else "{1}{0}{2}".format(range_sep, *r)) for r in ranges ) return ranges
c1b2d10ec1b47fa5c917fccead2ef8d5fc506370
1,981
import inspect def _get_kwargs(func, locals_dict, default=None): """ Convert a function's args to a kwargs dict containing entries that are not identically default. Parameters ---------- func : function The function whose args we want to convert to kwargs. locals_dict : dict The locals dict for the function. default : object Don't include arguments whose values are this object. Returns ------- dict The non-default keyword args dict. """ return {n: locals_dict[n] for n in inspect.signature(func).parameters if locals_dict[n] is not default}
ae0a06cb4e17b5512a03e89d7ca2119c58ea762b
1,982
def legendre(a, p): """Legendre symbol""" tmp = pow(a, (p-1)//2, p) return -1 if tmp == p-1 else tmp
66b86dce23ae10ba226ffb19942b98550bb7c218
1,984
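Illustrative values for legendre above, using Euler's criterion modulo 7.
print(legendre(2, 7))  # 1: 2 is a quadratic residue mod 7 (3*3 = 9 = 2 mod 7)
print(legendre(3, 7))  # -1: 3 is a non-residue mod 7
print(legendre(7, 7))  # 0: a is divisible by p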
import argparse def get_parser(): """ Create a parser with some arguments used to configure the app. Returns: argparse.ArgumentParser: """ parser = argparse.ArgumentParser(description="configuration") parser.add_argument( "--upload-folder", required=True, metavar="path", help="Target path where the images will be uploaded for inference", ) parser.add_argument( "--config-file", default="/content/computer-vision-REST-API/MaskRCNN_finetune/configs/ResNet-101-FPN/balloon.yaml", metavar="path", help="Path to the model config file. Possible improvement : let the user instead choose the desired model thru the app then load the ad-hoc config file.", ) parser.add_argument( "--weights", default="https://www.dropbox.com/s/otp52ccygc2t3or/ResNet101_FPN_model_final.pth?dl=1", metavar="path", help="Path to the model file weights. Possible improvement : let the user instead choose the desired model thru the app then load the ad-hoc pretrained weights.", ) parser.add_argument( "--remove-colors", default=False, action="store_true", help="One can remove colors of unsegmented pixels for better clarity as the mask and balloons colors can be hard to distinguish.", ) parser.add_argument( "--use-ngrok", default=False, action="store_true", help="Need to set this arg to True to be able to run it on google collab", ) parser.add_argument( "--infer-with-cpu", default=False, action="store_true", help="Use cpu for forward pass (slower)", ) return parser
0f7a375948d3e45157637647908f5c0e6948083a
1,985
def precheck_arguments(args): """ Make sure the argument choices are valid """ any_filelist = (len(args.filelist_name[0]) > 0 or len(args.output_dir[0]) > 0 or args.num_genomes[0] > 0) if len(args.filelist_name[0]) > 0 and len(args.output_dir[0]) == 0: print("Error: Need to specify output directory with -O if using -F") exit(1) if len(args.filelist_name[0]) == 0 and len(args.output_dir[0]) > 0: print("Error: Need to specify a filelist with -F if using -O") exit(1) if len(args.input_fasta[0]) > 0 and any_filelist: print("Error: When using -i flag, cannot use any of other options that imply multiple files") exit(1) if len(args.input_fasta[0]) > 0 and not any_filelist: return "single" elif any_filelist and len(args.input_fasta[0]) == 0: return "multi" else: print("Error: Need to specify either -i or the combination of -F and -O") exit(1)
984865d214cca63eae8bacf5bc7be238e7209ddb
1,986
def disemvowel(sentence): """Disemvowel: Given a sentence, return the sentence with all vowels removed. >>> disemvowel('the quick brown fox jumps over the lazy dog') 'th qck brwn fx jmps vr th lzy dg' """ vowels = ('a','e','i','o','u') for x in sentence: if x in vowels: sentence = sentence.replace(x,"") return sentence
d9b6d873c29e82cb65e43f71e2b6298af18b25fd
1,987
import os def split_name_with_nii(filename): """ Returns the clean basename and extension of a file. Means that this correctly manages the ".nii.gz" extensions. :param filename: The filename to clean :return: A tuple of the clean basename and the full extension """ base, ext = os.path.splitext(filename) if ext == ".gz": # Test if we have a .nii additional extension temp_base, add_ext = os.path.splitext(base) if add_ext == ".nii": ext = add_ext + ext base = temp_base return base, ext
d897804e4a0b773a1c23bff8ad8d7e7e678a9799
1,989
def abs(rv): """ Returns the absolute value of a random variable """ return rv.abs()
6bf2f8420f8a5e883dfddfc9a93106662a8f1a74
1,990
def escape(instruction): """ Escape used dot graph characters in given instruction so they will be displayed correctly. """ instruction = instruction.replace('<', r'\<') instruction = instruction.replace('>', r'\>') instruction = instruction.replace('|', r'\|') instruction = instruction.replace('{', r'\{') instruction = instruction.replace('}', r'\}') instruction = instruction.replace(' ', ' ') return instruction
936ed1d6c55650bf5f9ce52af8f113a9d466a534
1,991
def hello_world(): """Print welcome message as the response body.""" return '{"info": "Refer to internal http://metadata-db for more information"}'
ecb2208053e4ff530bcc0dcc117172449a51afbd
1,992
def get_api_key(): """Load API key.""" api_key_file = open('mailgun_api_key.txt', 'r') api_key = api_key_file.read() api_key_file.close() return api_key.strip()
55c87d15d616f0f6dfbc727253c2222128b63560
1,993
import itertools def flatten(colours): """Flatten the cube-shaped (doubly nested) array of colours into one long list.""" return list(itertools.chain.from_iterable(itertools.chain.from_iterable(colours)))
41576ef947354c30d1995fefdd30ad86bddbfe6f
1,994
import numpy def create_word_search_board(number: int): """ This function creates a numpy array of zeros, with dimensions of number x number, which is set by the user. The array is then iterated through, and zeros are replaced with -1's to avoid confusion with the alphabet (A) beginning at 0. """ board = numpy.zeros((number, number)) for i in range(len(board)): for x in range(number): board[i][x] = -1 return board
31f22d56c947f61840ba87d028eb7de275d33cc9
1,995
def parse_fastq(fh): """ Parse reads from a FASTQ filehandle. For each read, we return a name, nucleotide-string, quality-string triple. """ reads = [] while True: first_line = fh.readline() if len(first_line) == 0: break # end of file name = first_line[1:].rstrip() seq = fh.readline().rstrip() fh.readline() # ignore line starting with + qual = fh.readline().rstrip() reads.append((name, seq, qual)) return reads
d33d3efebdd1c5f61e25397328c6b0412f1911dd
1,996
def coalesce(*values): """Returns the first not-None arguement or None""" return next((v for v in values if v is not None), None)
245177f43962b4c03c2347725a2e87f8eb5dc08a
1,997
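An illustrative call to coalesce above; unlike a chain of `or`, it keeps falsy values that are not None.
print(coalesce(None, 0, 5))  # 0
print(coalesce(None, None))  # None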
import imp def pyc_file_from_path(path): """Given a python source path, locate the .pyc. See http://www.python.org/dev/peps/pep-3147/ #detecting-pep-3147-availability http://www.python.org/dev/peps/pep-3147/#file-extension-checks """ has3147 = hasattr(imp, 'get_tag') if has3147: return imp.cache_from_source(path) else: return path + "c"
459011ca1f07a023b139695cd2368767d46ca396
2,000
def get_bytes_per_data_block(header): """Calculates the number of bytes in each 128-sample datablock.""" N = 128 # n of amplifier samples # Each data block contains N amplifier samples. bytes_per_block = N * 4 # timestamp data bytes_per_block += N * 2 * header['num_amplifier_channels'] # DC amplifier voltage (absent if flag was off) # bytes_per_block += N * 2 * header['dc_amplifier_data_saved'] if header['dc_amplifier_data_saved'] > 0: bytes_per_block += N * 2 * header['num_amplifier_channels'] # Stimulation data, one per enabled amplifier channels bytes_per_block += N * 2 * header['num_amplifier_channels'] # Board analog inputs are sampled at same rate as amplifiers bytes_per_block += N * 2 * header['num_board_adc_channels'] # Board analog outputs are sampled at same rate as amplifiers bytes_per_block += N * 2 * header['num_board_dac_channels'] # Board digital inputs are sampled at same rate as amplifiers if header['num_board_dig_in_channels'] > 0: bytes_per_block += N * 2 # Board digital outputs are sampled at same rate as amplifiers if header['num_board_dig_out_channels'] > 0: bytes_per_block += N * 2 return bytes_per_block
524e9015dacaf99042dd1493b24a418fff8c6b04
2,001
def add_dep_info(tgt_tokens, lang, spacy_nlp, include_detail_tag=True): """ :param tgt_tokens: a list of CoNLLUP_Token_Template() Objects from CoNLL_Annotations.py file :param spacy_nlp: Spacy language model of the target sentence to get the proper Dependency Tree :return: """ doc = spacy_nlp.tokenizer.tokens_from_list([t.word for t in tgt_tokens]) spacy_nlp.tagger(doc) spacy_nlp.parser(doc) for ix, token in enumerate(doc): tgt_tokens[ix].lemma = token.lemma_ or "_" tgt_tokens[ix].head = token.head.i + 1 if lang in ["ES", "FR"]: detail_tag = token.tag_.split("__") # [VERB , Mood=Sub|Number=Plur|Person=3|Tense=Imp|VerbForm=Fin] tgt_tokens[ix].pos_tag = detail_tag[0] or "_" if include_detail_tag: tgt_tokens[ix].detail_tag = detail_tag[-1] or "_" else: tgt_tokens[ix].pos_tag = token.tag_ or "_" tgt_tokens[ix].pos_universal = token.pos_ or "_" # Is SpaCy already Universal? tgt_tokens[ix].dep_tag = token.dep_ or "_" tgt_tokens[ix].ancestors = [(t.i, t.text) for t in token.ancestors] tgt_tokens[ix].children = [(t.i, t.text) for t in token.children] # print(token.i, token.text, token.pos_, token.dep_, token.head.text, token.head.i, token.tag_) assert len(doc) == len(tgt_tokens), f"LEN Mismatch! Spacy has {len(doc)} tokens and CoNLL has {len(tgt_tokens)} tokens" return tgt_tokens
0083d16f4344a6afaeb5fba9a6b2e9282d617ef3
2,003
import sys def in_notebook() -> bool: """Evaluate whether the module is currently running in a jupyter notebook.""" return "ipykernel" in sys.modules
3be74bda76eaf0ff32c1d48d23c52f8d5f0ea728
2,004
def allowed_file(filename, extensions): """ Check file is image :param filename: string :param extensions: list :return bool: """ return '.' in filename and \ filename.rsplit('.', 1)[1].lower() in extensions
c61e77205e40cd05fc0ea6e4e4f770180f15e6d8
2,005
def payoff_blotto_sign(x, y): """ Returns: (0, 0, 1) -- x wins, y loss; (0, 1, 0) -- draw; (1, 0, 0)-- x loss, y wins. """ wins, losses = 0, 0 for x_i, y_i in zip(x, y): if x_i > y_i: wins += 1 elif x_i < y_i: losses += 1 if wins > losses: return (0, 0, 1) elif wins < losses: return (1, 0, 0) return (0, 1, 0)
5a34ce81fdff8f90ee715d9c82fc55abf7eb2904
2,006
def to_base_str(n, base): """Converts a number n into base `base`.""" convert_string = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ" if n < base: return convert_string[n] else: return to_base_str(n // base, base) + convert_string[n % base]
bc137d41c9543ef1a201f4bb14234fa277067a77
2,007
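Illustrative conversions with to_base_str above.
print(to_base_str(255, 16))  # 'FF'
print(to_base_str(10, 2))    # '1010'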
def number_of_photons(i,n=6): """Check if number of photons in a sample is higher than n (default value is 6)""" bitstring = tuple(i) if sum(bitstring) > n: return True else: return False
6c7cfea354aa4948d2c94469708f250e6d5b659d
2,008
def generate_response(response, output): """ :param response: response object or dict describing the executed command :param output: captured output to include in the result :return: dictionary """ status, command = None, None if isinstance(response, dict): status = response.get('ok', None) command = response.get('command', None) elif isinstance(response, object): status = getattr(response, 'ok', None) command = getattr(response, 'command', None) return { 'status': 'successful' if status else 'failed', 'command': command, 'output': output }
ea8764dd3e8f0205a0ec1dd278164140a414dadc
2,009
import os def abspath(url): """ Get a full path to a file or file URL See os.abspath """ if url.startswith('file://'): url = url[len('file://'):] return os.path.abspath(url)
5c739b7894b4b6d3aabbce9813ce27c72eea6f5d
2,010
from typing import Any from typing import get_type_hints from typing import get_origin from typing import Union from typing import get_args def get_repr_type(type_: Any) -> Any: """Parse a type and return an representative type. Example: All of the following expressions will be ``True``:: get_repr_type(A) == A get_repr_type(Annotated[A, ...]) == A get_repr_type(Union[A, B, ...]) == A get_repr_type(Optional[A]) == A """ class Temporary: __annotations__ = dict(type=type_) unannotated = get_type_hints(Temporary)["type"] if get_origin(unannotated) is Union: return get_args(unannotated)[0] return unannotated
fe74d79c1fcc74ff86d0c41db3f8f9da37dbf69a
2,011
import csv def parse_latency_stats(fp): """ Parse latency statistics. :param fp: the file path that stores the statistics :returns an average latency in milliseconds to connect a pair of initiator and responder clients """ latency = [] with open(fp) as csvfile: csvreader = csv.DictReader(csvfile, delimiter=' ', fieldnames=['title', 'time']) for row in csvreader: latency.append(float(row['time']) * 1000) return sum(latency) / len(latency)
c50c730b5c5bea704bd682d003baa0addfd7ee89
2,012
def round_to_nreads(number_set, n_reads, digit_after_decimal=0): """ This function take a list of number and return a list of percentage, which represents the portion of each number in sum of all numbers Moreover, those percentages are adding up to 100%!!! Notice: the algorithm we are using here is 'Largest Remainder' The down-side is that the results won't be accurate, but they are never accurate anyway:) """ unround_numbers = [ x / float(sum(number_set)) * n_reads * 10 ** digit_after_decimal for x in number_set ] decimal_part_with_index = sorted( [(index, unround_numbers[index] % 1) for index in range(len(unround_numbers))], key=lambda y: y[1], reverse=True, ) remainder = n_reads * 10 ** digit_after_decimal - sum( [int(x) for x in unround_numbers] ) index = 0 while remainder > 0: unround_numbers[decimal_part_with_index[index][0]] += 1 remainder -= 1 index = (index + 1) % len(number_set) return [int(x) / float(10 ** digit_after_decimal) for x in unround_numbers]
c7a50b5caffb072b3fb6de9478b4acf83f701780
2,013
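An illustrative call to round_to_nreads above, showing the largest-remainder step keeping the total at exactly n_reads.
print(round_to_nreads([1, 2, 3], 10))  # [2.0, 3.0, 5.0] -- the leftover read goes to the bucket with the largest remainder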
def _get_raster_extent(src): """ extract projected extent from a raster dataset (min_x, max_x, min_y, max_y) Parameters ---------- src : gdal raster Returns ------- (min_x, max_x, min_y, max_y) """ ulx, xres, xskew, uly, yskew, yres = src.GetGeoTransform() lrx = ulx + (src.RasterXSize * xres) lry = uly + (src.RasterYSize * yres) return ulx, lrx, lry, uly
49ed0b3c583cbfa5b9ecbc96d94aec42aeba3a32
2,014
def create_neighborhood_polygons(gdf): """ an attempt to build neighborhood polygons from asset points""" gdf = gdf.reset_index() neis = gdf['Neighborhood'].unique() gdf['neighborhood_shape'] = gdf.geometry # Must be a geodataframe: for nei in neis: gdf1 = gdf[gdf['Neighborhood'] == nei] inds = gdf1.index polygon = gdf1.geometry.unary_union.convex_hull # gdf.loc[inds, 'neighborhood_shape'] = [polygon for x in range(len(inds))] gdf.loc[inds, 'neighborhood_shape'] = polygon return gdf
7ca77acfd73a4b13f9088e3839121076d1a70730
2,015
def nz2epsmu(N, Z):#{{{ """ Accepts index of refraction and impedance, returns effective permittivity and permeability""" return N/Z, N*Z
3173df57ab5ad573baab87cd4fd6f353fcf69e2c
2,016
def score_to_rating_string(score): """ Convert score to rating """ if score < 1: rating = "Terrible" elif score < 2: rating = "Bad" elif score < 3: rating = "OK" elif score < 4: rating = "Good" else: rating = "Excellent" return rating
0c6a5aba0cb220a470f2d40c73b873d11b1a0f98
2,018
import subprocess import sys def ssh(server, cmd, checked=True): """ Runs command on a remote machine over ssh.""" if checked: return subprocess.check_call('ssh %s "%s"' % (server, cmd), shell=True, stdout=sys.stdout) else: return subprocess.call('ssh %s "%s"' % (server, cmd), shell=True, stdout=sys.stdout)
b8d1d492b7528dc7e601cf994b2c7a32b31af0d3
2,019
def macd(df, ewa_short, ewa_long, ewa_signal, price_col="adj_close"): """Moving Average Convergence Divergence Parameters: ----------- df : DataFrame Input dataframe. ewa_short : int Exponentially weighted average time-window for a short time-span. A common choice for the short time-window is 12 intervals. ewa_long : int Exponentially weighted average time-window for a longer time-span. A common choice for the long time-window is 26 intervals. ewa_signal : int Time-window for the EWA of the difference between long and short averages. price_col : str Column name in `df` used for defining the current indicator (e.g. "open", "close", etc.) Returns: -------- macd_ts : Series Moving average convergence-divergence indicator for the time series. """ ewa_short = int(ewa_short) ewa_long = int(ewa_long) ewa_signal = int(ewa_signal) ewa12 = df[price_col].ewm(span=ewa_short).mean() ewa26 = df[price_col].ewm(span=ewa_long).mean() macd_ts = ewa12 - ewa26 signal_line = macd_ts.ewm(span=ewa_signal).mean() return macd_ts - signal_line, 'stationary'
3140f67371394244b66b9048d273e0d5fee5e471
2,020
def __zedwalther(kin): """ Calculate the z-parameter for the Walther equation (ASTM D341). Parameters ---------- kin: scalar The kinematic viscosity of the lubricant. Returns ------- zed: scalar The z-parameter. """ zed = kin + 0.7 + 10 ** (-1.47 - 1.84 * kin - 0.51 * kin ** 2) return zed
d01a716da03230436c5f511cc65f9e7c96732d99
2,021
def chrelerr(fbest, stop): """ checks whether the required tolerance for a test function with known global minimum has already been achieved Input: fbest function value to be checked stop(0) relative error with which a global minimum with not too small absolute value should be reached stop(1) global minimum function value of a test function stop(2) if abs(fglob) is very small, we stop if the function value is less than stop(2) Output: flag = 0 the required tolerance has been achieved = 1 otherwise """ fglob = stop[1] if fbest - fglob <= max(stop[0] * abs(fglob), stop[2]): return 0 return 1
c90ad548ea9490cdb5a43cfb3559d7f26a0c57fc
2,022