Dataset schema: content (string, 35 to 416k chars) · sha1 (string, 40 chars) · id (int64, 0 to 710k)
def lenzi(df):
    """Check if a pandas DataFrame (or Series) is empty"""
    return len(df.index) == 0
561705e6ff0da3bfb03407a721f2aff71a4d42a1
4,905
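A quick usage sketch for lenzi above (hypothetical, assuming pandas is installed and the definition above is in scope):

import pandas as pd

# lenzi returns True only when the frame has no rows
print(lenzi(pd.DataFrame()))               # True
print(lenzi(pd.DataFrame({'a': [1, 2]})))  # False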
def num_to_int(num):
    """
    Checks that a numerical value (e.g. returned by robot) is an integer and
    not a float.

    Parameters
    ----------
    num : number to check

    Returns
    -------
    integer : num cast to an integer

    Raises
    ------
    ValueError : if num is not an integer
    """
    if num % 1 == 0:
        return int(num)
    else:
        raise ValueError('Expecting integer. Got: "{0}" ({1})'
                         .format(num, type(num)))
af470940eb035fe8dd0160dfe9614c2b6d060194
4,906
def format_timedelta(tdelta):
    """Return the timedelta as a 'HH:mm:ss' string."""
    total_seconds = int(tdelta.total_seconds())
    hours, remainder = divmod(total_seconds, 60 * 60)
    minutes, seconds = divmod(remainder, 60)
    return "{0:02d}:{1:02d}:{2:02d}".format(hours, minutes, seconds)
852902e7972bcd13df8b60864ebcb2d75b2b259d
4,907
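A hedged sanity check for format_timedelta above; the expected output is worked out by hand:

import datetime

td = datetime.timedelta(hours=2, minutes=3, seconds=4)  # 7384 seconds total
print(format_timedelta(td))  # '02:03:04'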
from typing import Tuple

import torch


def permute_adjacency_twin(t1, t2) -> Tuple[torch.Tensor, torch.Tensor]:
    """
    Makes a permutation of two adjacency matrices together.
    Equivalent to a renaming of the nodes.
    Supposes shape (n,n)
    """
    n, _ = t1.shape
    perm = torch.randperm(n)
    return t1[perm, :][:, perm], t2[perm, :][:, perm]
df3dc6507b8eae9d148ec9b2e664a427813d93a7
4,908
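A minimal sketch of permute_adjacency_twin above, checking that both matrices receive the same node relabelling (small illustrative tensors, not from the original):

import torch

a = torch.arange(9.).reshape(3, 3)
b = a.t().contiguous()
pa, pb = permute_adjacency_twin(a, b)
# the same permutation is applied to rows and columns of both inputs,
# so the transpose relationship between the two matrices is preserved
print(torch.equal(pa.t(), pb))  # True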
def make_element_weight_parser(weight_column):
    """
    Parameterize with the column - this allows us to generate data
    from different analysis result types.
    """
    def parse_element_weight(csv_row):
        name = csv_row[0]
        weight = float(csv_row[weight_column])  # Assert not zero?
        return name, weight
    return parse_element_weight
ddc3a4f82ecd0fe4833683759b1a1c4296839a54
4,909
def five_fold(data_set):
    """Partition a data set into five (train, test) folds.

    Args:
        data_set (List of Sample objects): The Samples to be partitioned

    Returns:
        fold: a list of len n in n-fold of (train, test), where
        train and test are lists of Samples
    """
    partition_index = int(len(data_set) / 5)
    s = 0
    fold = []
    for i in range(5):  # 0-4
        tr = data_set.copy()
        n = s + partition_index  # was -1
        te = tr[s:n]
        del tr[s:s + partition_index]
        fold.append((tr, te))
        s += partition_index
    return fold
d4179c238da3e9ebe05ab3513b80bcce982c8728
4,911
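A usage sketch for five_fold above, with plain integers standing in for Sample objects:

data = list(range(10))  # stand-in for a list of Sample objects
for train, test in five_fold(data):
    print(len(train), len(test))  # each fold: 8 train, 2 test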
from typing import List
import os

import requests


def _query_trembl(accessions: List[str], format: str) -> str:
    """Searches TrEMBL server for UniProt entries based on accession.

    The server to use is set as an environment variable 'TREMBL_SERVER'.
    Normally this would be the internal TrEMBL server which contains the
    most up-to-date version of the database.

    Args:
        accessions: list of UniProt accessions to be passed as query parameter.
        format: format of matched UniProt entries (txt, fasta, xml, list are
            valid formats).

    Returns:
        str: UniProt entries in flat file format.
    """
    server = os.environ["TREMBL_SERVER"]
    url = f"{server}/uniprot/?"
    query = f"id:{' OR id:'.join(i for i in accessions)}"
    params = {"query": query, "format": format}
    uniprot_query = requests.get(url, params=params)
    uniprot_query.raise_for_status()
    return uniprot_query.text
625f2fdc2054a1ed864e6a3258d00fe33f43787e
4,912
def cache(f):
    """A decorator to cache results for a given function call.

    Note: The caching is only done on the first argument, usually "self".
    """
    ret = {}

    def _Wrapper(*args, **kwargs):
        self = args[0]
        if self not in ret:
            ret[self] = f(*args, **kwargs)
        return ret[self]

    return _Wrapper
786218b8c248bcb7c9d519a843dd4542a9b612b0
4,913
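A short illustration of the cache decorator above; Expensive is a hypothetical class used only for this sketch:

class Expensive:
    @cache
    def compute(self):
        print("computing...")
        return 42

e = Expensive()
print(e.compute())  # prints "computing..." then 42
print(e.compute())  # cached: prints 42 only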
def trim_filters(response):
    """Trim the leading and trailing zeros from a 1-D array or sequence,
    leaving one zero on each side. This is a modified version of
    numpy.trim_zeros.

    Parameters
    ----------
    response : 1-D array or sequence
        Input array.

    Returns
    -------
    first : int
        Index of the last leading zero.
    last : int
        Index of the first trailing zero.
    """
    first = 0
    for i in response:
        if i != 0.:
            if first == 0:
                first += 1  # to avoid filters with non-zero edges
            break
        else:
            first = first + 1

    last = len(response)
    for i in response[::-1]:
        if i != 0.:
            if last == len(response):
                last -= 1  # to avoid filters with non-zero edges
            break
        else:
            last = last - 1

    first -= 1
    last += 1
    return first, last
2582c5821bd5c8487c0f9d2f55d2d982767d2669
4,914
from pathlib import Path
from typing import List
import os


def relative_paths(root: Path, paths: list) -> List[str]:
    """
    Normalises paths from incoming configuration and ensures they are
    all strings relative to root
    """
    result = []
    for path in paths:
        # more hacks for exclusions I'm not happy about
        # maybe we should subclass Path to make this cleaner?
        exclusion = path.startswith("!")
        if exclusion:
            path = path[1:]

        # make sure paths are relative!
        if isinstance(path, Path):
            inp = str(path.relative_to(root))
        elif isinstance(path, str):
            inp = path
            if os.path.isabs(path):
                inp = os.path.relpath(path, root)
        else:
            raise NotImplementedError()

        if exclusion:
            inp = "!" + inp
        result.append(inp)
    return result
8193669491bc33b3014f1b44ca7ef4157e760af0
4,916
import bs4


def is_comment(obj):
    """Is comment."""
    return isinstance(obj, bs4.Comment)
e56749b3d5f95754a031cc7286229d942333a22e
4,919
import itertools


def CollapseDictionary(mapping):
    """
    Takes a dictionary mapping prefixes to URIs and removes prefix mappings
    that begin with _ and there is already a map to their value

    >>> from rdflib import URIRef
    >>> a = {'ex': URIRef('http://example.com/')}
    >>> a['_1'] = a['ex']
    >>> len(a)
    2
    >>> a.values()
    [rdflib.term.URIRef(%(u)s'http://example.com/'), rdflib.term.URIRef(%(u)s'http://example.com/')]
    >>> CollapseDictionary(a)
    {'ex': rdflib.term.URIRef(%(u)s'http://example.com/')}
    >>> a
    {'ex': rdflib.term.URIRef(%(u)s'http://example.com/'), '_1': rdflib.term.URIRef(%(u)s'http://example.com/')}
    """
    def originalPrefixes(item):
        return item.find('_') + 1 == 1

    revDict = {}
    for k, v in list(mapping.items()):
        revDict.setdefault(v, set()).add(k)
    prefixes2Collapse = []
    for k, v in list(revDict.items()):
        origPrefixes = []
        dupePrefixes = []
        # group prefixes for a single URI by whether or not
        # they have a _ prefix
        for rt, items in itertools.groupby(v, originalPrefixes):
            if rt:
                dupePrefixes.extend(items)
            else:
                origPrefixes.extend(items)
        if origPrefixes and len(v) > 1 and len(dupePrefixes):
            # There are allocated prefixes for URIs that were originally
            # given a prefix
            assert len(origPrefixes) == 1
            prefixes2Collapse.extend(dupePrefixes)
    return dict([(k, v) for k, v in list(mapping.items())
                 if k not in prefixes2Collapse])
9f2befbd52b75b75aa15cadf9e68d5f9eebcae71
4,920
import logging


def _VerifyOptions(options):
    """Verify the passed-in options.

    Args:
        options: The parsed options to verify.

    Returns:
        Boolean, True if verification passes, False otherwise.
    """
    if options.endpoints_service and not options.openapi_template:
        logging.error('Please specify openAPI template with --openapi_template '
                      'in deploying endpoints.')
        return False
    if options.openapi_template and not options.endpoints_service:
        logging.error('Please specify endpoints service with --endpoints_service '
                      'in deploying endpoints.')
        return False
    if (options.endpoints_service and
            options.project_id not in options.endpoints_service):
        logging.error('The project "%s" is not matched to the endpoints service '
                      '"%s".', options.project_id, options.endpoints_service)
        return False
    return True
872feb5ac314ed2ef28ddbfaeff1b5dafc5e9ed8
4,921
def parse_line(line):
    """Parse a queue trace line into a dict."""
    line = line.split()
    result = {}
    if len(line) < 12:
        return result
    result["event"] = line[0]
    result["time"] = float(line[1])
    result["from"] = int(line[2])
    result["to"] = int(line[3])
    result["type"] = line[4]
    result["size"] = int(line[5])
    result["flags"] = line[6]
    result["fid"] = int(line[7])
    result["src"] = line[8]
    result["dst"] = line[9]
    result["seqnum"] = int(line[10])
    result["pktid"] = int(line[11])
    return result
432e6a624626e89d27fe6d3d9ed7c4230d97c0a6
4,922
def get_census_params(variable_ids, county_level=False):
    """Gets census url params to make an API call.

    variable_ids: The ids of the variables to request. Automatically includes NAME.
    county_level: Whether to request at the county level, or the state level.
    """
    keys = variable_ids.copy()
    keys.append("NAME")
    params = {"get": ",".join(keys)}
    params["for"] = "county:*" if county_level else "state:*"
    return params
b24204c8e9ef82575b54151bdc0ac98de0fb7fc0
4,923
def lookupName(n, names):
    """Check if name is in list of names

    Parameters
    ----------
    n : str
        Name to check
    names : list
        List of names to check in

    Returns
    -------
    bool
        Flag denoting if name has been found in list (True) or not (False)
    """
    return n in names
0fbb97e252f5daf9de52a946c206fa74395b01c6
4,924
def calculate_appointments(new_set, old_set):
    """
    Calculate different appointment types.
    Used for making useful distinctions in the email message.
    new_set will be the fresh set of all available appointments at a given interval
    old_set will be the previous appointments variable getting passed in.

    Ex1: Addition of HONEOYE
        new_set = {'LIVERPOOL', 'BROOKLYN', 'HONEOYE', 'KINGSTON'}
        old_set = {'LIVERPOOL', 'BROOKLYN', 'KINGSTON'}
        returns -> new_appointments = {'HONEOYE'}
                   all_appointments = {'LIVERPOOL', 'BROOKLYN', 'KINGSTON', 'HONEOYE'}

    Ex2: No Changes
        new_set = {'LIVERPOOL', 'BROOKLYN', 'HONEOYE', 'KINGSTON'}
        old_set = {'LIVERPOOL', 'BROOKLYN', 'HONEOYE', 'KINGSTON'}
        returns -> new_appointments = set() (empty set)
                   all_appointments = {'LIVERPOOL', 'BROOKLYN', 'HONEOYE', 'KINGSTON'}
    """
    # New minus Old yields the set of New appointments
    new_appointments = new_set.difference(old_set)
    # New intersect Old yields the appointments present in both
    # (the overlap in a Venn diagram)
    old_appointments = new_set.intersection(old_set)
    return new_appointments, old_appointments
b54735293ba910e2b310e55e263e2611863d088a
4,925
def add(left: int, right: int):
    """Add up two numbers."""
    print(left + right)
    return 0
75d7bd10cfdfb38211f6faf838b5e200e8593693
4,926
import random


def rand_x_digit_num(x):
    """Return a random X-digit number as a string, zero-padded to exactly x digits."""
    return '{0:0{x}d}'.format(random.randint(0, 10**x - 1), x=x)
b46864143ca6186ebeede6c687a85d1b585e70db
4,927
def subtractNums(x, y):
    """Subtract x from y and return the result."""
    return y - x
2b16636e74a2d1a15e79e4669699c96adcd3833b
4,928
import os
import shutil


def _copy_and_rename_file(file_path: str, dest_dir: str, new_file_name):
    """
    Copies the specified file to the dest_dir (creating the directory if
    necessary) and renames it to the new_file_name

    :param file_path: file path of the file to copy
    :param dest_dir: directory to copy the file to
    :param new_file_name: name the file should be changed to
    :return: file path of the new file
    """
    try:
        # Create the destination directory if it does not exist
        os.makedirs(dest_dir, exist_ok=True)

        # Copy the file
        print("Copying file: {0}".format(file_path))
        new_file_copy = shutil.copy(file_path, dest_dir)
        print("Copied file to {0}".format(dest_dir))

        # Rename the file
        print("Renaming file: {0}".format(new_file_copy))
        new_file_path = os.path.join(dest_dir, new_file_name)
        os.rename(src=new_file_copy, dst=new_file_path)
        print("File successfully renamed to " + new_file_path)
        return new_file_path
    except Exception as e:
        print("Failed to copy or rename file.")
        print(e)
b53ce9d5e968e257919c8f2eb7749a171eddb59d
4,929
def filter_X_dilutions(df, concentration):
    """Select only one dilution ('high', 'low', or some number)."""
    assert concentration in ['high', 'low'] or type(concentration) is int
    df = df.sort_index(level=['CID', 'Dilution'])
    df = df.fillna(999)  # Pandas doesn't select correctly on NaNs
    if concentration == 'low':
        df = df.groupby(level=['CID']).first()
    elif concentration == 'high':
        df = df.groupby(level=['CID']).last()
    else:
        df = df.loc[[x for x in df.index if x[1] == concentration]]
        df = df.groupby(level=['CID']).last()
    df = df.replace(999, float('NaN'))  # Undo the fillna line above.
    return df
b886c87c1c5b96e6efc951ef197d3a0fb13707c1
4,930
def update_params(base_param: dict, additional: dict):
    """Overwrite base parameter dictionary.

    Parameters
    ----------
    base_param : dict
        base param dictionary
    additional : dict
        additional param dictionary

    Returns
    -------
    dict
        updated parameter dictionary
    """
    for key in additional:
        base_param[key] = additional[key]
    return base_param
e73581cb0b8d264343ead56da52c6dc12fe49dd7
4,931
def briconToScaleOffset(brightness, contrast, drange):
    """Used by the :func:`briconToDisplayRange` and the :func:`applyBricon`
    functions.

    Calculates a scale and offset which can be used to transform a display
    range of the given size so that the given brightness/contrast settings
    are applied.

    :arg brightness: Brightness, between 0.0 and 1.0.
    :arg contrast:   Contrast, between 0.0 and 1.0.
    :arg drange:     Data range.
    """
    # The brightness is applied as a linear offset,
    # with 0.5 equivalent to an offset of 0.0.
    offset = (brightness * 2 - 1) * drange

    # If the contrast lies between 0.0 and 0.5, it is
    # applied to the colour as a linear scaling factor.
    if contrast <= 0.5:
        scale = contrast * 2

    # If the contrast lies between 0.5 and 1, it
    # is applied as an exponential scaling factor,
    # so lower values (closer to 0.5) have less of
    # an effect than higher values (closer to 1.0).
    else:
        scale = 20 * contrast ** 4 - 0.25

    return scale, offset
b75ce49f4e79f7fef34a855f2897cfa6b4bd7cc7
4,932
import pathlib


def check_path(path: pathlib.Path) -> bool:
    """Check that the path exists and points to a regular file."""
    return path.exists() and path.is_file()
2279dde6912ae6f6eb51d90ed5e71e0b3892fea9
4,933
def count(A, target):
    """Invoke recursive function to return number of times target appears in A."""
    def rcount(lo, hi, target):
        """Use recursion to count occurrences of target in A[lo:hi+1]."""
        if lo == hi:
            return 1 if A[lo] == target else 0

        mid = (lo + hi) // 2
        left = rcount(lo, mid, target)
        right = rcount(mid + 1, hi, target)
        return left + right

    return rcount(0, len(A) - 1, target)
79d9be64d332a11993f65f3c0deba8b4de39ebda
4,935
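A quick check of the divide-and-conquer count above:

print(count([1, 2, 1, 1, 3], 1))  # 3
print(count([5], 5))              # 1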
def is_bool_type(typ):
    """Check if the given type is a bool."""
    if hasattr(typ, '__supertype__'):
        typ = typ.__supertype__
    return isinstance(typ, type) and issubclass(typ, bool)
3d8dfae184be330c8cbd7c0e7382311fef31ede5
4,936
import os


def get_file_paths(directory, file=None):
    """
    Collects the file paths from the given directory if the file is not given,
    otherwise creates a path joining the given directory and file.

    :param directory: The directory where the file(s) can be found
    :param file: A file in the directory
    :return: The sorted list of collected file paths
    """
    file_paths = []

    # get the absolute path of the one given file
    if file is not None:
        source_file_path = os.path.join(directory, file)
        if os.path.isfile(source_file_path):
            file_paths.append(source_file_path)

    # if the given file doesn't exist or wasn't given, all files from the
    # directory will be loaded, except desktop.ini
    if len(file_paths) == 0:
        for child in os.listdir(directory):
            if child != 'desktop.ini':
                child_path = os.path.join(directory, child)
                if os.path.isfile(child_path):
                    file_paths.append(child_path)

    return sorted(file_paths)
62e79a52682e046b83f9ad89df18d4dece7bf37a
4,937
import numpy


def interpolate_missing(sparse_list):
    """Use linear interpolation to estimate values for missing samples."""
    dense_list = list(sparse_list)
    x_vals, y_vals, x_blanks = [], [], []
    for x, y in enumerate(sparse_list):
        if y is not None:
            x_vals.append(x)
            y_vals.append(y)
        else:
            x_blanks.append(x)

    if x_blanks:
        interpolants = numpy.interp(x_blanks, x_vals, y_vals)
        for x, y in zip(x_blanks, interpolants):
            dense_list[x] = y

    return dense_list
a2983a08f00b4de2921c93cc14d3518bc8bd393d
4,938
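A usage sketch for interpolate_missing above; the gaps at indices 1 and 3 are filled linearly:

print(interpolate_missing([0.0, None, 2.0, None, 4.0]))
# [0.0, 1.0, 2.0, 3.0, 4.0]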
def get_jmp_addr(bb):
    """
    @param bb List of PseudoInstructions of one basic block
    @return Address of jump instruction in this basic block
    """
    for inst in bb:
        if inst.inst_type == 'jmp_T':
            return inst.addr
    return None
13e69032bc7d6ed5413b5efbb42729e11661eab1
4,939
def totals_per_time_frame(data_points, time_frame):
    """For a set of data points from a single CSV file, calculate the
    percent retransmissions per time frame.

    Args:
        data_points (List[List[int,int,float]]): A list of data points.
            Each data point consists of
            0: 1 if is a transmission,
            1: 1 if is a retransmission,
            2: time in seconds
        time_frame (float): increment of time in seconds in which new data
            points are calculated

    Returns:
        List[List[float,float]]: A list of data points containing the percent
        retransmissions, and the time in seconds
    """
    time_frame_min = 0
    time_frame_max = time_frame
    percent_retransmissions_list = []
    transmissions_in_frame = 0
    retransmissions_in_frame = 0
    index = 0

    while time_frame_max < data_points[-1][2] and index < len(data_points):
        if (data_points[index][2] >= time_frame_min
                and data_points[index][2] < time_frame_max):
            transmissions_in_frame += data_points[index][0] + data_points[index][1]
            retransmissions_in_frame += data_points[index][1]
            index += 1
        else:
            if transmissions_in_frame > 0:
                percent_retransmissions = 100 * retransmissions_in_frame / transmissions_in_frame
            else:
                percent_retransmissions = 0
            percent_retransmissions_list.append([percent_retransmissions, time_frame_min])
            time_frame_min = time_frame_max
            time_frame_max += time_frame
            transmissions_in_frame = 0
            retransmissions_in_frame = 0

    return percent_retransmissions_list
9e71ac2fe7deabd36d7df8ae099575b191260c5d
4,941
import torch


def predict_batch(model, x_batch, dynamics, fast_init):
    """
    Compute the softmax prediction probabilities for a given data batch.

    Args:
        model: EnergyBasedModel
        x_batch: Batch of input tensors
        dynamics: Dictionary containing the keyword arguments
            for the relaxation dynamics on u
        fast_init: Boolean to specify if fast feedforward initialization
            is used for the prediction

    Returns:
        Softmax classification probabilities for the given data batch
    """
    # Initialize the neural state variables
    model.reset_state()

    # Clamp the input to the test sample, and remove nudging from output
    model.clamp_layer(0, x_batch.view(-1, model.dimensions[0]))
    model.set_C_target(None)

    # Generate the prediction
    if fast_init:
        model.fast_init()
    else:
        model.u_relax(**dynamics)

    return torch.nn.functional.softmax(model.u[-1].detach(), dim=1)
61102cfa3bcb3e7d52e9f3eca8c97db4d726c1a7
4,944
import math


def get_test_paths(paths, snaps):
    """Return $snaps paths to be tested on GLUE."""
    if snaps == -1:
        return paths
    interval = len(paths) * 1. / snaps
    test_paths = []
    for i in range(1, snaps + 1):
        idx = int(math.ceil(interval * i)) - 1
        test_paths.append(paths[idx])
    return test_paths
a2ac1f89740e85b6322e553559850c0e686a28c8
4,945
def is_success(code):
    """Return True if the status code means the client's request was
    successfully received, understood, and accepted (2xx)."""
    return 200 <= code <= 299
8a6e64c0f218ca5a866a444c730e1ebf7628727e
4,946
def transform(x, y):
    """
    Takes an input vector of x values and y values and transforms the y
    values, returning y in a linearized format (assuming an n*log(n)
    function was used to create y from x).
    """
    final = []
    for i in range(0, len(y)):
        new = y[i]  # /x[i]
        final.append(2 ** new)
    return final
119db625f5ebf469794bf3bdacd20a1c70ccd133
4,948
import argparse


def get_args(arg_input):
    """Parses the given argument list with argparse and returns the namespace.

    Parameters
    -------------
    arg_input : list, shape (n_nargs,)
        contains list of arguments passed to function

    Returns
    -------------
    args : namespace
        contains namespace with keys and values for each parser argument
    """
    parser = argparse.ArgumentParser(description='tpu creation script')
    parser.add_argument(
        '--name',
        type=str,
        default='tpu',
        help='Name to use for tpu vm',
    )
    parser.add_argument(
        '--zone',
        type=str,
        default='europe-west4-a',
        help='zone',
    )
    parser.add_argument(
        '--version',
        type=str,
        default='tpu-vm-pt-1.11',
        help='software version to load',
    )
    parser.add_argument(
        '--accelerator-type',
        type=str,
        default='v3-8',
        help='accelerator type. Eg v3-8, v2-8',
    )
    parser.add_argument(
        '--project',
        type=str,
        default='trc-generative',
        help='gcloud project name',
    )
    parser.add_argument(
        '-n', '--number_of_tpus',
        type=int,
        default=1,
        help='Number of TPUs to create.',
    )
    args = parser.parse_args(arg_input)
    return args
25b01282deef981197b2c7b62d33211a3200e787
4,950
def cleanup_databases():
    """
    Returns:
        bool: admin_client fixture should ignore any existing databases at
        start of test and clean them up.
    """
    return False
63fa94389609b8e28779d1e9e55e9b1ecde502b6
4,951
def spot_centroid(regions):
    """Returns centroids for a list of regionprops.

    Args:
        regions (regionprops): List of region proposals (skimage.measure).

    Returns:
        list: Centroids of regionprops.
    """
    return [r.centroid for r in regions]
f53f403dddf0af123afd207e33cc06254a0f2538
4,952
def overrides(conf, var):
    """Merge the entries of conf[var] (or of each conf[item] when var is a
    list) into the top level of conf, overriding keys that already exist."""
    if isinstance(var, list):
        for item in var:
            if item in conf:
                for key, value in conf[item].items():
                    conf[key] = value
    elif var in conf:
        for key, value in conf[var].items():
            conf[key] = value
    return conf
18375dc43a0d684feaf9089756ecb45eb5a366f3
4,953
def make_hashable(data):
    """Make the given object hashable.

    It makes it ready to use in a `hash()` call, making sure that it's always
    the same for lists and dictionaries if they have the same items.

    :param object data: the object to hash
    :return: a hashable object
    :rtype: object
    """
    if isinstance(data, (list, tuple)):
        return tuple(make_hashable(item) for item in data)
    elif isinstance(data, dict):
        return tuple(
            (key, make_hashable(value))
            for key, value in sorted(data.items())
        )
    else:
        return data
e4b88978ddee6d4dfc354845184a0e80b1f434bf
4,954
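A brief illustration of make_hashable above: two separately built but equal nested structures hash identically:

a = {"b": [1, 2], "a": {"x": 1}}
b = {"a": {"x": 1}, "b": [1, 2]}
print(hash(make_hashable(a)) == hash(make_hashable(b)))  # True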
import os
import glob


def check_file(file):
    """Check whether the file exists locally or can be found under the
    project tree, and return the file name."""
    # If the argument is an existing file (or an empty string), return it as-is
    if os.path.isfile(file) or file == '':
        return file
    # Otherwise search the whole project tree for a matching file name
    else:
        files = glob.glob('./**/' + file, recursive=True)
        # make sure at least one match exists
        assert len(files), 'File Not Found: %s' % file
        # make sure the match is unique
        assert len(files) == 1, "Multiple files match '%s', specify exact path: %s" % (file, files)
        # return the first (only) match
        return files[0]
9a1d3283a26a1fcdca67622413dc4fc8e887fbee
4,955
import torch


def regular_channels(audio, new_channels):
    """Convert a torchaudio-style pair ([tensor, sample_rate]) to the target
    channel count and return the new [tensor, sample_rate]."""
    sig, sr = audio
    if sig.shape[0] == new_channels:
        return audio
    if new_channels == 1:
        # take only the first channel's frames
        new_sig = sig[:1, :]
    else:
        # duplicate the existing channels: c*f -> 2c*f
        new_sig = torch.cat([sig, sig], dim=0)
    return [new_sig, sr]
5b055d965f35fc4cf0f434b34e8f579f321fee89
4,957
def a_function(my_arg, another):
    """
    This is the brief description of my function.

    This is a more complete example of my function. It can include doctest,
    code blocks or any other reST structure.

    >>> a_function(10, [MyClass('a'), MyClass('b')])
    20

    :param int my_arg: The first argument of the function. Just a number.
    :param another: The other argument of the important function.
    :type another: A list of :class:`MyClass`
    :return: The length of the second argument times the first argument.
    :rtype: int
    """
    return my_arg * len(another)
8624edfe3ec06b53e065a6672c3b21682cdefe06
4,958
import logging

import pandas


def _shape(df):
    """Return DataFrame shape even if it is not a Pandas dataframe."""
    if type(df) == pandas.DataFrame or type(df) == pandas.Series:
        return df.shape

    try:
        shape = (len(df), len(df.columns))
    except Exception as e:
        logging.error(e)
        raise e

    return shape
d5af0e3f92ee649091d9fc8b904e60931fb0f2f7
4,959
import subprocess


def get_cmd_output(cmd):
    """Run a command in shell, and return the Unicode output."""
    try:
        data = subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as ex:
        data = ex.output
    try:
        data = data.decode("utf-8")
    except UnicodeDecodeError:
        data = data.decode("latin1")
    return data
f1ac61b45b2adb404b50c5001e8737db2c0a9c95
4,960
import os


def get_window_size():
    """Return the window width and height"""
    width = os.popen(
        "xrandr --current | grep '*' | uniq | awk '{print $1}' | cut -d 'x' -f1"
    ).read().strip("\n")
    height = os.popen(
        "xrandr --current | grep '*' | uniq | awk '{print $1}' | cut -d 'x' -f2"
    ).read().strip("\n")
    if '\n' in width:
        # multiple monitors: return one size per display
        widths = width.split('\n')
        heights = height.split('\n')
        return widths, heights
    else:
        return width, height
7d28746f6c1805e31d93b4c934b9442a19d4cfa1
4,961
def homology(long_sequence, short_sequence):
    """
    Cross-compare to find the strand of long sequence with the highest
    similarity with the short sequence.

    :param long_sequence: str
    :param short_sequence: str
    :return ans: str, the strand of long sequence with the highest similarity
        with the short sequence
    """
    # number of characters in the long sequence
    i = len(long_sequence)
    # number of characters in the short sequence
    j = len(short_sequence)
    # number of the same element between long- and short- sequence
    # in a certain part of the long sequence
    max_match = 0
    # position where the max_match begins in long sequence
    max_match_point = 0
    ans = ''
    # (i - j + 1) = times needed for cross-comparison
    for k in range(i - j + 1):
        match = 0
        for n in range(j):
            # if find the same element in the same position of
            # long- and short- sequence, count one
            if short_sequence[n] == long_sequence[n + k]:
                match += 1
        # find the biggest match, and the start position(k) in long sequence
        if match > max_match:
            max_match = match
            max_match_point = k
    # the strand of long sequence with the highest similarity
    # with the short sequence
    ans = long_sequence[max_match_point:(max_match_point + j)]
    return ans
1865e7b60cfce3b1ca4e7884377a5a218ecba96a
4,963
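A small worked example for homology above (toy sequences, not from the original):

# the best-matching window of the long sequence is returned
print(homology("ATGCCATT", "GCCA"))  # 'GCCA' (long_sequence[2:6])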
import logging


def create_sql_delete_stmt(del_list, name):
    """
    :param del_list: list of records that need to be formatted in SQL delete statement.
    :param name: the name of the table
    :return: SQL statement for deleting the specific records
    """
    sql_list = ", ".join(del_list)
    sql_stmt = f"DELETE FROM method_usage.pandas_{name} WHERE {name}_id IN ({sql_list})"
    logging.info(f"{len(del_list)} {name} in delete statement")
    return sql_stmt
aec744198f1b0dd30836f431ac51a4080911f8ae
4,964
def sanitize_email(email):
    """
    Return an e-mail address in lower-case with leading and trailing
    whitespace stripped.

    >>> sanitize_email(' [email protected] ')
    '[email protected]'
    """
    return email.lower().strip()
b99e9c38db4fe889e1d0a9175d6535c4790f2f43
4,967
def greet(name):
    """Greet message, formatted differently for Johnny."""
    if name == "Johnny":
        return "Hello, my love!"
    return "Hello, {name}!".format(name=name)
86efdaccd65a870fd80e402491e9468669cdcd40
4,968
def _toIPv4AddrString(intIPv4AddrInteger):
    """Convert the IPv4 address integer to the IPv4 address string.

    :param int intIPv4AddrInteger: IPv4 address integer.
    :return: IPv4 address string.
    :rtype: str

    Example::

        intIPv4AddrInteger    Return
        ---------------------------------
        3221225985         -> '192.0.2.1'

    Test:
        >>> _toIPv4AddrString(3221225985)
        '192.0.2.1'
    """
    return (
        str((intIPv4AddrInteger >> 24) & 0xFF) + '.' +
        str((intIPv4AddrInteger >> 16) & 0xFF) + '.' +
        str((intIPv4AddrInteger >> 8) & 0xFF) + '.' +
        str(intIPv4AddrInteger & 0xFF))
ac5f55146eedaf0b7caca19327ae0a88c9d5282a
4,970
import re


def look_behind(s: str, end_idx: int) -> str:
    """
    Given a string containing semi-colons, find the span of text
    after the last semi-colon.
    """
    span = s[: (end_idx - 1)]
    semicolon_matches = [
        (m.group(), m.start(), m.end())
        for m in re.finditer(r"(?<=(;))", span)
    ]
    if len(semicolon_matches) == 0:
        start_idx = 0
    else:
        start_idx = semicolon_matches[-1][2]
    return span[start_idx:end_idx].strip()
0cc478e73edd713fa72743f36e29001bb214e26c
4,971
import six
import pytz


def make_aware(dt, tz=None):
    """Convert naive datetime object to tz-aware"""
    if tz:
        if isinstance(tz, six.string_types):
            tz = pytz.timezone(tz)
    else:
        tz = pytz.utc
    if dt.tzinfo:
        # already aware: convert to the requested timezone
        # (the original called dt.astimezone(dt.tzinfo), a no-op)
        return dt.astimezone(tz)
    else:
        return tz.localize(dt)
b5003de5055c5d283f47e33dfdd6fbe57d6fce96
4,972
from itertools import product


def listCombination(lists) -> list:
    """
    Given a list of lists, return every possible combination of one
    element drawn from each list.

    :param lists: a list made up of several lists
    :return: all possible combinations of the elements
    """
    result = []
    resultAppend = result.append
    for i in product(*lists):
        resultAppend(i)
    return result
6023cdc205b2780c5cd2cf56113d48a0675b98bf
4,973
from typing import List


def sequence_to_ngram(sequence: str, N: int) -> List[str]:
    """
    Chops a sequence into overlapping N-grams (substrings of length N)

    :param sequence: Sequence to convert to N-grams
    :type sequence: str
    :param N: Length of N-grams (int)
    :type N: int
    :return: List of n-grams
    :rtype: List[str]
    """
    return [sequence[i: i + N] for i in range(len(sequence) - N + 1)]
8cbe97ee34c75ca3aad038236bd875ea0c3450cd
4,974
def _format_path(path):
    """Format path to data for which an error was found.

    :param path: Path as a list of keys/indexes used to get to a piece of data
    :type path: collections.deque[str|int]
    :returns: String representation of a given path
    :rtype: str
    """
    path_with_brackets = ''.join('[{!r}]'.format(fragment) for fragment in path)
    return '{}'.format(path_with_brackets)
1809080453af154824e867cd8104cedbd616b937
4,975
import numpy


def generateStructuredGridPoints(nx, ny, v0, v1, v2, v3):
    """
    Generate structured grid points

    :param nx: number of x cells
    :param ny: number of y cells
    :param v0: south west corner
    :param v1: south east corner
    :param v2: north east corner
    :param v3: north west corner
    :returns array of size (nx, ny, 3)
    """
    # parametric
    nx1 = nx + 1
    ny1 = ny + 1
    x = numpy.linspace(0., 1., nx1)
    y = numpy.linspace(0., 1., ny1)
    xx1, yy1 = numpy.meshgrid(x, y, indexing='ij')
    xx0 = 1.0 - xx1
    yy0 = 1.0 - yy1

    # structured points
    spts = numpy.zeros(list(xx0.shape) + [3], numpy.float64)
    for j in range(3):
        spts[..., j] = xx0 * yy0 * v0[j] + \
                       xx1 * yy0 * v1[j] + \
                       xx1 * yy1 * v2[j] + \
                       xx0 * yy1 * v3[j]
    return spts
0de9a3a3a47b26c3c3d56088c7ec55d241edeff3
4,977
def clear(keyword):
    """``clear`` property validation."""
    return keyword in ('left', 'right', 'both', 'none')
c16cc980b9af82b4210e3c8c430cd65934596aa1
4,978
def _get_embl_key(line):
    """Return first part of a string as an EMBL key (ie 'AC M14399;' -> 'AC')"""
    # embl keys have a fixed size of 2 chars
    return line[:2]
b54f1a94f120f7ac63a0dd2a22bd47d5a5d5eeb9
4,979
import os


def size_too_big(path):
    """Returns True if the file is too large (> 5 MB)."""
    five_mb = 5242880
    return os.path.getsize(path) > five_mb
62e926247ab5439732ce61c8e59fe4a50366cba0
4,980
import os


def local_tmp_dir():
    """tmp directory for tests"""
    tmp_dir_path = "./tmp"
    if not os.path.isdir(tmp_dir_path):
        os.mkdir(tmp_dir_path)
    return tmp_dir_path
3f1f710ba18ec336982c69225c999a75fed9c481
4,981
def get_query_string_from_process_type_string(process_type_string: str) -> str:  # pylint: disable=invalid-name
    """
    Take the process type string of a Node and create the queryable type string.

    :param process_type_string: the process type string
    :type process_type_string: str
    :return: string that can be used to query for subclasses of the process type
        using 'LIKE <string>'
    :rtype: str
    """
    if ':' in process_type_string:
        return f'{process_type_string}.'

    path = process_type_string.rsplit('.', 2)[0]
    return f'{path}.'
1380ad90a98da26237176890c52a75684e92964e
4,982
def label_vertices(ast, vi, vertices, var_v):
    """Label each node in the AST with a unique vertex id

    vi : vertex id counter
    vertices : list of all vertices (modified in place)
    """
    def inner(ast):
        nonlocal vi
        if type(ast) != dict:
            if type(ast) == list:
                pass
            return ast
        ast["vertex_id"] = vi
        vertices.append(ast["tag"])
        vi += 1
        for k, v in ast.items():
            if k != "tag":
                inner(v)
        return ast

    return inner(ast)
1216c3ff1f5995e24f0f3a245fad5db820335f4d
4,983
def object_gatekeeper(obj, is_auth, ignore_standalone=False):
    """
    It's OK to use available_to_public here because the underlying logic
    is identical.
    """
    if not obj:
        return False
    if is_auth:
        return True
    else:
        try:
            return obj.available_to_public
        except Exception:
            pass
    return False
66f0749788f462ba9a0dfee6edf890245aca15ba
4,984
def adaptsim(f, a, b, eps=1e-8, max_iter=10000):
    """Adaptive Simpson quadrature.

    P.S. The name of this function comes from Gander, W. and W. Gautschi,
    "Adaptive Quadrature - Revisited," BIT, Vol. 40, 2000, pp. 84-101,
    which can be found at https://people.inf.ethz.ch/gander/.
    This implementation, however, does not use the recursive method
    from that paper.

    Args:
        f: the function to integrate
        a, b: the integration interval
        eps: target accuracy; once reached, iteration stops and the
            integral value is returned
        max_iter: maximum number of iterations; if the target accuracy is
            not reached within this many iterations, raise an Exception

    Returns:
        (I, m, p)
        I: approximate value of the integral
        m: number of subdivision levels
        p: the partition points

    Raises:
        Exception: the target accuracy could not be reached within max_iter steps
    """
    p = [a, b]  # partition points
    p0 = p
    ep = [eps]
    m = 0
    q = 0
    I = 0
    for _iter_times in range(int(max_iter)):
        n1 = len(ep)
        n = len(p0)
        if n <= 1:
            break
        h = p0[1] - p0[0]
        s0 = h / 6 * (f(p0[0]) + 4 * f(p0[0] + h / 2) + f(p0[0] + h))
        s1 = h / 12 * (f(p0[0]) + 4 * f(p0[0] + h / 4) + f(p0[0] + h / 2))
        s2 = h / 12 * (f(p0[0] + h / 2) + 4 * f(p0[0] + 3 * h / 4) + f(p0[0] + h))
        if abs(s0 - s1 - s2) <= 15 * ep[0]:
            I += s1 + s2
            p0 = p0[1:]
            if n1 >= 2:
                ep = ep[1:]
            q += 1
        else:
            m += 1
            p0 = [p0[0], p0[0] + h / 2] + p0[1:]
            if n1 == 1:
                ep = [ep[0] / 2, ep[0] / 2]
            else:
                ep = [ep[0] / 2, ep[1] / 2] + ep[1:]
        if q == 0:
            p = p0
        else:
            p = p[:q] + p0
    else:
        raise Exception('Could not reach the target accuracy within max_iter steps')
    return I, m, p
b24ed3c2493b8ece19a69cf781a75e7a9e0f9cd0
4,985
import numpy as np


def smoothedEnsembles(data, lat_bounds, lon_bounds):
    """
    Smoothes all ensembles by taking subsamples
    """
    ### Import modules
    print('\n------- Beginning of smoothing the ensembles per model -------')

    ### Save MM
    newmodels = data.copy()
    mmean = newmodels[-1, :, :, :, :]  # 7 for MMmean
    otherens = newmodels[:7, :, :, :, :]

    newmodeltest = np.empty(otherens.shape)
    for modi in range(otherens.shape[0]):
        for sh in range(otherens.shape[1]):
            ensnum = np.arange(otherens.shape[1])
            slices = np.random.choice(ensnum, size=otherens.shape[0], replace=False)
            modelsmooth = otherens[modi]
            slicenewmodel = np.nanmean(modelsmooth[slices, :, :, :], axis=0)
            newmodeltest[modi, sh, :, :, :] = slicenewmodel

    ### Add new class
    smoothClass = np.append(newmodeltest, mmean[np.newaxis, :, :, :], axis=0)
    print('--Size of smooth twin --->', newmodeltest.shape)
    print('--NEW Size of smoothedclass--->', smoothClass.shape)
    print('------- Ending of smoothing the ensembles per model -------')
    return smoothClass
ed8fe2bc3d4e77384179d6a1a1406ca9446dc973
4,986
import socket


def ip2host(ls_input):
    """
    Parameters : list of IP addresses
    ----------
    Returns : list of tuples, n=2, consisting of the IP and hostname
    """
    ls_output = []
    for ip in ls_input:
        try:
            x = socket.gethostbyaddr(ip)
            ls_output.append((ip, x[0]))
        except Exception as e:
            print('Error: ', e)
            ls_output.append((ip, None))
    return ls_output
234b42bf0406ae5fb67d2c1caba9f7f3a1e92a0c
4,987
import itertools


def merge(cluster_sentences):
    """Merge multiple lists into one flat list."""
    cluster_sentences = list(itertools.chain(*cluster_sentences))
    return cluster_sentences
ec5c9bf7a89bf0d047050d3684876ed481617706
4,988
import re


def filter_output(output, regex):
    """Filter output by defined regex. Output can be either string, list or tuple.
    Every string is split into list line by line. After that regex is applied
    to filter only matching lines, which are returned back.

    :returns: list of matching records
    """
    result = []
    if isinstance(output, str):
        for line in output.splitlines():
            result += re.findall(regex, line)
    elif isinstance(output, (list, tuple)):
        for item in output:
            result.append(filter_output(item, regex))
    else:
        raise RuntimeError('Only strings and lists are supported by filter_output(), '
                           'but output has type {}'.format(type(output)))
    return result
d9760a644bb83aee513391966522946a6514ab72
4,990
def increment_with_offset(c: str, increment: int, offset: int) -> str:
    """Caesar shift cipher."""
    return chr(((ord(c) - offset + increment) % 26) + offset)
50b10b6d3aff3dff157dfc46c368ae251ed060bb
4,991
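A quick sketch of increment_with_offset above; the offset selects the alphabet (97 for lower-case, 65 for upper-case):

print(increment_with_offset('z', 1, ord('a')))  # 'a' (wraps around)
print(increment_with_offset('A', 3, ord('A')))  # 'D'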
import numpy


def phi_analytic(dist, t, t_0, k, phi_1, phi_2):
    """The analytic solution to the Gaussian diffusion problem."""
    phi = (phi_2 - phi_1) * (t_0 / (t + t_0)) * \
        numpy.exp(-0.25 * dist**2 / (k * (t + t_0))) + phi_1
    return phi
49fac597afa876f81ba5774bf82fedcfb88f6c7f
4,993
import subprocess


def get_changed_files(base_commit: str, head_commit: str, subdir: str = '.'):
    """Get the files changed by the given range of commits."""
    cmd = ['git', 'diff', '--name-only', base_commit, head_commit, '--', subdir]
    files = subprocess.check_output(cmd)
    return files.decode('UTF-8').split('\n')
ebc0a117f2f11d585475f4781e67331e3ca9a06a
4,994
def denormalize_ged(g1, g2, nged):
    """Converts normalized ged into ged."""
    return round(nged * (g1.num_nodes + g2.num_nodes) / 2)
214813120d552ef5ece10349978238117fe26cf3
4,995
import copy


def ImproveData_v2(Lidar_DataOld, Lidar_Data, Data_Safe, Speed, orientation, orientationm1):
    """
    The function calculates new positions for obstacles, now taking into
    account the car's relative speed in relation to each point. We need
    the accelerometer for that.

    Return:
        Advanced_Data: [step_i, distance_i, x_i, y_i, Xsafe_i or Xvel_i,
        Ysafe_i or Yvel_i]
    """
    # Filtering the data within the alpha range:
    # Data_Safe -(90,270); DataSafeFiltered (-alpha,+alpha)
    temp = []
    i = 0
    # Updating the data set with the new positions calculated according to
    # the relative speed of the car and the objects
    Advanced_Data = copy.deepcopy(Data_Safe)
    while i < len(temp):
        j = 0
        while j < len(Data_Safe):
            if temp[i][0] == Advanced_Data[j][0]:
                Advanced_Data[j][4] = temp[i][6]
                Advanced_Data[j][5] = temp[i][7]
            j += 1
        i += 1
    return Advanced_Data
2bd6c0f167e65ad4a461d75a95539b68dc0b1a70
4,996
def convert_to_signed_int_32_bit(hex_str):
    """
    Utility function to convert a hex string into a 32 bit signed hex integer value

    :param hex_str: hex String
    :return: signed 32 bit integer
    """
    val = int(hex_str, 16)
    if val > 0x7FFFFFFF:
        val = ((val + 0x80000000) & 0xFFFFFFFF) - 0x80000000
    return val
f8d39b20475c30f162948167f8534e367d9c58e8
4,998
def parent_node(max_child_node, max_parent_node):
    """
    Parents child node into parent node hierarchy

    :param max_child_node: MaxPlus.INode
    :param max_parent_node: MaxPlus.INode
    """
    max_child_node.SetParent(max_parent_node)
    return max_child_node
1a54d4c485e61361633165da0f05c8f871296ae6
4,999
import getpass
import re
import logging
import sys


def get_user():
    """Read the user's QQ number and password from the terminal."""
    username = input('please input QQ number: ').strip()
    if not re.match(r'^[1-9][0-9]{4,9}$', username):
        logging.error('\033[31mQQ number is wrong!\033[0m')
        sys.exit(1)
    password = getpass.getpass('password: ')
    return {
        'username': username,
        'password': password
    }
766e8332ea0bed1b793ba80cbf42a43bd54fb800
5,000
def check_if_bst(root, min, max):
    """Given a binary tree, check if it follows binary search tree property

    To start off, run `check_if_bst(BT.root, -math.inf, math.inf)`"""
    if root is None:
        return True
    if root.key < min or root.key >= max:
        return False

    return check_if_bst(root.left, min, root.key) and check_if_bst(
        root.right, root.key, max
    )
1bb4b601ef548aec9a4ab2cf5242bc5875c587a2
5,001
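A usage sketch for check_if_bst above; Node is a minimal hypothetical tree class for this illustration:

import math

class Node:
    def __init__(self, key, left=None, right=None):
        self.key, self.left, self.right = key, left, right

bst = Node(8, Node(3, Node(1), Node(6)), Node(10))
print(check_if_bst(bst, -math.inf, math.inf))  # True

not_bst = Node(8, Node(3, Node(1), Node(9)), Node(10))  # 9 sits in the left subtree
print(check_if_bst(not_bst, -math.inf, math.inf))  # False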
def aggregate(collection, pipeline):
    """Executes an aggregation on a collection.

    Args:
        collection: a `pymongo.collection.Collection` or
            `motor.motor_tornado.MotorCollection`
        pipeline: a MongoDB aggregation pipeline

    Returns:
        a `pymongo.command_cursor.CommandCursor` or
        `motor.motor_tornado.MotorCommandCursor`
    """
    return collection.aggregate(pipeline, allowDiskUse=True)
03ea889ea23fb81c6a329ee270df2ac253e90d69
5,002
def _format_port(port):
    """
    Compute the right port type str

    Arguments
    -------
    port: input/output port object

    Returns
    -------
    list
        a list of ports with name and type
    """
    all_ports = []
    for key in port:
        one_port = {}
        one_port['name'] = key
        port_type = port[key]['type']
        if isinstance(port_type, list):
            types = []
            for t in port_type:
                type_name = t.__module__ + '.' + t.__name__
                types.append(type_name)
            one_port['type'] = types
        else:
            type_name = port_type.__module__ + '.' + port_type.__name__
            one_port['type'] = [type_name]
        all_ports.append(one_port)
    return all_ports
2fa65686b6b764afc97a200a02baec65645c9879
5,004
import time


def pretty_date(d):
    """Returns a html formatted pretty date."""
    special_suffixs = {1: "st", 2: "nd", 3: "rd", 21: "st", 22: "nd", 23: "rd", 31: "st"}
    suffix = "th"
    if d.tm_mday in special_suffixs:
        suffix = special_suffixs[d.tm_mday]
    suffix = "<sup>" + suffix + "</sup>"
    day = time.strftime("%A", d)
    month = time.strftime("%B", d)
    return day + " the " + str(d.tm_mday) + suffix + " of " + month + ", " + str(d.tm_year)
7d6675f115021ddd46b2a614e831c9fae8faf7ad
5,005
def _floor(n, base=1):
    """Floor `n` to a multiple of `base`"""
    return n // base * base
49019e4aa925b4f77a7f13f9919d36948bd132cc
5,007
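Two quick checks of _floor above:

print(_floor(17, 5))  # 15
print(_floor(-1, 5))  # -5 (floor division rounds toward negative infinity)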
import torch


def n_step_returns(q_values, rewards, kls, discount=0.99):
    """
    Calculates all n-step returns.

    Args:
        q_values (torch.Tensor): the Q-value estimates at each time step
            [time_steps+1, batch_size, 1]
        rewards (torch.Tensor): the rewards at each time step
            [time_steps, batch_size, 1]
        kls (torch.Tensor): the scaled kl divergences at each time step
            [time_steps, batch_size, 1]
        discount (float): the temporal discount factor
    """
    discounts = torch.cat([(discount * torch.ones_like(q_values[:1])) ** i
                           for i in range(rewards.shape[0])], 0)
    rewards[1:] = rewards[1:] - kls[:-1]
    discounted_returns = torch.cumsum(discounts * rewards, dim=0)
    terminal_values = discount * discounts * (q_values[1:] - kls)
    return torch.cat([q_values[:1], discounted_returns + terminal_values], dim=0)
3bbd6026046328dc8ef63ab3e871f6c47636cb80
5,010
def function_3():
    """This is a Function prototype in Python"""
    print("Printing Docs String")
    return 0
4268904e75772b9fef804931e3a3564fda333bc7
5,011
def get_character_bullet(index: int) -> str:
    """Takes an index and converts it to a string containing a-z, ie.

    0 -> 'a'
    1 -> 'b'
    .
    .
    .
    26 -> 'aa'
    27 -> 'ab'
    """
    result = chr(ord('a') + index % 26)  # Should be 0-25
    if index > 25:
        current = index // 26
        while current > 0:
            # bijective base-26 carry: 26 letters per digit, and subtract
            # one before dividing (the original used % 25 and a plain // 26,
            # which broke for indices >= 676)
            result = chr(ord('a') + (current - 1) % 26) + result
            current = (current - 1) // 26
    return result
357f68feb302f11a996b5446c642ad9ca1f0f8d3
5,012
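A few spot checks for get_character_bullet above (after the carry fix noted in the code):

for i in (0, 25, 26, 27, 51, 52):
    print(i, get_character_bullet(i))
# 0 a / 25 z / 26 aa / 27 ab / 51 az / 52 ba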
import sys


def _system_path_separator():
    """
    System dependent character for element separation in PATH variable

    :rtype: str
    """
    if sys.platform == 'win32':
        return ';'
    else:
        return ':'
b89a77d5b444a1b806a75e9f024f2084e3cfc93f
5,014
import argparse


def init_argparse():
    """Parses the required arguments file name and source database type
    and returns a parser object."""
    parser = argparse.ArgumentParser(
        usage="%(prog)s --filename 'test.dtsx' --source 'postgres'",
        description="Creates a configuration file in the output directory "
                    "based on the SQLs in the dtsx file."
    )
    parser.add_argument(
        "-f", "--filename",
        action='store',
        help='Input DTSX file name',
        required=True
    )
    parser.add_argument(
        "-s", "--source",
        action="store",
        help='Type of the source database (sqlserver, postgres)',
        required=True
    )
    return parser
1c57c6712819d147ef7917d1100c20353339f7b4
5,015
import os
import subprocess
import logging


def join(kmerfile, codonfile, minhashfile, dtemp):
    """Externally join with built-in GNU Coreutils in the order
    label, kmers, codons, minhash.

    Args:
        kmerfile (str): Kmer csv file
        codonfile (str): Codon csv file
        minhashfile (str): Minhash csv file
        dtemp (str): the path to a temporary directory

    Returns:
        (str) the path of the merged file created

    References:
        GNU Coreutils: https://www.gnu.org/software/coreutils/coreutils.html
    """
    kcfile = os.path.join(dtemp, "kcfile.csv")
    mergefile = os.path.join(dtemp, "mergefile.csv")
    try:
        with open(kcfile, 'w') as kcf:
            options = ['join', '-t', ',', '-1', '1', '-2', '1', kmerfile, codonfile]
            subprocess.run(options, check=True, stdout=kcf)
        with open(mergefile, "w") as mf:
            options2 = ['join', '-t', ',', '-1', '1', '-2', '1', kcfile, minhashfile]
            subprocess.run(options2, check=True, stdout=mf)
        os.remove(kcfile)
        return mergefile
    except RuntimeError:
        logging.exception("Could not merge csv files using unix join command")
d3a373573d87a0312ecb8291bb0c81479f6402b6
5,017
def getNamespace(modelName):
    """Get the name space from rig root

    Args:
        modelName (str): Rig top node name

    Returns:
        str: Namespace
    """
    if not modelName:
        return ""

    if len(modelName.split(":")) >= 2:
        nameSpace = ":".join(modelName.split(":")[:-1])
    else:
        nameSpace = ""

    return nameSpace
abfb4c54f2dd1b54563f6c7c84e902ed4ee77b01
5,018
import os


def get_output(db, output_id):
    """
    :param db: a :class:`openquake.server.dbapi.Db` instance
    :param output_id: ID of an Output object
    :returns: (ds_key, calc_id, dirname)
    """
    out = db('SELECT output.*, ds_calc_dir FROM output, job '
             'WHERE oq_job_id=job.id AND output.id=?x', output_id, one=True)
    return out.ds_key, out.oq_job_id, os.path.dirname(out.ds_calc_dir)
8ad5cd6b5ca0808038ee29345b8d3e53e80fb9de
5,019
def normalize(output):
    """Convert null or empty output to the placeholder '暂无' ("none yet")."""
    if not output:
        return '暂无'
    else:
        return output
18af58c74325522a64dcfd98a75f55e677c01ca3
5,020
def make_matrix(num_rows, num_cols, entry_fn):
    """Returns the num_rows x num_cols matrix whose (i,j)th entry
    is entry_fn(i, j)."""
    return [[entry_fn(i, j)             # given i, create a list
             for j in range(num_cols)]  # [entry_fn(i, 0), ...]
            for i in range(num_rows)]   # one list per row i
f706773245730eab3ce6cf41b0f6e81fbe3d52ab
5,021
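A usage sketch for make_matrix above, building a 3x3 identity matrix:

identity = make_matrix(3, 3, lambda i, j: 1 if i == j else 0)
print(identity)  # [[1, 0, 0], [0, 1, 0], [0, 0, 1]]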
def proj(A, B):
    """Returns the projection of A onto the hyper-plane defined by B"""
    return A - (A * B).sum() * B / (B ** 2).sum()
982cdfb1564166dce14432bf24404f066e2acee3
5,023
def v6_multimax(iterable):
    """Return a list of all maximum values.

    Bonus 2: Make the function work with lazy iterables.

    Our previous solutions fail this requirement because they loop through
    the iterable twice, and generators can only be looped over once. Instead,
    we keep track of the maximum values as we loop, manually building up a
    list of maximums.
    """
    maximums = []
    for item in iterable:
        if not maximums or maximums[0] == item:
            maximums.append(item)
        elif item > maximums[0]:
            maximums = [item]
    return maximums
5539adb0dcb6c9db4f8f2f68487fc13c6aa8d067
5,024
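A quick demonstration of v6_multimax above, including the lazy-iterable case it is designed for:

print(v6_multimax([3, 1, 3, 2]))     # [3, 3]
print(v6_multimax(iter([1, 4, 4])))  # [4, 4] (works on a one-shot iterator)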
import traceback


def format_traceback_string(exception):
    """Format exception traceback as a single string.

    Args:
        exception: Exception object.

    Returns:
        Full exception traceback as a string.
    """
    return '\n'.join(
        traceback.TracebackException.from_exception(exception).format()
    )
debdf53966b26b6562671bf48d283a3bf10d85d5
5,025
def is_batch_norm(layer):
    """Return True if `layer` is a batch normalisation layer."""
    classname = layer.__class__.__name__
    return classname.find('BatchNorm') != -1
6494b75a3fbfbfd55ff43b05536a1094290ea915
5,026
import torch


def predictive_entropy(y_input, y_target):
    """
    Computes the entropy of predictions by the model.

    :param y_input: Tensor [N, samples, class]
    :param y_target: Tensor [N]. Not used here.
    :return: mean entropy over all examples
    """
    # model output is log_softmax so we exponentiate
    y_input = torch.exp(y_input)
    # Average over all the samples to marginalize over epsilon;
    # y_posterior is now [N, class] and we want its entropy
    y_posterior = torch.mean(y_input, dim=1)
    # We add a small constant to each term to avoid infinities
    epsilon = 1e-25
    y_posterior += epsilon
    # [N] entropy on each example
    entropy = -torch.mean(y_posterior * torch.log(y_posterior), dim=1)
    return torch.mean(entropy).cpu().numpy()
6c3c4c3cfc93d0c19e2662b54a9b6d41146264d5
5,027
async def find_user_by_cards(app, cards, fields=["username"]):
    """Find a user by a list of cards assigned to them.

    Parameters
    ----------
    app : aiohttp.web.Application
        The aiohttp application instance
    cards : list
        The list of cards to search for
    fields : list, default=["username"]
        The fields to be returned in the user document

    Returns
    -------
    user : dict
        The user document
    """
    if not isinstance(cards, list):
        cards = [cards]

    projection = {}
    for field in fields:
        projection[field] = 1

    if "_id" not in fields:
        projection["_id"] = 0

    return await app["db"].users.find_one({"cards": cards}, projection)
ef5b20ea668b39eda51c859a3b33f1af30a644f5
5,030
def make_keyword_html(keywords):
    """This function makes a section of HTML code for a list of keywords.

    Args:
        keywords: A list of strings where each string is a keyword.

    Returns:
        A string containing HTML code for displaying keywords, for example:
        '<strong>Ausgangsw&ouml;rter:</strong> Nature, Plants, Fauna'
    """
    res_html = '<strong>Ausgangsw&ouml;rter:</strong> '
    for word in keywords[:-1]:
        res_html += word + ', '
    res_html += keywords[-1]
    return res_html
71e35245ad7b2fe2c67f6a4c27d53374945089bd
5,031
def __hit(secret_number, choice):
    """Check if the choice is equal to secret number"""
    return secret_number == choice
55bee8370a2480b5ca84cd5f478fd8eb367276bd
5,035