Dataset schema: content (string, 35 to 416k chars), sha1 (string, 40 chars), id (int64, 0 to 710k). Each record below is a Python snippet (content), followed by its sha1 hash and its id.
def new_channel():
    """Instantiates a dict containing a template for an empty single-point channel."""
    return {
        "channel_name": "myChannel",
        "after_last": "Goto first point",
        "alternate_direction": False,
        "equation": "x",
        "final_value": 0.0,
        "optimizer_config": {
            "Enabled": False,
            "Initial step size": 1.0,
            "Max value": 1.0,
            "Min value": 0.0,
            "Precision": 0.001,
            "Start value": 0.5,
        },
        "relation_parameters": [
            {
                "channel_name": "Step values",
                "lookup": None,
                "use_lookup": False,
                "variable": "x",
            }
        ],
        "show_advanced": False,
        "step_items": [
            {
                "center": 0.0,
                "interp": "Linear",
                "n_pts": 1,
                "range_type": "Single",
                "single": 1.0,
                "span": 0.0,
                "start": 1.0,
                "step": 0.0,
                "step_type": "Fixed # of pts",
                "stop": 1.0,
                "sweep_rate": 0.0,
            }
        ],
        "step_unit": "Instrument",
        "sweep_mode": "Off",
        "sweep_rate_outside": 0.0,
        "use_outside_sweep_rate": False,
        "use_relations": False,
        "wait_after": 0.0,
    }
af05dfda58a0e14f7448f59b057546728dbbeba7
4,638
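A minimal usage sketch of new_channel() above (not part of the dataset record; the keys touched come from the template itself):

channel = new_channel()
channel["channel_name"] = "gate_voltage"   # rename the template channel
channel["step_items"][0]["single"] = 0.25  # set the single-point value
print(channel["channel_name"], channel["step_items"][0]["single"])  # gate_voltage 0.25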
import sys


def is_reload(module_name: str) -> bool:
    """True if the module given by `module_name` should reload the modules it
    imports. This is the case if `enable_reload()` was called for the module
    before.
    """
    mod = sys.modules[module_name]
    return hasattr(mod, module_name.replace('.', '_') + "_DO_RELOAD_MODULE")
76e169d6e55203c921dc09cc4c9530c1cf104516
4,640
def _is_correct_task(task: str, db: dict) -> bool:
    """
    Check if the current data set is compatible with the specified task.

    Parameters
    ----------
    task
        Regression or classification
    db
        OpenML data set dictionary

    Returns
    -------
    bool
        True if the task and the data set are compatible
    """
    if task == "classification":
        return db['NumberOfSymbolicFeatures'] == 1 and db['NumberOfClasses'] > 0
    elif task == "regression":
        return True
    else:
        return False
49790d8e2b7a16ee9b3ca9c8bc6054fde28b3b6f
4,641
import re


def is_valid_semver(version: str) -> bool:
    """Return True if a value is a valid semantic version."""
    match = re.match(r'^[0-9]+\.[0-9]+\.[0-9]+(-([0-9a-z]+(\.[0-9a-z]+)*))?$', version)
    return match is not None
811a29a497515d23169916b9d9450fed6364c966
4,642
def set_nested_dict_value(input_dict, key, val):
    """Uses a '.'- or '->'-splittable string as key and returns the modified dict."""
    if not isinstance(input_dict, dict):
        # dangerous, just replace with dict
        input_dict = {}
    key = key.replace("->", ".")  # make sure no -> left
    split_key = key.split('.', 1)
    if len(split_key) == 2:
        key_prefix, key_suffix = split_key
        if key_prefix not in input_dict:
            input_dict[key_prefix] = {}
        input_dict[key_prefix] = set_nested_dict_value(
            input_dict[key_prefix], key_suffix, val)
    else:
        # not enough values to unpack
        input_dict[key] = val
    return input_dict
2f2a160348b0c5d5fac955a8c6cec6c0ec0d5f0d
4,643
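A short usage sketch of set_nested_dict_value() above, showing both key syntaxes (hand-checked against the code):

d = set_nested_dict_value({}, "a.b.c", 1)
d = set_nested_dict_value(d, "a->b->d", 2)  # '->' is normalized to '.'
print(d)  # {'a': {'b': {'c': 1, 'd': 2}}}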
def deserialize_model_fixture():
    """
    Returns a deserialized version of an instance of the Model class.
    This simulates the idea that a model instance would be serialized and
    loaded from disk.
    """
    class Model:
        def predict(self, values):
            return [1]

    return Model()
946e0cc67e4cb14da9b08e6790d336126bb9e43a
4,644
def parse_faq_entries(entries):
    """Iterate through the condensed FAQ entries to expand all of the keywords and answers."""
    parsed_entries = {}
    for entry in entries:
        for keyword in entry["keywords"]:
            if keyword not in parsed_entries:
                parsed_entries[keyword] = entry["answer"]
            else:
                print("Error: Found duplicate keyword '{}' in pre-configured FAQ entries.".format(keyword))
                exit(1)
    return parsed_entries
5258802d9384502f8a00692080cc9ae6ae7e9591
4,647
import os


def create_folder():
    """Creates a temp_folder on the user's desktop (Windows only, via USERPROFILE)."""
    new_folder_path = os.path.join(os.environ['USERPROFILE'], 'Desktop', 'temp_folder')
    try:
        if not os.path.exists(new_folder_path):
            os.makedirs(new_folder_path)
    except OSError:
        print("Error: Creating directory: " + new_folder_path)
    return new_folder_path
6e241891b415649902d522dc336d4f75970284c3
4,648
from collections import OrderedDict  # typing.OrderedDict is an annotation helper, not the container


def get_od_base(mode="H+S & B3LYP+TPSS0"):  # od is OrderedDict()
    """
    Initial parameters are prepared.

    mode = "H+S & B3LYP+TPSS0" --> ["B3LYP", "TPSS0"] with separation of H and S
           "H+S & B3LYP"       --> ["B3LYP"] with separation of H and S
           "H+S & TPSS0"       --> ["TPSS0"] with separation of H and S
    """
    if mode == "H+S & B3LYP+TPSS0":  # compare against the documented default spelling
        od = OrderedDict()
        od['QC Models (Family ID)'] = [["B3LYP", "TPSS0"]]
        od['H + S'] = [True]
        od['CV Mode'] = ['10*5KF/LOO']
        od['Em type'] = ['Chemical potential']
        od['Regularization'] = ['None']
        od['Bounds/Constraints'] = ['None']
        aod = OrderedDict()
        aod['in_file'] = "sheet/EmBT-xM4.csv"
        aod['out_file'] = "sheet/out_" + mode + ".csv"
    else:
        raise ValueError("Not supported: {}".format(mode))
    return od, aod
6a7aa100d8d244d9a0606a08188153e95a0df44b
4,650
def get_only_filename(file_list):
    """
    Get the filename from each file path and return a list of bare filenames.

    Input:
        file_list: List. List of file paths.
    Attributes:
        file_name: String. "01.jpg"
        file_name_without_ext: String. "01"
    Return:
        filename_list: List of filenames only.
    """
    filename_list = list()
    for file_path in file_list:
        file_name = file_path.split("/")[-1]
        file_name_without_ext = file_name.split(".")[0]
        filename_list.append(file_name_without_ext)
    return filename_list
3b9b202a4320825eba9d32170f527c0de6e1bdc6
4,652
def stat_helper(path):
    """os.path.exists will return False for PermissionError (or any other
    exception), leading us to believe a file is not present when it, in fact,
    is. This behavior is awful, so stat_helper preserves any exception other
    than FileNotFoundError.
    """
    try:
        return path.stat()
    except FileNotFoundError:
        return None
32e0863489ca19b55203d31b141d837189655cc2
4,653
def find_last_layer(model):
    """
    Find last layer.

    Args:
        model (_type_): Model.

    Returns:
        _type_: Last layer.
    """
    # reversed() yields the last layer first, so return immediately
    for layer in reversed(model.layers):
        return layer
ff82705e4a74d7ad15b3d0e3e030c340b49052ca
4,655
def seconds_to_time(sec):
    """Convert seconds into an M:S time string (the format has no hour field)."""
    return "%02d:%02d" % divmod(sec, 60)
5fe639a9a6ade59258dfb2b3df8426c7e79d19fa
4,656
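For illustration, how seconds_to_time() above behaves; minutes keep counting past an hour since there is no hour field:

print(seconds_to_time(75))    # 01:15
print(seconds_to_time(3700))  # 61:40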
import os


def checkFile(path: str):
    """
    Checks if a file exists; exits the program if not.
    Only used if a file needs to exist.
    """
    if not os.path.exists(path):
        print('File: "' + path + '", is not readable.')
        exit(0)
    return path
9ca57e541ecf9579dc6e24ab05f48ea46ec029e9
4,657
def schedule_prettify(schedule):
    """
    Takes a schedule entry in the format:
    [day of week, time, class type, class name, teacher name, location]
    For example: ['Чт', '13:00 – 14:30', 'ПЗ', 'Физическая культура', '', 'Кафедра']
    """
    if not schedule:
        return 'Сегодня занятий нету'  # "No classes today"
    else:
        bot_message = ''
        time = '⌚ ' + schedule[1] + '\n'
        if schedule[2]:
            schedule_type = schedule[2]
        else:
            schedule_type = ''
        if schedule[3]:
            subject = '📝 ' + schedule[-3] + '\n'
        else:
            subject = '📝 ' + schedule_type + '\n'
        if schedule[4]:
            teacher = '👤 ' + schedule[4] + '\n'
        else:
            teacher = ''
        if schedule[5]:
            location = '📍 ' + schedule[5] + '\n'
        else:
            location = ''
        bot_message += teacher + subject + time + location + '\n'
        return bot_message
868469b99bb68ec407f6861e12d063bcd6b56236
4,658
import json


def serializer(message):
    """Serializes the message as JSON."""
    return json.dumps(message).encode('utf-8')
7e8d9ae8e31653aad594a81e9f45170a915e291d
4,660
def correct_name(name):
    """
    Ensures that names of objects used to create file-system paths do not
    contain characters that would be handled erroneously (e.g. \ or / that
    normally separate file directories).

    Parameters
    ----------
    name : str
        Name of object (course, file, folder, etc.) to correct

    Returns
    -------
    corrected_name
        Corrected name
    """
    corrected_name = name.replace(" ", "_")
    corrected_name = corrected_name.replace("\\", "_")
    corrected_name = corrected_name.replace("/", "_")
    corrected_name = corrected_name.replace(":", "_")
    return corrected_name
b1df7a503324009a15f4f08e7641722d15a826b7
4,661
import re


def check_exact_match(line, expected_line):
    """
    Uses regular expressions to find an exact (not partial) match for 'expected_line'
    in 'line', i.e. in the example below it matches 'foo' and succeeds:

        line value: '66118.999958 - INFO - [MainThread] - ly_test_tools.o3de.asset_processor - foo'
        expected_line: 'foo'

    :param line: The log line string to search,
        i.e. '9189.9998188 - INFO - [MainThread] - example.tests.test_system_example - Log Monitoring test 1'
    :param expected_line: The exact string to match when searching the line param,
        i.e. 'Log Monitoring test 1'
    :return: An exact match for the string if one is found, None otherwise.
    """
    # Look for either start of line or whitespace, then the expected_line, then either
    # end of the line or whitespace. This way we don't partial match inside of a string.
    # So for example, 'foo' matches 'foo bar' but not 'foobar'
    regex_pattern = re.compile("(^|\\s){}($|\\s)".format(re.escape(expected_line)), re.UNICODE)
    if regex_pattern.search(line) is not None:
        return expected_line
    return None
d01eaa13c40d66999e870d3b287ac869f64ae314
4,663
def rounding_filters(filters, w_multiplier):
    """Calculate and round number of filters based on width multiplier."""
    if not w_multiplier:
        return filters
    divisor = 8
    filters *= w_multiplier
    new_filters = max(divisor, int(filters + divisor / 2) // divisor * divisor)
    if new_filters < 0.9 * filters:  # prevent rounding by more than 10%
        new_filters += divisor
    return int(new_filters)
eb2938732792564fd324602fd74be41e6f88b265
4,664
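A quick sketch of rounding_filters() above: values are rounded to the nearest multiple of 8, with a guard against shrinking by more than 10% (outputs hand-checked):

print(rounding_filters(16, 1.25))  # 24  (16 * 1.25 = 20, rounds up to a multiple of 8)
print(rounding_filters(32, 0.5))   # 16  (already a multiple of 8)
print(rounding_filters(32, None))  # 32  (falsy multiplier returns filters unchanged)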
from typing import List


def get_gate_names_2qubit() -> List[str]:
    """Return the list of valid gate names of 2-qubit gates."""
    names = []
    names.append("cx")
    names.append("cz")
    names.append("swap")
    names.append("zx90")
    names.append("zz90")
    return names
d3d7f20263805a186d9142ec087039eb53076346
4,665
def compute_log_zT_var(log_rho_var, log_seebeck_sqr_var, log_kappa_var):
    """Compute the variance of the logarithmic thermoelectric figure of merit zT."""
    return log_rho_var + log_seebeck_sqr_var + log_kappa_var
3528181796aeafb3df5eac09b06852afe028cb13
4,666
import colorsys
import hashlib


def uniqueColor(string):
    """
    Returns a color from the string. Same strings will return same colors,
    different strings will return different colors ('randomly' different)

    Internal: string =md5(x)=> hex =x/maxhex=> float [0-1] =hsv_to_rgb(x,1,1)=> rgb =rgb_to_int=> int

    :param string: input string
    :return: int color
    """
    hue = int(hashlib.md5(string.encode('utf-8')).hexdigest(), 16) / 2 ** 128
    return sum(round(c * 255) << d for c, d in zip(colorsys.hsv_to_rgb(hue, 1, 1), [16, 8, 0]))
0c895612c3bf2dd5f594a15daf6f2aa5d778eeb0
4,668
def get_instance_types(self):
    """
    Documentation:

    ---
    Description:
        Return a sorted list of available EC2 instance type names.
    """
    instance_types = sorted(
        instance_type["InstanceType"]
        for instance_type in self.ec2_client.describe_instance_types()["InstanceTypes"]
    )
    return instance_types
583311de8b2f23a967e40c8be5d140f6ab28244c
4,669
def _sizeof_fmt(num):
    """Format byte size to human-readable format.

    https://web.archive.org/web/20111010015624/http://blogmag.net/blog/read/38/Print_human_readable_file_size

    Args:
        num (float): Number of bytes
    """
    for x in ["bytes", "KB", "MB", "GB", "TB", "PB"]:
        if num < 1024.0:
            return f"{num:3.1f} {x}"
        num /= 1024.0
97c700954248a455592b3da9b274bfda69a7370f
4,670
def kappa(a, b, c, d):
    """
    Agreement (kappa) between two GO terms:

                            GO term 2
                         |  yes |  no  |
        --------------------------------
        GO       |  yes  |   a  |   b  |
        term 1   |  no   |   c  |   d  |

    kappa(GO_1, GO_2) = 1 - (1 - po) / (1 - pe)
    po = (a + d) / (a + b + c + d)
    marginal_a = ((a + b) * (a + c)) / (a + b + c + d)
    marginal_b = ((c + d) * (b + d)) / (a + b + c + d)
    pe = (marginal_a + marginal_b) / (a + b + c + d)
    """
    a = float(len(a))
    b = float(len(b))
    c = float(len(c))
    d = float(len(d))
    po = (a + d) / (a + b + c + d)
    marginal_a = ((a + b) * (a + c)) / (a + b + c + d)
    marginal_b = ((c + d) * (b + d)) / (a + b + c + d)
    pe = (marginal_a + marginal_b) / (a + b + c + d)
    # print(f" {a} | {b}\n {c} | {d}")
    return 1 - (1 - po) / (1 - pe)
5884a6745f6a93b044eabb1bfe38834cb59366d4
4,671
def zfsr32(val, n):
    """Zero-fill shift right for 32-bit integers."""
    return (val >> n) if val >= 0 else ((val + 4294967296) >> n)
4b890caa0b7b086e923e7b229e5551fd66d24016
4,672
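zfsr32() above mirrors JavaScript's >>> operator for 32-bit values; a small hand-checked demonstration:

print(zfsr32(16, 2))   # 4, same as a plain shift for non-negative values
print(zfsr32(-1, 28))  # 15, the sign bit is filled with zeros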
from typing import Union
from pathlib import Path


def is_dir(path: Union[str, Path]) -> bool:
    """Check if the given path is a directory.

    :param path: path to be checked
    """
    if isinstance(path, str):
        path = Path(path)
    if path.exists():
        return path.is_dir()
    else:
        return str(path).endswith("/")
540cce7f5c6a25186427ba71b94aa090c2ab90a7
4,673
import re


def document_to_vector(lemmatized_document, uniques):
    """
    Converts a lemmatized document to a bag-of-words vector representation.
    1/0 for word exists/doesn't exist
    """
    # tokenize
    words = re.findall(r'\w+', lemmatized_document.lower())
    vector = [0] * len(uniques)
    for i in range(len(uniques)):
        for j in range(len(words)):
            if uniques[i] == words[j]:
                vector[i] = 1
                break  # one occurrence is enough for a 1/0 vector
    return vector
e4b108b8e99a827788d7eff5d4eabf71021d6e21
4,674
import argparse


def parse_args():
    """Parse cli arguments."""
    parser = argparse.ArgumentParser(description="Java project package name changer.")
    parser.add_argument("--directory", default=".", type=str, help="Working directory.")
    parser.add_argument(
        "--current",
        required=True,
        type=str,
        help='Current package name. For example: "com.example".',
    )
    parser.add_argument(
        "--target",
        required=True,
        type=str,
        help='Target package name. For example: "org.another".',
    )
    parser.add_argument(
        "--protected_dirs",
        default=[],
        type=str,
        nargs="+",
        help="List of directories protected from any changes",
    )
    parser.add_argument(
        "--protected_files",
        default=[],
        type=str,
        nargs="+",
        help="List of files protected from any changes",
    )
    return parser.parse_args()
ceddd5028cd7aea1625e31f4c08bb22d112ea71a
4,675
import math


def dispos(dra0, decd0, dra, decd):
    """
    Source/credit: Skycat

    dispos computes distance and position angle solving a spherical
    triangle (no approximations).

    INPUT: coords in decimal degrees
    OUTPUT: dist in arcmin, phi in degrees (East of North)
    AUTHOR: a.p.martinez

    Parameters:
        dra0: center RA
        decd0: center DEC
        dra: point RA
        decd: point DEC

    Returns:
        (phi, dist): position angle in degrees and distance in arcmin
    """
    radian = 180.0 / math.pi

    # coord transformed in radians
    alf = dra / radian
    alf0 = dra0 / radian
    del_ = decd / radian
    del0 = decd0 / radian

    sd0 = math.sin(del0)
    sd = math.sin(del_)
    cd0 = math.cos(del0)
    cd = math.cos(del_)
    cosda = math.cos(alf - alf0)
    cosd = sd0 * sd + cd0 * cd * cosda
    dist = math.acos(cosd)
    phi = 0.0
    if dist > 0.0000004:
        sind = math.sin(dist)
        cospa = (sd * cd0 - cd * sd0 * cosda) / sind
        # if cospa > 1.0:
        #     cospa = 1.0
        if math.fabs(cospa) > 1.0:
            # 2005-06-02: fix from [email protected]
            cospa = cospa / math.fabs(cospa)
        sinpa = cd * math.sin(alf - alf0) / sind
        phi = math.acos(cospa) * radian
        if sinpa < 0.0:
            phi = 360.0 - phi
    dist *= radian
    dist *= 60.0
    if decd0 == 90.0:
        phi = 180.0
    if decd0 == -90.0:
        phi = 0.0
    return (phi, dist)
5c1b7c79a82f59764fd43ba0d89a763955b09a04
4,676
def cuda_reshape(a, shape):
    """
    Reshape a GPUArray.

    Parameters:
        a (gpu): GPUArray.
        shape (tuple): Dimension of new reshaped GPUArray.

    Returns:
        gpu: Reshaped GPUArray.

    Examples:
        >>> a = cuda_reshape(cuda_give([[1, 2], [3, 4]]), (4, 1))
        array([[ 1.],
               [ 2.],
               [ 3.],
               [ 4.]])

        >>> type(a)
        <class 'pycuda.gpuarray.GPUArray'>
    """
    return a.reshape(shape)
966cae8aeb88aeaeada28a11c284920746771f00
4,677
def update_cv_validation_info(test_validation_info, iteration_validation_info):
    """Updates a dictionary with given values."""
    test_validation_info = test_validation_info or {}
    for metric in iteration_validation_info:
        test_validation_info.setdefault(metric, []).append(iteration_validation_info[metric])
    return test_validation_info
b2509026e968b1c428836c5313e9c5e824663d4f
4,680
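A usage sketch of update_cv_validation_info() above, accumulating per-iteration metrics across CV folds (outputs hand-checked):

info = None
info = update_cv_validation_info(info, {"acc": 0.91, "f1": 0.88})
info = update_cv_validation_info(info, {"acc": 0.93, "f1": 0.90})
print(info)  # {'acc': [0.91, 0.93], 'f1': [0.88, 0.9]}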
def finish_work(state):
    """Move all running nodes to done."""
    state.progress.done = state.progress.done | state.progress.running
    state.progress.running = set()
    return state
649e14091737ef36db83b70591b09ee668a416f1
4,681
def sent2labels(sent):
    """
    Extracts gold labels for each sentence.

    Input: sentence list
    Output: list with labels list for each token in the sentence
    """
    # gold labels at index 18
    return [word[18] for word in sent]
11b4dc93c465d154e8bf8688a5b5c592b94e7265
4,682
def get_course_id_from_capa_module(capa_module):
    """
    Extract a stringified course run key from a CAPA module (aka ProblemBlock).

    This is a bit of a hack. Its intended use is to allow us to pass the
    course id (if available) to `safe_exec`, enabling course-run-specific
    resource limits in the safe execution environment (codejail).

    Arguments:
        capa_module (ProblemBlock|None)

    Returns:
        str|None: The stringified course run key of the module.
        If not available, fall back to None.
    """
    if not capa_module:
        return None
    try:
        return str(capa_module.scope_ids.usage_id.course_key)
    except (AttributeError, TypeError):
        # AttributeError:
        #   If the capa module lacks scope ids or has unexpected scope ids, we
        #   would rather fall back to `None` than let an AttributeError be
        #   raised here.
        # TypeError:
        #   Old Mongo usage keys lack a 'run' specifier, and may raise a type
        #   error when we try to serialize them into a course run key. This is
        #   tolerable because such course runs are deprecated.
        return None
dd76b1d6df12f6c7db0d095bb9a48940a850e5c7
4,683
def parse_cypher_file(path: str):
    """Returns a list of cypher queries in a file.

    Comments (starting with "//") will be filtered out and queries need to be
    separated by a semicolon.

    Arguments:
        path {str} -- Path to the cypher file

    Returns:
        [str] -- List of queries
    """

    def chop_comment(line):
        # this function removes inline comments
        comment_starter = "//"
        possible_quotes = ["'", '"']
        # a little state machine with two state variables:
        in_quote = False  # whether we are in a quoted string right now
        quoting_char = None
        comment_init = ""
        for i, ch in enumerate(line):
            if not in_quote:
                if ch == comment_starter[len(comment_init)]:
                    comment_init += ch
                else:
                    # reset comment starter detection
                    comment_init = ""
                if comment_starter == comment_init:
                    # a comment started, just return the non-comment part of the line
                    return line[: i - (len(comment_starter) - 1)]
                if ch in possible_quotes:
                    # quote is starting
                    comment_init = ""
                    quoting_char = ch
                    in_quote = True
            else:
                if ch == quoting_char:
                    # quote is ending
                    in_quote = False
                    quoting_char = None
        return line

    queries = []
    with open(path) as f:
        query = ""
        for line in f:
            line = chop_comment(line)
            line = line.rstrip()
            if line == "":
                # empty line
                continue
            if query:
                query += "\n"
            query += line
            if line.endswith(";"):
                query = query.strip(";")
                queries.append(query)
                query = ""
    return queries
00c4d357dee9e77160875fd8abb6c2b23d60a091
4,684
def del_method():
    """del: Cleanup an item on destroy."""
    # use __del__ with caution:
    # it is difficult to know when the object will actually be removed
    context = ""

    class _Destroyable:
        def __del__(self):
            nonlocal context
            context = "burn the lyrics"

    item = _Destroyable()
    del item
    return context
727e9cee3048ee0c43111dc193746e44f55f5881
4,685
from glom.matching import Optional, Required  # glom's marker classes; typing's Required/Optional are annotation-only forms


def _precedence(match):
    """
    in a dict spec, target-keys may match many spec-keys (e.g. 1 will match
    int, M > 0, and 1); therefore we need a precedence for which order to try
    keys in; higher = later
    """
    if type(match) in (Required, Optional):
        match = match.key
    if type(match) in (tuple, frozenset):
        if not match:
            return 0
        return max([_precedence(item) for item in match])
    if isinstance(match, type):
        return 2
    if hasattr(match, "glomit"):
        return 1
    return 0
f1b45378aa22720edd4fbd29ce64ef4941587c19
4,686
def unquote_to_bytes(string):
    """unquote_to_bytes('abc%20def') -> b'abc def'."""
    # Note: strings are encoded as UTF-8. This is only an issue if it contains
    # unescaped non-ASCII characters, which URIs should not.
    if isinstance(string, str):
        string = string.encode('utf-8')
    res = string.split(b'%')
    for i in range(1, len(res)):
        item = res[i]
        try:
            res[i] = bytes([int(item[:2], 16)]) + item[2:]
        except ValueError:
            res[i] = b'%' + item
    return b''.join(res)
7148b06e10fc92864d9875c785a6397a0a4950aa
4,687
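A couple of hand-checked calls to unquote_to_bytes() above:

print(unquote_to_bytes('abc%20def'))   # b'abc def'
print(unquote_to_bytes(b'%e2%82%ac'))  # b'\xe2\x82\xac' (UTF-8 for the euro sign)
print(unquote_to_bytes('50%x off'))    # b'50%x off' (invalid escapes pass through)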
def validators(*chained_validators):
    """
    Creates a validator chain from several validator functions.

    :param chained_validators: validator functions to chain
    :return: a single validator that passes only if every chained validator passes
    """
    def validator_chain(match):  # pylint:disable=missing-docstring
        for chained_validator in chained_validators:
            if not chained_validator(match):
                return False
        return True
    return validator_chain
bba03b12c8de007882320e23377a32072734844a
4,688
def stream_name_mapping(stream, exclude_params=['name'], reverse=False):
    """
    Return a complete dictionary mapping between stream parameter names to
    their applicable renames, excluding parameters listed in exclude_params.

    If reverse is True, the mapping is from the renamed strings to the
    original stream parameter names.
    """
    filtered = [k for k in stream.param if k not in exclude_params]
    mapping = {k: stream._rename.get(k, k) for k in filtered}
    if reverse:
        return {v: k for k, v in mapping.items()}
    else:
        return mapping
5a9c9ab80ad470c45d22f2e360cc53c979300825
4,690
def endswith(s, tags):
    """Like str.endswith, except that `tags` may also be an iterable of suffixes.

    >>> endswith('a.dvi', ('.log', '.aux', '.dvi', 'busy'))
    True
    """
    if isinstance(tags, str):
        return s.endswith(tags)
    elif isinstance(tags, (list, tuple)):
        for t in tags:
            if s.endswith(t):
                return True
    else:
        raise TypeError
    return False
c30228f412552d08d09d9c50bdc20f4401477ba5
4,691
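endswith() above in action with the iterable form:

print(endswith('report.aux', ['.log', '.aux']))  # True
print(endswith('report.pdf', ('.log', '.aux')))  # False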
import os


def imagePath(image):
    """Return full path to given image."""
    return os.path.join(":/images", image)
73d73bbdea81248c7342d60158f6e00f50c39bae
4,692
import re
import os


def find_tm5_output(path, expname=None, varname=None, freq=None):
    """
    Finds TM5 output files, which consist of
    varname + "_" + "AER"[freq] + * + dates + ".nc"

    inputs:
        path (mandatory)
        experiment name (optional)
        varname (optional)
        frequency (optional)
    output:
        list of full paths to files
    """
    subexpr = ".*"
    if expname:
        subexpr = expname
    if varname is None:
        # select alphanumeric variable name followed by _AER + * + _expname_ + * + dates.nc
        # matches like this:
        #   emioa_AER*_  +  aspi  +  _*_185001-185012.nc
        #   (dates may have 6-12 digits, e.g. 185001010000-185012312300)
        expr = re.compile(r"(([0-9A-Za-z]+)\w_AER.*)_" + subexpr + r"_.*_[0-9]{6,12}-[0-9]{6,12}\.nc$")
    else:
        # the 'fx' and default frequency cases use the same pattern
        expr = re.compile(varname + "_.*" + freq + ".*_" + subexpr + "_.*.nc$")
    files = [f for f in os.listdir(path) if re.match(expr, f)]
    return [os.path.join(path, f) for f in files]
6f605f992d7cc6722c523d77e2db9917abe883e3
4,693
from typing import Any
import json


def deserialize(data: str) -> dict:
    """Given a string, deserialize it from JSON."""
    if data is None:
        return {}

    def fix(jd: Any) -> Any:
        if type(jd) == dict:
            # Fix each element in the dictionary.
            for key in jd:
                jd[key] = fix(jd[key])
            return jd
        if type(jd) == list:
            # Could be serialized by us, could be a normal list.
            if len(jd) >= 1 and jd[0] == '__bytes__':
                # This is a serialized bytestring
                return bytes(jd[1:])
            # Possibly one of these is a dictionary/list/serialized.
            for i in range(len(jd)):
                jd[i] = fix(jd[i])
            return jd
        # Normal value, its deserialized version is itself.
        return jd

    return fix(json.loads(data))
ce7282fc99985a348ae9cf2748132e0f53993b51
4,696
def seq_windows_df(
    df,
    target=None,
    start_index=0,
    end_index=None,
    history_size=1,
    target_size=1,
    step=1,
    single_step=False,
):
    """Create sliding-window tuples for training NNs on multivariate time series."""
    data = []
    labels = []
    start_index = start_index + history_size
    if target is None:
        target = df
    if end_index is None:
        end_index = df.shape[0] - target_size
    for i in range(start_index, end_index):
        indices = range(i - history_size, i, step)
        X = df.iloc[indices]
        data.append(X)
        if single_step:
            label = target[i + target_size]
        else:
            label = target[i: i + target_size]
        labels.append(label)
    return data, labels
f95511c761e2da9ed493a71e9e849038d5fbba8b
4,697
def _compare_lines(line1, line2, tol=1e-14):
    """
    Parameters
    ----------
    line1 : list of str
    line2 : list of str

    Returns
    -------
    bool
    """
    if len(line1) != len(line2):
        return False
    for i, a in enumerate(line1):
        b = line2[i]
        if type(a) not in {int, float}:
            if a != b:
                return False
        elif type(a) is int and type(b) is int:
            if a != b:
                return False
        elif type(a) in {int, float} and type(b) in {int, float}:
            if abs(a - b) > tol:
                return False
        else:
            if a != b:
                return False
    return True
ec45a9fea4dfea3988afaa8947d35e0cc5fb27ca
4,699
def helping_func(self, driver, value):
    """Helper function for testing method composition."""
    return value + 1
2a204814213707a255b0b0e57e4d5ca23389045d
4,700
def get_with_label(label, tree):
    """Get a tree's node given its label."""
    return [n for n in tree.children if n.label == label][0]
fc976bcbbf8f5a03b2a17dd7b5c0061a22bedf60
4,702
def filter_nofix(df, NoFrames):
    """
    Filter for immobilized origami with DNA-PAINT based tracking handle (TH) as described in `spt`_.

    Positives are groups
        - with a trajectory within the first 5 frames after the start of the measurement
        - and whose number of localizations is greater or equal to 20% of total measurement duration (in frames)

    Args:
        df (pandas.DataFrame): Immobile properties as calculated by apply_props()
    Returns:
        pandas.DataFrame: Positives in ``df`` according to TH filter as described above.
    """
    istrue = df.min_frame <= 5
    istrue = istrue & (df.n_locs / NoFrames >= 0.2)  # Occupancy of more than 20%
    df_filter = df.loc[istrue, :]
    return df_filter
e115cc479c984037adbe3dd662bed1aa70acaafd
4,704
def store_nugget_nodes(gold_nuggets, sys_nuggets, m_mapping):
    """
    Store nuggets as nodes.

    :param gold_nuggets:
    :param sys_nuggets:
    :param m_mapping:
    :return:
    """
    # Stores TimeML nodes that actually exist in gold standard and system.
    gold_nodes = []
    sys_nodes = []

    # Store the mapping from nugget id to unified TimeML node id.
    system_nugget_to_node = {}
    gold_nugget_to_node = {}

    mapped_system_mentions = set()

    tid = 0
    for gold_index, (system_index, _) in enumerate(m_mapping):
        node_id = "te%d" % tid
        tid += 1
        gold_script_instance_id = gold_nuggets[gold_index]
        gold_nugget_to_node[gold_script_instance_id] = node_id
        gold_nodes.append(node_id)
        if system_index != -1:
            system_nugget_id = sys_nuggets[system_index]
            system_nugget_to_node[system_nugget_id] = node_id
            sys_nodes.append(node_id)
            mapped_system_mentions.add(system_index)

    for system_index, system_nugget in enumerate(sys_nuggets):
        if system_index not in mapped_system_mentions:
            node_id = "te%d" % tid
            tid += 1
            system_nugget_to_node[system_nugget] = node_id
            sys_nodes.append(node_id)

    return gold_nodes, sys_nodes, gold_nugget_to_node, system_nugget_to_node
659eeccc244d7dfe7fc1c4b9813844d70973b5dc
4,705
def return_union_item(item):
    """union of statements, next statement"""
    return " __result.update({0})".format(item)
60fff47ff948f5b62ff6c6793b9dd339c23ecfd7
4,706
import re


def _get_toc_string_from_log(file_handle):
    """
    Returns a toc string or None for a given log file (EAC or XLD)

    Copyright (c) 2018 Konstantin Mochalov
    Released under the MIT License
    Original source: https://gist.github.com/kolen/765526
    """
    def _filter_toc_entries(file_handle):
        """
        Take file handle, return iterator of toc entries
        """
        while True:
            line = file_handle.readline()
            # TOC table header:
            if re.match(r"""
                \s*
                .+\s+ \| (?#track)
                \s+.+\s+ \| (?#start)
                \s+.+\s+ \| (?#length)
                \s+.+\s+ \| (?#start sec)
                \s+.+\s*$ (?#end sec)
            """, line, re.X):
                file_handle.readline()
                break

        while True:
            line = file_handle.readline()
            m = re.match(r"""
                ^\s*
                (?P<num>\d+)
                \s*\|\s*
                (?P<start_time>[0-9:.]+)
                \s*\|\s*
                (?P<length_time>[0-9:.]+)
                \s*\|\s*
                (?P<start_sector>\d+)
                \s*\|\s*
                (?P<end_sector>\d+)
                \s*$
            """, line, re.X)
            if not m:
                break
            yield m.groupdict()

    PREGAP = 150
    try:
        entries = list(_filter_toc_entries(file_handle))
        num_entries = len(entries)

        tracknums = [int(e['num']) for e in entries]
        if [x for x in range(1, num_entries + 1)] != tracknums:
            # Non-standard track number sequence
            return None

        leadout_offset = int(entries[-1]['end_sector']) + PREGAP + 1
        offsets = [(int(x['start_sector']) + PREGAP) for x in entries]
        toc_numbers = [1, num_entries, leadout_offset] + offsets
        return " ".join(str(x) for x in toc_numbers)
    except Exception as e:
        # can fail if the log file is malformed
        print("Ignoring log file because of the following error:")
        print(e)
    return None
1b8152171dcc5a512ea92df96bdc63497f01499a
4,708
import re


def is_hex(hex_str):
    """Helper function to verify a string is a hex value."""
    return re.fullmatch('[0-9a-f]+', hex_str)
c5a53ccbcec36d77bee88d9c81aea46d2a0eec2d
4,710
def get_prebuilt_piccolo():
    """
    :return: pair of piccolo feature model filename and fm.json as a string
    """
    DEFAULT_PREBUILT_PICCOLO = '/home/besspinuser/tool-suite/tutorial/piccolo-simple-pregen.fm.json'
    with open(DEFAULT_PREBUILT_PICCOLO, 'r') as f:
        feature_model = f.read()
    return 'piccolo-simple-pregen.fm.json', feature_model
90a1ecf20c6d6614b813250ff464b6f308c588dc
4,711
from datetime import datetime


def assign_time():
    """Get latest time stamp value."""
    # %H:%M:%S spelled out instead of the non-portable %T directive
    return datetime.now().strftime('%Y-%m-%d %H:%M:%S')
2096222b23f5eb0d0aa11a6db4f3751b0a207463
4,712
def slowness2speed(value):
    """Inverse function of speed2slowness."""
    speed = (31 - value) / 30
    return speed
54d192b2db667ee05b9c5bdd636af23313b72246
4,713
def complex_covariance_from_real(Krr, Kii, Kri):
    """Build the complex covariance and pseudo-covariance from real-valued blocks.

    Parameters
    ----------
    Krr : array
        Covariance of the real part.
    Kii : array
        Covariance of the imaginary part.
    Kri : array
        Cross-covariance between real and imaginary parts.

    Returns
    -------
    K, Kp : array
        Covariance and pseudo-covariance of the complex signal.
    """
    K = Krr + Kii + 1j * (Kri.T - Kri)
    Kp = Krr - Kii + 1j * (Kri.T + Kri)
    return K, Kp
0c3e7b01bb06ba6b5bbae50f1fab98cb8bd63f45
4,714
from typing import Any
from typing import List
import numbers


def _ensure_list(value: Any) -> List[Any]:
    """If value is a scalar, converts it to a list of size 1."""
    if isinstance(value, list):
        return value
    if isinstance(value, str) or isinstance(value, numbers.Number):
        return [value]
    raise TypeError(
        f'Value must be a list, number or a string. Got {type(value)}')
e9cb9814060d9f2f2ad15fe42d0f6bbe192cc60e
4,715
from pathlib import Path


def import_requirements():
    """Import ``requirements.txt`` file located at the root of the repository."""
    with open(Path(__file__).parent / 'requirements.txt') as file:
        return [line.rstrip() for line in file.readlines()]
ee22aaa76e13c150a2a7981d171ba227887fbceb
4,716
import tarfile
import io


def _unpack(stream: bytes, path: str) -> str:
    """Unpack archive in bytes string into directory in ``path``."""
    with tarfile.open(fileobj=io.BytesIO(stream)) as tar:
        tar.extractall(path)
    return path
81a05c0a60fb06d43592a0a4f4d30cf62d406e01
4,717
def parse(pm, doc):
    """
    Parse one document using the given parsing model.

    :type pm: ParsingModel
    :param pm: a well-trained parsing model

    :type doc: string
    :param doc: file name of a document (with segmented EDUs)
    """
    pred_rst = pm.sr_parse(doc)
    return pred_rst
4389ac7993d370f2a7d404e5668eb7522ee4db70
4,719
def two_sum(nums, target):
    """
    Given an array of integers, return indices of the two numbers such that
    they add up to a specific target.

    You may assume that each input would have exactly one solution, and you
    may not use the same element twice.

    :type nums: List[int]
    :type target: int
    :rtype: List[int]
    """
    diffs_idx = {}
    for i in range(len(nums)):
        if nums[i] in diffs_idx:
            return [diffs_idx[nums[i]], i]
        diffs_idx[target - nums[i]] = i
ac72eb7137eb0f7161c26b172cd07553c984b5a8
4,720
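The classic examples for two_sum() above (one pass, using the complement dict):

print(two_sum([2, 7, 11, 15], 9))  # [0, 1]
print(two_sum([3, 2, 4], 6))       # [1, 2]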
import json


def check_geometry_size(footprint):
    """
    Excessively large geometries are problematic for AWS SQS (max size 256kb)
    and cause performance issues because they are stored in plain text in the
    JSON blob.

    This func reads the geojson and applies a simple heuristic to reduce the
    footprint size through simplification. With each iteration, the geometry
    is simplified by 0.01 degrees.

    Parameters
    ----------
    footprint : obj
        A shapely Polygon or MultiPolygon

    Returns
    -------
    geojson : dict
        A geojson representation of the geometry
    """
    geojson = footprint.__geo_interface__
    as_str = json.dumps(geojson)
    geomsize = len(as_str.encode('utf-8'))
    n_iterations = 0
    while geomsize > 125000:
        footprint = footprint.simplify(0.01)
        geojson = footprint.__geo_interface__
        as_str = json.dumps(geojson)
        geomsize = len(as_str.encode('utf-8'))
        n_iterations += 1
    return geojson
b2525958a1440fc1ce0d2560150b7fe28b3ec450
4,722
def mises_promo_gain_cote(cotes, mise_minimale, rang, output=False):
    """
    Computes the stake split for the "winning odds refunded as a freebet"
    promotion.
    """
    mis = []
    gains = cotes[rang] * 0.77 + mise_minimale * cotes[rang]
    for cote in cotes:
        mis.append(gains / cote)
    mis[rang] = mise_minimale
    if output:
        print("somme mises=", sum(mis))
        print("gain=", gains)
    return mis
6dfbb9305e769982257a05cb41800a6d2656767b
4,723
def cov(sources):
    """
    Given the array of sources for all image patches, calculate the
    covariance array between all modes.

    Parameters
    ----------
    sources : numpy array (floats)
        The {NUM_MODES x NUM_PATCHES} array of sources.

    Returns
    -------
    numpy array (floats)
        The {NUM_MODES x NUM_MODES} covariance array between all modes.
    """
    return (sources @ sources.T) / sources.shape[1]
268dfbc98a5b443e92aadd27ba577f7911ca398f
4,724
def sub(x, y):
    """Subtract x from y (returns y - x)."""
    return y - x
345279da515a877c1f08a8b54ff8f2e7d6a95fec
4,725
def create_error_payload(exception, message, endpoint_id):
    """Creates an error payload to be sent as a response in case of failure."""
    print(f'{exception}: {message}')
    error_payload = {
        'status': 'MESSAGE_NOT_SENT',
        'endpointId': endpoint_id if endpoint_id else 'NO_ENDPOINT_ID',
        'message': f'{exception}: {message}'
    }
    return error_payload
90f266d22429d385e828dcdd92fca3d7b2e6df48
4,728
def has_columns(df, columns):
    """Check if DataFrame has necessary columns.

    Args:
        df (pd.DataFrame): DataFrame.
        columns (list(str)): columns to check for.

    Returns:
        bool: True if DataFrame has specified columns.
    """
    result = True
    for column in columns:
        if column not in df.columns:
            print("Missing column: {} in DataFrame".format(column))
            result = False
    return result
d2752099fb13cf3fb220cb0c8402917488c32ef1
4,729
import copy


def get_subtree_tips(terms: list, name: str, tree):
    """Get lists of subsubtrees from subtree."""
    # get the duplicate sequences
    dups = [e for e in terms if e.startswith(name)]
    subtree_tips = []
    # for individual sequence among duplicate sequences
    for dup in dups:
        # create a copy of the tree
        temptree = copy.deepcopy(tree)
        # get the node path for the duplicate sequence
        node_path = temptree.get_path(dup)
        # for the terminals of the parent of the duplicate sequence
        # get the terminal names and append them to temp
        temp = []
        for term in node_path[-2].get_terminals():
            temp.append(term.name)
        subtree_tips.append(temp)
    return subtree_tips, dups
7bebf86ba95ede46f4e4c3ad0926784d4755124b
4,732
def rate(epoch, rate_init, epochs_per_order):
    """
    Computes learning rate as a function of epoch index.

    Inputs:
        epoch - Index of current epoch.
        rate_init - Initial rate.
        epochs_per_order - Number of epochs to drop an order of magnitude.
    """
    return rate_init * 10.0 ** (-epoch / epochs_per_order)
cc1c7850d4bd98d30b97c7915ceb96eaeadef327
4,734
import json


def pretty_format_dict(dct):
    """
    Parameters
    ----------
    dct: dict[Any, Any]

    Returns
    -------
    str
    """
    return json.dumps(dct, indent=4)
60d9c09da62d7035bd89a6fb52e6f0a1f142f89e
4,735
def match_countries(df_to_match, olympics):
    """Changes the names of the countries in the df_to_match df so that they
    match the names of the countries in the olympics df.

    Parameters
    -----------
    df_to_match : either of the two dataframes:
        - gdp
        - pop
    olympics : the olympics dataframe

    Returns
    -----------
    df_to_match : the dataframe given as first parameter whose countries now
        match the countries in the olympics df
    common_countries : a list with the common countries in the two dataframes
    """
    # countries in the to_match df
    df_countries = set(df_to_match.columns.tolist())
    # countries in the olympics df
    ol_regions = set(sorted(olympics.region.unique().tolist()))
    # countries in the to_match df that are not in the olympics df
    not_in_ol = df_countries.difference(ol_regions)
    # countries in the olympics df that are not in the to_match df
    not_in_df = ol_regions.difference(df_countries)

    # After printing not_in_ol and not_in_df, we see that some countries are simply named
    # differently. Therefore, rename these countries in the to_match df so that they match
    # the countries from the olympics df.
    df_to_match.rename(columns={"United States": "USA",
                                "United Kingdom": "UK",
                                "Antigua and Barbuda": "Antigua",
                                "Congo, Dem. Rep.": "Democratic Republic of the Congo",
                                "Lao": "Laos",
                                "North Macedonia": "Macedonia",
                                "Cote d'Ivoire": "Ivory Coast",
                                "Trinidad and Tobago": "Trinidad",
                                "Micronesia, Fed. Sts.": "Micronesia",
                                "St. Vincent and the Grenadines": "Saint Vincent",
                                "St. Lucia": "Saint Lucia",
                                "St. Kitts and Nevis": "Saint Kitts",
                                "Slovak Republic": "Slovakia",
                                "Kyrgyz Republic": "Kyrgyzstan",
                                "Bolivia": "Boliva",
                                "Congo, Rep.": "Republic of Congo"},
                       inplace=True)

    # Check which countries still remain unmatched
    df_countries = set(df_to_match.columns.tolist())
    ol_regions = set(sorted(olympics.region.unique().tolist()))
    # Countries in the to_match df that are still not in the olympics df
    not_in_ol = df_countries.difference(ol_regions)
    # Countries in the olympics df that are still not in the to_match df
    not_in_df = ol_regions.difference(df_countries)
    # Printing not_in_ol and not_in_df shows which countries are still not matched. Used as a check.

    # save the resulting common countries
    common_countries = ol_regions.intersection(df_countries)
    return df_to_match, common_countries
256eaac81daee5c621e7dac4c8c27d0b96868418
4,737
import requests
import logging


def get_reply(session, url, post=False, data=None, headers=None, quiet=False):
    """
    Download an HTML page using the requests session. Low-level function
    that allows for flexible request configuration.

    @param session: Requests session.
    @type session: requests.Session

    @param url: URL pattern with optional keywords to format.
    @type url: str

    @param post: Flag that indicates whether POST request should be sent.
    @type post: bool

    @param data: Payload data that is sent with request (in request body).
    @type data: object

    @param headers: Additional headers to send with request.
    @type headers: dict

    @param quiet: Flag that tells whether to print error message when
        status code != 200.
    @type quiet: bool

    @return: Requests response.
    @rtype: requests.Response
    """
    request_headers = {} if headers is None else headers
    request = requests.Request('POST' if post else 'GET', url, data=data, headers=request_headers)
    prepared_request = session.prepare_request(request)
    reply = session.send(prepared_request)

    try:
        reply.raise_for_status()
    except requests.exceptions.HTTPError as e:
        if not quiet:
            logging.error("Error %s getting page %s", e, url)
            logging.error("The server replied: %s", reply.text)
        raise

    return reply
4baa985db090d0f88762c8f6cfadff084f2b88ad
4,738
def mermin_klyshko_quantum_bound(n):
    """The quantum bound for the Mermin-Klyshko inequality is :math:`2^{3(n-1)/2}`.

    :param n: The number of measurement nodes.
    :type n: Int

    :returns: The quantum bound.
    :rtype: Float
    """
    return 2 ** (3 * (n - 1) / 2)
721ca41b19ef72cae77baf1ad6dea5377b6eb67d
4,740
def _get_formatted_atom_types_names_for(connection):
    """Return formatted atom_type names for a connection."""
    names = []
    for member in connection.connection_members:
        if not member.atom_type:
            label = ""
        else:
            label = member.atom_type.name
        names.append(label)
    return " --- ".join(names)
b9b21cb37706aa05f7807df1c252985f09fe6fad
4,741
def parse_q(s):
    """Parse the value of query string q (?q=) into a search sub-term."""
    if '=' not in s:
        names = s.split()
        term = '/'.join(map(lambda x: 'n.name=' + x, names))
        return term
    else:
        subterms = s.split()
        res = []
        for subterm in subterms:
            if '=' not in subterm:
                res.append('n.name=' + subterm)
            else:
                res.append(subterm)
        term = '&'.join(res)
        return term
eae907fcb42be4a2c4be26316721ea63aa0284d6
4,742
def getCompleteData(client, response, comp):
    """
    This function is useful to receive missing data in a tcp packet.

    Input:
        client = Tcp object which interacts with the host end
        response = received response from the host end
        comp = comparison struct defined by the tcp packet
    Output:
        response = the concatenated bytes data, i.e. the whole packet
    """
    remaining = comp.size - len(response)
    while remaining > 0:
        read = client.recv(remaining)
        response += read
        remaining -= len(read)
    return response
0f3ff5785046771f295a65116e0f79b5c7e45525
4,743
def getattr_by_path(obj, attr, *default):
    """Like getattr(), but can go down a hierarchy like 'attr.subattr'."""
    value = obj
    for part in attr.split('.'):
        if not hasattr(value, part) and len(default):
            return default[0]
        value = getattr(value, part)
        if callable(value):
            value = value()
    return value
3eccbb39e1781a75a6f0061c1c226cefdcfb17c8
4,744
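A self-contained sketch of getattr_by_path() above; the Car/Engine classes are invented here purely for illustration:

class Engine:
    def status(self):
        return "running"

class Car:
    engine = Engine()

print(getattr_by_path(Car(), "engine.status"))          # running (callables get invoked)
print(getattr_by_path(Car(), "engine.rpm", "unknown"))  # unknown (the default kicks in)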
def get_stack_value(stack, key):
    """Get metadata value from a cloudformation stack."""
    for output in stack.outputs:
        if output['OutputKey'] == key:
            return output['OutputValue']
a6b193c7d884bac78668dfd85bc2a5cbbb6b3f3b
4,745
def _ldmodule_soversion(target, source, env, for_signature):
    """Function to determine what to use for SOVERSION."""
    if 'SOVERSION' in env:
        return '.$SOVERSION'
    elif 'LDMODULEVERSION' in env:
        ldmod_version = env.subst('$LDMODULEVERSION')
        # We use only the most significant digit of LDMODULEVERSION
        return '.' + ldmod_version.split('.')[0]
    else:
        return ''
21d84d9ed8bc4a186d4619b51318c4a2bd780adb
4,746
def reverse_bits(counter) -> int:
    """
    Reverses the order of the bits in the given counter.

    :param counter: an 8-bit value
    :return: the bit-reversed value
    """
    # From Elephant reference code (elephant160v2 > spongent.c > retnuoCl)
    return ((counter & 0x01) << 7) | ((counter & 0x02) << 5) | ((counter & 0x04) << 3) \
        | ((counter & 0x08) << 1) | ((counter & 0x10) >> 1) | ((counter & 0x20) >> 3) \
        | ((counter & 0x40) >> 5) | ((counter & 0x80) >> 7)
290f62d794e5d17c4b277a714151523835bc6c16
4,747
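Hand-checked values for reverse_bits() above:

print(bin(reverse_bits(0b00000001)))  # 0b10000000
print(bin(reverse_bits(0b11010000)))  # 0b1011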
import requests


def check_quota():
    """
    Check quota for the RANDOM.ORG API.

    :return: True if the request is successful AND there is remaining quota available
    """
    resp = requests.request('GET', 'https://www.random.org/quota/?format=plain')
    if resp.status_code != 200 or int(resp.text) <= 0:
        return False
    return True
ba882714a17dc70fcdc45de5695b139d4e766fbc
4,748
def diff_first_last(L, *opArg):
    """
    (list) -> boolean

    Precondition: len(L) >= 2

    Returns True if the first item of the list is different from the last;
    else returns False.

    >>> diff_first_last([3, 4, 2, 8, 3])
    False
    >>> diff_first_last(['apple', 'banana', 'pear'])
    True
    >>> diff_first_last([4.0, 4.5])
    True

    --- Additional Test Cases ---
    >>> diff_first_last(3, 4, 2, 8, 3)
    False
    >>> diff_first_last('apple', 'banana', 'pear')
    True
    >>> diff_first_last([5, 4], 4, 5, 4)
    True
    >>> diff_first_last([5, 4], 4, [5, 4])
    False
    >>> diff_first_last('eeee')
    Invalid length. Nothing to compare to.
    >>> diff_first_last([5])
    Invalid length. Nothing to compare to.

    Additional test cases show that the function can handle non-list inputs
    of various kinds. The function can also handle invalid inputs of various
    kinds.
    """
    print()
    print('---Checking if first and last values are unequal---')
    print('Input is: ', L, *opArg)
    if not opArg:
        if type(L) == str:
            print('Invalid length. Nothing to compare input to.')
            return None
        elif len(L) >= 2:
            print(L[0] != L[-1])
            return L[0] != L[-1]
        else:
            print('Invalid length. Nothing to compare input to.')
            return None
    else:
        print(L != opArg[-1])
        return L != opArg[-1]
30d6afe76c4fdf759d4a989a5c9cc8b4eb8c62c1
4,749
def ValidClassWmi(class_name):
    """
    Tells if this class for our ontology is in a given WMI server, whatever
    the namespace is. This is used to display, or not, the WMI url associated
    to a Survol object. This is not an absolute rule.
    """
    return class_name.startswith(("CIM_", "Win32_", "WMI_"))
f3fda0492bb42cefaba8a0226cb13558907bf995
4,750
from collections.abc import MutableMapping  # the typing alias is deprecated


def flatten(d, separator='_', parent_key=None):
    """
    Converts a nested hierarchy of key/value object (e.g. a dict of dicts)
    into a flat (i.e. non-nested) dict.

    :param d: the dict (or any other instance of collections.abc.MutableMapping)
        to be flattened.
    :param separator: the separator to use when concatenating nested key names
        into flattened key names.
    :param parent_key: used internally for recursion.
    :return: a flattened dict (i.e. containing no nested dicts as values).
    """
    if separator is None:
        separator = '_'
    if parent_key is None:
        parent_key = ''

    dict_type = dict if d is None else type(d)

    items = []
    for k, v in d.items():
        new_key = parent_key + separator + k if parent_key else k
        if isinstance(v, MutableMapping):
            items.extend(flatten(v, separator=separator, parent_key=new_key).items())
        else:
            items.append((new_key, v))

    return dict_type(items)
d07daba5007c4c4efee1ccb2033a42e9a52a7efb
4,751
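A usage sketch of flatten() above on a two-level dict:

nested = {"a": 1, "b": {"c": 2, "d": {"e": 3}}}
print(flatten(nested))  # {'a': 1, 'b_c': 2, 'b_d_e': 3}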
def divide_list(array, number):
    """Create sub-lists of the list, each of length `number`."""
    if len(array) % number != 0:
        raise Exception("len(array) % number != 0")
    else:
        return [array[x:x + number] for x in range(0, len(array), number)]
09882945b971ce13f7983c33562df0dfde77165c
4,752
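divide_list() above in action; note it raises when the list length is not evenly divisible:

print(divide_list([1, 2, 3, 4, 5, 6], 2))  # [[1, 2], [3, 4], [5, 6]]
print(divide_list(list(range(9)), 3))      # [[0, 1, 2], [3, 4, 5], [6, 7, 8]]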
def get_exception_message(exception: Exception) -> str:
    """Returns the message part of an exception as string."""
    return str(exception).strip()
6e002329425f716115a5fddb32cbf36cf568ee81
4,753
def _get_frame_time(time_steps):
    """
    Compute average frame time.

    :param time_steps: 1D array with cumulative frame times.
    :type time_steps: numpy.ndarray
    :return: The average length of each frame in seconds.
    :rtype: float
    """
    if len(time_steps.shape) != 1:
        raise ValueError("ERROR: Time series must be a 1D array.")
    # Need to ignore the first frame (0).
    frame_time = time_steps[-1] / (len(time_steps) - 1)
    return frame_time
e849e5d6bcbc14af357365b3e7f98f1c50d93ee4
4,754
def query_table3(song):
    """
    This function returns the SQL necessary to get all users who listened to
    the song name passed as an argument to this function.
    """
    return "select user_name from WHERE_SONG where song_name = '{}';".format(song)
ed9a3fb7eb369c17027871e28b02600b78d483a9
4,755
def check_paragraph(index: int, line: str, lines: list) -> bool:
    """Return True if the specified line is a paragraph."""
    if index == 0:
        return bool(line != "")
    elif line != "" and lines[index - 1] == "":
        return True
    return False
b5737a905b32b07c0a53263255d3c581a8593dfa
4,756
def extract_el_from_group(group, el):
    """Extract an element from a group.

    :param group: list
    :param el: element to be extracted
    :return: group without the extracted element, plus the extracted element
    """
    extracted_group = [x for x in group if x != el]
    return [extracted_group] + [[el]]
ed6598fd0d7dcb01b35a5c2d58c78d8c2a2397f5
4,757
def extract_sha256_hash(hash):
    """Extract SHA256 hash or return None."""
    prefix = 'sha256:'
    if hash and hash.startswith(prefix):
        return hash.replace(prefix, '')
    return None
11e9f352f3783657d52772c4b69387151d13f3d2
4,761
def lick():
    """
    Returns a string when a user says 'lick' (This is a joke command)

    :return: A string
    """
    return "*licks ice cream cone*"
a4e92d7371abe078c48196b0f7d7e899b1b0e19e
4,762
def parse_ascii(state: str, size: int) -> str:
    """
    Args:
        state: an ascii picture of a cube
        size: the size of the cube

    Returns:
        a string of the cube state in ULFRBD order
    """
    U = []
    L = []
    F = []
    R = []
    B = []
    D = []
    lines = []

    for line in state.splitlines():
        line = line.strip().replace(" ", "")
        if line:
            lines.append(line)

    U = "".join(lines[0:size])

    for line in lines[size: size * 2]:
        L.append(line[0:size])
        F.append(line[size: size * 2])
        R.append(line[size * 2: size * 3])
        B.append(line[size * 3: size * 4])

    L = "".join(L)
    F = "".join(F)
    R = "".join(R)
    B = "".join(B)
    D = "".join(lines[size * 2: size * 4])

    return "".join([U, L, F, R, B, D])
7ec24a22c3052a76c820dcca54c913c2d5229e5d
4,763
from typing import Iterable
import functools
import operator


def prod(iterable: Iterable):
    """math.prod support for Python versions < v3.8 (returns the product, a number)."""
    return functools.reduce(operator.mul, iterable, 1)
be811e39b7dd70669fbfc84db5492b4c7383d68f
4,765
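prod() above matches math.prod semantics, including the empty-product case:

print(prod([1, 2, 3, 4]))  # 24
print(prod([]))            # 1, the multiplicative identity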
import subprocess


def compress_video(video_path):
    """
    Compress video.

    :param video_path: Path to the video.
    :return: True if compression succeeded, False otherwise.
    """
    return subprocess.call(["gzip", video_path]) == 0
9159076bae502da7c863dc6ef16372a6e2da4161
4,766
def trim_resource(resource):
    """Strip surrounding whitespace and slashes from a resource string."""
    return resource.strip(" \t\n\r/")
5a9d9bbf6da72cf967eee1e9198d109f096e3e41
4,767
def get_block(blockidx, blocksz, obj):
    """
    Given obj, a list, return the block obj[blockidx*blocksz:(blockidx+1)*blocksz],
    clipped to the bounds of obj.

    Ex: get_block(2, 100, range(250)) returns [200, 201, ..., 249]
    """
    if blockidx * blocksz > len(obj):
        return []
    elif (blockidx + 1) * blocksz > len(obj):
        return obj[blockidx * blocksz:]
    else:
        return obj[blockidx * blocksz:(blockidx + 1) * blocksz]
8666cc30be23619a49f899beec17d3ba1f0fb357
4,768
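Hand-checked calls to get_block() above, including the clipped final block:

nums = list(range(250))
print(get_block(0, 100, nums)[:3])   # [0, 1, 2]
print(len(get_block(2, 100, nums)))  # 50 (the last, partial block)
print(get_block(3, 100, nums))       # [] (past the end)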
import platform


def get_dataset_mrnet_args(parser, args=[]):
    """
    Get all relevant parameters to handle the dataset -> here: MRNET
    """
    # determine path
    if platform.system() == "Linux":
        path = "/home/biomech/Documents/OsteoData/MRNet-v1.0/"
    else:
        path = "C:/Users/Niko/Documents/data/MRNet-v1.0/MRNet-v1.0"
        # path = "C:/Users/ga46yeg/data/MRNet-v1.0"

    # Dataset MRNet:
    # ------------------------------------------------------------------------
    parser.add_argument(
        "--root_dir_mrnet", type=str, default=path, help="Directory of the dataset"
    )
    parser.add_argument(
        "--perspectives",
        type=str,
        nargs="+",  # type=list would split a CLI string into single characters
        default=["axial", "coronal", "sagittal"],
        help="Perspectives of the MR scans",
    )
    parser.add_argument(
        "--classes",
        type=str,
        nargs="+",
        default=["abn", "acl", "men"],
        help="Classify for these classes",
    )
    # ------------------------------------------------------------------------
    return parser
466cb843fca4a09f52a72603dcd2c4379ea1e54d
4,769