content: string (lengths 22 to 815k)
id: int64 (0 to 4.91M)
def columns_not_changed(df, col_to_keep): """ insert the clean columns as features without changing the columns :param df: dataframe :param col_to_keep: columns that are clean and should not be changed :return unchanged columns plus SK_ID_CURR """ df = df.loc[:, ['SK_ID_CURR'] + col_to_keep] df.loc[df.DAYS_ID_PUBLISH > 0, :] = np.nan col_to_turn_positive = ['DAYS_BIRTH', 'DAYS_EMPLOYED', 'DAYS_REGISTRATION', 'DAYS_ID_PUBLISH'] df[col_to_turn_positive] = df[col_to_turn_positive].abs() return df
1,200
def removeRestaurantFromList(update: Update, context: CallbackContext) -> int: """Removes the current restaurant from the current preferred list.""" query = update.callback_query query.answer() # Removing the restaurant from the list in the database removeRestaurantFromListDb( context.chat_data.get("current_list_restaurants").restaurants.current.id, context.chat_data.get("current_list_restaurants").id, ) # Removing the restaurant even from the local list context.chat_data.get("current_list_restaurants").restaurants.remove() # Sending a message of success and displaying back the updated list. context.bot.edit_message_text( chat_id=update.effective_chat.id, message_id=context.chat_data.get("fav_list_message_id"), text=getString( "GENERAL_RestaurantRemovedFromFavList", context.chat_data.get("lang") ), ) # Sending a new empty message which will be overwritten immediately by going back to the RESTAURANT_INFOS_DISPLAY state. newId = context.bot.send_message( chat_id=update.effective_chat.id, text="_" ).message_id # Storing the new id which will be used from now on to modify the message. context.chat_data.update({"fav_list_message_id": newId}) return showCurrentFavRestaurant(update, context)
1,201
def _process_null(_): """ Placeholder for an efficient replacement for when no columns of a `WaveformReducer` are activated. """ return dict()
1,202
def GenerateRst(proto_file): """Generate a RST representation from a FileDescriptor proto.""" source_code_info = SourceCodeInfo(proto_file.name, proto_file.source_code_info) # Find the earliest detached comment, attribute it to file level. # Also extract file level titles if any. header, comment = FormatHeaderFromFile('=', source_code_info.file_level_comment, proto_file.name) package_prefix = NormalizeFQN('.' + proto_file.package + '.')[:-1] package_type_context = TypeContext(source_code_info, package_prefix) msgs = '\n'.join( FormatMessage(package_type_context.ExtendMessage(index, msg.name), msg) for index, msg in enumerate(proto_file.message_type)) enums = '\n'.join( FormatEnum(package_type_context.ExtendEnum(index, enum.name), enum) for index, enum in enumerate(proto_file.enum_type)) debug_proto = FormatProtoAsBlockComment(proto_file) return header + comment + msgs + enums # + debug_proto
1,203
def make_padded_batch(items): """ Pads sequences in a batch, so they are all the same length as the longest. """ max_len = max(len(d["input_ids"]) for d in items) if max_len == 0: return {k: torch.zeros((0, 0), dtype=torch.long) for k in items[0]} return { k: pad_sequence([d[k] for d in items if len(d["input_ids"])], batch_first=True) for k, v in items[0].items() }
1,204
def hlmlEventSetCreate() -> hlml_t.HLML_EVENT_SET.TYPE: """ Create an empty set of events Parameters: None. Returns: st (HLML_EVENT_SET) - An empty set of events. """ global _hlmlOBJ st = hlml_t.HLML_EVENT_SET.TYPE() fn = _hlmlOBJ.get_func_ptr("hlml_event_set_create") ret = fn(ctypes.byref(st)) check_return(ret) return st
1,205
def is_ip_addr(value): """ Check that the supplied value is an Internet Protocol address, v.4, represented by a dotted-quad string, i.e. '1.2.3.4'. >>> vtor.check('ip_addr', '1 ') '1' >>> vtor.check('ip_addr', ' 1.2') '1.2' >>> vtor.check('ip_addr', ' 1.2.3 ') '1.2.3' >>> vtor.check('ip_addr', '1.2.3.4') '1.2.3.4' >>> vtor.check('ip_addr', '0.0.0.0') '0.0.0.0' >>> vtor.check('ip_addr', '255.255.255.255') '255.255.255.255' >>> vtor.check('ip_addr', '255.255.255.256') Traceback (most recent call last): VdtValueError: the value "255.255.255.256" is unacceptable. >>> vtor.check('ip_addr', '1.2.3.4.5') Traceback (most recent call last): VdtValueError: the value "1.2.3.4.5" is unacceptable. >>> vtor.check('ip_addr', '1.2.3. 4') Traceback (most recent call last): VdtValueError: the value "1.2.3. 4" is unacceptable. >>> vtor.check('ip_addr', 0) Traceback (most recent call last): VdtTypeError: the value "0" is of the wrong type. """ if not isinstance(value, StringTypes): raise VdtTypeError(value) value = value.strip() try: dottedQuadToNum(value) except ValueError: raise VdtValueError(value) return value
1,206
async def do_work(number): """ # This method is not executed immediately when called; instead it returns a coroutine object :param number: :return: """ print('Number:', number)
1,207
def batchEuclid (features, patterns, knowledge): """ Classifies whole dataset via euclidean distance. Returns score. """ dists = euclidean_distances (knowledge, features) preds = np.array (dists).argmin (axis = 0) truthVector = (preds.T.astype (float) == patterns) pos = truthVector.sum () score = pos / features.shape[0] return score
1,208
def upload_plans_for_template(request): """ Allow user to upload a csv file to create plans based on a previously selected template """ ctxd = {} context = RequestContext(request, ctxd) return render_to_response("rundb/plan/modal_batch_planning_upload.html", context_instance=context)
1,209
def calculate_total_correlativity(coefficient_array): """ Returns the total correlativity of the coefficient_array. The total correlativity is the sum of the absolute values and a measure of how correlated two timeseries are. The greater the value, the more correlated.""" return sum(map(abs, coefficient_array))
1,210
def merge_by_name(out_file_name, in_file_names): """ Merge name-sorted bam files into bam file sorted by name""" args = ['samtools', 'merge', '-n', out_file_name] args.extend(in_file_names) log_subprocess.check_call(args)
1,211
def test_execute_command(): """ Execute "SELECT 1;" Assert: A result of 1 is returned """ cur = create_connection(host=host, port=port, user=usr, password=paswd, db=db) assert type(cur) == pymysql.cursors.Cursor stmt = "SELECT 1" result = execute_command(cur, stmt) assert result[0][0] == 1
1,212
def mosmix_example(): """Retrieve Mosmix forecast data from DWD.""" # A. MOSMIX-L -- Specific stations_result - each station with own file Settings.tidy = True Settings.humanize = True request = DwdMosmixRequest( parameter=["DD", "ww"], start_issue=DwdForecastDate.LATEST, # automatically set if left empty mosmix_type=DwdMosmixType.LARGE, ) stations = request.filter_by_station_id( station_id=["01001", "01008"], ) response = next(stations.values.query()) # meta data enriched with information from metadata_for_forecasts() output_section("Metadata", response.stations.df) output_section("Forecasts", response.df) # B. MOSMIX-S -- All stations_result - specified stations_result are extracted. request = DwdMosmixRequest( parameter=["DD", "ww"], start_issue=DwdForecastDate.LATEST, # automatically set if left empty mosmix_type=DwdMosmixType.SMALL, ) stations = request.filter_by_station_id( station_id=["01001", "01008"], ) response = next(stations.values.query()) output_section("Metadata", response.stations.df) output_section("Forecasts", response.df)
1,213
def hide_topic(topic_id: TopicID, moderator_id: UserID) -> BoardTopicHidden: """Hide the topic.""" topic = _get_topic(topic_id) moderator = _get_user(moderator_id) now = datetime.utcnow() topic.hidden = True topic.hidden_at = now topic.hidden_by_id = moderator.id db.session.commit() aggregate_topic(topic) topic_creator = _get_user(topic.creator_id) return BoardTopicHidden( occurred_at=now, initiator_id=moderator.id, initiator_screen_name=moderator.screen_name, board_id=topic.category.board_id, topic_id=topic.id, topic_creator_id=topic_creator.id, topic_creator_screen_name=topic_creator.screen_name, topic_title=topic.title, moderator_id=moderator.id, moderator_screen_name=moderator.screen_name, url=None, )
1,214
def shell(): """ Starts an interactive shell with app object imported. """ # Local vars defined/imported will be available in shells global scope import IPython from app.main import app IPython.embed()
1,215
def argmax(a, axis=None, out=None): """ Returns the indices of the maximum values along an axis. Parameters ---------- a: array_like axis: int, optional By default, the index is into the flattened array, otherwise along the specified axis. out: numpy.array, optional If provided, the result will be inserted into this array. It should be of the appropriate shape and dtype. Returns ------- numpy.ndarray[int]""" a = a.data if isinstance(a, Tensor) else a return np.argmax(a, axis, out)
1,216
def FlowBalance_rule(model, node): """Ensures that flows into and out of a node are equal """ return model.Supply[node] \ + sum(model.Flow[i, node] for i in model.NodesIn[node]) \ - model.Demand[node] \ - sum(model.Flow[node, j] for j in model.NodesOut[node]) \ == 0
1,217
def evaluate_perfs(num_images, duration): """ Calculate and print the inference duration. """ #fps = float(num_images / duration) #print("Throughput=%.2f fps, total frames = %.0f , time=%.4f seconds" %(fps, num_images, duration)) print("Duration = %.2f ms" %(duration * 1000))
1,218
def test_config_merging_missing(): """ If we have set a boolean value in the TOML file, but not on the CLI, we want the TOML value to be taken. """ toml = StringIO( dedent( """\ [tool.vulture] verbose = true ignore_names = ["name1"] """ ) ) cliargs = [ "cli_path", ] result = make_config(cliargs, toml) assert result["verbose"] is True assert result["ignore_names"] == ["name1"]
1,219
def abcd(actual, predicted, distribution, as_percent=True): """ Confusion Matrix: |`````````````|`````````````| | TN[0][0] | FP[0][1] | | | | |`````````````|`````````````| | FN[1][0] | TP[1][1] | | | | ````````````````````````````` """ c_mtx = confusion_matrix(actual, predicted) "Probablity of Detection: Pd" try: p_d = c_mtx[1][1] / (c_mtx[1][1] + c_mtx[1][0]) # TP/(TP+FN) except ZeroDivisionError: p_d = 0 "Probability of False Alarm: Pf" try: p_f = c_mtx[0][1] / (c_mtx[0][1] + c_mtx[0][0]) # FP/(FP+TN) except ZeroDivisionError: p_f = 0 "Precision" try: p_r = c_mtx[1][1] / (c_mtx[1][1] + c_mtx[0][1]) # TP/(TP+FP) if not np.isfinite(p_r): p_r = 0 except ZeroDivisionError: p_r = 0 "Recall (Same as Pd)" r_c = p_d "F1 measure" try: f1 = 2 * c_mtx[1][1] / (2 * c_mtx[1][1] + c_mtx[0][1] + 1 * c_mtx[1][0]) # F1 = 2*TP/(2*TP+FP+FN) except ZeroDivisionError: f1 = 0 e_d = 2 * p_d * (1 - p_f) / (1 + p_d - p_f) g = np.sqrt(p_d - p_d * p_f) # Harmonic Mean between True positive rate and True negative rate try: auroc = round(roc_auc_score(actual, distribution), 2) except ValueError: auroc = 0 if as_percent is True: return p_d * 100, p_f * 100, p_r * 100, r_c * 100, f1 * 100, e_d * 100, g * 100, auroc * 100 else: return p_d, p_f, p_r, r_c, f1, e_d, g, auroc
1,220
def get_gene_name(protein_id, web_fallback=True): """Return the gene name for the given UniProt ID. This is an alternative to get_hgnc_name and is useful when HGNC name is not availabe (for instance, when the organism is not homo sapiens). Parameters ---------- protein_id : str UniProt ID to be mapped. web_fallback : Optional[bool] If True and the offline lookup fails, the UniProt web service is used to do the query. Returns ------- gene_name : str The gene name corresponding to the given Uniprot ID. """ try: gene_name = uniprot_gene_name[protein_id] # There are cases when the entry is in the resource # table but the gene name is empty. Often this gene # name is actually available in the web service RDF # so here we return only if the gene name is not None # and not empty string. if gene_name: return gene_name except KeyError: pass if not web_fallback: return None g = query_protein(protein_id) if g is None: return None query = rdf_prefixes + """ SELECT ?name WHERE { ?gene a up:Gene . ?gene skos:prefLabel ?name . } """ res = g.query(query) if res: gene_name = [r for r in res][0][0].toPython() if not gene_name: return None return gene_name return None
1,221
def AddEventTypePositionalArg(parser): """Adds event type positional arg.""" parser.add_argument( 'event_type', help='Type of event (e.g. com.google.gc.object.finalize).')
1,222
def summarize(): """ Returns summary of articles """ if request.method == 'POST': url = request.form['pageurl'] parser = HtmlParser.from_url(url, Tokenizer(LANGUAGE)) stemmer = Stemmer(LANGUAGE) summarizer = Summarizer(stemmer) summarizer.stop_words = get_stop_words(LANGUAGE) final = [] for sentence in summarizer(parser.document, SENTENCES_COUNT): final.append(str(sentence)) return render_template('result.html', len=len(final), summary=final)
1,223
def _env_constructor(loader, node): """ Replaces environment variables in YAML file """ value = loader.construct_scalar(node) for group in env_pattern.findall(value): try: value = value.replace(f"${{{group}}}", os.environ.get(group)) except TypeError as error: print( f"An error occurred while parsing YAML file:\n\n\tENV variable {group} not set\n" ) raise Exception(f"ENV variable {group} not set") from error return value
1,224
def version(include_copyright=False): """Get the version number of ``ssocr``. Equivalent of running: ``ssocr --version`` Parameters ---------- include_copyright : :class:`bool`, optional Whether to include the copyright information. Returns ------- :class:`str` The version number (and possibly copyright information). """ out = _run([ssocr_exe, '--version']) if include_copyright: return out return out.splitlines()[0].split()[-1]
1,225
def intersection(n: np.ndarray, d: float, A: np.ndarray, b: np.ndarray) -> List[np.ndarray]: """Return the intersection of the plane and convex polyhedron. Returns a list of points which define the intersection between the plane nx = d and the convex polyhedron defined by linear inequalities Ax <= b. Args: n (np.ndarray): Normal vector of the plane. d (float): Offset (or distance) of the plane. A (np.ndarray): LHS coefficients defining the linear inequalities. b (np.ndarray): RHS constants defining the linear inequalities. Returns: List[np.ndarray]: List of vertices defining the intersection (if any). Raises: ValueError: Normal vector must be length 3. ValueError: Matrix A must be of shape (n,3). """ if len(n) != 3: raise ValueError('Normal vector must be length 3.') if len(A[0]) != 3: raise ValueError('Matrix A must be of shape (n,3).') pts = [] n_d = np.hstack((n,d)) A_b = np.hstack((A,b)) for indices in itertools.combinations(range(len(A)),2): R_c = np.vstack((n,A[list(indices)])) R_d = np.vstack((n_d,A_b[list(indices)])) if np.linalg.matrix_rank(R_c) == 3 and np.linalg.matrix_rank(R_d) == 3: det = np.linalg.det(R_c) if det != 0: x_1 = np.linalg.det(R_d[:,[3,1,2]])/det x_2 = np.linalg.det(R_d[:,[0,3,2]])/det x_3 = np.linalg.det(R_d[:,[0,1,3]])/det x = np.array([[x_1],[x_2],[x_3]]) if all(np.matmul(A,x) <= b + 1e-10): pts.append(np.round(x, 10)) return pts
1,226
def create_session(checkpoint_path, target_device): """Create ONNX runtime session""" if target_device == 'GPU': providers = ['CUDAExecutionProvider'] elif target_device == 'CPU': providers = ['CPUExecutionProvider'] else: raise ValueError( f'Unsupported target device {target_device}, ' f'Expected one of: "CPU", "GPU"' ) session = ort.InferenceSession(checkpoint_path, providers=providers) input_names = [x.name for x in session.get_inputs()] return session, input_names
1,227
def V_eN_int(cgf_1, cgf_2, mol): """ Compute electron-nuclear integral between two contracted gaussian functions. """ v = 0 for i, _ in enumerate(cgf_1.alpha): for j, _ in enumerate(cgf_2.alpha): for k in range(mol.num_atoms): v += cgf_1.co[i] * cgf_2.co[j] * potential((cgf_1.alpha[i], cgf_1.coordinate), (cgf_2.alpha[j], cgf_2.coordinate), mol.coordinates[k], mol.charges[k]) return v
1,228
def create_subword_vocab(input_data, subword_size): """create subword vocab from input data""" def generate_subword(word, subword_size): """generate subword for word""" subwords = [] chars = list(word) char_length = len(chars) for i in range(char_length-subword_size+1): subword = ''.join(chars[i:i+subword_size]) subwords.append(subword) return subwords subword_vocab_lookup = {} for sentence in input_data: words = sentence.strip().split(' ') for word in words: word_vocabs = [word, word.lower(), word.capitalize(), word.upper()] for word_vocab in word_vocabs: subword_vocabs = generate_subword(word_vocab, subword_size) for subword_vocab in subword_vocabs: if subword_vocab not in subword_vocab_lookup: subword_vocab_lookup[subword_vocab] = 1 else: subword_vocab_lookup[subword_vocab] += 1 return subword_vocab_lookup
1,229
def main() -> None: """Create a map of nodes with available SSH connection.""" map_ssh = folium.Map(location=[45.523, -122.675], zoom_start=2) with open("lib/base_data.txt") as tsv: for row in csv.reader(tsv, delimiter="\t"): name = row[0] try: x = float(row[1]) y = float(row[2]) folium.Marker([x, y], popup=name).add_to(map_ssh) except ValueError: pass map_ssh.save("map_ssh.html")
1,230
def request_json(input_data): """Request JSON data from full node, given request data input. More info: http://docs.python-requests.org/en/master/""" requested_data = None # Prevent no state if all servers fail! for full_node_url in full_node_list_http: try: requested_data = requests.get(full_node_url, data=input_data) except requests.exceptions.ConnectionError as err: print("...") print("Error: {}".format(full_node_url)) print(err) print("...") continue if requested_data.status_code != 200: # Fail! Move onto the next URL! print("./\/\/\.") print("Not online: {}".format(full_node_url)) print(".\/\/\/.") continue else: print("---") print("Online: {}".format(full_node_url)) print(requested_data) num_workers = json.loads(json.dumps(requested_data.text)) print(num_workers) print("===") continue return requested_data
1,231
def rrange(x, y = 0): """ Creates a reversed range (from x - 1 down to y). Example: >>> rrange(10, 0) # => [9, 8, 7, 6, 5, 4, 3, 2, 1, 0] """ return range(x - 1, y - 1, -1)
1,232
def registry_auth_gcloud(deployment, project, service_key): """ Setup GCR authentication with a service_key This changes *global machine state* on where docker can push to! """ encrypted_service_key_path = os.path.join( 'deployments', deployment, 'secrets', service_key ) with decrypt_file(encrypted_service_key_path) as decrypted_service_key_path: subprocess.check_call([ 'gcloud', 'auth', 'activate-service-account', '--key-file', os.path.abspath(decrypted_service_key_path) ]) subprocess.check_call([ 'gcloud', 'auth', 'configure-docker' ]) yield
1,233
def einsum_outshape(subscripts, *operants): """Compute the shape of output from `numpy.einsum`. Does not support ellipses. """ if "." in subscripts: raise ValueError(f"Ellipses are not supported: {subscripts}") insubs, outsubs = subscripts.replace(",", "").split("->") if outsubs == "": return () insubs = np.array(list(insubs)) innumber = np.concatenate([op.shape for op in operants]) outshape = [] for o in outsubs: indices, = np.where(insubs == o) try: outshape.append(innumber[indices].max()) except ValueError: raise ValueError(f"Invalid subscripts: {subscripts}") return tuple(outshape)
1,234
def random_points(region, color="00FFFF", points=100, seed=0): """Generates a specified number of random points inside a given area. Args: region(feature): region to generate points color(color code): default is cyan ('00FFFF') points(numeric): how many points do you want? Default is 100 seed:(numeric): default is 0 Returns: a feature collection of locations """ if not isinstance(region, ee.Geometry): err_str = "\n\nThe region of interest must be an ee.Geometry." raise AttributeError(err_str) if color is None: color = "00FFFF" if points is None: points = 100 points_rand = ee.FeatureCollection.randomPoints( region=region, points=points, seed=seed ) return points_rand
1,235
def test_init_smart_sleep_mode(): """Test that the new state array is populated correctly.""" const = get_const("1.4") sensor_id = 1 sensor = Sensor(sensor_id) sensor.add_child_sensor(0, const.Presentation.S_LIGHT_LEVEL) sensor.add_child_sensor(1, const.Presentation.S_LIGHT_LEVEL) assert not sensor.new_state sensor.init_smart_sleep_mode() assert 0 in sensor.new_state assert isinstance(sensor.new_state[0], ChildSensor) assert 1 in sensor.new_state assert isinstance(sensor.new_state[1], ChildSensor)
1,236
def GridSearch(X_train_df, Y_train_df, hps_NaN_dict, features_l, hps_models_dict, cv=5, n_jobs=-1, randomise=True): """Launch a grid search over different value of the hps.""" # Compute all the possible combinations of hps tuples_hp = BuildHpTuples(X_train_df, hps_NaN_dict, features_l, hps_models_dict) # Creates dataframe in which all results will be stored # (allows early stopping of grid search) pd_res_df = pd.DataFrame() # Executes a Cross Validation for all possible tuples scores_param = [] # Randomisation of the tuples if randomise: np.random.shuffle(tuples_hp) for tuple_i in tqdm(tuples_hp): [best_score, best_params_n, best_params_v, pd_res_df] = subGridSearch(X_train_df, Y_train_df, tuple_i, pd_res_df, cv=cv, n_jobs=n_jobs) results = (best_score, best_params_n, best_params_v) scores_param.append(results) # Extract best scores and parameters maxi = 0 best_params_names = 0 best_params_values = 0 for sublist in scores_param: if sublist[0] > maxi: maxi = sublist[0] best_params_names = sublist[1] best_params_values = sublist[2] # Return result return maxi, best_params_names, best_params_values
1,237
def atm(): """Fixture to compute atmospheric properties just once before tests. Returns: Atmosphere object """ return Atmosphere(h_geom_truth_arr)
1,238
def toPaf(inputFile, fileformat): """ This function converts the file (input) into a Preference-based Argumentation Framework. Here the PAF is represented by one set and 3 dictionaries: - the set contains the arguments - the first dictionary contains the attacks from an argument a to another - the second one is filled with the opposite relation, it is used to go faster when searching 'who is attacking this argument' - the last one contains the preferences, the keys are the preferred arguments and the values are the ones they are preferred over file --> set*dict*dict*dict """ assert type(inputFile) is str, "The first argument of this method must be the name of the inputFile. (type String)" assert type(fileformat) is str, "The second argument of this method must be the extension of the inputFile. (type String)" args.clear() attacksFrom.clear() preferences.clear() try: file = open(inputFile, "r") except FileNotFoundError: print("Unable to open the file.") raise FileNotFoundError("Unable to find the file.") except(OSError, IOError): print("System error") raise if fileformat == "ptgf": parse_ptgf(file) elif fileformat == "papx": parse_papx(file) else: print("Unsupported format ", fileformat,", supported formats are : ") print_formats() raise UnsupportedFormatException("Unsupported format : ", fileformat) file.flush() file.close()
1,239
def get_key(my_dict: dict, val): """Get a key from a dictionary given its value. Args: my_dict: collection in dictionary format val: Value in dictionary Returns: Key from dictionary. """ for key, value in my_dict.items(): if val == value: return key return "key doesn't exist"
1,240
def test_get_mesh_grid_as_point_cloud_downsample(): """ Sample a regular grid and return the (x,y) coordinates of the sampled points. """ min_x = -3 # integer, minimum x-coordinate of 2D grid max_x = 0 # integer, maximum x-coordinate of 2D grid min_y = 2 # integer, minimum y-coordinate of 2D grid max_y = 5 # integer, maximum y-coordinate of 2D grid # return pts, a Numpy array of shape (N,2) pts = get_mesh_grid_as_point_cloud(min_x, max_x, min_y, max_y, downsample_factor=2.0) assert pts.shape == (4, 2) gt_pts = [[-3.0, 2.0], [0.0, 2.0], [-3.0, 5.0], [0.0, 5.0]] assert np.allclose(gt_pts, pts)
1,241
def closest(point, points): """works, but @@DEPRECATED!! return index into points of closest-to-point """ da = dist_array(point, points) return N.argmin(da)
1,242
def generate_provenance_json(script="unknown", params={}): """Generate the provenance in a format which can later be output as valid json. Inputs: string: The name of the script used to trigger the data generation/deidentification/synthesis process dict: The parameters used to tune the data generation etc. process; should include random seeds and other options as appropriate for the method Returns: dict: Details of the script called by the user and any relevant parameters """ commit = get_git_commit_hash() local_mods = get_local_changes() provenance = {"script": script, "commit": commit, "local_modifications": local_mods, "parameters": params} return provenance
1,243
def dot(v, w): """v_1 * w_1 + ... + v_n * w_n""" return sum(v_i * w_i for v_i, w_i in zip(v, w))
1,244
def get_users(): """Query for user accounts.""" return JsonResponse(queryFor(UserAccount))
1,245
def test_create_project_pi_too_short(client): """Create a project with too short PI.""" create_unit_admins(num_admins=2) current_unit_admins = models.UnitUser.query.filter_by(unit_id=1, is_admin=True).count() assert current_unit_admins == 3 proj_data_short_pi = proj_data.copy() proj_data_short_pi["pi"] = "" response = client.post( tests.DDSEndpoint.PROJECT_CREATE, headers=tests.UserAuth(tests.USER_CREDENTIALS["unituser"]).token(client), json=proj_data_short_pi, ) assert response.status_code == http.HTTPStatus.BAD_REQUEST created_proj = models.Project.query.filter_by( created_by="unituser", title=proj_data_short_pi["title"], pi=proj_data_short_pi["pi"], description=proj_data_short_pi["description"], ).one_or_none() assert not created_proj
1,246
def get_added_and_removed_pitches( chord_root_tpc: int, chord_type: ChordType, changes: str, key_tonic_tpc: int, key_mode: KeyMode, ) -> Dict[str, str]: """ Get a mapping of pitch alterations from the given chord. Pitches are given and returned with PitchType TPC because accidental-specific math is required to correctly apply accidentals. Parameters ---------- chord_root_tpc : int The root pitch of the given chord, in TPC notation. chord_type : ChordType The type of the given chord. changes : str A string of the changes or alterations of a given chord, like "64" or "+b2". key_tonic_tpc : int The tonic pitch of the current key, including any relative root, in TPC notation. key_mode : KeyMode The mode of the current key, including any relative root. Returns ------- changed_pitches : Dict[str, str] A dictionary representing pitch alterations to the given chord. Each entry represents a mapping of original_pitch -> new_pitch, represented as a string of their TPC integer. If original_pitch is empty, then the new_pitch is simply added. If new_pitch begins with "+", then it is added in an upper octave. """ added_pitches = [] removed_pitches = [] # First, we have to find the chord numeral degree, since changes are notated numerically # relative to the chord's tonal pitch class. # i.e., any "2" change to a IV chord will have some V in it, regardless of any accidentals. chord_root_str = get_pitch_string(chord_root_tpc, PitchType.TPC) for degree in range(1, 8): interval = get_interval_from_scale_degree(str(degree), True, key_mode, PitchType.TPC) pitch_str = get_pitch_string(interval + key_tonic_tpc, PitchType.TPC) if pitch_str[0] == chord_root_str[0]: break changes_list = split_changes_into_list(changes) # Calculate added pitches first for change in changes_list: while change[0] in "v^+": change = change[1:] # Find the scale degree for this change accidental_count, new_change = get_accidental_adjustment(change, in_front=True) accidental_count = abs(accidental_count) octave = "+" if int(new_change) >= 8 else "" # Convert change to be relative to the key tonic, including accidentals change_degree = (int(new_change) + degree - 2) % 7 # -2 since both are 1-indexed change_degree += 1 # Convert back to 1-indexing change_degree_str = change[:accidental_count] + str(change_degree) # Calculate interval above scale degree, including additional octaves interval = get_interval_from_scale_degree(change_degree_str, True, key_mode, PitchType.TPC) # Store added pitch, including "+" if the pitch is an octave up added_pitches.append(octave + str(interval + key_tonic_tpc)) # Calculate chord vector in ascending pitch order chord_vector = get_vector_from_chord_type(chord_type, PitchType.TPC, chord_root_tpc) chord_vector = np.where(chord_vector == 1)[0] ascending_chord_vector = [] for degree in range(1, 8): interval = get_interval_from_scale_degree(str(degree), True, key_mode, PitchType.TPC) pitch_str = get_pitch_string(interval + chord_root_tpc, PitchType.TPC) for pitch in chord_vector: if get_pitch_string(pitch, PitchType.TPC)[0] == pitch_str[0]: ascending_chord_vector.append(pitch) # Calculate removed pitches for change in changes_list: if change[0] == "+": # Added pitch only - no deletion removed_pitches.append("") _, new_change = get_accidental_adjustment(change, in_front=True) if change[0] == "^" or (new_change in "246" and change[0] == "#"): # Replaces the above pitch if change == "#6" and len(ascending_chord_vector) == 3: # Special case: If #6 occurs for a triad, it is an addition, # since it cannot be a lower replacement to a non-existent 7 removed_pitches.append("") continue # 2 replaces the 2nd chord pitch, 4 replaces the 3rd, etc. removed_pitches.append(str(ascending_chord_vector[int(change[-1]) // 2])) elif change[0] == "v" or (new_change in "246" and change[0] != "#"): # Replaces the below pitch # 2 replaces the 1st chord pitch, 4 replaces the 2nd, etc. removed_pitches.append(str(ascending_chord_vector[int(change[-1]) // 2 - 1])) else: # No removed pitch removed_pitches.append("") return {removed: added for removed, added in zip(removed_pitches, added_pitches)}
1,247
def get_n_random_points_in_region(region_mesh, N, s=None): """ Gets N random points inside (or on the surface of) a mesh """ region_bounds = region_mesh.bounds() if s is None: s = int(N * 2) X = np.random.randint(region_bounds[0], region_bounds[1], size=s) Y = np.random.randint(region_bounds[2], region_bounds[3], size=s) Z = np.random.randint(region_bounds[4], region_bounds[5], size=s) pts = [[x, y, z] for x, y, z in zip(X, Y, Z)] ipts = region_mesh.insidePoints(pts).points() if N <= ipts.shape[0]: return ipts[np.random.choice(ipts.shape[0], N, replace=False), :] else: return get_n_random_points_in_region(region_mesh, N, s=int(N * 4))
1,248
def test_get_issues_for_org_merges_issues_pull_requests(github, requests_mock): """ The '_get_issues_for_org' helper should merge both issues with pull requests and treat them both as issues """ repo1 = fixtures.repository( issues=[fixtures.issue(), fixtures.issue(), fixtures.issue()], pull_requests=[fixtures.issue(pr=True)], private=False ) repo2 = fixtures.repository( issues=[fixtures.issue(), fixtures.issue()], pull_requests=[fixtures.issue(pr=True), fixtures.issue(pr=True)], private=False ) expected_titles_are_pr = {} for repo in [repo1, repo2]: for issue in repo['issues']['nodes']: expected_titles_are_pr[issue['title']] = False for pull_request in repo['pullRequests']['nodes']: expected_titles_are_pr[pull_request['title']] = True api_response_body = fixtures.api_response_body(repos=[repo1, repo2]) requests_mock.add_github_graphql(json=api_response_body) issues = github._get_issues_for_org(requests.Session(), 'org') titles_are_pr = {i['title']: i['is_pull_request'] for i in issues} assert titles_are_pr == expected_titles_are_pr
1,249
def create_computer_query(tx, computer, query, value, rel): """Creates the queries for a computer. Arguments: tx {neo4j.Session} -- Neo4j session computer {dict} -- Single computer object. query {str} -- Query to use. value {[type]} -- Value to read from: LocalAdmins, DcomUsers or RemoteDesktopUsers rel {[type]} -- Value to set: AdminTo, ExecuteDCOM or CanRDP """ for entry in computer[value]: aType = entry['Type'] aName = entry['Name'] statement = query.format(aType, rel) props = {'name': computer['Name'], 'target': aName} tx.run(statement, props=props)
1,250
def make_octad_tables(basis): """Return tables for encoding and decoding octads. Octads are numbered in the lexicographical order as given by the numbers of the corresponding Golay codewords in 'gcode' representation. The function returns a triple (oct_enc_table, oct_dec_table, oct_enc_offset) Given octad o, the corresponding Golay code word in gcode representation is oct_dec_table(o), for 0 <= o < 759. Given an octad (or a complement of an octad) v in 'gcode' representation, the number of the corresponding octad is: (oct_enc_table[v1] >> 1) + 3 * (v1 >> 3) - oct_enc_offset , where v1 = v & 0x7ff. The vector v is a (possibly complemented) octad if the following condition holds: oct_enc_table[v1] < 255 . It is not a complemented octad if in addition we have: (v >> 12) & oct_enc_table[v1] & 1 == 0 """ codewords = lin_table(basis[:11]) oct_dec_table = numpy.zeros(759, dtype = uint16) octad = 0 d = {} w = {} for gcode, vector in enumerate(codewords[:2048]): weight = bw24(vector) if weight in [8, 16]: oct_dec_table[octad] = gcode + ((weight & 16) << 7) d[gcode] = octad - 3 * (gcode >> 3) w[gcode] = weight >> 4 octad += 1 assert octad == 759 d_min, d_max = min(d.values()), max(d.values()) assert d_min <= 0 assert d_max - d_min < 127 oct_enc_table = numpy.full(2048, 255, dtype = uint8) for gcode, dict_value in d.items(): new_value = dict_value - d_min oct_enc_table[gcode] = w[gcode] + 2 * new_value return oct_enc_table, oct_dec_table, -d_min
1,251
def fetch_profile(request): """ attaches the user.profile object into the request object""" context = {} if request.user.is_authenticated(): profile_module_name = get_my_profile_module_name() profile = getattr(request, profile_module_name, None) if profile != None: context[profile_module_name] = profile return context
1,252
def storeLabledImagesInFile(): """Consolidates the images in the emotion directories and stores them in data.npy and lables.npy file. Does virtual sampling for classes which do not have sufficient samples""" data = [] labels = [] if os.path.exists(OUTPUT_DIRECTORY): images = [] noOfImages = [] for dir in EMOTION_DIRECTORIES: if os.path.exists(OUTPUT_DIRECTORY + dir): images.append(os.listdir(OUTPUT_DIRECTORY + dir)) noOfImages.append(len(images[-1])) targetCount = max(noOfImages) for i in range(0, len(EMOTION_DIRECTORIES)): if os.path.exists(OUTPUT_DIRECTORY + EMOTION_DIRECTORIES[i]): mask = np.zeros((100, 100)) for j in range(0, targetCount): if (j != 0 and j % noOfImages[i] == 0): mask = np.random.randint(0, 3, (100, 100)) face = cv2.imread(OUTPUT_DIRECTORY + EMOTION_DIRECTORIES[i] + "/" + images[i][j % noOfImages[i]])[:, :, 1] face = face + mask face[np.where(face >= 256)] = 255 data.append(face) labels.append(i) np.save(OUTPUT_DIRECTORY + "/data", np.array(data)) np.save(OUTPUT_DIRECTORY + "/labels", np.array(labels)) else: print("Invalid path " + OUTPUT_DIRECTORY) return False
1,253
def test_device_put_method_empty_model_name(flask_app, db): # pylint: disable=unused-argument """ To verify that registration device method is working properly and response is correct""" registration = create_registration(REG_REQ_DATA, uuid.uuid4()) registration.update_status('Pending Review') headers = {'Content-Type': 'multipart/form-data'} request_data = copy.deepcopy(REQUEST_DATA) request_data['reg_id'] = registration.id request_data['model_name'] = '' rv = flask_app.put(DEVICE_REGISTRATION_API, data=request_data, headers=headers) data = json.loads(rv.data.decode('utf-8')) assert rv.status_code == 422 assert 'model_name' in data assert data['model_name'][0] == 'model name value should be between 1 and 1000'
1,254
def to_local_op(input): """Returns the local tensor of a consistent tensor. Args: input (Tensor): the input tensor. For example: .. code-block:: python >>> import oneflow as flow >>> import numpy as np >>> np_arr = np.array([0.5, 0.6, 0.7]).astype(np.float32) >>> input = flow.Tensor(np_arr) >>> placement = flow.placement("cpu", {0:range(1)}) >>> consistent_tensor = input.to_consistent(placement, [flow.sbp.split(0)]) >>> consistent_tensor.to_local() tensor([0.5, 0.6, 0.7], dtype=oneflow.float32) """ assert input.is_consistent, "input must be a consistent tensor!" return flow.F.to_local(input)
1,255
def col_round(x): """ As Python 3 rounds a 0.5 fraction to the closest even number, the floor and ceil methods are used here to round 0.5 up to the next digit and 0.4 down to the previous one. """ frac = x - math.floor(x) if frac < 0.5: return math.floor(x) return math.ceil(x)
1,256
def _create_notebook(client, headers: dict[str, str]) -> str: """Create a notebook. :param client: Test API client. :param headers: Headers with the access token. :return: Notebook ID. """ n = {"name": "Test Notebook"} r = client.post("/notebooks/notebook", headers=headers, json=n) return r.json["result"]["id"]
1,257
def iterate_steps(n): """Calculate derivatives for different parameter classes, and plot""" for vary in ['bary', 'halo', 'progenitor']: print(n, vary) step_convergence(n, Nstep=10, vary=vary) choose_step(n, Nstep=10, vary=vary)
1,258
def createTables(): """ Populate the array with names of sql DDL files """ for sqlFileName in ["Address.sql", "Electricity.sql", "CodeViolationsReport.sql", "FireRescueEMSResponse.sql", "NaturalGasReport.sql", "WaterReport.sql"]: try: runSqlFile("create/" + sqlFileName) print "Created table '{}'".format(sqlFileName.split(".sql")[0]) except Exception as e: pass
1,259
def plot_range_range_rate(x_sat_orbdyn_stm:np.ndarray, x_obs_multiple:np.ndarray, t_sec: np.array): """ Plots range and range rate relative to the station Args: x_sat_orbdyn_stm (np.ndarray): satellite trajectory array. x_obs_multiple (np.ndarray): observer positions. t_sec (np.ndarray): array of timesteps. """ if len(x_obs_multiple.shape) == 2: x_obs_multiple = np.expand_dims(x_obs_multiple, axis=2) fig = plt.figure(figsize=(14,14)) n_obs = x_obs_multiple.shape[2] for i in range(n_obs): r, rr = range_range_rate(x_sat_orbdyn_stm, x_obs_multiple[:,:,i]) ax1 = fig.add_subplot(n_obs, 2, i*2+1) ax1.plot(t_sec, r) ax1.set_xlabel('Time (s)') ax1.set_ylabel('Range (m)') ax1.grid(':') ax1.title.set_text('Station 1 - Range') ax2 = fig.add_subplot(n_obs, 2, i*2+2) ax2.plot(t_sec, rr) ax2.set_xlabel('Time (s)') ax2.set_ylabel('Range rate (m/s)') ax2.grid(':') ax2.title.set_text('Station 1 - Range Rate') fig.subplots_adjust(hspace=0.3) return fig
1,260
def parse_autostep(rule): """ Parse the autostep line """ parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument("--autoscreenshot", dest="autoscreenshot", action="store") args = clean_args(vars(parser.parse_args(rules))) parser = None return args
1,261
def AUTO_TECHSUPPORT_GLOBAL_state(db, state): """ Knob to make techsupport invocation event-driven based on core-dump generation """ table = "AUTO_TECHSUPPORT" key = "GLOBAL" data = { "state": state, } try: update_entry_validated(db.cfgdb, table, key, data, create_if_not_exists=True) except Exception as err: exit_with_error(f"Error: {err}", fg="red")
1,262
def _make_tag(tagname: str, content: str = "", **attrs) -> str: """Creates a tag.""" tag = f"<{tagname} " for key, value in attrs.items(): if key == "raw": tag += " " + value continue if key == "cls": key = "class" if isinstance(value, float): value = round(value, 2) tag += f"{_slugify(key)}='{value}' " tag += f">{content}</{tagname}>" return tag
1,263
def download(url: str, dest: Optional[str] = None, extract: bool=True, ignore_if_exists: bool = False, compression: Optional[str] = None): """ Download a file from the internet. Args: url: the url to download dest: destination file if extract=False, or destionation dir if extract=True. If None, it will be the last part of URL. extract: extract a tar.gz or zip file? ignore_if_exists: don't do anything if file exists Returns: the destination filename. """ base_url = url.split("?")[0] if dest is None: dest = [f for f in base_url.split("/") if f][-1] if os.path.exists(dest) and ignore_if_exists: return dest stream = UrlStream(url) extension = base_url.split(".")[-1].lower() if extract and extension in ['gz', 'bz2', 'zip', 'tgz', 'tar']: os.makedirs(dest, exist_ok=True) if extension == "gz" and not base_url.endswith(".tar.gz"): decompressed_file = gzip.GzipFile(fileobj=stream) with open(os.path.join(dest, url.split("/")[-1][:-3]), 'wb') as f: while True: d = decompressed_file.read(1024 * 1024) if not d: break f.write(d) else: if extension in ['gz', 'bz2', "tgz", "tar"]: decompressed_file = tarfile.open(fileobj=stream, mode='r|' + (compression or ( "gz" if extension == "tgz" else extension))) elif extension == 'zip': decompressed_file = zipfile.ZipFile(stream, mode='r') else: assert False, "Invalid extension: %s" % extension decompressed_file.extractall(dest) else: try: with open(dest, 'wb') as f: for d in stream.iter_content(1024 * 1024): f.write(d) except: os.remove(dest) raise return dest
1,264
def action_functions(action_id: str): """Determines which function needs to be run.""" action_mappings = { NewIncidentSubmission.form_slack_view: [report_incident_from_submitted_form], UpdateParticipantCallbacks.submit_form: [update_participant_from_submitted_form], UpdateParticipantCallbacks.update_view: [update_update_participant_modal], } # this allows for unique action blocks e.g. invite-user or invite-user-1, etc for key in action_mappings.keys(): if key in action_id: return action_mappings[key] return []
1,265
def get_default_sender(): """ Determines the sender / to address for outgoing emails. """ try: return os.environ["EMAIL"] except KeyError: # Guess. # Not sure if euid is the right one to use here. user = pwd.getpwuid(os.geteuid()).pw_name host = socket.getfqdn() return f"{user}@{host}"
1,266
def setup_function(function): """ Make sure there are no adapters defined before start of test """ clear_adapters() # Setup a basic int adapter for all tests @adapter((str, float, int), (int, str)) def to_int(obj, to_cls): return to_cls(obj)
1,267
def get_best_response_actions_as_string(best_response_actions): """Turns a dict<bytes, int> into a bytestring compatible with C++. i.e. the bytestring can be copy-pasted as the brace initialization for a {std::unordered_,std::,absl::flat_hash_}map<std::string, int>. Args: best_response_actions: A dict mapping bytes to ints. Returns: A bytestring that can be copy-pasted to brace-initialize a C++ std::map<std::string, T>. """ best_response_keys = sorted(best_response_actions.keys()) best_response_strings = [ "%s: %i" % (k, best_response_actions[k]) for k in best_response_keys ] return "{%s}" % (", ".join(best_response_strings))
1,268
def rank_velocity_genes(adata, vkey="velocity_S", prefix_store="rank", **kwargs): """Rank genes based on their raw and absolute velocities for each cell group. Parameters ---------- adata: :class:`~anndata.AnnData` AnnData object that contains the gene-wise velocities. vkey: str (default: 'velocity_S') The velocity key. prefix_store: str (default: 'rank') The prefix added to the key for storing the returned in adata. kwargs: Keyword arguments passed to `vf.rank_genes`. Returns ------- adata: :class:`~anndata.AnnData` AnnData object which has the rank dictionary for velocities in `.uns`. """ rdict = rank_genes(adata, vkey, **kwargs) rdict_abs = rank_genes(adata, vkey, abs=True, **kwargs) adata.uns[prefix_store + "_" + vkey] = rdict adata.uns[prefix_store + "_abs_" + vkey] = rdict_abs return adata
1,269
def build_genome(tree, genome): """ Goes through a tree and builds a genome from all codons in the subtree. :param tree: An individual's derivation tree. :param genome: The list of all codons in a subtree. :return: The fully built genome of a subtree. """ if tree.codon: # If the current node has a codon, append it to the genome. genome.append(tree.codon) for child in tree.children: # Recurse on all children. genome = child.build_genome(genome) return genome
1,270
def fixture_yaml_formatting_fixtures(fixture_filename: str) -> Tuple[str, str, str]: """Get the contents for the formatting fixture files. To regenerate these fixtures, please run ``test/fixtures/test_regenerate_formatting_fixtures.py``. Ideally, prettier should not have to change any ``formatting-after`` fixtures. """ before_path = formatting_before_fixtures_dir / fixture_filename prettier_path = formatting_prettier_fixtures_dir / fixture_filename after_path = formatting_after_fixtures_dir / fixture_filename before_content = before_path.read_text() prettier_content = prettier_path.read_text() formatted_content = after_path.read_text() return before_content, prettier_content, formatted_content
1,271
def test_partial_trace_4_by_4(): """Test for 4-by-4 matrix.""" test_input_mat = np.arange(1, 17).reshape(4, 4) pt_1 = partial_trace(test_input_mat, [1], [2, 2]) pt_2 = partial_trace(test_input_mat, [2], [2, 2]) expected_pt_1 = np.array([[12, 14], [20, 22]]) bool_mat = np.isclose(expected_pt_1, pt_1) np.testing.assert_equal(np.all(bool_mat), True) expected_pt_2 = np.array([[7, 11], [23, 27]]) bool_mat = np.isclose(expected_pt_2, pt_2) np.testing.assert_equal(np.all(bool_mat), True)
1,272
def get_base_parser(*args, **kwargs): """ Main parser """ parser=argparse.ArgumentParser(*args, **kwargs) # formatter_class=argparse.ArgumentDefaultsHelpFormatter, # ) # parser.add_argument('--help_all', '--help_model', '--help_dataset', '--help_strategy', '--help_task', '--help_ptracker', # action=PrintHelpAction, nargs=0, # help="Print help for given model, dataset, task, strategy args") parser.add_argument('--task', type=str, default='fsl', choices=TASKS.keys(), help='Task name') parser.add_argument('--dataset', type=str, default='mini', choices=DATASETS.keys(), help='Dataset name') parser.add_argument('--model', type=str, default='protonet', choices=MODELS.keys(), help='FSL method name') parser.add_argument('--backbone', type=str, default='Conv4', choices=BACKBONES.keys(), help='Backbone neural network name') parser.add_argument('--strategy', type=str, default=None, choices=STRATEGIES.keys(), help='Imbalance strategy. If None, no imbalance strategy is used') parser.add_argument('--gpu', default='0', help='gpu number or "cpu"') parser.add_argument('--seed', type=int, default=0) parser.add_argument('--deterministic', type=str2bool, nargs='?', const=True, default=True, help='If true, the tasks are generated deterministically based on the given seed') parser.add_argument('--results_folder', type=str, default="../experiments/dummy/", # default="../../experiments/", help='parent folder where all experiments are saved') parser.add_argument('--experiment_name', type=str, default="default") parser.add_argument('--experiment_folder', type=str, default=None, help='experiment folder used to save checkpoints and results') parser.add_argument('--clean_folder', type=str2bool, nargs='?', const=True, default=False, help='Clears the experiment folder if it exists') parser.add_argument('--storage_friendly', type=str2bool, nargs='?', const=True, default=True, help='Deletes previously saved models systematically, only keeps best, latest model') parser.add_argument('--data_path', type=str, default="data/", help='Data folder with datasets in named subdirectories.') parser.add_argument('--continue_from', type=str, default=None, help="Continue from a checkpoint file, epoch, or 'latest', 'best', or 'from_scratch'/None.") parser.add_argument('--load_backbone_only', type=str2bool, nargs='?', const=True, default=False, help="Loads the backbone only from 'continue_from'") parser.add_argument('--dummy_run', type=str2bool, nargs='?', const=True, default=False, help='A dry run of the settings with a 1 epoch and validation, a reduced number of tasks, no saving') parser.add_argument('--conventional_split', type=str2bool, nargs='?', const=True, default=None, help='Joins classes in meta-training and meta-validation datasets. ' 'Then conventional 80%%-20%% split for train-val datasets. ' 'If None, will be split automatically based on model.') parser.add_argument('--conventional_split_from_train_only', type=str2bool, nargs='?', const=True, default=False, help='Performs conventional 80%%-20%% data split from the train dataset only,' ' without joining with the validation split. Working only when meta-dataset reduced, see' ' data.dataset_utils.prep_datasets() for details.') parser.add_argument('--backbone_channel_dim', type=int, default=64, help='Number of channels of the backbone model.') parser.add_argument('--tqdm', type=str2bool, nargs='?', const=True, default=False, help="Enable/Disable tqdm, especially useful when running experiment and redirecting to files") group = parser.add_argument_group('TASK SAMPLING OPTIONS') group.add_argument('--num_epochs', type=int, default=100, help="If none, then will stop training after achieving a stopping criterion, see ExperimentBuilder") group.add_argument('--num_tasks_per_epoch', type=int, default=500) group.add_argument('--num_tasks_per_validation', type=int, default=200, help="Number of tasks to evaluate on after every epoch.") group.add_argument('--num_tasks_per_testing', type=int, default=600, help="Number of tasks to evaluate on after meta-training.") group.add_argument('--evaluate_on_test_set_only', '--test', type=str2bool, nargs='?', const=True, default=False, help="If present, no (further) training is performed and only the test dataset is evaluated.") group.add_argument('--val_or_test', type=str, choices=["test","val"], default="val", help="Dataset to perform validation on. Default val") group.add_argument('--no_val_loop', type=str2bool, nargs='?', const=True, default=False, help="No validation loop. Default=False, meaning assume there is a validation loop.") group.add_argument('--test_performance_tag', type=str, default="test", help='The tag name for the performance file evaluated on test set, eg "test" in epoch-###_test.json') group = parser.add_argument_group('VISUALISATION OPTIONS') group.add_argument('--fix_class_distribution', type=str2bool, nargs='?', const=True, default=False, help='If present, will fix the class distribution such that the model will be evaluated and tested ' 'on the same set of classes between tasks.') group.add_argument('--count_samples_stats', type=str2bool, nargs='?', const=True, default=False, help='If true, counts the images and stores the distribution stats of images shown during the run') return parser
1,273
def MakeBuildDirectory(context=None): """Prepares the build and work directories.""" if context is None: raise ValueError("context can't be None") build_dir = config.CONFIG.Get("PyInstaller.build_dir", context=context) work_path = config.CONFIG.Get("PyInstaller.workpath_dir", context=context) CleanDirectory(build_dir) CleanDirectory(work_path)
1,274
def conv_bboxinfo_bboxXYHW_to_centerscale(bbox_xyhw, bLooseBox = False): """ from (bbox_xyhw) -> (center, scale) Args: bbox_xyhw: [minX,minY,W,H] bLooseBox: if true, draw less tight box with sufficient margin (SPIN's default setting) Output: center: bbox center scale: scaling images before cropping. reference size is 200 pix (why??). >1.0 means size up, <1.0 means size down. See get_transform() h = 200 * scale t = np.zeros((3, 3)) t[0, 0] = float(res[1]) / h t[1, 1] = float(res[0]) / h t[0, 2] = res[1] * (-float(center[0]) / h + .5) t[1, 2] = res[0] * (-float(center[1]) / h + .5) t[2, 2] = 1 """ center = [bbox_xyhw[0] + bbox_xyhw[2]/2, bbox_xyhw[1] + bbox_xyhw[3]/2] if bLooseBox: scaleFactor =1.2 scale = scaleFactor*max(bbox_xyhw[2], bbox_xyhw[3])/200 #This is the one used in SPIN's pre-processing. See preprocessdb/coco.py else: scale = max(bbox_xyhw[2], bbox_xyhw[3])/200 return center, scale
1,275
def add_video(db: Session, video: schemas.Video): """ Adds video to table video_library model used is Video. Attributes: - video_user_id: int, non-nullable, foreign_key - video_link: string, non-nullable, unique - video_name: string, non-nullable, - video_height: int, non-nullable, - video_width: int, non-nullable, - file_format: string, non-nullable, - ts_upload: datetime, non-nullable, - categories: string, nullable, - description: string, nullable, - length: int, non-nullable, - views: int, non-nullable - no_likes: int, non-nullable, - no_dislikes: int, non-nullable, Args: - db: Session - video: schemas.Video Returns: - db_video object """ db_video = models.Video( video_username=video.video_username, video_link=video.video_link, video_name=video.video_name, video_height=video.video_height, video_width=video.video_width, file_format=video.file_format, ts_upload=get_timestamp_now(), categories=video.categories, description=video.description, length=video.length, views=video.views, no_likes=video.no_likes, no_dislikes=video.no_dislikes, ) # add to database db.add(db_video) db.commit() db.refresh(db_video) return db_video
1,276
def compute_principal_axes(xyz_centered, weights=None, twodim=True): """ :param xyz_centered: [list_of_xs, lst_of_ys, list_of_zs] :param weights: weights of each pixel :param twodim: whether to compute two main axes in xy plane, or three axes in 3D image. :return: ax1, ax2, (ax3 if not twodim else None) """ if twodim: xyz_centered = xyz_centered[:2] cov = np.cov(xyz_centered, aweights=weights)#covariance between the variables x,y,z. pixels are the observations evals, evecs = np.linalg.eig(cov)#MB: it seems to be for finding the main axis of the worm # sort eigenvalues in decreasing order sort_indices = np.argsort(evals)[::-1] ax1 = evecs[:, sort_indices[0]] ax2 = evecs[:, sort_indices[1]] if twodim: ax3 = None else: ax3 = evecs[:, sort_indices[2]] return ax1, ax2, ax3
1,277
def stream_http_get(S, dest): """Get contents of http://dest/ via HTTP/1.0 and samclasses.StreamSession S.""" C = S.connect(dest) C.send('GET / HTTP/1.0\r\n\r\n') while True: line = stream_readline(C).strip() if line.find('Content-Length: ') == 0: clen = int(line.split()[1]) if line == '': break s = C.recv(clen, timeout=None) time.sleep(2.0) C.close() return s
1,278
def get_definition(division_id, path=None): """ Returns the expected contents of a definition file. """ config = {} division = Division.get(division_id, from_csv=ocd_division_csv) # Determine slug, domain and authority. name = division.name if not name: print('%-60s unknown name: check slug and domain manually' % division.id) if division._type == 'country': slug = 'Federal electoral districts' config['domain'] = name config['authority'] = ['Her Majesty the Queen in Right of Canada'] elif division._type in ('province', 'territory'): slug = '%s electoral districts' % name config['domain'] = name config['authority'] = ['Her Majesty the Queen in Right of %s' % name] elif division._type in ('cd', 'csd'): province_or_territory_sgc_code = type_id(division.id)[:2] if province_or_territory_sgc_code == '24' and division.id in divisions_with_boroughs(): slug = re.compile(r'\A%s (boroughs|districts)\Z' % name) elif province_or_territory_sgc_code == '12' and division.attrs['classification'] != 'T': slug = '%s districts' % name elif province_or_territory_sgc_code == '47' and division.attrs['classification'] != 'CY': slug = '%s divisions' % name elif province_or_territory_sgc_code == '48' and division.attrs['classification'] == 'MD': slug = '%s divisions' % name elif province_or_territory_sgc_code == '24': if division.id in quartiers: slug = '%s quartiers' % name else: slug = '%s districts' % name else: slug = '%s wards' % name config['domain'] = '%s, %s' % (name, province_or_territory_abbreviation(division.id)) if province_or_territory_sgc_code == '12' and 'boundaries/ca_ns_districts/' in path: config['authority'] = ['Her Majesty the Queen in Right of Nova Scotia'] elif province_or_territory_sgc_code == '13' and 'boundaries/ca_nb_wards/' in path: config['authority'] = ['Her Majesty the Queen in Right of New Brunswick'] elif province_or_territory_sgc_code == '24' and 'boundaries/ca_qc_' in path: config['authority'] = ['Directeur général des élections du Québec'] elif province_or_territory_sgc_code == '47' and division.attrs['classification'] != 'CY': config['authority'] = ['MuniSoft'] elif division._type == 'csd': config['authority'] = authorities + [division.attrs['organization_name']] else: config['authority'] = [''] # We have no expectation for the authority of a Census division elif division._type == 'borough': province_or_territory_sgc_code = type_id(division.parent.id)[:2] if name: slug = '%s districts' % name config['domain'] = '%s, %s, %s' % (name, division.parent.name, province_or_territory_abbreviation(division.parent.id)) else: slug = None config['domain'] = None if province_or_territory_sgc_code == '24': config['authority'] = ['Directeur général des élections du Québec'] else: config['authority'] = [division.parent.attrs['organization_name']] else: raise Exception('%s: Unrecognized OCD type %s' % (division.id, division._type)) return (slug, config)
1,279
def api_last_blog_update(request):
    """Return the date of the last blog update.

    This is a PRIVATE API.

    Format: __lastblogupdate.json

    JSON return:
        {'lastupdate': '2019-01-31'}
      or if none available:
        {'lastupdate': None}
    """
    api_code = enter_api_call('api_last_blog_update', request)

    if not request or request.GET is None:
        ret = Http404(HTTP404_NO_REQUEST('/__lastblogupdate.json'))
        exit_api_call(api_code, ret)
        raise ret

    lastupdate = None
    try:
        with open(settings.OPUS_LAST_BLOG_UPDATE_FILE, 'r') as fp:
            lastupdate = fp.read().strip()
    except:
        try:
            log.error('api_last_blog_update: Failed to read file "%s"',
                      settings.OPUS_LAST_BLOG_UPDATE_FILE)
        except:
            log.error('api_last_blog_update: Failed to read file UNKNOWN')

    ret = json_response({'lastupdate': lastupdate})
    exit_api_call(api_code, ret)
    return ret
1,280
def main():
    """
    Main entry point for testing the methods of this class.
    :return:
    """
    # DataProcessing("../data_example/test.tsv","../data_example/test.tfrecords").test_txt2tfrecords()
    # DataProcessing("../data_example/train.tsv","../data_example/train.tfrecords").train_txt2tfrecords()
    # DataProcessing("../feature_data/tsz_submission_12.tfrecords","../submission/sub.csv").parase_tfrecords_to_dataFrame(1500)
    # data = DataProcessing("../data_example/train.tfrecords",False).load_train_data(2000)
    # data = DataProcessing("../data_example/test.tfrecords",False).load_test_data(1500)
    # print(data.head())
1,281
def pearson(arr1, arr2):
    """
    calculate pearson correlation between two numpy arrays.

    :param arr1: one array, the feature is a column. the shape is `m * n`
    :param arr2: the other array, the feature is a column. the shape is `m * k`
    :return: a pearson score np.array, the shape is `k * n`
    """
    assert arr1.shape[0] == arr2.shape[0]
    n = arr1.shape[0]
    sums = np.multiply.outer(arr2.sum(0), arr1.sum(0))
    stds = np.multiply.outer(arr2.std(0), arr1.std(0))
    return (arr2.T.dot(arr1) - sums / n) / stds / n
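A small sanity check (not part of the original source): the column-wise scores from pearson should match what np.corrcoef reports for an individual column pair.

import numpy as np

rng = np.random.default_rng(1)
a = rng.normal(size=(100, 3))   # m * n
b = rng.normal(size=(100, 2))   # m * k

scores = pearson(a, b)          # shape (2, 3): scores[j, i] correlates b[:, j] with a[:, i]
reference = np.corrcoef(b[:, 0], a[:, 0])[0, 1]
print(np.isclose(scores[0, 0], reference))  # expected: True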
1,282
def check_input(image):
    """Check that the provided image consists of a single connected domain of pixels.
    """
    # Check that the input image has no floating pixels.
    labeled_array, num_features = label(image.astype(int) + 1)
    assert num_features == 1, "The input image must contain a single solid domain of connected pixels but it appears to have floating pixels"

    # Check that the input image has no holes.
    s = np.sum(np.abs(image.astype(int)[1:, :] - image.astype(int)[0:-1, :]), axis=0)
    assert np.alltrue(s <= 2), "The input image must contain a single solid domain of connected pixels but it appears to have holes"
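An illustrative call (assumed, not from the original source): a solid rectangle passes both assertions, while a mask with a hole would trip the second one. The original code appears to rely on scipy's label() being imported at module level.

import numpy as np
from scipy.ndimage import label

mask = np.zeros((8, 8), dtype=bool)
mask[2:6, 2:6] = True    # one solid connected block
check_input(mask)        # passes silently

mask[3:5, 3:5] = False   # punch a hole in the middle
# check_input(mask)      # would raise an AssertionError about holes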
1,283
def exact_match_filter(query_set, field, values):
    """Check if a field exactly matches a value."""
    return field_filter(lambda x, y: Q(**{x: y}), query_set, field, values)
1,284
def clean_savepoints(using=None):
    """
    Resets the counter used to generate unique savepoint ids in this thread.
    """
    get_connection(using).clean_savepoints()
1,285
def get_move_descriptions(get_moved_ids, initial_state, current_state, obj_stuff, sort_attributes, obj_attributes):
    """
    Get all 'move' descriptions from the current state (if any).

    Parameters
    ----------
    get_moved_ids: function
        Function that extracts the id of objects that are being moved.
    initial_state: nd.array
        Initial state of the environment.
    current_state: nd.array
        Current state of the environment.
    obj_stuff: list of objects and their sizes
        List of initial objects {type, color, category} and their sizes.
    sort_attributes: function
        Function that separates adjective and name attributes.
    obj_attributes: list of list
        List of the list of object attributes for each object.

    Returns
    -------
    descr: list of str
        List of 'move' descriptions satisfied by the current state.
    """
    obj_moved = get_moved_ids(initial_state, current_state)
    verb = 'Move'
    move_descriptions = []
    for i_obj in obj_moved:
        att = obj_attributes[i_obj]
        adj_att, name_att = sort_attributes(att)

        for adj in adj_att:
            quantifier = 'any'
            for name in name_att:
                move_descriptions.append('{} {} {}'.format(verb, adj, name))
            move_descriptions.append('{} {} {} object'.format(verb, quantifier, adj))
        for name in name_att:
            move_descriptions.append('{} any {}'.format(verb, name))

    return move_descriptions.copy()
1,286
def reliability_diagram(labels, probs, class_conditional=False, y_axis='accuracy', img=False):
    """Reliability Diagram plotting confidence against accuracy.

    Note that this reliability diagram is created by looking at the calibration of
    the set of datapoints that surround each datapoint, not through mutually
    exclusive bins.

    Args:
        labels: label vector.
        probs: probability matrix out of a softmax.
        class_conditional: whether to visualize every class independently, or
            conflate classes.
        y_axis: takes 'accuracy' or 'error'. Set y_axis to 'error' to graph the
            calibration error (confidence - accuracy) against the accuracy instead.
        img: return as image rather than as a plot.

    Returns:
        fig: matplotlib.pyplot figure.
    """
    probs = np.array(probs)
    labels = np.array(labels)
    probs, _ = verify_probability_shapes(probs)

    labels_matrix = one_hot_encode(labels, probs.shape[1])
    if class_conditional:
        for class_index in range(probs.shape[1]):
            if img:
                return to_image(plot_diagram(
                    probs[:, class_index], labels_matrix[:, class_index], y_axis))
            else:
                return plot_diagram(
                    probs[:, class_index], labels_matrix[:, class_index], y_axis)
    else:
        if img:
            return to_image(
                plot_diagram(probs.flatten(), labels_matrix.flatten(), y_axis))
        else:
            return plot_diagram(probs.flatten(), labels_matrix.flatten(), y_axis)
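A hedged call sketch, assuming the helpers referenced above (verify_probability_shapes, one_hot_encode, plot_diagram, to_image) are available from the same module; the labels and probabilities are made up for illustration.

import numpy as np

# Hypothetical 3-class predictions for 5 samples (rows sum to 1).
labels = np.array([0, 2, 1, 0, 2])
probs = np.array([[0.7, 0.2, 0.1],
                  [0.1, 0.3, 0.6],
                  [0.2, 0.5, 0.3],
                  [0.6, 0.3, 0.1],
                  [0.2, 0.2, 0.6]])

fig = reliability_diagram(labels, probs, y_axis='accuracy')
# fig.savefig('reliability.png')  # if a matplotlib figure is returned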
1,287
def GuessSlugFromPath(path):
    """Returns the slug."""
    if path.endswith('index.md'):
        # If it ends with index, get the second last path component.
        return path.split('/')[-2]
    else:
        # Otherwise, just get the filename.
        return path.split('/')[-1].split('.')[0]
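Two illustrative calls (the paths are assumed, not from the original source):

print(GuessSlugFromPath('docs/getting-started/index.md'))  # -> 'getting-started'
print(GuessSlugFromPath('docs/guides/deployment.md'))      # -> 'deployment'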
1,288
def get_file(fname, origin, cache_subdir='datasets', file_hash=None):
    """Downloads a file from a URL if not already in the cache.

    ref: https://github.com/keras-team/keras/blob/7a39b6c62d43c25472b2c2476bd2a8983ae4f682/keras/utils/data_utils.py#L123

    By default the file at the url `origin` is downloaded to the
    CACHE_DIR `~/.kerasy`, placed in the cache_subdir `datasets`,
    and given the filename `fname`. The final location of a file
    `example.txt` would therefore be `~/.kerasy/datasets/example.txt`.

    You have to make a directory `~/.kerasy` and `~/.kerasy/datasets`,
    and check whether you can access these directories using
    `os.access("DIRECTORY", os.W_OK)`

    If this method returns False, you have to change the ownership of them like
    ```
    $ sudo chown iwasakioshuto: ~/.kerasy
    $ sudo chown iwasakioshuto: ~/.kerasy/datasets
    ```
    """
    # /Users/<username>/.kerasy/`cache_subdir`
    DATADIR = os.path.join(DATADIR_BASE, cache_subdir)
    if not os.path.exists(DATADIR):
        os.makedirs(DATADIR)

    fpath = os.path.join(DATADIR, fname)
    if not os.path.exists(fpath):
        print('Downloading data from', origin)
        error_msg = 'URL fetch failure on {} : {} -- {}'
        try:
            try:
                request.urlretrieve(origin, fpath)
            except HTTPError as e:
                raise Exception(error_msg.format(origin, e.code, e.msg))
            except URLError as e:
                raise Exception(error_msg.format(origin, e.errno, e.reason))
        except (Exception, KeyboardInterrupt):
            if os.path.exists(fpath):
                os.remove(fpath)
            raise
    return fpath
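A hedged usage sketch, assuming DATADIR_BASE points at ~/.kerasy as the docstring describes; the URL below is only a placeholder:

# Hypothetical call: downloads once, then reuses the cached copy on later calls.
path = get_file(
    fname='iris.data',
    origin='https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data',
    cache_subdir='datasets',
)
print(path)  # e.g. ~/.kerasy/datasets/iris.data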
1,289
def simplified_fit(train_loader, val_loader, model, loss_fn, optimizer, n_epochs, is_cuda_available, metrics=[],
                   start_epoch=0, scheduler=None, log_interval=1):
    """
    Run a simple train/validation loop for `n_epochs` epochs.

    Returns a tuple `(train_list, valid_list)` with the metric values recorded
    after each training and validation pass.
    """
    train_list = []
    valid_list = []
    log_interval = len(train_loader) // 2

    if scheduler != None:
        for epoch in range(0, start_epoch):
            scheduler.step()

    for epoch in range(start_epoch, n_epochs):
        if scheduler != None:
            scheduler.step()

        # Train stage
        train_loss, _metrics = train_epoch(train_loader, model, loss_fn, optimizer, is_cuda_available,
                                           log_interval, metrics)

        message = 'Epoch: {}/{}. Train set: Average loss: {:.4f}'.format(epoch + 1, n_epochs, train_loss)
        for metric in _metrics:
            message += '\t{}: {}'.format(metric.name(), metric.value())
            train_list.append(metric.value())

        # Validation stage
        if val_loader != None:
            val_loss, _metrics = test_epoch(val_loader, model, loss_fn, is_cuda_available, metrics)
            val_loss /= len(val_loader)

            message += '\nEpoch: {}/{}. Validation set: Avg loss: {:.4f}'.format(epoch + 1, n_epochs, val_loss)
            for metric in _metrics:
                message += '\t{}: {}'.format(metric.name(), metric.value())
                valid_list.append(metric.value())

        print(message)

    return (train_list, valid_list)
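A hedged end-to-end sketch, assuming the train_epoch/test_epoch helpers and metric objects used above are importable; the toy data, model, and hyperparameters below are placeholders, not from the original source.

import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset

X = torch.randn(256, 10)
y = torch.randint(0, 2, (256,))
train_loader = DataLoader(TensorDataset(X, y), batch_size=32, shuffle=True)

model = nn.Sequential(nn.Linear(10, 2))
loss_fn = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)

train_hist, valid_hist = simplified_fit(
    train_loader, None, model, loss_fn, optimizer,
    n_epochs=3, is_cuda_available=torch.cuda.is_available())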
1,290
def _onenorm_matrix_power_nnm(A, p):
    """
    Compute the 1-norm of a non-negative integer power of a non-negative matrix.

    Parameters
    ----------
    A : a square ndarray or matrix or sparse matrix
        Input matrix with non-negative entries.
    p : non-negative integer
        The power to which the matrix is to be raised.

    Returns
    -------
    out : float
        The 1-norm of the matrix power p of A.
    """
    # check input
    if int(p) != p or p < 0:
        raise ValueError('expected non-negative integer p')
    p = int(p)
    if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
        raise ValueError('expected A to be like a square matrix')

    # Explicitly make a column vector so that this works when A is a
    # numpy matrix (in addition to ndarray and sparse matrix).
    v = torch.ones((A.shape[0], 1), dtype=A.dtype, device=A.device)
    M = A.t()
    for _ in range(p):
        v = M.mm(v)
    return torch.max(v).item()
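A quick check (assumed example, not from the original source): for a non-negative matrix the result should equal the maximum column sum of A**p, verified here against an explicit matrix power.

import torch

A = torch.tensor([[1.0, 2.0],
                  [0.5, 1.0]])
p = 3

direct = torch.linalg.matrix_power(A, p).sum(dim=0).max().item()  # max column sum of A**p
print(_onenorm_matrix_power_nnm(A, p), direct)  # the two values should match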
1,291
def _stiff_terms_null(states, *args, **kwargs):
    """Dummy function"""
    return states
1,292
def add_dataset(dset_fp, dset_fromroot, list_ids, up3d_fp, # pylint: disable=too-many-locals, too-many-arguments, too-many-statements, too-many-branches train_list_f, val_list_f, train_val_list_f, test_list_f, scale_f, train_spec, val_spec, test_spec, target_person_size, landmarks, train_crop, test_crop, running_idx, only_missing=False, with_rlswap=True, write_gtjoints_as_lm=False, human_annotations=False): """Add a dataset to the collection.""" test_ids = [int(id_[1:6]) for id_ in test_spec] train_ids = [int(id_[1:6]) for id_ in train_spec] val_ids = [int(id_[1:6]) for id_ in val_spec] LOGGER.info("Split: %d train, %d val, %d test.", len(train_ids), len(val_ids), len(test_ids)) LOGGER.info("Writing dataset...") for im_idx in tqdm.tqdm(train_ids + val_ids + test_ids): image = scipy.misc.imread(path.join(up3d_fp, '%05d_image.png' % (im_idx))) with open(path.join(up3d_fp, '%05d_fit_crop_info.txt' % (im_idx)), 'r') as inf: cropinfo = [int(val) for val in inf.readline().strip().split()] assert image.ndim == 3 out_exists = (path.exists(path.join(dset_fp, '%05d_image.png' % (running_idx))) and path.exists(path.join(dset_fp, '%05d_ann_vis.png' % (running_idx)))) if with_rlswap and im_idx not in test_ids: out_exists = out_exists and ( path.exists(path.join(dset_fp, '%05d_image.png' % (running_idx + 1))) and path.exists(path.join(dset_fp, '%05d_ann_vis.png' % (running_idx + 1)))) if not (only_missing and out_exists or write_gtjoints_as_lm): if human_annotations: landmark_pos = np.load(path.join(up3d_fp, '%05d_joints.npy' % (im_idx))) else: landmark_pos = get_landmark_positions(path.join(up3d_fp, '%05d_body.pkl' % (im_idx)), (cropinfo[1], cropinfo[0]), landmarks) fac_y = cropinfo[0] / float(cropinfo[3] - cropinfo[2]) fac_x = cropinfo[1] / float(cropinfo[5] - cropinfo[4]) landmark_pos[:2, :] /= np.mean([fac_x, fac_y]) landmark_pos[0, :] += cropinfo[4] landmark_pos[1, :] += cropinfo[2] joints = np.load(path.join(up3d_fp, '%05d_joints.npy' % (im_idx))) joints = np.vstack((joints, np.all(joints > 0, axis=0)[None, :])) person_size = robust_person_size(joints) norm_factor = float(target_person_size) / person_size joints[:2, :] *= norm_factor if not (only_missing and out_exists or write_gtjoints_as_lm): landmark_pos[:2, :] *= norm_factor if write_gtjoints_as_lm: landmark_pos = joints.copy() image = scipy.misc.imresize(image, norm_factor, interp='bilinear') if im_idx in test_ids: crop = test_crop else: crop = train_crop if image.shape[0] > crop or image.shape[1] > crop: LOGGER.debug("Image (original %d, here %d) too large (%s)! Cropping...", im_idx, running_idx, str(image.shape[:2])) person_center = np.mean(joints[:2, joints[2, :] == 1], axis=1) crop_y, crop_x = get_crop(image, person_center, crop) image = image[crop_y[0]:crop_y[1], crop_x[0]:crop_x[1], :] landmark_pos[0, :] -= crop_x[0] landmark_pos[1, :] -= crop_y[0] assert image.shape[0] == crop or image.shape[1] == crop, ( "Error cropping image (original %d, here %d)!" % (im_idx, running_idx)) assert image.shape[0] <= crop and image.shape[1] <= crop and image.shape[2] == 3, ( "Wrong image shape (original %d, here %d)!" % (im_idx, running_idx)) vis_im = vs.visualize_pose(image, landmark_pos, scale=1.) 
if not (only_missing and out_exists): scipy.misc.imsave(path.join(dset_fp, '%05d_image.png' % (running_idx)), image) scipy.misc.imsave(path.join(dset_fp, '%05d_ann_vis.png' % (running_idx)), vis_im) if with_rlswap and im_idx not in test_ids: if landmark_pos.shape[1] == 14: landmark_pos_swapped = landmark_pos[:, rlswap_lsp] else: landmark_pos_swapped = landmark_pos[:, rlswap_landmarks_91] landmark_pos_swapped[0, :] = image.shape[1] - landmark_pos_swapped[0, :] image_swapped = image[:, ::-1, :] # Use core visualization for 14 joints. vis_im_swapped = vs.visualize_pose(image_swapped, landmark_pos_swapped, scale=1) if not (only_missing and out_exists): scipy.misc.imsave(path.join(dset_fp, '%05d_image.png' % (running_idx + 1)), image_swapped) scipy.misc.imsave(path.join(dset_fp, '%05d_ann_vis.png' % (running_idx + 1)), vis_im_swapped) list_fs = [] list_id_ids = [] if im_idx in train_ids: list_fs.append(train_val_list_f) list_id_ids.append(2) list_fs.append(train_list_f) list_id_ids.append(0) elif im_idx in val_ids: list_fs.append(train_val_list_f) list_id_ids.append(2) list_fs.append(val_list_f) list_id_ids.append(1) elif im_idx in test_ids: list_fs.append(test_list_f) list_id_ids.append(3) for list_f, list_id_idx in zip(list_fs, list_id_ids): # pylint: disable=bad-continuation list_f.write( """# %d %s 3 %d %d %d """ % ( list_ids[list_id_idx], path.join('/' + dset_fromroot, '%05d_image.png' % (running_idx)), image.shape[0], image.shape[1], landmark_pos.shape[1])) for landmark_idx, landmark_point in enumerate(landmark_pos.T): list_f.write("%d %d %d\n" % (landmark_idx + 1, int(landmark_point[0]), int(landmark_point[1]))) list_f.flush() list_ids[list_id_idx] += 1 scale_f.write("%05d_image.png %f\n" % (running_idx, norm_factor)) scale_f.flush() running_idx += 1 if with_rlswap and im_idx not in test_ids: for list_f, list_id_idx in zip(list_fs, list_id_ids): # pylint: disable=bad-continuation list_f.write( """# %d %s 3 %d %d %d """ % ( list_ids[list_id_idx], path.join('/' + dset_fromroot, '%05d_image.png' % (running_idx)), image.shape[0], image.shape[1], landmark_pos.shape[1])) for landmark_idx, landmark_point in enumerate(landmark_pos_swapped.T): list_f.write("%d %d %d\n" % (landmark_idx + 1, int(landmark_point[0]), int(landmark_point[1]))) list_f.flush() list_ids[list_id_idx] += 1 scale_f.write("%05d_image.png %f\n" % (running_idx, norm_factor)) scale_f.flush() running_idx += 1 return running_idx
1,293
def _compute_non_batch_kl(mu_a, sigma_a, mu_b, sigma_b):
    """Non-batch KL for N(mu_a, sigma_a), N(mu_b, sigma_b)."""
    # Check using numpy operations
    # This mostly repeats the tensorflow code _kl_mvn_mvn(), but in numpy.
    # So it is important to also check that KL(mvn, mvn) = 0.
    sigma_b_inv = np.linalg.inv(sigma_b)

    t = np.trace(sigma_b_inv.dot(sigma_a))
    q = (mu_b - mu_a).dot(sigma_b_inv).dot(mu_b - mu_a)
    k = mu_a.shape[0]
    l = np.log(np.linalg.det(sigma_b) / np.linalg.det(sigma_a))

    return 0.5 * (t + q - k + l)
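A small numeric check (assumed example): the KL divergence of a Gaussian with itself is 0, and it grows once the means differ.

import numpy as np

mu = np.array([0.0, 0.0])
sigma = np.eye(2)

print(_compute_non_batch_kl(mu, sigma, mu, sigma))                    # 0.0
print(_compute_non_batch_kl(mu, sigma, np.array([1.0, 0.0]), sigma))  # 0.5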
1,294
def wait_until(fn, timeout, period, message):
    """
    :param fn: callable function; polled until it returns a truthy value
    :param timeout: maximum time to wait, in seconds
    :param period: polling interval, in seconds
    :param message: message passed to TimeoutError if the wait fails
    :return: bool
    """
    mustend = time() + timeout
    while time() < mustend:
        if fn():
            return True
        sleep(period)
    raise TimeoutError(message)
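A brief usage sketch (assumed, not from the original source): poll until a file appears, checking every 0.5 s for at most 10 s. The original module appears to rely on time() and sleep() being imported from the time module.

import os

wait_until(lambda: os.path.exists('/tmp/ready.flag'),
           timeout=10, period=0.5,
           message='ready flag did not appear within 10 s')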
1,295
def test_atomic_normalized_string_length_nistxml_sv_iv_atomic_normalized_string_length_1_4(mode, save_output, output_format):
    """
    Type atomic/normalizedString is restricted by facet length with value 0.
    """
    assert_bindings(
        schema="nistData/atomic/normalizedString/Schema+Instance/NISTSchema-SV-IV-atomic-normalizedString-length-1.xsd",
        instance="nistData/atomic/normalizedString/Schema+Instance/NISTXML-SV-IV-atomic-normalizedString-length-1-4.xml",
        class_name="NistschemaSvIvAtomicNormalizedStringLength1",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )
1,296
def proxy_wasm_cpp_host_fetch_remote_crates(): """This function defines a collection of repos and should be called in a WORKSPACE file""" maybe( http_archive, name = "proxy_wasm_cpp_host__addr2line__0_16_0", url = "https://crates.io/api/v1/crates/addr2line/0.16.0/download", type = "tar.gz", sha256 = "3e61f2b7f93d2c7d2b08263acaa4a363b3e276806c68af6134c44f523bf1aacd", strip_prefix = "addr2line-0.16.0", build_file = Label("//bazel/cargo/remote:BUILD.addr2line-0.16.0.bazel"), ) maybe( http_archive, name = "proxy_wasm_cpp_host__addr2line__0_17_0", url = "https://crates.io/api/v1/crates/addr2line/0.17.0/download", type = "tar.gz", sha256 = "b9ecd88a8c8378ca913a680cd98f0f13ac67383d35993f86c90a70e3f137816b", strip_prefix = "addr2line-0.17.0", build_file = Label("//bazel/cargo/remote:BUILD.addr2line-0.17.0.bazel"), ) maybe( http_archive, name = "proxy_wasm_cpp_host__adler__1_0_2", url = "https://crates.io/api/v1/crates/adler/1.0.2/download", type = "tar.gz", sha256 = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe", strip_prefix = "adler-1.0.2", build_file = Label("//bazel/cargo/remote:BUILD.adler-1.0.2.bazel"), ) maybe( http_archive, name = "proxy_wasm_cpp_host__aho_corasick__0_7_18", url = "https://crates.io/api/v1/crates/aho-corasick/0.7.18/download", type = "tar.gz", sha256 = "1e37cfd5e7657ada45f742d6e99ca5788580b5c529dc78faf11ece6dc702656f", strip_prefix = "aho-corasick-0.7.18", build_file = Label("//bazel/cargo/remote:BUILD.aho-corasick-0.7.18.bazel"), ) maybe( http_archive, name = "proxy_wasm_cpp_host__ansi_term__0_11_0", url = "https://crates.io/api/v1/crates/ansi_term/0.11.0/download", type = "tar.gz", sha256 = "ee49baf6cb617b853aa8d93bf420db2383fab46d314482ca2803b40d5fde979b", strip_prefix = "ansi_term-0.11.0", build_file = Label("//bazel/cargo/remote:BUILD.ansi_term-0.11.0.bazel"), ) maybe( http_archive, name = "proxy_wasm_cpp_host__anyhow__1_0_45", url = "https://crates.io/api/v1/crates/anyhow/1.0.45/download", type = "tar.gz", sha256 = "ee10e43ae4a853c0a3591d4e2ada1719e553be18199d9da9d4a83f5927c2f5c7", strip_prefix = "anyhow-1.0.45", build_file = Label("//bazel/cargo/remote:BUILD.anyhow-1.0.45.bazel"), ) maybe( http_archive, name = "proxy_wasm_cpp_host__atty__0_2_14", url = "https://crates.io/api/v1/crates/atty/0.2.14/download", type = "tar.gz", sha256 = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8", strip_prefix = "atty-0.2.14", build_file = Label("//bazel/cargo/remote:BUILD.atty-0.2.14.bazel"), ) maybe( http_archive, name = "proxy_wasm_cpp_host__autocfg__1_0_1", url = "https://crates.io/api/v1/crates/autocfg/1.0.1/download", type = "tar.gz", sha256 = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a", strip_prefix = "autocfg-1.0.1", build_file = Label("//bazel/cargo/remote:BUILD.autocfg-1.0.1.bazel"), ) maybe( http_archive, name = "proxy_wasm_cpp_host__backtrace__0_3_63", url = "https://crates.io/api/v1/crates/backtrace/0.3.63/download", type = "tar.gz", sha256 = "321629d8ba6513061f26707241fa9bc89524ff1cd7a915a97ef0c62c666ce1b6", strip_prefix = "backtrace-0.3.63", build_file = Label("//bazel/cargo/remote:BUILD.backtrace-0.3.63.bazel"), ) maybe( http_archive, name = "proxy_wasm_cpp_host__bincode__1_3_3", url = "https://crates.io/api/v1/crates/bincode/1.3.3/download", type = "tar.gz", sha256 = "b1f45e9417d87227c7a56d22e471c6206462cba514c7590c09aff4cf6d1ddcad", strip_prefix = "bincode-1.3.3", build_file = Label("//bazel/cargo/remote:BUILD.bincode-1.3.3.bazel"), ) maybe( http_archive, name = "proxy_wasm_cpp_host__bitflags__1_3_2", 
url = "https://crates.io/api/v1/crates/bitflags/1.3.2/download", type = "tar.gz", sha256 = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a", strip_prefix = "bitflags-1.3.2", build_file = Label("//bazel/cargo/remote:BUILD.bitflags-1.3.2.bazel"), ) maybe( http_archive, name = "proxy_wasm_cpp_host__byteorder__1_4_3", url = "https://crates.io/api/v1/crates/byteorder/1.4.3/download", type = "tar.gz", sha256 = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610", strip_prefix = "byteorder-1.4.3", build_file = Label("//bazel/cargo/remote:BUILD.byteorder-1.4.3.bazel"), ) maybe( http_archive, name = "proxy_wasm_cpp_host__cc__1_0_72", url = "https://crates.io/api/v1/crates/cc/1.0.72/download", type = "tar.gz", sha256 = "22a9137b95ea06864e018375b72adfb7db6e6f68cfc8df5a04d00288050485ee", strip_prefix = "cc-1.0.72", build_file = Label("//bazel/cargo/remote:BUILD.cc-1.0.72.bazel"), ) maybe( http_archive, name = "proxy_wasm_cpp_host__cfg_if__1_0_0", url = "https://crates.io/api/v1/crates/cfg-if/1.0.0/download", type = "tar.gz", sha256 = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd", strip_prefix = "cfg-if-1.0.0", build_file = Label("//bazel/cargo/remote:BUILD.cfg-if-1.0.0.bazel"), ) maybe( http_archive, name = "proxy_wasm_cpp_host__clap__2_33_3", url = "https://crates.io/api/v1/crates/clap/2.33.3/download", type = "tar.gz", sha256 = "37e58ac78573c40708d45522f0d80fa2f01cc4f9b4e2bf749807255454312002", strip_prefix = "clap-2.33.3", build_file = Label("//bazel/cargo/remote:BUILD.clap-2.33.3.bazel"), ) maybe( http_archive, name = "proxy_wasm_cpp_host__cpp_demangle__0_3_3", url = "https://crates.io/api/v1/crates/cpp_demangle/0.3.3/download", type = "tar.gz", sha256 = "8ea47428dc9d2237f3c6bc134472edfd63ebba0af932e783506dcfd66f10d18a", strip_prefix = "cpp_demangle-0.3.3", build_file = Label("//bazel/cargo/remote:BUILD.cpp_demangle-0.3.3.bazel"), ) maybe( http_archive, name = "proxy_wasm_cpp_host__cranelift_bforest__0_78_0", url = "https://crates.io/api/v1/crates/cranelift-bforest/0.78.0/download", type = "tar.gz", sha256 = "cc0cb7df82c8cf8f2e6a8dd394a0932a71369c160cc9b027dca414fced242513", strip_prefix = "cranelift-bforest-0.78.0", build_file = Label("//bazel/cargo/remote:BUILD.cranelift-bforest-0.78.0.bazel"), ) maybe( http_archive, name = "proxy_wasm_cpp_host__cranelift_codegen__0_78_0", url = "https://crates.io/api/v1/crates/cranelift-codegen/0.78.0/download", type = "tar.gz", sha256 = "fe4463c15fa42eee909e61e5eac4866b7c6d22d0d8c621e57a0c5380753bfa8c", strip_prefix = "cranelift-codegen-0.78.0", build_file = Label("//bazel/cargo/remote:BUILD.cranelift-codegen-0.78.0.bazel"), ) maybe( http_archive, name = "proxy_wasm_cpp_host__cranelift_codegen_meta__0_78_0", url = "https://crates.io/api/v1/crates/cranelift-codegen-meta/0.78.0/download", type = "tar.gz", sha256 = "793f6a94a053a55404ea16e1700202a88101672b8cd6b4df63e13cde950852bf", strip_prefix = "cranelift-codegen-meta-0.78.0", build_file = Label("//bazel/cargo/remote:BUILD.cranelift-codegen-meta-0.78.0.bazel"), ) maybe( http_archive, name = "proxy_wasm_cpp_host__cranelift_codegen_shared__0_78_0", url = "https://crates.io/api/v1/crates/cranelift-codegen-shared/0.78.0/download", type = "tar.gz", sha256 = "44aa1846df275bce5eb30379d65964c7afc63c05a117076e62a119c25fe174be", strip_prefix = "cranelift-codegen-shared-0.78.0", build_file = Label("//bazel/cargo/remote:BUILD.cranelift-codegen-shared-0.78.0.bazel"), ) maybe( http_archive, name = "proxy_wasm_cpp_host__cranelift_entity__0_78_0", url = 
"https://crates.io/api/v1/crates/cranelift-entity/0.78.0/download", type = "tar.gz", sha256 = "a3a45d8d6318bf8fc518154d9298eab2a8154ec068a8885ff113f6db8d69bb3a", strip_prefix = "cranelift-entity-0.78.0", build_file = Label("//bazel/cargo/remote:BUILD.cranelift-entity-0.78.0.bazel"), ) maybe( http_archive, name = "proxy_wasm_cpp_host__cranelift_frontend__0_78_0", url = "https://crates.io/api/v1/crates/cranelift-frontend/0.78.0/download", type = "tar.gz", sha256 = "e07339bd461766deb7605169de039e01954768ff730fa1254e149001884a8525", strip_prefix = "cranelift-frontend-0.78.0", build_file = Label("//bazel/cargo/remote:BUILD.cranelift-frontend-0.78.0.bazel"), ) maybe( http_archive, name = "proxy_wasm_cpp_host__cranelift_native__0_78_0", url = "https://crates.io/api/v1/crates/cranelift-native/0.78.0/download", type = "tar.gz", sha256 = "03e2fca76ff57e0532936a71e3fc267eae6a19a86656716479c66e7f912e3d7b", strip_prefix = "cranelift-native-0.78.0", build_file = Label("//bazel/cargo/remote:BUILD.cranelift-native-0.78.0.bazel"), ) maybe( http_archive, name = "proxy_wasm_cpp_host__cranelift_wasm__0_78_0", url = "https://crates.io/api/v1/crates/cranelift-wasm/0.78.0/download", type = "tar.gz", sha256 = "1f46fec547a1f8a32c54ea61c28be4f4ad234ad95342b718a9a9adcaadb0c778", strip_prefix = "cranelift-wasm-0.78.0", build_file = Label("//bazel/cargo/remote:BUILD.cranelift-wasm-0.78.0.bazel"), ) maybe( http_archive, name = "proxy_wasm_cpp_host__crc32fast__1_2_1", url = "https://crates.io/api/v1/crates/crc32fast/1.2.1/download", type = "tar.gz", sha256 = "81156fece84ab6a9f2afdb109ce3ae577e42b1228441eded99bd77f627953b1a", strip_prefix = "crc32fast-1.2.1", build_file = Label("//bazel/cargo/remote:BUILD.crc32fast-1.2.1.bazel"), ) maybe( http_archive, name = "proxy_wasm_cpp_host__ed25519_compact__0_1_11", url = "https://crates.io/api/v1/crates/ed25519-compact/0.1.11/download", type = "tar.gz", sha256 = "f1f45ef578ef75efffba301628066d951042f6e988f21f8b548928468ba5877b", strip_prefix = "ed25519-compact-0.1.11", build_file = Label("//bazel/cargo/remote:BUILD.ed25519-compact-0.1.11.bazel"), ) maybe( http_archive, name = "proxy_wasm_cpp_host__either__1_6_1", url = "https://crates.io/api/v1/crates/either/1.6.1/download", type = "tar.gz", sha256 = "e78d4f1cc4ae33bbfc157ed5d5a5ef3bc29227303d595861deb238fcec4e9457", strip_prefix = "either-1.6.1", build_file = Label("//bazel/cargo/remote:BUILD.either-1.6.1.bazel"), ) maybe( http_archive, name = "proxy_wasm_cpp_host__env_logger__0_8_4", url = "https://crates.io/api/v1/crates/env_logger/0.8.4/download", type = "tar.gz", sha256 = "a19187fea3ac7e84da7dacf48de0c45d63c6a76f9490dae389aead16c243fce3", strip_prefix = "env_logger-0.8.4", build_file = Label("//bazel/cargo/remote:BUILD.env_logger-0.8.4.bazel"), ) maybe( http_archive, name = "proxy_wasm_cpp_host__errno__0_2_8", url = "https://crates.io/api/v1/crates/errno/0.2.8/download", type = "tar.gz", sha256 = "f639046355ee4f37944e44f60642c6f3a7efa3cf6b78c78a0d989a8ce6c396a1", strip_prefix = "errno-0.2.8", build_file = Label("//bazel/cargo/remote:BUILD.errno-0.2.8.bazel"), ) maybe( http_archive, name = "proxy_wasm_cpp_host__errno_dragonfly__0_1_2", url = "https://crates.io/api/v1/crates/errno-dragonfly/0.1.2/download", type = "tar.gz", sha256 = "aa68f1b12764fab894d2755d2518754e71b4fd80ecfb822714a1206c2aab39bf", strip_prefix = "errno-dragonfly-0.1.2", build_file = Label("//bazel/cargo/remote:BUILD.errno-dragonfly-0.1.2.bazel"), ) maybe( http_archive, name = "proxy_wasm_cpp_host__fallible_iterator__0_2_0", url = 
"https://crates.io/api/v1/crates/fallible-iterator/0.2.0/download", type = "tar.gz", sha256 = "4443176a9f2c162692bd3d352d745ef9413eec5782a80d8fd6f8a1ac692a07f7", strip_prefix = "fallible-iterator-0.2.0", build_file = Label("//bazel/cargo/remote:BUILD.fallible-iterator-0.2.0.bazel"), ) maybe( http_archive, name = "proxy_wasm_cpp_host__getrandom__0_2_3", url = "https://crates.io/api/v1/crates/getrandom/0.2.3/download", type = "tar.gz", sha256 = "7fcd999463524c52659517fe2cea98493cfe485d10565e7b0fb07dbba7ad2753", strip_prefix = "getrandom-0.2.3", build_file = Label("//bazel/cargo/remote:BUILD.getrandom-0.2.3.bazel"), ) maybe( http_archive, name = "proxy_wasm_cpp_host__gimli__0_25_0", url = "https://crates.io/api/v1/crates/gimli/0.25.0/download", type = "tar.gz", sha256 = "f0a01e0497841a3b2db4f8afa483cce65f7e96a3498bd6c541734792aeac8fe7", strip_prefix = "gimli-0.25.0", build_file = Label("//bazel/cargo/remote:BUILD.gimli-0.25.0.bazel"), ) maybe( http_archive, name = "proxy_wasm_cpp_host__gimli__0_26_1", url = "https://crates.io/api/v1/crates/gimli/0.26.1/download", type = "tar.gz", sha256 = "78cc372d058dcf6d5ecd98510e7fbc9e5aec4d21de70f65fea8fecebcd881bd4", strip_prefix = "gimli-0.26.1", build_file = Label("//bazel/cargo/remote:BUILD.gimli-0.26.1.bazel"), ) maybe( http_archive, name = "proxy_wasm_cpp_host__hashbrown__0_11_2", url = "https://crates.io/api/v1/crates/hashbrown/0.11.2/download", type = "tar.gz", sha256 = "ab5ef0d4909ef3724cc8cce6ccc8572c5c817592e9285f5464f8e86f8bd3726e", strip_prefix = "hashbrown-0.11.2", build_file = Label("//bazel/cargo/remote:BUILD.hashbrown-0.11.2.bazel"), ) maybe( http_archive, name = "proxy_wasm_cpp_host__hermit_abi__0_1_19", url = "https://crates.io/api/v1/crates/hermit-abi/0.1.19/download", type = "tar.gz", sha256 = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33", strip_prefix = "hermit-abi-0.1.19", build_file = Label("//bazel/cargo/remote:BUILD.hermit-abi-0.1.19.bazel"), ) maybe( http_archive, name = "proxy_wasm_cpp_host__hmac_sha512__0_1_9", url = "https://crates.io/api/v1/crates/hmac-sha512/0.1.9/download", type = "tar.gz", sha256 = "77e806677ce663d0a199541030c816847b36e8dc095f70dae4a4f4ad63da5383", strip_prefix = "hmac-sha512-0.1.9", build_file = Label("//bazel/cargo/remote:BUILD.hmac-sha512-0.1.9.bazel"), ) maybe( http_archive, name = "proxy_wasm_cpp_host__humantime__2_1_0", url = "https://crates.io/api/v1/crates/humantime/2.1.0/download", type = "tar.gz", sha256 = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4", strip_prefix = "humantime-2.1.0", build_file = Label("//bazel/cargo/remote:BUILD.humantime-2.1.0.bazel"), ) maybe( http_archive, name = "proxy_wasm_cpp_host__indexmap__1_7_0", url = "https://crates.io/api/v1/crates/indexmap/1.7.0/download", type = "tar.gz", sha256 = "bc633605454125dec4b66843673f01c7df2b89479b32e0ed634e43a91cff62a5", strip_prefix = "indexmap-1.7.0", build_file = Label("//bazel/cargo/remote:BUILD.indexmap-1.7.0.bazel"), ) maybe( http_archive, name = "proxy_wasm_cpp_host__io_lifetimes__0_3_3", url = "https://crates.io/api/v1/crates/io-lifetimes/0.3.3/download", type = "tar.gz", sha256 = "278e90d6f8a6c76a8334b336e306efa3c5f2b604048cbfd486d6f49878e3af14", strip_prefix = "io-lifetimes-0.3.3", build_file = Label("//bazel/cargo/remote:BUILD.io-lifetimes-0.3.3.bazel"), ) maybe( http_archive, name = "proxy_wasm_cpp_host__itertools__0_10_1", url = "https://crates.io/api/v1/crates/itertools/0.10.1/download", type = "tar.gz", sha256 = "69ddb889f9d0d08a67338271fa9b62996bc788c7796a5c18cf057420aaed5eaf", 
strip_prefix = "itertools-0.10.1", build_file = Label("//bazel/cargo/remote:BUILD.itertools-0.10.1.bazel"), ) maybe( http_archive, name = "proxy_wasm_cpp_host__itoa__0_4_8", url = "https://crates.io/api/v1/crates/itoa/0.4.8/download", type = "tar.gz", sha256 = "b71991ff56294aa922b450139ee08b3bfc70982c6b2c7562771375cf73542dd4", strip_prefix = "itoa-0.4.8", build_file = Label("//bazel/cargo/remote:BUILD.itoa-0.4.8.bazel"), ) maybe( http_archive, name = "proxy_wasm_cpp_host__lazy_static__1_4_0", url = "https://crates.io/api/v1/crates/lazy_static/1.4.0/download", type = "tar.gz", sha256 = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646", strip_prefix = "lazy_static-1.4.0", build_file = Label("//bazel/cargo/remote:BUILD.lazy_static-1.4.0.bazel"), ) maybe( http_archive, name = "proxy_wasm_cpp_host__libc__0_2_107", url = "https://crates.io/api/v1/crates/libc/0.2.107/download", type = "tar.gz", sha256 = "fbe5e23404da5b4f555ef85ebed98fb4083e55a00c317800bc2a50ede9f3d219", strip_prefix = "libc-0.2.107", build_file = Label("//bazel/cargo/remote:BUILD.libc-0.2.107.bazel"), ) maybe( http_archive, name = "proxy_wasm_cpp_host__linux_raw_sys__0_0_28", url = "https://crates.io/api/v1/crates/linux-raw-sys/0.0.28/download", type = "tar.gz", sha256 = "687387ff42ec7ea4f2149035a5675fedb675d26f98db90a1846ac63d3addb5f5", strip_prefix = "linux-raw-sys-0.0.28", build_file = Label("//bazel/cargo/remote:BUILD.linux-raw-sys-0.0.28.bazel"), ) maybe( http_archive, name = "proxy_wasm_cpp_host__log__0_4_14", url = "https://crates.io/api/v1/crates/log/0.4.14/download", type = "tar.gz", sha256 = "51b9bbe6c47d51fc3e1a9b945965946b4c44142ab8792c50835a980d362c2710", strip_prefix = "log-0.4.14", build_file = Label("//bazel/cargo/remote:BUILD.log-0.4.14.bazel"), ) maybe( http_archive, name = "proxy_wasm_cpp_host__mach__0_3_2", url = "https://crates.io/api/v1/crates/mach/0.3.2/download", type = "tar.gz", sha256 = "b823e83b2affd8f40a9ee8c29dbc56404c1e34cd2710921f2801e2cf29527afa", strip_prefix = "mach-0.3.2", build_file = Label("//bazel/cargo/remote:BUILD.mach-0.3.2.bazel"), ) maybe( http_archive, name = "proxy_wasm_cpp_host__memchr__2_4_1", url = "https://crates.io/api/v1/crates/memchr/2.4.1/download", type = "tar.gz", sha256 = "308cc39be01b73d0d18f82a0e7b2a3df85245f84af96fdddc5d202d27e47b86a", strip_prefix = "memchr-2.4.1", build_file = Label("//bazel/cargo/remote:BUILD.memchr-2.4.1.bazel"), ) maybe( http_archive, name = "proxy_wasm_cpp_host__memoffset__0_6_4", url = "https://crates.io/api/v1/crates/memoffset/0.6.4/download", type = "tar.gz", sha256 = "59accc507f1338036a0477ef61afdae33cde60840f4dfe481319ce3ad116ddf9", strip_prefix = "memoffset-0.6.4", build_file = Label("//bazel/cargo/remote:BUILD.memoffset-0.6.4.bazel"), ) maybe( http_archive, name = "proxy_wasm_cpp_host__miniz_oxide__0_4_4", url = "https://crates.io/api/v1/crates/miniz_oxide/0.4.4/download", type = "tar.gz", sha256 = "a92518e98c078586bc6c934028adcca4c92a53d6a958196de835170a01d84e4b", strip_prefix = "miniz_oxide-0.4.4", build_file = Label("//bazel/cargo/remote:BUILD.miniz_oxide-0.4.4.bazel"), ) maybe( http_archive, name = "proxy_wasm_cpp_host__more_asserts__0_2_1", url = "https://crates.io/api/v1/crates/more-asserts/0.2.1/download", type = "tar.gz", sha256 = "0debeb9fcf88823ea64d64e4a815ab1643f33127d995978e099942ce38f25238", strip_prefix = "more-asserts-0.2.1", build_file = Label("//bazel/cargo/remote:BUILD.more-asserts-0.2.1.bazel"), ) maybe( http_archive, name = "proxy_wasm_cpp_host__object__0_27_1", url = 
"https://crates.io/api/v1/crates/object/0.27.1/download", type = "tar.gz", sha256 = "67ac1d3f9a1d3616fd9a60c8d74296f22406a238b6a72f5cc1e6f314df4ffbf9", strip_prefix = "object-0.27.1", build_file = Label("//bazel/cargo/remote:BUILD.object-0.27.1.bazel"), ) maybe( http_archive, name = "proxy_wasm_cpp_host__once_cell__1_8_0", url = "https://crates.io/api/v1/crates/once_cell/1.8.0/download", type = "tar.gz", sha256 = "692fcb63b64b1758029e0a96ee63e049ce8c5948587f2f7208df04625e5f6b56", strip_prefix = "once_cell-1.8.0", build_file = Label("//bazel/cargo/remote:BUILD.once_cell-1.8.0.bazel"), ) maybe( http_archive, name = "proxy_wasm_cpp_host__parity_wasm__0_42_2", url = "https://crates.io/api/v1/crates/parity-wasm/0.42.2/download", type = "tar.gz", sha256 = "be5e13c266502aadf83426d87d81a0f5d1ef45b8027f5a471c360abfe4bfae92", strip_prefix = "parity-wasm-0.42.2", build_file = Label("//bazel/cargo/remote:BUILD.parity-wasm-0.42.2.bazel"), ) maybe( http_archive, name = "proxy_wasm_cpp_host__paste__1_0_6", url = "https://crates.io/api/v1/crates/paste/1.0.6/download", type = "tar.gz", sha256 = "0744126afe1a6dd7f394cb50a716dbe086cb06e255e53d8d0185d82828358fb5", strip_prefix = "paste-1.0.6", build_file = Label("//bazel/cargo/remote:BUILD.paste-1.0.6.bazel"), ) maybe( http_archive, name = "proxy_wasm_cpp_host__ppv_lite86__0_2_15", url = "https://crates.io/api/v1/crates/ppv-lite86/0.2.15/download", type = "tar.gz", sha256 = "ed0cfbc8191465bed66e1718596ee0b0b35d5ee1f41c5df2189d0fe8bde535ba", strip_prefix = "ppv-lite86-0.2.15", build_file = Label("//bazel/cargo/remote:BUILD.ppv-lite86-0.2.15.bazel"), ) maybe( http_archive, name = "proxy_wasm_cpp_host__proc_macro2__1_0_32", url = "https://crates.io/api/v1/crates/proc-macro2/1.0.32/download", type = "tar.gz", sha256 = "ba508cc11742c0dc5c1659771673afbab7a0efab23aa17e854cbab0837ed0b43", strip_prefix = "proc-macro2-1.0.32", build_file = Label("//bazel/cargo/remote:BUILD.proc-macro2-1.0.32.bazel"), ) maybe( http_archive, name = "proxy_wasm_cpp_host__psm__0_1_16", url = "https://crates.io/api/v1/crates/psm/0.1.16/download", type = "tar.gz", sha256 = "cd136ff4382c4753fc061cb9e4712ab2af263376b95bbd5bd8cd50c020b78e69", strip_prefix = "psm-0.1.16", build_file = Label("//bazel/cargo/remote:BUILD.psm-0.1.16.bazel"), ) maybe( http_archive, name = "proxy_wasm_cpp_host__quote__1_0_10", url = "https://crates.io/api/v1/crates/quote/1.0.10/download", type = "tar.gz", sha256 = "38bc8cc6a5f2e3655e0899c1b848643b2562f853f114bfec7be120678e3ace05", strip_prefix = "quote-1.0.10", build_file = Label("//bazel/cargo/remote:BUILD.quote-1.0.10.bazel"), ) maybe( http_archive, name = "proxy_wasm_cpp_host__rand__0_8_4", url = "https://crates.io/api/v1/crates/rand/0.8.4/download", type = "tar.gz", sha256 = "2e7573632e6454cf6b99d7aac4ccca54be06da05aca2ef7423d22d27d4d4bcd8", strip_prefix = "rand-0.8.4", build_file = Label("//bazel/cargo/remote:BUILD.rand-0.8.4.bazel"), ) maybe( http_archive, name = "proxy_wasm_cpp_host__rand_chacha__0_3_1", url = "https://crates.io/api/v1/crates/rand_chacha/0.3.1/download", type = "tar.gz", sha256 = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88", strip_prefix = "rand_chacha-0.3.1", build_file = Label("//bazel/cargo/remote:BUILD.rand_chacha-0.3.1.bazel"), ) maybe( http_archive, name = "proxy_wasm_cpp_host__rand_core__0_6_3", url = "https://crates.io/api/v1/crates/rand_core/0.6.3/download", type = "tar.gz", sha256 = "d34f1408f55294453790c48b2f1ebbb1c5b4b7563eb1f418bcfcfdbb06ebb4e7", strip_prefix = "rand_core-0.6.3", build_file = 
Label("//bazel/cargo/remote:BUILD.rand_core-0.6.3.bazel"), ) maybe( http_archive, name = "proxy_wasm_cpp_host__rand_hc__0_3_1", url = "https://crates.io/api/v1/crates/rand_hc/0.3.1/download", type = "tar.gz", sha256 = "d51e9f596de227fda2ea6c84607f5558e196eeaf43c986b724ba4fb8fdf497e7", strip_prefix = "rand_hc-0.3.1", build_file = Label("//bazel/cargo/remote:BUILD.rand_hc-0.3.1.bazel"), ) maybe( http_archive, name = "proxy_wasm_cpp_host__regalloc__0_0_32", url = "https://crates.io/api/v1/crates/regalloc/0.0.32/download", type = "tar.gz", sha256 = "a6304468554ed921da3d32c355ea107b8d13d7b8996c3adfb7aab48d3bc321f4", strip_prefix = "regalloc-0.0.32", build_file = Label("//bazel/cargo/remote:BUILD.regalloc-0.0.32.bazel"), ) maybe( http_archive, name = "proxy_wasm_cpp_host__regex__1_5_4", url = "https://crates.io/api/v1/crates/regex/1.5.4/download", type = "tar.gz", sha256 = "d07a8629359eb56f1e2fb1652bb04212c072a87ba68546a04065d525673ac461", strip_prefix = "regex-1.5.4", build_file = Label("//bazel/cargo/remote:BUILD.regex-1.5.4.bazel"), ) maybe( http_archive, name = "proxy_wasm_cpp_host__regex_syntax__0_6_25", url = "https://crates.io/api/v1/crates/regex-syntax/0.6.25/download", type = "tar.gz", sha256 = "f497285884f3fcff424ffc933e56d7cbca511def0c9831a7f9b5f6153e3cc89b", strip_prefix = "regex-syntax-0.6.25", build_file = Label("//bazel/cargo/remote:BUILD.regex-syntax-0.6.25.bazel"), ) maybe( http_archive, name = "proxy_wasm_cpp_host__region__2_2_0", url = "https://crates.io/api/v1/crates/region/2.2.0/download", type = "tar.gz", sha256 = "877e54ea2adcd70d80e9179344c97f93ef0dffd6b03e1f4529e6e83ab2fa9ae0", strip_prefix = "region-2.2.0", build_file = Label("//bazel/cargo/remote:BUILD.region-2.2.0.bazel"), ) maybe( http_archive, name = "proxy_wasm_cpp_host__rsix__0_23_9", url = "https://crates.io/api/v1/crates/rsix/0.23.9/download", type = "tar.gz", sha256 = "1f64c5788d5aab8b75441499d99576a24eb09f76fb267b36fec7e3d970c66431", strip_prefix = "rsix-0.23.9", build_file = Label("//bazel/cargo/remote:BUILD.rsix-0.23.9.bazel"), ) maybe( http_archive, name = "proxy_wasm_cpp_host__rustc_demangle__0_1_21", url = "https://crates.io/api/v1/crates/rustc-demangle/0.1.21/download", type = "tar.gz", sha256 = "7ef03e0a2b150c7a90d01faf6254c9c48a41e95fb2a8c2ac1c6f0d2b9aefc342", strip_prefix = "rustc-demangle-0.1.21", build_file = Label("//bazel/cargo/remote:BUILD.rustc-demangle-0.1.21.bazel"), ) maybe( http_archive, name = "proxy_wasm_cpp_host__rustc_hash__1_1_0", url = "https://crates.io/api/v1/crates/rustc-hash/1.1.0/download", type = "tar.gz", sha256 = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2", strip_prefix = "rustc-hash-1.1.0", build_file = Label("//bazel/cargo/remote:BUILD.rustc-hash-1.1.0.bazel"), ) maybe( http_archive, name = "proxy_wasm_cpp_host__rustc_version__0_4_0", url = "https://crates.io/api/v1/crates/rustc_version/0.4.0/download", type = "tar.gz", sha256 = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366", strip_prefix = "rustc_version-0.4.0", build_file = Label("//bazel/cargo/remote:BUILD.rustc_version-0.4.0.bazel"), ) maybe( http_archive, name = "proxy_wasm_cpp_host__semver__1_0_4", url = "https://crates.io/api/v1/crates/semver/1.0.4/download", type = "tar.gz", sha256 = "568a8e6258aa33c13358f81fd834adb854c6f7c9468520910a9b1e8fac068012", strip_prefix = "semver-1.0.4", build_file = Label("//bazel/cargo/remote:BUILD.semver-1.0.4.bazel"), ) maybe( http_archive, name = "proxy_wasm_cpp_host__serde__1_0_130", url = 
"https://crates.io/api/v1/crates/serde/1.0.130/download", type = "tar.gz", sha256 = "f12d06de37cf59146fbdecab66aa99f9fe4f78722e3607577a5375d66bd0c913", strip_prefix = "serde-1.0.130", build_file = Label("//bazel/cargo/remote:BUILD.serde-1.0.130.bazel"), ) maybe( http_archive, name = "proxy_wasm_cpp_host__serde_derive__1_0_130", url = "https://crates.io/api/v1/crates/serde_derive/1.0.130/download", type = "tar.gz", sha256 = "d7bc1a1ab1961464eae040d96713baa5a724a8152c1222492465b54322ec508b", strip_prefix = "serde_derive-1.0.130", build_file = Label("//bazel/cargo/remote:BUILD.serde_derive-1.0.130.bazel"), ) maybe( http_archive, name = "proxy_wasm_cpp_host__smallvec__1_7_0", url = "https://crates.io/api/v1/crates/smallvec/1.7.0/download", type = "tar.gz", sha256 = "1ecab6c735a6bb4139c0caafd0cc3635748bbb3acf4550e8138122099251f309", strip_prefix = "smallvec-1.7.0", build_file = Label("//bazel/cargo/remote:BUILD.smallvec-1.7.0.bazel"), ) maybe( http_archive, name = "proxy_wasm_cpp_host__stable_deref_trait__1_2_0", url = "https://crates.io/api/v1/crates/stable_deref_trait/1.2.0/download", type = "tar.gz", sha256 = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3", strip_prefix = "stable_deref_trait-1.2.0", build_file = Label("//bazel/cargo/remote:BUILD.stable_deref_trait-1.2.0.bazel"), ) maybe( http_archive, name = "proxy_wasm_cpp_host__strsim__0_8_0", url = "https://crates.io/api/v1/crates/strsim/0.8.0/download", type = "tar.gz", sha256 = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a", strip_prefix = "strsim-0.8.0", build_file = Label("//bazel/cargo/remote:BUILD.strsim-0.8.0.bazel"), ) maybe( http_archive, name = "proxy_wasm_cpp_host__syn__1_0_81", url = "https://crates.io/api/v1/crates/syn/1.0.81/download", type = "tar.gz", sha256 = "f2afee18b8beb5a596ecb4a2dce128c719b4ba399d34126b9e4396e3f9860966", strip_prefix = "syn-1.0.81", build_file = Label("//bazel/cargo/remote:BUILD.syn-1.0.81.bazel"), ) maybe( http_archive, name = "proxy_wasm_cpp_host__target_lexicon__0_12_2", url = "https://crates.io/api/v1/crates/target-lexicon/0.12.2/download", type = "tar.gz", sha256 = "d9bffcddbc2458fa3e6058414599e3c838a022abae82e5c67b4f7f80298d5bff", strip_prefix = "target-lexicon-0.12.2", build_file = Label("//bazel/cargo/remote:BUILD.target-lexicon-0.12.2.bazel"), ) maybe( http_archive, name = "proxy_wasm_cpp_host__termcolor__1_1_2", url = "https://crates.io/api/v1/crates/termcolor/1.1.2/download", type = "tar.gz", sha256 = "2dfed899f0eb03f32ee8c6a0aabdb8a7949659e3466561fc0adf54e26d88c5f4", strip_prefix = "termcolor-1.1.2", build_file = Label("//bazel/cargo/remote:BUILD.termcolor-1.1.2.bazel"), ) maybe( http_archive, name = "proxy_wasm_cpp_host__textwrap__0_11_0", url = "https://crates.io/api/v1/crates/textwrap/0.11.0/download", type = "tar.gz", sha256 = "d326610f408c7a4eb6f51c37c330e496b08506c9457c9d34287ecc38809fb060", strip_prefix = "textwrap-0.11.0", build_file = Label("//bazel/cargo/remote:BUILD.textwrap-0.11.0.bazel"), ) maybe( http_archive, name = "proxy_wasm_cpp_host__thiserror__1_0_30", url = "https://crates.io/api/v1/crates/thiserror/1.0.30/download", type = "tar.gz", sha256 = "854babe52e4df1653706b98fcfc05843010039b406875930a70e4d9644e5c417", strip_prefix = "thiserror-1.0.30", build_file = Label("//bazel/cargo/remote:BUILD.thiserror-1.0.30.bazel"), ) maybe( http_archive, name = "proxy_wasm_cpp_host__thiserror_impl__1_0_30", url = "https://crates.io/api/v1/crates/thiserror-impl/1.0.30/download", type = "tar.gz", sha256 = 
"aa32fd3f627f367fe16f893e2597ae3c05020f8bba2666a4e6ea73d377e5714b", strip_prefix = "thiserror-impl-1.0.30", build_file = Label("//bazel/cargo/remote:BUILD.thiserror-impl-1.0.30.bazel"), ) maybe( http_archive, name = "proxy_wasm_cpp_host__unicode_width__0_1_9", url = "https://crates.io/api/v1/crates/unicode-width/0.1.9/download", type = "tar.gz", sha256 = "3ed742d4ea2bd1176e236172c8429aaf54486e7ac098db29ffe6529e0ce50973", strip_prefix = "unicode-width-0.1.9", build_file = Label("//bazel/cargo/remote:BUILD.unicode-width-0.1.9.bazel"), ) maybe( http_archive, name = "proxy_wasm_cpp_host__unicode_xid__0_2_2", url = "https://crates.io/api/v1/crates/unicode-xid/0.2.2/download", type = "tar.gz", sha256 = "8ccb82d61f80a663efe1f787a51b16b5a51e3314d6ac365b08639f52387b33f3", strip_prefix = "unicode-xid-0.2.2", build_file = Label("//bazel/cargo/remote:BUILD.unicode-xid-0.2.2.bazel"), ) maybe( http_archive, name = "proxy_wasm_cpp_host__vec_map__0_8_2", url = "https://crates.io/api/v1/crates/vec_map/0.8.2/download", type = "tar.gz", sha256 = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191", strip_prefix = "vec_map-0.8.2", build_file = Label("//bazel/cargo/remote:BUILD.vec_map-0.8.2.bazel"), ) maybe( http_archive, name = "proxy_wasm_cpp_host__wasi__0_10_2_wasi_snapshot_preview1", url = "https://crates.io/api/v1/crates/wasi/0.10.2+wasi-snapshot-preview1/download", type = "tar.gz", sha256 = "fd6fbd9a79829dd1ad0cc20627bf1ed606756a7f77edff7b66b7064f9cb327c6", strip_prefix = "wasi-0.10.2+wasi-snapshot-preview1", build_file = Label("//bazel/cargo/remote:BUILD.wasi-0.10.2+wasi-snapshot-preview1.bazel"), ) maybe( http_archive, name = "proxy_wasm_cpp_host__wasmparser__0_81_0", url = "https://crates.io/api/v1/crates/wasmparser/0.81.0/download", type = "tar.gz", sha256 = "98930446519f63d00a836efdc22f67766ceae8dbcc1571379f2bcabc6b2b9abc", strip_prefix = "wasmparser-0.81.0", build_file = Label("//bazel/cargo/remote:BUILD.wasmparser-0.81.0.bazel"), ) maybe( new_git_repository, name = "proxy_wasm_cpp_host__wasmsign__0_1_2", remote = "https://github.com/jedisct1/wasmsign", commit = "fa4d5598f778390df09be94232972b5b865a56b8", build_file = Label("//bazel/cargo/remote:BUILD.wasmsign-0.1.2.bazel"), init_submodules = True, ) maybe( http_archive, name = "proxy_wasm_cpp_host__wasmtime__0_31_0", url = "https://crates.io/api/v1/crates/wasmtime/0.31.0/download", type = "tar.gz", sha256 = "311d06b0c49346d1fbf48a17052e844036b95a7753c1afb34e8c0af3f6b5bb13", strip_prefix = "wasmtime-0.31.0", build_file = Label("//bazel/cargo/remote:BUILD.wasmtime-0.31.0.bazel"), ) maybe( new_git_repository, name = "proxy_wasm_cpp_host__wasmtime_c_api_macros__0_19_0", remote = "https://github.com/bytecodealliance/wasmtime", commit = "c1a6a0523dbc59d176f708ea3d04e6edb48480ec", build_file = Label("//bazel/cargo/remote:BUILD.wasmtime-c-api-macros-0.19.0.bazel"), init_submodules = True, ) maybe( http_archive, name = "proxy_wasm_cpp_host__wasmtime_cranelift__0_31_0", url = "https://crates.io/api/v1/crates/wasmtime-cranelift/0.31.0/download", type = "tar.gz", sha256 = "ab3083a47e1ede38aac06a1d9831640d673f9aeda0b82a64e4ce002f3432e2e7", strip_prefix = "wasmtime-cranelift-0.31.0", build_file = Label("//bazel/cargo/remote:BUILD.wasmtime-cranelift-0.31.0.bazel"), ) maybe( http_archive, name = "proxy_wasm_cpp_host__wasmtime_environ__0_31_0", url = "https://crates.io/api/v1/crates/wasmtime-environ/0.31.0/download", type = "tar.gz", sha256 = "1c2d194b655321053bc4111a1aa4ead552655c8a17d17264bc97766e70073510", strip_prefix = 
"wasmtime-environ-0.31.0", build_file = Label("//bazel/cargo/remote:BUILD.wasmtime-environ-0.31.0.bazel"), ) maybe( http_archive, name = "proxy_wasm_cpp_host__wasmtime_jit__0_31_0", url = "https://crates.io/api/v1/crates/wasmtime-jit/0.31.0/download", type = "tar.gz", sha256 = "864ac8dfe4ce310ac59f16fdbd560c257389cb009ee5d030ac6e30523b023d11", strip_prefix = "wasmtime-jit-0.31.0", build_file = Label("//bazel/cargo/remote:BUILD.wasmtime-jit-0.31.0.bazel"), ) maybe( http_archive, name = "proxy_wasm_cpp_host__wasmtime_runtime__0_31_0", url = "https://crates.io/api/v1/crates/wasmtime-runtime/0.31.0/download", type = "tar.gz", sha256 = "ab97da813a26b98c9abfd3b0c2d99e42f6b78b749c0646344e2e262d212d8c8b", strip_prefix = "wasmtime-runtime-0.31.0", build_file = Label("//bazel/cargo/remote:BUILD.wasmtime-runtime-0.31.0.bazel"), ) maybe( http_archive, name = "proxy_wasm_cpp_host__wasmtime_types__0_31_0", url = "https://crates.io/api/v1/crates/wasmtime-types/0.31.0/download", type = "tar.gz", sha256 = "ff94409cc3557bfbbcce6b14520ccd6bd3727e965c0fe68d63ef2c185bf379c6", strip_prefix = "wasmtime-types-0.31.0", build_file = Label("//bazel/cargo/remote:BUILD.wasmtime-types-0.31.0.bazel"), ) maybe( http_archive, name = "proxy_wasm_cpp_host__winapi__0_3_9", url = "https://crates.io/api/v1/crates/winapi/0.3.9/download", type = "tar.gz", sha256 = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419", strip_prefix = "winapi-0.3.9", build_file = Label("//bazel/cargo/remote:BUILD.winapi-0.3.9.bazel"), ) maybe( http_archive, name = "proxy_wasm_cpp_host__winapi_i686_pc_windows_gnu__0_4_0", url = "https://crates.io/api/v1/crates/winapi-i686-pc-windows-gnu/0.4.0/download", type = "tar.gz", sha256 = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6", strip_prefix = "winapi-i686-pc-windows-gnu-0.4.0", build_file = Label("//bazel/cargo/remote:BUILD.winapi-i686-pc-windows-gnu-0.4.0.bazel"), ) maybe( http_archive, name = "proxy_wasm_cpp_host__winapi_util__0_1_5", url = "https://crates.io/api/v1/crates/winapi-util/0.1.5/download", type = "tar.gz", sha256 = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178", strip_prefix = "winapi-util-0.1.5", build_file = Label("//bazel/cargo/remote:BUILD.winapi-util-0.1.5.bazel"), ) maybe( http_archive, name = "proxy_wasm_cpp_host__winapi_x86_64_pc_windows_gnu__0_4_0", url = "https://crates.io/api/v1/crates/winapi-x86_64-pc-windows-gnu/0.4.0/download", type = "tar.gz", sha256 = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f", strip_prefix = "winapi-x86_64-pc-windows-gnu-0.4.0", build_file = Label("//bazel/cargo/remote:BUILD.winapi-x86_64-pc-windows-gnu-0.4.0.bazel"), )
1,297
def single_test(model, testdata, max_seq_len=20):
    """Get accuracy for a single model and dataloader.

    Args:
        model (nn.Module): MCTN2 Model
        testdata (torch.utils.data.DataLoader): Test Dataloader
        max_seq_len (int, optional): Maximum sequence length. Defaults to 20.

    Returns:
        dict: Dictionary containing the binary accuracy (computed with zero labels excluded).
    """
    model.eval()
    print('Start Testing ---------->>')
    pred = []
    true = []

    with torch.no_grad():
        for i, inputs in enumerate(testdata):
            # process input
            src, _, _, labels, _ = _process_input_L2(inputs, max_seq_len)

            # We only need the source text as input! No need for target!
            _, _, _, head_out = model(src)
            pred.append(head_out)
            true.append(labels)

    eval_results_include = eval_mosei_senti_return(
        torch.cat(pred, 0), torch.cat(true, 0), exclude_zero=False)
    eval_results_exclude = eval_mosei_senti_return(
        torch.cat(pred, 0), torch.cat(true, 0), exclude_zero=True)

    mae = eval_results_include[0]
    Acc1 = eval_results_include[-1]
    Acc2 = eval_results_exclude[-1]
    print('Test: MAE: {}, Acc1: {}, Acc2: {}'.format(mae, Acc1, Acc2))
    return {'Acc:': Acc2}
1,298
def generate_tiled_html_result(slide_nums, tile_summaries_dict, data_link):
    """
    Generate HTML to view the tiled images.

    Args:
        slide_nums: List of slide numbers.
        tile_summaries_dict: Dictionary of TileSummary objects keyed by slide number.
        data_link: If True, add link to tile data csv file.
    """
    slide_nums = sorted(slide_nums)

    if not slide.TILE_SUMMARY_PAGINATE:
        html = ""
        html += filter.html_header("Tiles")

        html += "  <table>\n"
        for slide_num in slide_nums:
            html += image_row(slide_num, data_link)
        html += "  </table>\n"

        html += filter.html_footer()
        text_file = open(os.path.join(slide.TILE_SUMMARY_HTML_DIR, "tiles.html"), "w")
        text_file.write(html)
        text_file.close()
    else:
        total_len = len(slide_nums)
        page_size = slide.TILE_SUMMARY_PAGINATION_SIZE
        num_pages = math.ceil(total_len / page_size)

        for page_num in range(1, num_pages + 1):
            start_index = (page_num - 1) * page_size
            end_index = (page_num * page_size) if (page_num < num_pages) else total_len
            page_slide_nums = slide_nums[start_index:end_index]

            html = ""
            html += filter.html_header("Tiles, Page %d" % page_num)

            html += "  <div style=\"font-size: 20px\">"
            if page_num > 1:
                if page_num == 2:
                    html += "<a href=\"tiles.html\">&lt;</a> "
                else:
                    html += "<a href=\"tiles-%d.html\">&lt;</a> " % (page_num - 1)
            html += "Page %d" % page_num
            if page_num < num_pages:
                html += " <a href=\"tiles-%d.html\">&gt;</a> " % (page_num + 1)
            html += "</div>\n"

            html += "  <table>\n"
            for slide_num in page_slide_nums:
                tile_summary = tile_summaries_dict[slide_num]
                html += image_row(slide_num, tile_summary, data_link)
            html += "  </table>\n"

            html += filter.html_footer()
            if page_num == 1:
                text_file = open(os.path.join(slide.TILE_SUMMARY_HTML_DIR, "tiles.html"), "w")
            else:
                text_file = open(os.path.join(slide.TILE_SUMMARY_HTML_DIR, "tiles-%d.html" % page_num), "w")
            text_file.write(html)
            text_file.close()
1,299