body
stringlengths
26
98.2k
body_hash
int64
-9,222,864,604,528,158,000
9,221,803,474B
docstring
stringlengths
1
16.8k
path
stringlengths
5
230
name
stringlengths
1
96
repository_name
stringlengths
7
89
lang
stringclasses
1 value
body_without_docstring
stringlengths
20
98.2k
def _write_metrics(metrics_mngr, tb_mngr, metrics, round_num): 'Atomic metrics writer which inlines logic from MetricsHook class.' if (not isinstance(metrics, dict)): raise TypeError('metrics should be type `dict`.') if (not isinstance(round_num, int)): raise TypeError('round_num should be type `int`.') logging.info('Metrics at round {:d}:\n{!s}'.format(round_num, pprint.pformat(metrics))) metrics_mngr.save_metrics(metrics, round_num) tb_mngr.save_metrics(metrics, round_num)
1,945,417,938,138,571,300
Atomic metrics writer which inlines logic from MetricsHook class.
utils/training_loop.py
_write_metrics
houcharlie/federated
python
def _write_metrics(metrics_mngr, tb_mngr, metrics, round_num):
    # Validate, log, and persist one round's metrics through both managers.
    if (not isinstance(metrics, dict)):
        raise TypeError('metrics should be type `dict`.')
    if (not isinstance(round_num, int)):
        raise TypeError('round_num should be type `int`.')
    # Pretty-print the full metrics dict into the log before saving.
    logging.info('Metrics at round {:d}:\n{!s}'.format(round_num, pprint.pformat(metrics)))
    metrics_mngr.save_metrics(metrics, round_num)
    tb_mngr.save_metrics(metrics, round_num)
def _check_iterative_process_compatibility(iterative_process):
    """Checks the compatibility of an iterative process with the training loop.

    Args:
      iterative_process: The object to validate.

    Raises:
      IterativeProcessCompatibilityError: If `iterative_process` is not a
        `tff.templates.IterativeProcess`, lacks a callable
        `get_model_weights` attribute that is a `tff.Computation`, or if
        that computation's input type is not assignable from the server
        state type.
    """
    error_message = 'The iterative_process argument must be of type`tff.templates.IterativeProcess`, and must have an attribute `get_model_weights`, which must be a `tff.Computation`. This computation must accept as input the state of `iterative_process`, and its output must be a nested structure of tensors matching the input shape of `validation_fn`.'
    compatibility_error = IterativeProcessCompatibilityError(error_message)
    if not isinstance(iterative_process, tff.templates.IterativeProcess):
        raise compatibility_error
    if not hasattr(iterative_process, 'get_model_weights'):
        raise compatibility_error
    elif not callable(iterative_process.get_model_weights):
        raise compatibility_error
    get_model_weights_fn = iterative_process.get_model_weights
    if not isinstance(get_model_weights_fn, tff.Computation):
        raise compatibility_error
    input_type = get_model_weights_fn.type_signature.parameter
    server_state_type = iterative_process.state_type.member
    # BUG FIX: the original computed this compatibility check but discarded
    # the boolean result, so an incompatible `get_model_weights` signature
    # slipped through validation silently. Raise when the check fails.
    if not server_state_type.is_assignable_from(input_type):
        raise compatibility_error
-6,501,721,324,460,617,000
Checks the compatibility of an iterative process with the training loop.
utils/training_loop.py
_check_iterative_process_compatibility
houcharlie/federated
python
def _check_iterative_process_compatibility(iterative_process):
    # Validate that `iterative_process` is usable by the training loop:
    # it must be a `tff.templates.IterativeProcess` with a callable
    # `get_model_weights` attribute that is a `tff.Computation`.
    error_message = 'The iterative_process argument must be of type`tff.templates.IterativeProcess`, and must have an attribute `get_model_weights`, which must be a `tff.Computation`. This computation must accept as input the state of `iterative_process`, and its output must be a nested structure of tensors matching the input shape of `validation_fn`.'
    compatibility_error = IterativeProcessCompatibilityError(error_message)
    if (not isinstance(iterative_process, tff.templates.IterativeProcess)):
        raise compatibility_error
    if (not hasattr(iterative_process, 'get_model_weights')):
        raise compatibility_error
    elif (not callable(iterative_process.get_model_weights)):
        raise compatibility_error
    get_model_weights_fn = iterative_process.get_model_weights
    if (not isinstance(get_model_weights_fn, tff.Computation)):
        raise compatibility_error
    input_type = get_model_weights_fn.type_signature.parameter
    server_state_type = iterative_process.state_type.member
    # NOTE(review): the boolean result of this check is discarded, so an
    # incompatible input type does not raise here — confirm whether that
    # is intentional.
    server_state_type.is_assignable_from(input_type)
def run(iterative_process: tff.templates.IterativeProcess,
        client_datasets_fn: Callable[[int], List[tf.data.Dataset]],
        validation_fn: Callable[[Any, int], Dict[str, float]],
        total_rounds: int,
        experiment_name: str,
        test_fn: Optional[Callable[[Any], Dict[str, float]]] = None,
        root_output_dir: Optional[str] = '/tmp/fed_opt',
        rounds_per_eval: Optional[int] = 1,
        rounds_per_checkpoint: Optional[int] = 50,
        rounds_per_profile: Optional[int] = 0):
    """Runs federated training for a given `tff.templates.IterativeProcess`.

    We assume that the iterative process has the following functional type
    signatures:

    * `initialize`: `( -> S@SERVER)` where `S` represents the server state.
    * `next`: `<S@SERVER, {B*}@CLIENTS> -> <S@SERVER, T@SERVER>` where `S`
      represents the server state, `{B*}` represents the client datasets,
      and `T` represents a python `Mapping` object.

    The iterative process must also have a callable attribute
    `get_model_weights` that takes as input the state of the iterative
    process, and returns a `tff.learning.ModelWeights` object.

    Args:
      iterative_process: A `tff.templates.IterativeProcess` instance to run.
      client_datasets_fn: Function accepting an integer argument (the round
        number) and returning a list of client datasets to use as federated
        data for that round.
      validation_fn: A callable accepting a `tff.learning.ModelWeights` and
        the current round number, and returning a dict of evaluation metrics.
      total_rounds: The number of federated training rounds to perform.
      experiment_name: The name of the experiment being run. This will be
        appended to the `root_output_dir` for purposes of writing outputs.
      test_fn: An optional callable accepting a `tff.learning.ModelWeights`
        and returning a dict of test set metrics, computed once at the end.
      root_output_dir: The name of the root output directory for writing
        experiment outputs.
      rounds_per_eval: How often to compute validation metrics.
      rounds_per_checkpoint: How often to checkpoint the iterative process
        state. If you expect the job to restart frequently, this should be
        small. If no interruptions are expected, this can be made larger.
      rounds_per_profile: Experimental setting. If set to a value greater
        than 0, this dictates how often a TensorFlow profiler is run.

    Returns:
      The final `state` of the iterative process after training.
    """
    _check_iterative_process_compatibility(iterative_process)
    if (not callable(client_datasets_fn)):
        raise TypeError('client_datasets_fn should be callable.')
    if (not callable(validation_fn)):
        raise TypeError('validation_fn should be callable.')
    if ((test_fn is not None) and (not callable(test_fn))):
        raise TypeError('test_fn should be callable.')
    logging.info('Starting iterative_process training loop...')
    initial_state = iterative_process.initialize()
    (checkpoint_mngr, metrics_mngr, tb_mngr, profiler) = _setup_outputs(root_output_dir, experiment_name, rounds_per_profile)
    # Resume from the most recent checkpoint if one exists; otherwise start
    # fresh from `initial_state` at round 0.
    logging.info('Asking checkpoint manager to load checkpoint.')
    (state, round_num) = checkpoint_mngr.load_latest_checkpoint(initial_state)
    if (state is None):
        logging.info('Initializing experiment from scratch.')
        state = initial_state
        round_num = 0
    else:
        logging.info('Restarted from checkpoint round %d', round_num)
        round_num += 1
        # Drop any metrics written after the checkpointed round so the log
        # is consistent with the restored state.
        metrics_mngr.clear_metrics(round_num)
    current_model = iterative_process.get_model_weights(state)
    loop_start_time = time.time()
    loop_start_round = round_num
    while (round_num < total_rounds):
        data_prep_start_time = time.time()
        federated_train_data = client_datasets_fn(round_num)
        train_metrics = {'prepare_datasets_secs': (time.time() - data_prep_start_time)}
        training_start_time = time.time()
        prev_model = current_model
        try:
            with profiler(round_num):
                (state, round_metrics) = iterative_process.next(state, federated_train_data)
        except (tf.errors.FailedPreconditionError, tf.errors.NotFoundError, tf.errors.InternalError) as e:
            # Transient TF failures: log and retry the same round (round_num
            # is deliberately not incremented here).
            logging.warning('Caught %s exception while running round %d:\n\t%s', type(e), round_num, e)
            continue
        current_model = iterative_process.get_model_weights(state)
        train_metrics['training_secs'] = (time.time() - training_start_time)
        # L2 distance between consecutive models, as a convergence signal.
        train_metrics['model_delta_l2_norm'] = _compute_numpy_l2_difference(current_model, prev_model)
        train_metrics['client_drift'] = state.client_drift
        train_metrics.update(round_metrics)
        loop_time = (time.time() - loop_start_time)
        loop_rounds = ((round_num - loop_start_round) + 1)
        logging.info('Round {:2d}, {:.2f}s per round in average.'.format(round_num, (loop_time / loop_rounds)))
        # Checkpoint periodically and always on the final round.
        if (((round_num % rounds_per_checkpoint) == 0) or (round_num == (total_rounds - 1))):
            save_checkpoint_start_time = time.time()
            checkpoint_mngr.save_checkpoint(state, round_num)
            train_metrics['save_checkpoint_secs'] = (time.time() - save_checkpoint_start_time)
        metrics = {'train': train_metrics}
        if ((round_num % rounds_per_eval) == 0):
            evaluate_start_time = time.time()
            validation_metrics = validation_fn(current_model, round_num)
            validation_metrics['evaluate_secs'] = (time.time() - evaluate_start_time)
            metrics['eval'] = validation_metrics
        _write_metrics(metrics_mngr, tb_mngr, metrics, round_num)
        round_num += 1
    # Final evaluation (and optional test-set evaluation) after training.
    metrics = {}
    evaluate_start_time = time.time()
    validation_metrics = validation_fn(current_model, round_num)
    validation_metrics['evaluate_secs'] = (time.time() - evaluate_start_time)
    metrics['eval'] = validation_metrics
    if test_fn:
        test_start_time = time.time()
        test_metrics = test_fn(current_model)
        test_metrics['evaluate_secs'] = (time.time() - test_start_time)
        metrics['test'] = test_metrics
    _write_metrics(metrics_mngr, tb_mngr, metrics, total_rounds)
    return state
-629,683,971,590,818,000
Runs federated training for a given `tff.templates.IterativeProcess`. We assume that the iterative process has the following functional type signatures: * `initialize`: `( -> S@SERVER)` where `S` represents the server state. * `next`: `<S@SERVER, {B*}@CLIENTS> -> <S@SERVER, T@SERVER>` where `S` represents the server state, `{B*}` represents the client datasets, and `T` represents a python `Mapping` object. The iterative process must also have a callable attribute `get_model_weights` that takes as input the state of the iterative process, and returns a `tff.learning.ModelWeights` object. Args: iterative_process: A `tff.templates.IterativeProcess` instance to run. client_datasets_fn: Function accepting an integer argument (the round number) and returning a list of client datasets to use as federated data for that round. validation_fn: A callable accepting a `tff.learning.ModelWeights` and the current round number, and returning a dict of evaluation metrics. Used to compute validation metrics throughout the training process. total_rounds: The number of federated training rounds to perform. experiment_name: The name of the experiment being run. This will be appended to the `root_output_dir` for purposes of writing outputs. test_fn: An optional callable accepting a `tff.learning.ModelWeights` and returning a dict of test set metrics. Used to compute test metrics at the end of the training process. root_output_dir: The name of the root output directory for writing experiment outputs. rounds_per_eval: How often to compute validation metrics. rounds_per_checkpoint: How often to checkpoint the iterative process state. If you expect the job to restart frequently, this should be small. If no interruptions are expected, this can be made larger. rounds_per_profile: Experimental setting. If set to a value greater than 0, this dictates how often a TensorFlow profiler is run. Returns: The final `state` of the iterative process after training.
utils/training_loop.py
run
houcharlie/federated
python
def run(iterative_process: tff.templates.IterativeProcess,
        client_datasets_fn: Callable[[int], List[tf.data.Dataset]],
        validation_fn: Callable[[Any, int], Dict[str, float]],
        total_rounds: int,
        experiment_name: str,
        test_fn: Optional[Callable[[Any], Dict[str, float]]] = None,
        root_output_dir: Optional[str] = '/tmp/fed_opt',
        rounds_per_eval: Optional[int] = 1,
        rounds_per_checkpoint: Optional[int] = 50,
        rounds_per_profile: Optional[int] = 0):
    # Runs `total_rounds` of federated training for `iterative_process`,
    # checkpointing/evaluating periodically and returning the final state.
    _check_iterative_process_compatibility(iterative_process)
    if (not callable(client_datasets_fn)):
        raise TypeError('client_datasets_fn should be callable.')
    if (not callable(validation_fn)):
        raise TypeError('validation_fn should be callable.')
    if ((test_fn is not None) and (not callable(test_fn))):
        raise TypeError('test_fn should be callable.')
    logging.info('Starting iterative_process training loop...')
    initial_state = iterative_process.initialize()
    (checkpoint_mngr, metrics_mngr, tb_mngr, profiler) = _setup_outputs(root_output_dir, experiment_name, rounds_per_profile)
    # Resume from checkpoint when available, otherwise start at round 0.
    logging.info('Asking checkpoint manager to load checkpoint.')
    (state, round_num) = checkpoint_mngr.load_latest_checkpoint(initial_state)
    if (state is None):
        logging.info('Initializing experiment from scratch.')
        state = initial_state
        round_num = 0
    else:
        logging.info('Restarted from checkpoint round %d', round_num)
        round_num += 1
        metrics_mngr.clear_metrics(round_num)
    current_model = iterative_process.get_model_weights(state)
    loop_start_time = time.time()
    loop_start_round = round_num
    while (round_num < total_rounds):
        data_prep_start_time = time.time()
        federated_train_data = client_datasets_fn(round_num)
        train_metrics = {'prepare_datasets_secs': (time.time() - data_prep_start_time)}
        training_start_time = time.time()
        prev_model = current_model
        try:
            with profiler(round_num):
                (state, round_metrics) = iterative_process.next(state, federated_train_data)
        except (tf.errors.FailedPreconditionError, tf.errors.NotFoundError, tf.errors.InternalError) as e:
            # Transient TF failure: log and retry the same round number.
            logging.warning('Caught %s exception while running round %d:\n\t%s', type(e), round_num, e)
            continue
        current_model = iterative_process.get_model_weights(state)
        train_metrics['training_secs'] = (time.time() - training_start_time)
        train_metrics['model_delta_l2_norm'] = _compute_numpy_l2_difference(current_model, prev_model)
        train_metrics['client_drift'] = state.client_drift
        train_metrics.update(round_metrics)
        loop_time = (time.time() - loop_start_time)
        loop_rounds = ((round_num - loop_start_round) + 1)
        logging.info('Round {:2d}, {:.2f}s per round in average.'.format(round_num, (loop_time / loop_rounds)))
        # Checkpoint periodically and always on the final round.
        if (((round_num % rounds_per_checkpoint) == 0) or (round_num == (total_rounds - 1))):
            save_checkpoint_start_time = time.time()
            checkpoint_mngr.save_checkpoint(state, round_num)
            train_metrics['save_checkpoint_secs'] = (time.time() - save_checkpoint_start_time)
        metrics = {'train': train_metrics}
        if ((round_num % rounds_per_eval) == 0):
            evaluate_start_time = time.time()
            validation_metrics = validation_fn(current_model, round_num)
            validation_metrics['evaluate_secs'] = (time.time() - evaluate_start_time)
            metrics['eval'] = validation_metrics
        _write_metrics(metrics_mngr, tb_mngr, metrics, round_num)
        round_num += 1
    # Final validation (and optional test) pass after the loop finishes.
    metrics = {}
    evaluate_start_time = time.time()
    validation_metrics = validation_fn(current_model, round_num)
    validation_metrics['evaluate_secs'] = (time.time() - evaluate_start_time)
    metrics['eval'] = validation_metrics
    if test_fn:
        test_start_time = time.time()
        test_metrics = test_fn(current_model)
        test_metrics['evaluate_secs'] = (time.time() - test_start_time)
        metrics['test'] = test_metrics
    _write_metrics(metrics_mngr, tb_mngr, metrics, total_rounds)
    return state
def setup_inp(inp):
    """Convert the puzzle input string into a grid (list of lists of cells).

    Every 'G' (goblin) or 'E' (elf) cell is replaced by a tuple
    (character, hit_points, has_moved) = (char, 200, False); all other
    cells remain single-character strings.
    """
    rows = []
    for line in inp.split('\n'):
        cells = list(line)
        for idx, ch in enumerate(cells):
            if ch == 'G' or ch == 'E':
                # Units start with 200 HP and have not acted this round.
                cells[idx] = (ch, 200, False)
        rows.append(cells)
    return rows
-1,784,923,850,771,478,300
Convert list of strings into list of lists, with elves/goblins replaced by tuples
2018/15/helpme.py
setup_inp
mark-inderhees/aoc
python
def setup_inp(inp):
    # Parse the input string into a grid; 'G'/'E' cells become
    # (char, hit_points=200, has_moved=False) tuples.
    grid = []
    for (rowI, row) in enumerate(inp.split('\n')):
        grid.append([x for x in row])
        for (colI, col) in enumerate(row):
            if (col in ['G', 'E']):
                char_tup = (col, 200, False)
                grid[rowI][colI] = char_tup
    return grid
def move_character(inp, from_row, from_col, to_row, to_col, char):
    """Move a unit on the grid and mark it as having already acted.

    The vacated square becomes floor ('.') and the destination gets a
    fresh (symbol, hit_points, True) tuple so the round loop knows this
    unit has already taken its turn.
    """
    symbol, hit_points = char[0], char[1]
    inp[from_row][from_col] = '.'
    inp[to_row][to_col] = (symbol, hit_points, True)
    return inp
-2,405,267,525,196,605,400
Move character on grid, and increment the i value so we can tell we already moved it
2018/15/helpme.py
move_character
mark-inderhees/aoc
python
def move_character(inp, from_row, from_col, to_row, to_col, char):
    # Relocate a unit; the destination tuple's third field (True) marks it
    # as having already moved this round.
    inp[from_row][from_col] = '.'
    inp[to_row][to_col] = (char[0], char[1], True)
    return inp
def attack(inp, row, col, enemy, damage=3):
    """Attack the weakest adjacent enemy, if one is there.

    Ties on hit points are broken in reading order. Returns the modified
    board and a boolean indicating whether the target died.
    """
    if not adjacent_enemy(inp, row, col, enemy):
        return (inp, False)
    # Collect hit points of every enemy in the four orthogonal squares.
    candidates = {}
    for r, c in ((row - 1, col), (row + 1, col), (row, col - 1), (row, col + 1)):
        cell = inp[r][c]
        if cell[0] == enemy:
            candidates[(r, c)] = cell[1]
    # Weakest HP first; tuple comparison on coordinates gives reading order.
    weakest_hp = min(candidates.values())
    target_r, target_c = min(k for k in candidates if candidates[k] == weakest_hp)
    target = inp[target_r][target_c]
    remaining = target[1] - damage
    if remaining <= 0:
        # The target dies and leaves an open floor tile.
        inp[target_r][target_c] = '.'
        return (inp, True)
    inp[target_r][target_c] = (target[0], remaining, target[2])
    return (inp, False)
-7,804,793,766,275,739,000
Attack weakest adjacent enemy, if one is there If multiple weakest enemies, attack in reading order Return the modified board, and a boolean indicating whether anyone died
2018/15/helpme.py
attack
mark-inderhees/aoc
python
def attack(inp, row, col, enemy, damage=3):
    """Attack weakest adjacent enemy, if one is there.

    If multiple weakest enemies, attack in reading order.
    Return the modified board, and a boolean indicating whether anyone died.
    """
    if (not adjacent_enemy(inp, row, col, enemy)):
        return (inp, False)
    # Map each adjacent enemy coordinate to its hit points.
    enemies = {}
    for coords in [((row - 1), col), ((row + 1), col), (row, (col - 1)), (row, (col + 1))]:
        if (inp[coords[0]][coords[1]][0] == enemy):
            enemies[coords] = inp[coords[0]][coords[1]][1]
    # Keep only the weakest; sorting coordinates yields reading order.
    min_hp = min(enemies.values())
    enemies = [x for x in enemies if (enemies[x] == min_hp)]
    enemies.sort()
    coords = enemies[0]
    enemy = inp[coords[0]][coords[1]]
    enemy_pts = (enemy[1] - damage)
    enemy_tup = (enemy[0], enemy_pts, enemy[2])
    if (enemy_pts <= 0):
        # Target died: replace it with open floor.
        inp[coords[0]][coords[1]] = '.'
        return (inp, True)
    else:
        inp[coords[0]][coords[1]] = enemy_tup
        return (inp, False)
def adjacent_enemy(inp, rowI, colI, enemy):
    """Return True if an enemy unit occupies one of the four orthogonal neighbours."""
    neighbours = (
        inp[rowI + 1][colI],
        inp[rowI - 1][colI],
        inp[rowI][colI + 1],
        inp[rowI][colI - 1],
    )
    return any(cell[0] == enemy for cell in neighbours)
3,321,948,015,826,023,400
Check for enemy in adjacent square
2018/15/helpme.py
adjacent_enemy
mark-inderhees/aoc
python
def adjacent_enemy(inp, rowI, colI, enemy):
    # True when any of the four orthogonal neighbours is an enemy unit;
    # indexing [0] works for both terrain strings and unit tuples.
    if any(((x[0] == enemy) for x in [inp[(rowI + 1)][colI], inp[(rowI - 1)][colI], inp[rowI][(colI + 1)], inp[rowI][(colI - 1)]])):
        return True
    return False
def get_best_move(best_moves):
    """Pick the winning first step from BFS candidate tuples.

    Each candidate is (first_move, number_of_moves, tile_coordinates),
    e.g. ((13, 21), 6, (19, 21)). Selection order: fewest steps, then
    target tile in reading order, then first move in reading order.
    Returns None when there are no candidates.
    """
    if not best_moves:
        return None
    # Keep only the shortest paths.
    shortest = min(steps for (_, steps, _) in best_moves)
    candidates = [m for m in best_moves if m[1] == shortest]
    # Among those, keep the reading-order-first target tile.
    best_tile = min(m[2] for m in candidates)
    candidates = [m for m in candidates if m[2] == best_tile]
    # Finally take the reading-order-first starting step.
    return min(m[0] for m in candidates)
-3,099,320,645,593,120,300
Takes a list of tuples of (first_move, number_of_moves, tile_coordinates), which might look like - ((12, 22), 8, (17, 25)) ((12, 22), 8, (18, 24)) ((12, 22), 8, (19, 21)) ((13, 21), 6, (19, 21)) ((13, 23), 6, (17, 25)) ((13, 23), 6, (18, 24)) ((14, 22), 6, (17, 25)) ((14, 22), 6, (18, 24)) ((14, 22), 6, (19, 21)) And filters/sorts them to satisfy all the conditions
2018/15/helpme.py
get_best_move
mark-inderhees/aoc
python
def get_best_move(best_moves):
    """Takes a list of (first_move, number_of_moves, tile_coordinates) tuples
    and filters/sorts them to satisfy all the conditions: fewest moves,
    then target tile in reading order, then first move in reading order.
    """
    if (not best_moves):
        return None
    # Filter to the shortest paths.
    min_steps = min([x[1] for x in best_moves])
    best_moves = [x for x in best_moves if (x[1] == min_steps)]
    # Filter to the reading-order-first target tile.
    best_moves.sort(key=(lambda x: x[2]))
    best_moves = [x for x in best_moves if (x[2] == best_moves[0][2])]
    # Filter to the reading-order-first starting step.
    best_moves.sort(key=(lambda x: x[0]))
    best_moves = [x for x in best_moves if (x[0] == best_moves[0][0])]
    return best_moves[0][0]
def bfs_move(inp, rowI, colI, hero, enemy):
    """Perform a breadth first search for each adjacent tile.

    Although not the most efficient, the approach is still fast and makes it
    easy to sort in such a way that satisfies all the conditions.
    """
    # Already in combat range: no move needed.
    if adjacent_enemy(inp, rowI, colI, enemy):
        return None
    # Candidate first steps are the open ('.') orthogonal neighbours.
    first_moves = [((rowI + 1), colI), ((rowI - 1), colI), (rowI, (colI - 1)), (rowI, (colI + 1))]
    first_moves = [x for x in first_moves if (inp[x[0]][x[1]] == '.')]
    best_moves = []
    # Run an independent BFS from each candidate first step.
    for move in first_moves:
        (r, c) = move
        if adjacent_enemy(inp, r, c, enemy):
            # One step already reaches an enemy.
            best_moves.append((move, 1, move))
            continue
        seen_coordinates = {(rowI, colI), (r, c)}
        stack = [((r + 1), c), ((r - 1), c), (r, (c - 1)), (r, (c + 1))]
        stack = [x for x in stack if ((inp[x[0]][x[1]] == '.') and ((x[0], x[1]) not in seen_coordinates))]
        i = 1  # step counter: frontier depth from the starting square
        run = True
        while run:
            i += 1
            new_stack = []
            for tile in stack:
                if (tile in seen_coordinates):
                    continue
                seen_coordinates.add(tile)
                (r, c) = tile  # deliberately shadows the first-step (r, c)
                if adjacent_enemy(inp, r, c, enemy):
                    # Found an in-range tile at depth i; finish this frontier
                    # (to collect same-depth ties) but stop expanding after.
                    best_moves.append((move, i, (r, c)))
                    run = False
                    continue
                new_tiles = [((r + 1), c), ((r - 1), c), (r, (c - 1)), (r, (c + 1))]
                new_stack += [x for x in new_tiles if ((inp[x[0]][x[1]] == '.') and ((x[0], x[1]) not in seen_coordinates))]
            stack = list(set(new_stack))
            if (not stack):
                run = False
    # Resolve ties across all candidate first steps.
    return get_best_move(best_moves)
7,900,044,209,021,287,000
Perform a breadth first search for each adjacent tile Although not the most efficient, the approach is still fast and makes it easy to sort in such a way that satisfies all the conditions
2018/15/helpme.py
bfs_move
mark-inderhees/aoc
python
def bfs_move(inp, rowI, colI, hero, enemy):
    """Perform a breadth first search from each adjacent open tile and pick
    the best first step toward the nearest enemy (ties resolved by
    get_best_move). Returns None when already adjacent to an enemy.
    """
    if adjacent_enemy(inp, rowI, colI, enemy):
        return None
    first_moves = [((rowI + 1), colI), ((rowI - 1), colI), (rowI, (colI - 1)), (rowI, (colI + 1))]
    first_moves = [x for x in first_moves if (inp[x[0]][x[1]] == '.')]
    best_moves = []
    for move in first_moves:
        (r, c) = move
        if adjacent_enemy(inp, r, c, enemy):
            best_moves.append((move, 1, move))
            continue
        seen_coordinates = {(rowI, colI), (r, c)}
        stack = [((r + 1), c), ((r - 1), c), (r, (c - 1)), (r, (c + 1))]
        stack = [x for x in stack if ((inp[x[0]][x[1]] == '.') and ((x[0], x[1]) not in seen_coordinates))]
        i = 1  # frontier depth from the starting square
        run = True
        while run:
            i += 1
            new_stack = []
            for tile in stack:
                if (tile in seen_coordinates):
                    continue
                seen_coordinates.add(tile)
                (r, c) = tile
                if adjacent_enemy(inp, r, c, enemy):
                    # Record the hit; finish this frontier, then stop.
                    best_moves.append((move, i, (r, c)))
                    run = False
                    continue
                new_tiles = [((r + 1), c), ((r - 1), c), (r, (c - 1)), (r, (c + 1))]
                new_stack += [x for x in new_tiles if ((inp[x[0]][x[1]] == '.') and ((x[0], x[1]) not in seen_coordinates))]
            stack = list(set(new_stack))
            if (not stack):
                run = False
    return get_best_move(best_moves)
def reset_moved_bools(inp):
    """Clear every unit's has-moved flag at the start of a new round."""
    for row_idx, row in enumerate(inp):
        for col_idx, cell in enumerate(row):
            # cell is either a terrain string or a (symbol, hp, moved)
            # tuple; indexing [0] works for both.
            if cell[0] in ('G', 'E'):
                inp[row_idx][col_idx] = (cell[0], cell[1], False)
    return inp
-8,201,011,777,282,888,000
Reset the third value in our character tuples, which tracks whether they've moved in a round
2018/15/helpme.py
reset_moved_bools
mark-inderhees/aoc
python
def reset_moved_bools(inp):
    # Reset the third value in each unit tuple (the has-moved flag) so
    # every 'G'/'E' unit can act again next round.
    for (rowI, row) in enumerate(inp):
        for (colI, col) in enumerate(row):
            if (col[0] in ['G', 'E']):
                char_tup = (col[0], col[1], False)
                inp[rowI][colI] = char_tup
    return inp
def equal_devision(length, div_num):
    """Split `length` into `div_num` integer parts.

    The fractional remainder of ``length / div_num`` is spread across the
    parts with an error-diffusion pass, and any residual shortfall is
    folded into the last element so the parts always sum to `length`.
    (Original docstring was in Japanese with the same meaning.)

    Raises:
        ValueError: if the parts fail to sum back to `length`.
    """
    quotient = length / div_num
    parts = [quotient] * div_num
    carry = 0.0
    for idx, value in enumerate(parts):
        # Accumulate the fractional part; emit an extra unit each time the
        # accumulated error reaches 1 (error diffusion).
        carry += math.modf(value)[0]
        if carry >= 1.0:
            carry -= 1.0
            parts[idx] = int(math.floor(value) + 1)
        else:
            parts[idx] = int(math.floor(value))
    shortfall = length - sum(parts)
    if shortfall != 0:
        parts[-1] += shortfall
    if length != sum(parts):
        raise ValueError('the output of equal_division() is abnormal.')
    return parts
-5,392,802,471,796,530,000
# 概要 length を div_num で分割する。 端数が出た場合は誤差拡散法を使って上手い具合に分散させる。
ty_lib/test_pattern_generator2.py
equal_devision
colour-science/sample_code
python
def equal_devision(length, div_num):
    """Split `length` into `div_num` integer parts, spreading the
    fractional remainder via error diffusion. (Translated from the
    original Japanese docstring.)
    """
    base = (length / div_num)
    ret_array = [base for x in range(div_num)]
    # Error-diffusion pass: carry the fractional parts forward and emit an
    # extra unit whenever the accumulated error reaches 1.
    diff = 0
    for idx in range(div_num):
        diff += math.modf(ret_array[idx])[0]
        if (diff >= 1.0):
            diff -= 1.0
            ret_array[idx] = int((math.floor(ret_array[idx]) + 1))
        else:
            ret_array[idx] = int(math.floor(ret_array[idx]))
    # Fold any residual shortfall into the last element.
    diff = (length - sum(ret_array))
    if (diff != 0):
        ret_array[(- 1)] += diff
    if (length != sum(ret_array)):
        raise ValueError('the output of equal_division() is abnormal.')
    return ret_array
def do_matrix(img, mtx):
    """Apply a 3x3 color matrix `mtx` to an RGB image `img`.

    Each output channel is a linear combination of the input R, G, B
    planes; the result keeps the input's shape. (Translated from the
    original Japanese docstring: "apply mtx to img".)
    """
    original_shape = img.shape
    red, green, blue = img[..., 0], img[..., 1], img[..., 2]
    out_r = red * mtx[0][0] + green * mtx[0][1] + blue * mtx[0][2]
    out_g = red * mtx[1][0] + green * mtx[1][1] + blue * mtx[1][2]
    out_b = red * mtx[2][0] + green * mtx[2][1] + blue * mtx[2][2]
    return np.dstack((out_r, out_g, out_b)).reshape(original_shape)
-4,858,280,892,068,223,000
img に対して mtx を適用する。
ty_lib/test_pattern_generator2.py
do_matrix
colour-science/sample_code
python
def do_matrix(img, mtx):
    """Apply a 3x3 color matrix `mtx` to an RGB image `img`, preserving shape."""
    base_shape = img.shape
    (r, g, b) = (img[(..., 0)], img[(..., 1)], img[(..., 2)])
    # Each output channel is a linear combination of the input channels.
    ro = (((r * mtx[0][0]) + (g * mtx[0][1])) + (b * mtx[0][2]))
    go = (((r * mtx[1][0]) + (g * mtx[1][1])) + (b * mtx[1][2]))
    bo = (((r * mtx[2][0]) + (g * mtx[2][1])) + (b * mtx[2][2]))
    out_img = np.dstack((ro, go, bo)).reshape(base_shape)
    return out_img
def _get_cmfs_xy():
    """Compute the xy values of the horseshoe-shaped spectral locus for the
    chromaticity diagram. (Translated from the original Japanese docstring.)

    Returns
    -------
    array_like
        xy coordinate for chromaticity diagram
    """
    # Color-matching functions selected by the module-level CMFS_NAME.
    cmf = CMFS.get(CMFS_NAME)
    d65_white = D65_WHITE
    cmf_xy = XYZ_to_xy(cmf.values, d65_white)
    return cmf_xy
1,856,355,623,035,113,500
xy色度図のプロットのための馬蹄形の外枠のxy値を求める。 Returns ------- array_like xy coordinate for chromaticity diagram
ty_lib/test_pattern_generator2.py
_get_cmfs_xy
colour-science/sample_code
python
def _get_cmfs_xy():
    """Return the xy coordinates of the spectral locus (the horseshoe outline)
    for chromaticity-diagram plotting. (Translated from Japanese.)
    """
    cmf = CMFS.get(CMFS_NAME)
    d65_white = D65_WHITE
    cmf_xy = XYZ_to_xy(cmf.values, d65_white)
    return cmf_xy
def get_primaries(name='ITU-R BT.2020'):
    """Look up the chromaticity coordinates of a color space's primaries.
    (Translated from the original Japanese docstring.)

    Parameters
    ----------
    name : str
        a name of the color space.

    Returns
    -------
    tuple
        (primaries, rgb): primaries is
        [[rx, ry], [gx, gy], [bx, by], [rx, ry]] — the red point is
        repeated so a plotted polygon closes — and rgb is the identity
        RGB triplet array.
    """
    base_primaries = RGB_COLOURSPACES[name].primaries
    # Repeat the first (red) primary so the triangle closes when plotted.
    closed_primaries = np.append(base_primaries, [base_primaries[0, :]], axis=0)
    pure_rgb = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])
    return (closed_primaries, pure_rgb)
-1,104,957,472,951,224,800
prmary color の座標を求める Parameters ---------- name : str a name of the color space. Returns ------- array_like prmaries. [[rx, ry], [gx, gy], [bx, by], [rx, ry]]
ty_lib/test_pattern_generator2.py
get_primaries
colour-science/sample_code
python
def get_primaries(name='ITU-R BT.2020'):
    """Return the primary chromaticity coordinates of the named color space.
    (Translated from the original Japanese docstring.)

    Parameters
    ----------
    name : str
        a name of the color space.

    Returns
    -------
    tuple
        (primaries, rgb) with primaries as
        [[rx, ry], [gx, gy], [bx, by], [rx, ry]].
    """
    primaries = RGB_COLOURSPACES[name].primaries
    # Close the triangle by repeating the first primary.
    primaries = np.append(primaries, [primaries[0, :]], axis=0)
    rgb = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])
    return (primaries, rgb)
def xy_to_rgb(xy, name='ITU-R BT.2020', normalize='maximum', specific=None): "\n xy値からRGB値を算出する。\n いい感じに正規化もしておく。\n\n Parameters\n ----------\n xy : array_like\n xy value.\n name : string\n color space name.\n normalize : string\n normalize method. You can select 'maximum', 'specific' or None.\n\n Returns\n -------\n array_like\n rgb value. the value is normalized.\n " illuminant_XYZ = D65_WHITE illuminant_RGB = D65_WHITE chromatic_adaptation_transform = 'CAT02' large_xyz_to_rgb_matrix = get_xyz_to_rgb_matrix(name) if (normalize == 'specific'): xyY = xy_to_xyY(xy) xyY[(..., 2)] = specific large_xyz = xyY_to_XYZ(xyY) else: large_xyz = xy_to_XYZ(xy) rgb = XYZ_to_RGB(large_xyz, illuminant_XYZ, illuminant_RGB, large_xyz_to_rgb_matrix, chromatic_adaptation_transform) '\n そのままだとビデオレベルが低かったりするので、\n 各ドット毎にRGB値を正規化&最大化する。必要であれば。\n ' if (normalize == 'maximum'): rgb = normalise_maximum(rgb, axis=(- 1)) else: if (np.sum((rgb > 1.0)) > 0): print('warning: over flow has occured at xy_to_rgb') if (np.sum((rgb < 0.0)) > 0): print('warning: under flow has occured at xy_to_rgb') rgb[(rgb < 0)] = 0 rgb[(rgb > 1.0)] = 1.0 return rgb
-2,746,982,639,432,358,000
xy値からRGB値を算出する。 いい感じに正規化もしておく。 Parameters ---------- xy : array_like xy value. name : string color space name. normalize : string normalize method. You can select 'maximum', 'specific' or None. Returns ------- array_like rgb value. the value is normalized.
ty_lib/test_pattern_generator2.py
xy_to_rgb
colour-science/sample_code
python
def xy_to_rgb(xy, name='ITU-R BT.2020', normalize='maximum', specific=None): "\n xy値からRGB値を算出する。\n いい感じに正規化もしておく。\n\n Parameters\n ----------\n xy : array_like\n xy value.\n name : string\n color space name.\n normalize : string\n normalize method. You can select 'maximum', 'specific' or None.\n\n Returns\n -------\n array_like\n rgb value. the value is normalized.\n " illuminant_XYZ = D65_WHITE illuminant_RGB = D65_WHITE chromatic_adaptation_transform = 'CAT02' large_xyz_to_rgb_matrix = get_xyz_to_rgb_matrix(name) if (normalize == 'specific'): xyY = xy_to_xyY(xy) xyY[(..., 2)] = specific large_xyz = xyY_to_XYZ(xyY) else: large_xyz = xy_to_XYZ(xy) rgb = XYZ_to_RGB(large_xyz, illuminant_XYZ, illuminant_RGB, large_xyz_to_rgb_matrix, chromatic_adaptation_transform) '\n そのままだとビデオレベルが低かったりするので、\n 各ドット毎にRGB値を正規化&最大化する。必要であれば。\n ' if (normalize == 'maximum'): rgb = normalise_maximum(rgb, axis=(- 1)) else: if (np.sum((rgb > 1.0)) > 0): print('warning: over flow has occured at xy_to_rgb') if (np.sum((rgb < 0.0)) > 0): print('warning: under flow has occured at xy_to_rgb') rgb[(rgb < 0)] = 0 rgb[(rgb > 1.0)] = 1.0 return rgb
def get_white_point(name): '\n white point を求める。CIE1931ベース。\n ' if (name != 'DCI-P3'): illuminant = RGB_COLOURSPACES[name].illuminant white_point = ILLUMINANTS[CMFS_NAME][illuminant] else: white_point = ILLUMINANTS[CMFS_NAME]['D65'] return white_point
8,828,608,032,943,556,000
white point を求める。CIE1931ベース。
ty_lib/test_pattern_generator2.py
get_white_point
colour-science/sample_code
python
def get_white_point(name): '\n \n ' if (name != 'DCI-P3'): illuminant = RGB_COLOURSPACES[name].illuminant white_point = ILLUMINANTS[CMFS_NAME][illuminant] else: white_point = ILLUMINANTS[CMFS_NAME]['D65'] return white_point
def get_secondaries(name='ITU-R BT.2020'): '\n secondary color の座標を求める\n\n Parameters\n ----------\n name : str\n a name of the color space.\n\n Returns\n -------\n array_like\n secondaries. the order is magenta, yellow, cyan.\n\n ' secondary_rgb = np.array([[1.0, 0.0, 1.0], [1.0, 1.0, 0.0], [0.0, 1.0, 1.0]]) illuminant_XYZ = D65_WHITE illuminant_RGB = D65_WHITE chromatic_adaptation_transform = 'CAT02' rgb_to_xyz_matrix = get_rgb_to_xyz_matrix(name) large_xyz = RGB_to_XYZ(secondary_rgb, illuminant_RGB, illuminant_XYZ, rgb_to_xyz_matrix, chromatic_adaptation_transform) xy = XYZ_to_xy(large_xyz, illuminant_XYZ) return (xy, secondary_rgb.reshape((3, 3)))
5,985,841,218,541,587,000
secondary color の座標を求める Parameters ---------- name : str a name of the color space. Returns ------- array_like secondaries. the order is magenta, yellow, cyan.
ty_lib/test_pattern_generator2.py
get_secondaries
colour-science/sample_code
python
def get_secondaries(name='ITU-R BT.2020'): '\n secondary color の座標を求める\n\n Parameters\n ----------\n name : str\n a name of the color space.\n\n Returns\n -------\n array_like\n secondaries. the order is magenta, yellow, cyan.\n\n ' secondary_rgb = np.array([[1.0, 0.0, 1.0], [1.0, 1.0, 0.0], [0.0, 1.0, 1.0]]) illuminant_XYZ = D65_WHITE illuminant_RGB = D65_WHITE chromatic_adaptation_transform = 'CAT02' rgb_to_xyz_matrix = get_rgb_to_xyz_matrix(name) large_xyz = RGB_to_XYZ(secondary_rgb, illuminant_RGB, illuminant_XYZ, rgb_to_xyz_matrix, chromatic_adaptation_transform) xy = XYZ_to_xy(large_xyz, illuminant_XYZ) return (xy, secondary_rgb.reshape((3, 3)))
def get_chromaticity_image(samples=1024, antialiasing=True, bg_color=0.9, xmin=0.0, xmax=1.0, ymin=0.0, ymax=1.0): '\n xy色度図の馬蹄形の画像を生成する\n\n Returns\n -------\n ndarray\n rgb image.\n ' '\n 色域設定。sRGBだと狭くて少し変だったのでBT.2020に設定。\n 若干色が薄くなるのが難点。暇があれば改良したい。\n ' color_space = models.ACES_CG_COLOURSPACE cmf_xy = _get_cmfs_xy() "\n 馬蹄の内外の判別をするために三角形で領域分割する(ドロネー図を作成)。\n ドロネー図を作れば後は外積計算で領域の内外を判別できる(たぶん)。\n\n なお、作成したドロネー図は以下のコードでプロット可能。\n 1点補足しておくと、```plt.triplot``` の第三引数は、\n 第一、第二引数から三角形を作成するための **インデックス** のリスト\n になっている。[[0, 1, 2], [2, 4, 3], ...]的な。\n\n ```python\n plt.figure()\n plt.triplot(xy[:, 0], xy[:, 1], triangulation.simplices.copy(), '-o')\n plt.title('triplot of Delaunay triangulation')\n plt.show()\n ```\n " triangulation = Delaunay(cmf_xy) '\n ```triangulation.find_simplex()``` で xy がどのインデックスの領域か\n 調べることができる。戻り値が ```-1``` の場合は領域に含まれないため、\n 0以下のリストで領域判定の mask を作ることができる。\n ' (xx, yy) = np.meshgrid(np.linspace(xmin, xmax, samples), np.linspace(ymax, ymin, samples)) xy = np.dstack((xx, yy)) mask = (triangulation.find_simplex(xy) < 0).astype(np.float) if antialiasing: kernel = np.array([[0, 1, 0], [1, 2, 1], [0, 1, 0]]).astype(np.float) kernel /= np.sum(kernel) mask = convolve(mask, kernel) mask = (1 - mask[:, :, np.newaxis]) illuminant_XYZ = D65_WHITE illuminant_RGB = color_space.whitepoint chromatic_adaptation_transform = 'XYZ Scaling' large_xyz_to_rgb_matrix = color_space.XYZ_to_RGB_matrix xy[(xy == 0.0)] = 1.0 large_xyz = xy_to_XYZ(xy) rgb = XYZ_to_RGB(large_xyz, illuminant_XYZ, illuminant_RGB, large_xyz_to_rgb_matrix, chromatic_adaptation_transform) '\n そのままだとビデオレベルが低かったりするので、\n 各ドット毎にRGB値を正規化&最大化する。\n ' rgb[(rgb == 0)] = 1.0 rgb = normalise_maximum(rgb, axis=(- 1)) mask_rgb = np.dstack((mask, mask, mask)) rgb *= mask_rgb bg_rgb = np.ones_like(rgb) bg_rgb *= ((1 - mask_rgb) * bg_color) rgb += bg_rgb rgb = (rgb ** (1 / 2.2)) return rgb
9,152,031,279,301,552,000
xy色度図の馬蹄形の画像を生成する Returns ------- ndarray rgb image.
ty_lib/test_pattern_generator2.py
get_chromaticity_image
colour-science/sample_code
python
def get_chromaticity_image(samples=1024, antialiasing=True, bg_color=0.9, xmin=0.0, xmax=1.0, ymin=0.0, ymax=1.0): '\n xy色度図の馬蹄形の画像を生成する\n\n Returns\n -------\n ndarray\n rgb image.\n ' '\n 色域設定。sRGBだと狭くて少し変だったのでBT.2020に設定。\n 若干色が薄くなるのが難点。暇があれば改良したい。\n ' color_space = models.ACES_CG_COLOURSPACE cmf_xy = _get_cmfs_xy() "\n 馬蹄の内外の判別をするために三角形で領域分割する(ドロネー図を作成)。\n ドロネー図を作れば後は外積計算で領域の内外を判別できる(たぶん)。\n\n なお、作成したドロネー図は以下のコードでプロット可能。\n 1点補足しておくと、```plt.triplot``` の第三引数は、\n 第一、第二引数から三角形を作成するための **インデックス** のリスト\n になっている。[[0, 1, 2], [2, 4, 3], ...]的な。\n\n ```python\n plt.figure()\n plt.triplot(xy[:, 0], xy[:, 1], triangulation.simplices.copy(), '-o')\n plt.title('triplot of Delaunay triangulation')\n plt.show()\n ```\n " triangulation = Delaunay(cmf_xy) '\n ```triangulation.find_simplex()``` で xy がどのインデックスの領域か\n 調べることができる。戻り値が ```-1``` の場合は領域に含まれないため、\n 0以下のリストで領域判定の mask を作ることができる。\n ' (xx, yy) = np.meshgrid(np.linspace(xmin, xmax, samples), np.linspace(ymax, ymin, samples)) xy = np.dstack((xx, yy)) mask = (triangulation.find_simplex(xy) < 0).astype(np.float) if antialiasing: kernel = np.array([[0, 1, 0], [1, 2, 1], [0, 1, 0]]).astype(np.float) kernel /= np.sum(kernel) mask = convolve(mask, kernel) mask = (1 - mask[:, :, np.newaxis]) illuminant_XYZ = D65_WHITE illuminant_RGB = color_space.whitepoint chromatic_adaptation_transform = 'XYZ Scaling' large_xyz_to_rgb_matrix = color_space.XYZ_to_RGB_matrix xy[(xy == 0.0)] = 1.0 large_xyz = xy_to_XYZ(xy) rgb = XYZ_to_RGB(large_xyz, illuminant_XYZ, illuminant_RGB, large_xyz_to_rgb_matrix, chromatic_adaptation_transform) '\n そのままだとビデオレベルが低かったりするので、\n 各ドット毎にRGB値を正規化&最大化する。\n ' rgb[(rgb == 0)] = 1.0 rgb = normalise_maximum(rgb, axis=(- 1)) mask_rgb = np.dstack((mask, mask, mask)) rgb *= mask_rgb bg_rgb = np.ones_like(rgb) bg_rgb *= ((1 - mask_rgb) * bg_color) rgb += bg_rgb rgb = (rgb ** (1 / 2.2)) return rgb
def get_csf_color_image(width=640, height=480, lv1=np.uint16(((np.array([1.0, 1.0, 1.0]) * 1023) * 64)), lv2=np.uint16(((np.array([1.0, 1.0, 1.0]) * 512) * 64)), stripe_num=18): '\n 長方形を複数個ズラして重ねることでCSFパターンっぽいのを作る。\n 入力信号レベルは16bitに限定する。\n\n Parameters\n ----------\n width : numeric.\n width of the pattern image.\n height : numeric.\n height of the pattern image.\n lv1 : numeric\n video level 1. this value must be 10bit.\n lv2 : numeric\n video level 2. this value must be 10bit.\n stripe_num : numeric\n number of the stripe.\n\n Returns\n -------\n array_like\n a cms pattern image.\n ' width_list = equal_devision(width, stripe_num) height_list = equal_devision(height, stripe_num) h_pos_list = equal_devision((width // 2), stripe_num) v_pos_list = equal_devision((height // 2), stripe_num) lv1_16bit = lv1 lv2_16bit = lv2 img = np.zeros((height, width, 3), dtype=np.uint16) width_temp = width height_temp = height h_pos_temp = 0 v_pos_temp = 0 for idx in range(stripe_num): lv = (lv1_16bit if ((idx % 2) == 0) else lv2_16bit) temp_img = np.ones((height_temp, width_temp, 3), dtype=np.uint16) temp_img[:, :] = lv ed_pos_h = (h_pos_temp + width_temp) ed_pos_v = (v_pos_temp + height_temp) img[v_pos_temp:ed_pos_v, h_pos_temp:ed_pos_h] = temp_img width_temp -= width_list[((stripe_num - 1) - idx)] height_temp -= height_list[((stripe_num - 1) - idx)] h_pos_temp += h_pos_list[idx] v_pos_temp += v_pos_list[idx] return img
-7,187,334,406,667,908,000
長方形を複数個ズラして重ねることでCSFパターンっぽいのを作る。 入力信号レベルは16bitに限定する。 Parameters ---------- width : numeric. width of the pattern image. height : numeric. height of the pattern image. lv1 : numeric video level 1. this value must be 10bit. lv2 : numeric video level 2. this value must be 10bit. stripe_num : numeric number of the stripe. Returns ------- array_like a cms pattern image.
ty_lib/test_pattern_generator2.py
get_csf_color_image
colour-science/sample_code
python
def get_csf_color_image(width=640, height=480, lv1=np.uint16(((np.array([1.0, 1.0, 1.0]) * 1023) * 64)), lv2=np.uint16(((np.array([1.0, 1.0, 1.0]) * 512) * 64)), stripe_num=18): '\n 長方形を複数個ズラして重ねることでCSFパターンっぽいのを作る。\n 入力信号レベルは16bitに限定する。\n\n Parameters\n ----------\n width : numeric.\n width of the pattern image.\n height : numeric.\n height of the pattern image.\n lv1 : numeric\n video level 1. this value must be 10bit.\n lv2 : numeric\n video level 2. this value must be 10bit.\n stripe_num : numeric\n number of the stripe.\n\n Returns\n -------\n array_like\n a cms pattern image.\n ' width_list = equal_devision(width, stripe_num) height_list = equal_devision(height, stripe_num) h_pos_list = equal_devision((width // 2), stripe_num) v_pos_list = equal_devision((height // 2), stripe_num) lv1_16bit = lv1 lv2_16bit = lv2 img = np.zeros((height, width, 3), dtype=np.uint16) width_temp = width height_temp = height h_pos_temp = 0 v_pos_temp = 0 for idx in range(stripe_num): lv = (lv1_16bit if ((idx % 2) == 0) else lv2_16bit) temp_img = np.ones((height_temp, width_temp, 3), dtype=np.uint16) temp_img[:, :] = lv ed_pos_h = (h_pos_temp + width_temp) ed_pos_v = (v_pos_temp + height_temp) img[v_pos_temp:ed_pos_v, h_pos_temp:ed_pos_h] = temp_img width_temp -= width_list[((stripe_num - 1) - idx)] height_temp -= height_list[((stripe_num - 1) - idx)] h_pos_temp += h_pos_list[idx] v_pos_temp += v_pos_list[idx] return img
def plot_xyY_color_space(name='ITU-R BT.2020', samples=1024, antialiasing=True): '\n SONY の HDR説明資料にあるような xyY の図を作る。\n\n Parameters\n ----------\n name : str\n name of the target color space.\n\n Returns\n -------\n None\n\n ' (primary_xy, _) = get_primaries(name=name) triangulation = Delaunay(primary_xy) (xx, yy) = np.meshgrid(np.linspace(0, 1, samples), np.linspace(1, 0, samples)) xy = np.dstack((xx, yy)) mask = (triangulation.find_simplex(xy) < 0).astype(np.float) if antialiasing: kernel = np.array([[0, 1, 0], [1, 2, 1], [0, 1, 0]]).astype(np.float) kernel /= np.sum(kernel) mask = convolve(mask, kernel) mask = (1 - mask[:, :, np.newaxis]) illuminant_XYZ = D65_WHITE illuminant_RGB = RGB_COLOURSPACES[name].whitepoint chromatic_adaptation_transform = 'CAT02' large_xyz_to_rgb_matrix = get_xyz_to_rgb_matrix(name) rgb_to_large_xyz_matrix = get_rgb_to_xyz_matrix(name) large_xyz = xy_to_XYZ(xy) rgb = XYZ_to_RGB(large_xyz, illuminant_XYZ, illuminant_RGB, large_xyz_to_rgb_matrix, chromatic_adaptation_transform) '\n そのままだとビデオレベルが低かったりするので、\n 各ドット毎にRGB値を正規化&最大化する。\n ' rgb_org = normalise_maximum(rgb, axis=(- 1)) mask_rgb = np.dstack((mask, mask, mask)) rgb = (rgb_org * mask_rgb) rgba = np.dstack((rgb, mask)) large_xyz2 = RGB_to_XYZ(rgb, illuminant_RGB, illuminant_XYZ, rgb_to_large_xyz_matrix, chromatic_adaptation_transform) large_y = (large_xyz2[(..., 1)] * 1000) large_y[(large_y < 1)] = 1.0 fig = plt.figure() ax = fig.add_subplot(111, projection='3d') ax.plot_surface(xy[(..., 0)], xy[(..., 1)], np.log10(large_y), rcount=64, ccount=64, facecolors=rgb_org) ax.set_xlabel('x') ax.set_ylabel('y') ax.set_zlabel('Y') ax.set_zticks([0, 1, 2, 3]) ax.set_zticklabels([1, 10, 100, 1000]) cie1931_rgb = get_chromaticity_image(samples=samples, bg_color=0.0) alpha = np.zeros_like(cie1931_rgb[(..., 0)]) rgb_sum = np.sum(cie1931_rgb, axis=(- 1)) alpha[(rgb_sum > 1e-05)] = 1 cie1931_rgb = np.dstack((cie1931_rgb[(..., 0)], cie1931_rgb[(..., 1)], cie1931_rgb[(..., 2)], alpha)) zz = 
np.zeros_like(xy[(..., 0)]) ax.plot_surface(xy[(..., 0)], xy[(..., 1)], zz, facecolors=cie1931_rgb) plt.show()
2,697,745,789,533,632,000
SONY の HDR説明資料にあるような xyY の図を作る。 Parameters ---------- name : str name of the target color space. Returns ------- None
ty_lib/test_pattern_generator2.py
plot_xyY_color_space
colour-science/sample_code
python
def plot_xyY_color_space(name='ITU-R BT.2020', samples=1024, antialiasing=True): '\n SONY の HDR説明資料にあるような xyY の図を作る。\n\n Parameters\n ----------\n name : str\n name of the target color space.\n\n Returns\n -------\n None\n\n ' (primary_xy, _) = get_primaries(name=name) triangulation = Delaunay(primary_xy) (xx, yy) = np.meshgrid(np.linspace(0, 1, samples), np.linspace(1, 0, samples)) xy = np.dstack((xx, yy)) mask = (triangulation.find_simplex(xy) < 0).astype(np.float) if antialiasing: kernel = np.array([[0, 1, 0], [1, 2, 1], [0, 1, 0]]).astype(np.float) kernel /= np.sum(kernel) mask = convolve(mask, kernel) mask = (1 - mask[:, :, np.newaxis]) illuminant_XYZ = D65_WHITE illuminant_RGB = RGB_COLOURSPACES[name].whitepoint chromatic_adaptation_transform = 'CAT02' large_xyz_to_rgb_matrix = get_xyz_to_rgb_matrix(name) rgb_to_large_xyz_matrix = get_rgb_to_xyz_matrix(name) large_xyz = xy_to_XYZ(xy) rgb = XYZ_to_RGB(large_xyz, illuminant_XYZ, illuminant_RGB, large_xyz_to_rgb_matrix, chromatic_adaptation_transform) '\n そのままだとビデオレベルが低かったりするので、\n 各ドット毎にRGB値を正規化&最大化する。\n ' rgb_org = normalise_maximum(rgb, axis=(- 1)) mask_rgb = np.dstack((mask, mask, mask)) rgb = (rgb_org * mask_rgb) rgba = np.dstack((rgb, mask)) large_xyz2 = RGB_to_XYZ(rgb, illuminant_RGB, illuminant_XYZ, rgb_to_large_xyz_matrix, chromatic_adaptation_transform) large_y = (large_xyz2[(..., 1)] * 1000) large_y[(large_y < 1)] = 1.0 fig = plt.figure() ax = fig.add_subplot(111, projection='3d') ax.plot_surface(xy[(..., 0)], xy[(..., 1)], np.log10(large_y), rcount=64, ccount=64, facecolors=rgb_org) ax.set_xlabel('x') ax.set_ylabel('y') ax.set_zlabel('Y') ax.set_zticks([0, 1, 2, 3]) ax.set_zticklabels([1, 10, 100, 1000]) cie1931_rgb = get_chromaticity_image(samples=samples, bg_color=0.0) alpha = np.zeros_like(cie1931_rgb[(..., 0)]) rgb_sum = np.sum(cie1931_rgb, axis=(- 1)) alpha[(rgb_sum > 1e-05)] = 1 cie1931_rgb = np.dstack((cie1931_rgb[(..., 0)], cie1931_rgb[(..., 1)], cie1931_rgb[(..., 2)], alpha)) zz = 
np.zeros_like(xy[(..., 0)]) ax.plot_surface(xy[(..., 0)], xy[(..., 1)], zz, facecolors=cie1931_rgb) plt.show()
def get_3d_grid_cube_format(grid_num=4): '\n # 概要\n (0, 0, 0), (1, 0, 0), (0, 1, 0), (1, 1, 0), (0, 0, 1), ...\n みたいな配列を返す。\n CUBE形式の3DLUTを作成する時に便利。\n ' base = np.linspace(0, 1, grid_num) ones_x = np.ones((grid_num, grid_num, 1)) ones_y = np.ones((grid_num, 1, grid_num)) ones_z = np.ones((1, grid_num, grid_num)) r_3d = (base[np.newaxis, np.newaxis, :] * ones_x) g_3d = (base[np.newaxis, :, np.newaxis] * ones_y) b_3d = (base[:, np.newaxis, np.newaxis] * ones_z) r_3d = r_3d.flatten() g_3d = g_3d.flatten() b_3d = b_3d.flatten() return np.dstack((r_3d, g_3d, b_3d))
2,705,412,234,827,614,700
# 概要 (0, 0, 0), (1, 0, 0), (0, 1, 0), (1, 1, 0), (0, 0, 1), ... みたいな配列を返す。 CUBE形式の3DLUTを作成する時に便利。
ty_lib/test_pattern_generator2.py
get_3d_grid_cube_format
colour-science/sample_code
python
def get_3d_grid_cube_format(grid_num=4): '\n # 概要\n (0, 0, 0), (1, 0, 0), (0, 1, 0), (1, 1, 0), (0, 0, 1), ...\n みたいな配列を返す。\n CUBE形式の3DLUTを作成する時に便利。\n ' base = np.linspace(0, 1, grid_num) ones_x = np.ones((grid_num, grid_num, 1)) ones_y = np.ones((grid_num, 1, grid_num)) ones_z = np.ones((1, grid_num, grid_num)) r_3d = (base[np.newaxis, np.newaxis, :] * ones_x) g_3d = (base[np.newaxis, :, np.newaxis] * ones_y) b_3d = (base[:, np.newaxis, np.newaxis] * ones_z) r_3d = r_3d.flatten() g_3d = g_3d.flatten() b_3d = b_3d.flatten() return np.dstack((r_3d, g_3d, b_3d))
def gen_step_gradation(width=1024, height=128, step_num=17, bit_depth=10, color=(1.0, 1.0, 1.0), direction='h', debug=False): "\n # 概要\n 階段状に変化するグラデーションパターンを作る。\n なお、引数の調整により正確に1階調ずつ変化するパターンも作成可能。\n\n # 注意事項\n 正確に1階調ずつ変化するグラデーションを作る場合は\n ```step_num = (2 ** bit_depth) + 1```\n となるようにパラメータを指定すること。具体例は以下のExample参照。\n\n # Example\n ```\n grad_8 = gen_step_gradation(width=grad_width, height=grad_height,\n step_num=257, bit_depth=8,\n color=(1.0, 1.0, 1.0), direction='h')\n\n grad_10 = gen_step_gradation(width=grad_width, height=grad_height,\n step_num=1025, bit_depth=10,\n color=(1.0, 1.0, 1.0), direction='h')\n ```\n " max = (2 ** bit_depth) if (direction == 'h'): pass else: temp = height height = width width = temp if ((max + 1) != step_num): '\n 1階調ずつの増加では無いパターン。\n 末尾のデータが 256 や 1024 になるため -1 する。\n ' val_list = np.linspace(0, max, step_num) val_list[(- 1)] -= 1 else: '\n 正確に1階調ずつ変化するパターン。\n 末尾のデータが 256 や 1024 になるため除外する。\n ' val_list = np.linspace(0, max, step_num)[0:(- 1)] step_num -= 1 diff = (val_list[1:] - val_list[0:(- 1)]) if (diff == 1).all(): pass else: raise ValueError('calculated value is invalid.') step_length_list = equal_devision(width, step_num) step_bar_list = [] for (step_idx, length) in enumerate(step_length_list): step = [((np.ones(length) * color[c_idx]) * val_list[step_idx]) for c_idx in range(3)] if (direction == 'h'): step = np.dstack(step) step_bar_list.append(step) step_bar = np.hstack(step_bar_list) else: step = np.dstack(step).reshape((length, 1, 3)) step_bar_list.append(step) step_bar = np.vstack(step_bar_list) if (direction == 'h'): img = (step_bar * np.ones((height, 1, 3))) else: img = (step_bar * np.ones((1, height, 3))) if debug: preview_image(img, 'rgb') return img
-6,042,160,212,514,663,000
# 概要 階段状に変化するグラデーションパターンを作る。 なお、引数の調整により正確に1階調ずつ変化するパターンも作成可能。 # 注意事項 正確に1階調ずつ変化するグラデーションを作る場合は ```step_num = (2 ** bit_depth) + 1``` となるようにパラメータを指定すること。具体例は以下のExample参照。 # Example ``` grad_8 = gen_step_gradation(width=grad_width, height=grad_height, step_num=257, bit_depth=8, color=(1.0, 1.0, 1.0), direction='h') grad_10 = gen_step_gradation(width=grad_width, height=grad_height, step_num=1025, bit_depth=10, color=(1.0, 1.0, 1.0), direction='h') ```
ty_lib/test_pattern_generator2.py
gen_step_gradation
colour-science/sample_code
python
def gen_step_gradation(width=1024, height=128, step_num=17, bit_depth=10, color=(1.0, 1.0, 1.0), direction='h', debug=False): "\n # 概要\n 階段状に変化するグラデーションパターンを作る。\n なお、引数の調整により正確に1階調ずつ変化するパターンも作成可能。\n\n # 注意事項\n 正確に1階調ずつ変化するグラデーションを作る場合は\n ```step_num = (2 ** bit_depth) + 1```\n となるようにパラメータを指定すること。具体例は以下のExample参照。\n\n # Example\n ```\n grad_8 = gen_step_gradation(width=grad_width, height=grad_height,\n step_num=257, bit_depth=8,\n color=(1.0, 1.0, 1.0), direction='h')\n\n grad_10 = gen_step_gradation(width=grad_width, height=grad_height,\n step_num=1025, bit_depth=10,\n color=(1.0, 1.0, 1.0), direction='h')\n ```\n " max = (2 ** bit_depth) if (direction == 'h'): pass else: temp = height height = width width = temp if ((max + 1) != step_num): '\n 1階調ずつの増加では無いパターン。\n 末尾のデータが 256 や 1024 になるため -1 する。\n ' val_list = np.linspace(0, max, step_num) val_list[(- 1)] -= 1 else: '\n 正確に1階調ずつ変化するパターン。\n 末尾のデータが 256 や 1024 になるため除外する。\n ' val_list = np.linspace(0, max, step_num)[0:(- 1)] step_num -= 1 diff = (val_list[1:] - val_list[0:(- 1)]) if (diff == 1).all(): pass else: raise ValueError('calculated value is invalid.') step_length_list = equal_devision(width, step_num) step_bar_list = [] for (step_idx, length) in enumerate(step_length_list): step = [((np.ones(length) * color[c_idx]) * val_list[step_idx]) for c_idx in range(3)] if (direction == 'h'): step = np.dstack(step) step_bar_list.append(step) step_bar = np.hstack(step_bar_list) else: step = np.dstack(step).reshape((length, 1, 3)) step_bar_list.append(step) step_bar = np.vstack(step_bar_list) if (direction == 'h'): img = (step_bar * np.ones((height, 1, 3))) else: img = (step_bar * np.ones((1, height, 3))) if debug: preview_image(img, 'rgb') return img
def merge(img_a, img_b, pos=(0, 0)): '\n img_a に img_b をマージする。\n img_a にデータを上書きする。\n\n pos = (horizontal_st, vertical_st)\n ' b_width = img_b.shape[1] b_height = img_b.shape[0] img_a[pos[1]:(b_height + pos[1]), pos[0]:(b_width + pos[0])] = img_b
2,923,293,055,995,527,000
img_a に img_b をマージする。 img_a にデータを上書きする。 pos = (horizontal_st, vertical_st)
ty_lib/test_pattern_generator2.py
merge
colour-science/sample_code
python
def merge(img_a, img_b, pos=(0, 0)): '\n img_a に img_b をマージする。\n img_a にデータを上書きする。\n\n pos = (horizontal_st, vertical_st)\n ' b_width = img_b.shape[1] b_height = img_b.shape[0] img_a[pos[1]:(b_height + pos[1]), pos[0]:(b_width + pos[0])] = img_b
def merge_with_alpha(bg_img, fg_img, tf_str=tf.SRGB, pos=(0, 0)): '\n 合成する。\n\n Parameters\n ----------\n bg_img : array_like(float, 3-channel)\n image data.\n fg_img : array_like(float, 4-channel)\n image data\n tf : strings\n transfer function\n pos : list(int)\n (pos_h, pos_v)\n ' f_width = fg_img.shape[1] f_height = fg_img.shape[0] bg_merge_area = bg_img[pos[1]:(f_height + pos[1]), pos[0]:(f_width + pos[0])] bg_linear = tf.eotf_to_luminance(bg_merge_area, tf_str) fg_linear = tf.eotf_to_luminance(fg_img, tf_str) alpha = (fg_linear[:, :, 3:] / tf.PEAK_LUMINANCE[tf_str]) out_linear = (((1 - alpha) * bg_linear) + fg_linear[:, :, :(- 1)]) out_merge_area = tf.oetf_from_luminance(out_linear, tf_str) bg_img[pos[1]:(f_height + pos[1]), pos[0]:(f_width + pos[0])] = out_merge_area return bg_img
-8,414,558,190,074,198,000
合成する。 Parameters ---------- bg_img : array_like(float, 3-channel) image data. fg_img : array_like(float, 4-channel) image data tf : strings transfer function pos : list(int) (pos_h, pos_v)
ty_lib/test_pattern_generator2.py
merge_with_alpha
colour-science/sample_code
python
def merge_with_alpha(bg_img, fg_img, tf_str=tf.SRGB, pos=(0, 0)): '\n 合成する。\n\n Parameters\n ----------\n bg_img : array_like(float, 3-channel)\n image data.\n fg_img : array_like(float, 4-channel)\n image data\n tf : strings\n transfer function\n pos : list(int)\n (pos_h, pos_v)\n ' f_width = fg_img.shape[1] f_height = fg_img.shape[0] bg_merge_area = bg_img[pos[1]:(f_height + pos[1]), pos[0]:(f_width + pos[0])] bg_linear = tf.eotf_to_luminance(bg_merge_area, tf_str) fg_linear = tf.eotf_to_luminance(fg_img, tf_str) alpha = (fg_linear[:, :, 3:] / tf.PEAK_LUMINANCE[tf_str]) out_linear = (((1 - alpha) * bg_linear) + fg_linear[:, :, :(- 1)]) out_merge_area = tf.oetf_from_luminance(out_linear, tf_str) bg_img[pos[1]:(f_height + pos[1]), pos[0]:(f_width + pos[0])] = out_merge_area return bg_img
def dot_pattern(dot_size=4, repeat=4, color=np.array([1.0, 1.0, 1.0])): '\n dot pattern 作る。\n\n Parameters\n ----------\n dot_size : integer\n dot size.\n repeat : integer\n The number of high-low pairs.\n color : array_like\n color value.\n\n Returns\n -------\n array_like\n dot pattern image.\n\n ' pixel_num = ((dot_size * 2) * repeat) even_logic = [(((np.arange(pixel_num) % (dot_size * 2)) - dot_size) < 0)] even_logic = np.dstack((even_logic, even_logic, even_logic)) odd_logic = np.logical_not(even_logic) color = color.reshape((1, 1, 3)) even_line = ((np.ones((1, pixel_num, 3)) * even_logic) * color) odd_line = ((np.ones((1, pixel_num, 3)) * odd_logic) * color) even_block = np.repeat(even_line, dot_size, axis=0) odd_block = np.repeat(odd_line, dot_size, axis=0) pair_block = np.vstack((even_block, odd_block)) img = np.vstack([pair_block for x in range(repeat)]) return img
-1,414,203,125,807,372,500
dot pattern 作る。 Parameters ---------- dot_size : integer dot size. repeat : integer The number of high-low pairs. color : array_like color value. Returns ------- array_like dot pattern image.
ty_lib/test_pattern_generator2.py
dot_pattern
colour-science/sample_code
python
def dot_pattern(dot_size=4, repeat=4, color=np.array([1.0, 1.0, 1.0])): '\n dot pattern 作る。\n\n Parameters\n ----------\n dot_size : integer\n dot size.\n repeat : integer\n The number of high-low pairs.\n color : array_like\n color value.\n\n Returns\n -------\n array_like\n dot pattern image.\n\n ' pixel_num = ((dot_size * 2) * repeat) even_logic = [(((np.arange(pixel_num) % (dot_size * 2)) - dot_size) < 0)] even_logic = np.dstack((even_logic, even_logic, even_logic)) odd_logic = np.logical_not(even_logic) color = color.reshape((1, 1, 3)) even_line = ((np.ones((1, pixel_num, 3)) * even_logic) * color) odd_line = ((np.ones((1, pixel_num, 3)) * odd_logic) * color) even_block = np.repeat(even_line, dot_size, axis=0) odd_block = np.repeat(odd_line, dot_size, axis=0) pair_block = np.vstack((even_block, odd_block)) img = np.vstack([pair_block for x in range(repeat)]) return img
def complex_dot_pattern(kind_num=3, whole_repeat=2, fg_color=np.array([1.0, 1.0, 1.0]), bg_color=np.array([0.15, 0.15, 0.15])): '\n dot pattern 作る。\n\n Parameters\n ----------\n kind_num : integer\n 作成するドットサイズの種類。\n 例えば、kind_num=3 ならば、1dot, 2dot, 4dot のパターンを作成。\n whole_repeat : integer\n 異なる複数種類のドットパターンの組数。\n 例えば、kind_num=3, whole_repeat=2 ならば、\n 1dot, 2dot, 4dot のパターンを水平・垂直に2組作る。\n fg_color : array_like\n foreground color value.\n bg_color : array_like\n background color value.\n reduce : bool\n HDRテストパターンの3840x2160専用。縦横を半分にする。\n\n Returns\n -------\n array_like\n dot pattern image.\n\n ' max_dot_width = (2 ** kind_num) img_list = [] for size_idx in range(kind_num)[::(- 1)]: dot_size = (2 ** size_idx) repeat = (max_dot_width // dot_size) dot_img = dot_pattern(dot_size, repeat, fg_color) img_list.append(dot_img) img_list.append((np.ones_like(dot_img) * bg_color)) line_upper_img = np.hstack(img_list) line_upper_img = np.hstack([line_upper_img for x in range(whole_repeat)]) line_lower_img = line_upper_img.copy()[:, ::(- 1), :] h_unit_img = np.vstack((line_upper_img, line_lower_img)) img = np.vstack([h_unit_img for x in range((kind_num * whole_repeat))]) return img
7,652,356,003,343,272,000
dot pattern 作る。 Parameters ---------- kind_num : integer 作成するドットサイズの種類。 例えば、kind_num=3 ならば、1dot, 2dot, 4dot のパターンを作成。 whole_repeat : integer 異なる複数種類のドットパターンの組数。 例えば、kind_num=3, whole_repeat=2 ならば、 1dot, 2dot, 4dot のパターンを水平・垂直に2組作る。 fg_color : array_like foreground color value. bg_color : array_like background color value. reduce : bool HDRテストパターンの3840x2160専用。縦横を半分にする。 Returns ------- array_like dot pattern image.
ty_lib/test_pattern_generator2.py
complex_dot_pattern
colour-science/sample_code
python
def complex_dot_pattern(kind_num=3, whole_repeat=2, fg_color=np.array([1.0, 1.0, 1.0]), bg_color=np.array([0.15, 0.15, 0.15])): '\n dot pattern 作る。\n\n Parameters\n ----------\n kind_num : integer\n 作成するドットサイズの種類。\n 例えば、kind_num=3 ならば、1dot, 2dot, 4dot のパターンを作成。\n whole_repeat : integer\n 異なる複数種類のドットパターンの組数。\n 例えば、kind_num=3, whole_repeat=2 ならば、\n 1dot, 2dot, 4dot のパターンを水平・垂直に2組作る。\n fg_color : array_like\n foreground color value.\n bg_color : array_like\n background color value.\n reduce : bool\n HDRテストパターンの3840x2160専用。縦横を半分にする。\n\n Returns\n -------\n array_like\n dot pattern image.\n\n ' max_dot_width = (2 ** kind_num) img_list = [] for size_idx in range(kind_num)[::(- 1)]: dot_size = (2 ** size_idx) repeat = (max_dot_width // dot_size) dot_img = dot_pattern(dot_size, repeat, fg_color) img_list.append(dot_img) img_list.append((np.ones_like(dot_img) * bg_color)) line_upper_img = np.hstack(img_list) line_upper_img = np.hstack([line_upper_img for x in range(whole_repeat)]) line_lower_img = line_upper_img.copy()[:, ::(- 1), :] h_unit_img = np.vstack((line_upper_img, line_lower_img)) img = np.vstack([h_unit_img for x in range((kind_num * whole_repeat))]) return img
def make_csf_color_image(width=640, height=640, lv1=np.array([940, 940, 940], dtype=np.uint16), lv2=np.array([1023, 1023, 1023], dtype=np.uint16), stripe_num=6): '\n 長方形を複数個ズラして重ねることでCSFパターンっぽいのを作る。\n 入力信号レベルは10bitに限定する。\n\n Parameters\n ----------\n width : numeric.\n width of the pattern image.\n height : numeric.\n height of the pattern image.\n lv1 : array_like\n video level 1. this value must be 10bit.\n lv2 : array_like\n video level 2. this value must be 10bit.\n stripe_num : numeric\n number of the stripe.\n\n Returns\n -------\n array_like\n a cms pattern image.\n ' width_list = equal_devision(width, stripe_num) height_list = equal_devision(height, stripe_num) h_pos_list = equal_devision((width // 2), stripe_num) v_pos_list = equal_devision((height // 2), stripe_num) img = np.zeros((height, width, 3), dtype=np.uint16) width_temp = width height_temp = height h_pos_temp = 0 v_pos_temp = 0 for idx in range(stripe_num): lv = (lv1 if ((idx % 2) == 0) else lv2) temp_img = np.ones((height_temp, width_temp, 3), dtype=np.uint16) temp_img = (temp_img * lv.reshape((1, 1, 3))) ed_pos_h = (h_pos_temp + width_temp) ed_pos_v = (v_pos_temp + height_temp) img[v_pos_temp:ed_pos_v, h_pos_temp:ed_pos_h] = temp_img width_temp -= width_list[((stripe_num - 1) - idx)] height_temp -= height_list[((stripe_num - 1) - idx)] h_pos_temp += h_pos_list[idx] v_pos_temp += v_pos_list[idx] return img
6,239,938,353,410,582,000
長方形を複数個ズラして重ねることでCSFパターンっぽいのを作る。 入力信号レベルは10bitに限定する。 Parameters ---------- width : numeric. width of the pattern image. height : numeric. height of the pattern image. lv1 : array_like video level 1. this value must be 10bit. lv2 : array_like video level 2. this value must be 10bit. stripe_num : numeric number of the stripe. Returns ------- array_like a cms pattern image.
ty_lib/test_pattern_generator2.py
make_csf_color_image
colour-science/sample_code
python
def make_csf_color_image(width=640, height=640, lv1=np.array([940, 940, 940], dtype=np.uint16), lv2=np.array([1023, 1023, 1023], dtype=np.uint16), stripe_num=6): '\n 長方形を複数個ズラして重ねることでCSFパターンっぽいのを作る。\n 入力信号レベルは10bitに限定する。\n\n Parameters\n ----------\n width : numeric.\n width of the pattern image.\n height : numeric.\n height of the pattern image.\n lv1 : array_like\n video level 1. this value must be 10bit.\n lv2 : array_like\n video level 2. this value must be 10bit.\n stripe_num : numeric\n number of the stripe.\n\n Returns\n -------\n array_like\n a cms pattern image.\n ' width_list = equal_devision(width, stripe_num) height_list = equal_devision(height, stripe_num) h_pos_list = equal_devision((width // 2), stripe_num) v_pos_list = equal_devision((height // 2), stripe_num) img = np.zeros((height, width, 3), dtype=np.uint16) width_temp = width height_temp = height h_pos_temp = 0 v_pos_temp = 0 for idx in range(stripe_num): lv = (lv1 if ((idx % 2) == 0) else lv2) temp_img = np.ones((height_temp, width_temp, 3), dtype=np.uint16) temp_img = (temp_img * lv.reshape((1, 1, 3))) ed_pos_h = (h_pos_temp + width_temp) ed_pos_v = (v_pos_temp + height_temp) img[v_pos_temp:ed_pos_v, h_pos_temp:ed_pos_h] = temp_img width_temp -= width_list[((stripe_num - 1) - idx)] height_temp -= height_list[((stripe_num - 1) - idx)] h_pos_temp += h_pos_list[idx] v_pos_temp += v_pos_list[idx] return img
def make_tile_pattern(width=480, height=960, h_tile_num=4, v_tile_num=4, low_level=(940, 940, 940), high_level=(1023, 1023, 1023)): '\n タイル状の縞々パターンを作る\n ' width_array = equal_devision(width, h_tile_num) height_array = equal_devision(height, v_tile_num) high_level = np.array(high_level, dtype=np.uint16) low_level = np.array(low_level, dtype=np.uint16) v_buf = [] for (v_idx, height) in enumerate(height_array): h_buf = [] for (h_idx, width) in enumerate(width_array): tile_judge = (((h_idx + v_idx) % 2) == 0) h_temp = np.zeros((height, width, 3), dtype=np.uint16) h_temp[:, :] = (high_level if tile_judge else low_level) h_buf.append(h_temp) v_buf.append(np.hstack(h_buf)) img = np.vstack(v_buf) return img
-1,309,246,484,004,092,000
タイル状の縞々パターンを作る
ty_lib/test_pattern_generator2.py
make_tile_pattern
colour-science/sample_code
python
def make_tile_pattern(width=480, height=960, h_tile_num=4, v_tile_num=4, low_level=(940, 940, 940), high_level=(1023, 1023, 1023)): '\n \n ' width_array = equal_devision(width, h_tile_num) height_array = equal_devision(height, v_tile_num) high_level = np.array(high_level, dtype=np.uint16) low_level = np.array(low_level, dtype=np.uint16) v_buf = [] for (v_idx, height) in enumerate(height_array): h_buf = [] for (h_idx, width) in enumerate(width_array): tile_judge = (((h_idx + v_idx) % 2) == 0) h_temp = np.zeros((height, width, 3), dtype=np.uint16) h_temp[:, :] = (high_level if tile_judge else low_level) h_buf.append(h_temp) v_buf.append(np.hstack(h_buf)) img = np.vstack(v_buf) return img
def make_ycbcr_checker(height=480, v_tile_num=4): '\n YCbCr係数誤りを確認するテストパターンを作る。\n 正直かなり汚い組み方です。雑に作ったパターンを悪魔合体させています。\n\n Parameters\n ----------\n height : numeric.\n height of the pattern image.\n v_tile_num : numeric\n number of the tile in the vertical direction.\n\n Note\n ----\n 横長のパターンになる。以下の式が成立する。\n\n ```\n h_tile_num = v_tile_num * 2\n width = height * 2\n ```\n\n Returns\n -------\n array_like\n ycbcr checker image\n ' cyan_img = make_tile_pattern(width=height, height=height, h_tile_num=v_tile_num, v_tile_num=v_tile_num, low_level=[0, 990, 990], high_level=[0, 1023, 1023]) magenta_img = make_tile_pattern(width=height, height=height, h_tile_num=v_tile_num, v_tile_num=v_tile_num, low_level=[990, 0, 312], high_level=[1023, 0, 312]) out_img = np.hstack([cyan_img, magenta_img]) return out_img
-2,052,380,776,987,882,000
YCbCr係数誤りを確認するテストパターンを作る。 正直かなり汚い組み方です。雑に作ったパターンを悪魔合体させています。 Parameters ---------- height : numeric. height of the pattern image. v_tile_num : numeric number of the tile in the vertical direction. Note ---- 横長のパターンになる。以下の式が成立する。 ``` h_tile_num = v_tile_num * 2 width = height * 2 ``` Returns ------- array_like ycbcr checker image
ty_lib/test_pattern_generator2.py
make_ycbcr_checker
colour-science/sample_code
python
def make_ycbcr_checker(height=480, v_tile_num=4): '\n YCbCr係数誤りを確認するテストパターンを作る。\n 正直かなり汚い組み方です。雑に作ったパターンを悪魔合体させています。\n\n Parameters\n ----------\n height : numeric.\n height of the pattern image.\n v_tile_num : numeric\n number of the tile in the vertical direction.\n\n Note\n ----\n 横長のパターンになる。以下の式が成立する。\n\n ```\n h_tile_num = v_tile_num * 2\n width = height * 2\n ```\n\n Returns\n -------\n array_like\n ycbcr checker image\n ' cyan_img = make_tile_pattern(width=height, height=height, h_tile_num=v_tile_num, v_tile_num=v_tile_num, low_level=[0, 990, 990], high_level=[0, 1023, 1023]) magenta_img = make_tile_pattern(width=height, height=height, h_tile_num=v_tile_num, v_tile_num=v_tile_num, low_level=[990, 0, 312], high_level=[1023, 0, 312]) out_img = np.hstack([cyan_img, magenta_img]) return out_img
def plot_color_checker_image(rgb, rgb2=None, size=(1920, 1080), block_size=(1 / 4.5), padding=0.01): "\n ColorCheckerをプロットする\n\n Parameters\n ----------\n rgb : array_like\n RGB value of the ColorChecker.\n RGB's shape must be (24, 3).\n rgb2 : array_like\n It's a optional parameter.\n If You want to draw two different ColorCheckers,\n set the RGB value to this variable.\n size : tuple\n canvas size.\n block_size : float\n A each block's size.\n This value is ratio to height of the canvas.\n padding : float\n A padding to the block.\n\n Returns\n -------\n array_like\n A ColorChecker image.\n\n " IMG_HEIGHT = size[1] IMG_WIDTH = size[0] COLOR_CHECKER_SIZE = block_size COLOR_CHECKER_H_NUM = 6 COLOR_CHECKER_V_NUM = 4 COLOR_CHECKER_PADDING = 0.01 COLOR_CHECKER_H_NUM = 6 COLOR_CHECKER_V_NUM = 4 img_height = IMG_HEIGHT img_width = IMG_WIDTH patch_st_h = int(((IMG_WIDTH / 2.0) - ((((IMG_HEIGHT * COLOR_CHECKER_SIZE) * COLOR_CHECKER_H_NUM) / 2.0) + (((IMG_HEIGHT * COLOR_CHECKER_PADDING) * ((COLOR_CHECKER_H_NUM / 2.0) - 0.5)) / 2.0)))) patch_st_v = int(((IMG_HEIGHT / 2.0) - ((((IMG_HEIGHT * COLOR_CHECKER_SIZE) * COLOR_CHECKER_V_NUM) / 2.0) + (((IMG_HEIGHT * COLOR_CHECKER_PADDING) * ((COLOR_CHECKER_V_NUM / 2.0) - 0.5)) / 2.0)))) patch_width = int((img_height * COLOR_CHECKER_SIZE)) patch_height = patch_width patch_space = int((img_height * COLOR_CHECKER_PADDING)) img_all_patch = np.zeros((img_height, img_width, 3), dtype=np.uint8) for idx in range((COLOR_CHECKER_H_NUM * COLOR_CHECKER_V_NUM)): v_idx = (idx // COLOR_CHECKER_H_NUM) h_idx = (idx % COLOR_CHECKER_H_NUM) patch = np.ones((patch_height, patch_width, 3)) patch[:, :] = rgb[idx] st_h = (patch_st_h + ((patch_width + patch_space) * h_idx)) st_v = (patch_st_v + ((patch_height + patch_space) * v_idx)) img_all_patch[st_v:(st_v + patch_height), st_h:(st_h + patch_width)] = patch pt2 = ((st_h + patch_width), st_v) pt3 = (st_h, (st_v + patch_height)) pt4 = ((st_h + patch_width), (st_v + patch_height)) pts = np.array((pt2, pt3, 
pt4)) sub_color = (rgb[idx].tolist() if (rgb2 is None) else rgb2[idx].tolist()) cv2.fillPoly(img_all_patch, [pts], sub_color) preview_image(img_all_patch) return img_all_patch
6,027,125,347,553,437,000
ColorCheckerをプロットする Parameters ---------- rgb : array_like RGB value of the ColorChecker. RGB's shape must be (24, 3). rgb2 : array_like It's a optional parameter. If You want to draw two different ColorCheckers, set the RGB value to this variable. size : tuple canvas size. block_size : float A each block's size. This value is ratio to height of the canvas. padding : float A padding to the block. Returns ------- array_like A ColorChecker image.
ty_lib/test_pattern_generator2.py
plot_color_checker_image
colour-science/sample_code
python
def plot_color_checker_image(rgb, rgb2=None, size=(1920, 1080), block_size=(1 / 4.5), padding=0.01): "\n ColorCheckerをプロットする\n\n Parameters\n ----------\n rgb : array_like\n RGB value of the ColorChecker.\n RGB's shape must be (24, 3).\n rgb2 : array_like\n It's a optional parameter.\n If You want to draw two different ColorCheckers,\n set the RGB value to this variable.\n size : tuple\n canvas size.\n block_size : float\n A each block's size.\n This value is ratio to height of the canvas.\n padding : float\n A padding to the block.\n\n Returns\n -------\n array_like\n A ColorChecker image.\n\n " IMG_HEIGHT = size[1] IMG_WIDTH = size[0] COLOR_CHECKER_SIZE = block_size COLOR_CHECKER_H_NUM = 6 COLOR_CHECKER_V_NUM = 4 COLOR_CHECKER_PADDING = 0.01 COLOR_CHECKER_H_NUM = 6 COLOR_CHECKER_V_NUM = 4 img_height = IMG_HEIGHT img_width = IMG_WIDTH patch_st_h = int(((IMG_WIDTH / 2.0) - ((((IMG_HEIGHT * COLOR_CHECKER_SIZE) * COLOR_CHECKER_H_NUM) / 2.0) + (((IMG_HEIGHT * COLOR_CHECKER_PADDING) * ((COLOR_CHECKER_H_NUM / 2.0) - 0.5)) / 2.0)))) patch_st_v = int(((IMG_HEIGHT / 2.0) - ((((IMG_HEIGHT * COLOR_CHECKER_SIZE) * COLOR_CHECKER_V_NUM) / 2.0) + (((IMG_HEIGHT * COLOR_CHECKER_PADDING) * ((COLOR_CHECKER_V_NUM / 2.0) - 0.5)) / 2.0)))) patch_width = int((img_height * COLOR_CHECKER_SIZE)) patch_height = patch_width patch_space = int((img_height * COLOR_CHECKER_PADDING)) img_all_patch = np.zeros((img_height, img_width, 3), dtype=np.uint8) for idx in range((COLOR_CHECKER_H_NUM * COLOR_CHECKER_V_NUM)): v_idx = (idx // COLOR_CHECKER_H_NUM) h_idx = (idx % COLOR_CHECKER_H_NUM) patch = np.ones((patch_height, patch_width, 3)) patch[:, :] = rgb[idx] st_h = (patch_st_h + ((patch_width + patch_space) * h_idx)) st_v = (patch_st_v + ((patch_height + patch_space) * v_idx)) img_all_patch[st_v:(st_v + patch_height), st_h:(st_h + patch_width)] = patch pt2 = ((st_h + patch_width), st_v) pt3 = (st_h, (st_v + patch_height)) pt4 = ((st_h + patch_width), (st_v + patch_height)) pts = np.array((pt2, pt3, 
pt4)) sub_color = (rgb[idx].tolist() if (rgb2 is None) else rgb2[idx].tolist()) cv2.fillPoly(img_all_patch, [pts], sub_color) preview_image(img_all_patch) return img_all_patch
def get_log10_x_scale(sample_num=8, ref_val=1.0, min_exposure=(- 1), max_exposure=6): '\n Log10スケールのx軸データを作る。\n\n Examples\n --------\n >>> get_log2_x_scale(\n ... sample_num=8, ref_val=1.0, min_exposure=-1, max_exposure=6)\n array([ 1.0000e-01 1.0000e+00 1.0000e+01 1.0000e+02\n 1.0000e+03 1.0000e+04 1.0000e+05 1.0000e+06])\n ' x_min = np.log10((ref_val * (10 ** min_exposure))) x_max = np.log10((ref_val * (10 ** max_exposure))) x = np.linspace(x_min, x_max, sample_num) return (10.0 ** x)
-7,178,071,663,818,056,000
Log10スケールのx軸データを作る。 Examples -------- >>> get_log2_x_scale( ... sample_num=8, ref_val=1.0, min_exposure=-1, max_exposure=6) array([ 1.0000e-01 1.0000e+00 1.0000e+01 1.0000e+02 1.0000e+03 1.0000e+04 1.0000e+05 1.0000e+06])
ty_lib/test_pattern_generator2.py
get_log10_x_scale
colour-science/sample_code
python
def get_log10_x_scale(sample_num=8, ref_val=1.0, min_exposure=(- 1), max_exposure=6): '\n Log10スケールのx軸データを作る。\n\n Examples\n --------\n >>> get_log2_x_scale(\n ... sample_num=8, ref_val=1.0, min_exposure=-1, max_exposure=6)\n array([ 1.0000e-01 1.0000e+00 1.0000e+01 1.0000e+02\n 1.0000e+03 1.0000e+04 1.0000e+05 1.0000e+06])\n ' x_min = np.log10((ref_val * (10 ** min_exposure))) x_max = np.log10((ref_val * (10 ** max_exposure))) x = np.linspace(x_min, x_max, sample_num) return (10.0 ** x)
def get_log2_x_scale(sample_num=32, ref_val=1.0, min_exposure=(- 6.5), max_exposure=6.5): '\n Log2スケールのx軸データを作る。\n\n Examples\n --------\n >>> get_log2_x_scale(sample_num=10, min_exposure=-4.0, max_exposure=4.0)\n array([[ 0.0625 0.11573434 0.214311 0.39685026 0.73486725\n 1.36079 2.5198421 4.66611616 8.64047791 16. ]])\n ' x_min = np.log2((ref_val * (2 ** min_exposure))) x_max = np.log2((ref_val * (2 ** max_exposure))) x = np.linspace(x_min, x_max, sample_num) return (2.0 ** x)
-5,210,454,686,189,132,000
Log2スケールのx軸データを作る。 Examples -------- >>> get_log2_x_scale(sample_num=10, min_exposure=-4.0, max_exposure=4.0) array([[ 0.0625 0.11573434 0.214311 0.39685026 0.73486725 1.36079 2.5198421 4.66611616 8.64047791 16. ]])
ty_lib/test_pattern_generator2.py
get_log2_x_scale
colour-science/sample_code
python
def get_log2_x_scale(sample_num=32, ref_val=1.0, min_exposure=(- 6.5), max_exposure=6.5): '\n Log2スケールのx軸データを作る。\n\n Examples\n --------\n >>> get_log2_x_scale(sample_num=10, min_exposure=-4.0, max_exposure=4.0)\n array([[ 0.0625 0.11573434 0.214311 0.39685026 0.73486725\n 1.36079 2.5198421 4.66611616 8.64047791 16. ]])\n ' x_min = np.log2((ref_val * (2 ** min_exposure))) x_max = np.log2((ref_val * (2 ** max_exposure))) x = np.linspace(x_min, x_max, sample_num) return (2.0 ** x)
def shaper_func_linear_to_log2(x, mid_gray=0.18, min_exposure=(- 6.5), max_exposure=6.5): '\n ACESutil.Lin_to_Log2_param.ctl を参考に作成。\n https://github.com/ampas/aces-dev/blob/master/transforms/ctl/utilities/ACESutil.Lin_to_Log2_param.ctl\n\n Parameters\n ----------\n x : array_like\n linear data.\n mid_gray : float\n 18% gray value on linear scale.\n min_exposure : float\n minimum value on log scale.\n max_exposure : float\n maximum value on log scale.\n\n Returns\n -------\n array_like\n log2 value that is transformed from linear x value.\n\n Examples\n --------\n >>> shaper_func_linear_to_log2(\n ... x=0.18, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5)\n 0.5\n >>> shaper_func_linear_to_log2(\n ... x=np.array([0.00198873782209, 16.2917402385])\n ... mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5)\n array([ 1.58232402e-13 1.00000000e+00])\n ' y = np.log2((x / mid_gray)) y_normalized = ((y - min_exposure) / (max_exposure - min_exposure)) y_normalized[(y_normalized < 0)] = 0 return y_normalized
-4,971,467,536,164,493,000
ACESutil.Lin_to_Log2_param.ctl を参考に作成。 https://github.com/ampas/aces-dev/blob/master/transforms/ctl/utilities/ACESutil.Lin_to_Log2_param.ctl Parameters ---------- x : array_like linear data. mid_gray : float 18% gray value on linear scale. min_exposure : float minimum value on log scale. max_exposure : float maximum value on log scale. Returns ------- array_like log2 value that is transformed from linear x value. Examples -------- >>> shaper_func_linear_to_log2( ... x=0.18, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5) 0.5 >>> shaper_func_linear_to_log2( ... x=np.array([0.00198873782209, 16.2917402385]) ... mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5) array([ 1.58232402e-13 1.00000000e+00])
ty_lib/test_pattern_generator2.py
shaper_func_linear_to_log2
colour-science/sample_code
python
def shaper_func_linear_to_log2(x, mid_gray=0.18, min_exposure=(- 6.5), max_exposure=6.5): '\n ACESutil.Lin_to_Log2_param.ctl を参考に作成。\n https://github.com/ampas/aces-dev/blob/master/transforms/ctl/utilities/ACESutil.Lin_to_Log2_param.ctl\n\n Parameters\n ----------\n x : array_like\n linear data.\n mid_gray : float\n 18% gray value on linear scale.\n min_exposure : float\n minimum value on log scale.\n max_exposure : float\n maximum value on log scale.\n\n Returns\n -------\n array_like\n log2 value that is transformed from linear x value.\n\n Examples\n --------\n >>> shaper_func_linear_to_log2(\n ... x=0.18, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5)\n 0.5\n >>> shaper_func_linear_to_log2(\n ... x=np.array([0.00198873782209, 16.2917402385])\n ... mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5)\n array([ 1.58232402e-13 1.00000000e+00])\n ' y = np.log2((x / mid_gray)) y_normalized = ((y - min_exposure) / (max_exposure - min_exposure)) y_normalized[(y_normalized < 0)] = 0 return y_normalized
def shaper_func_log2_to_linear(x, mid_gray=0.18, min_exposure=(- 6.5), max_exposure=6.5): '\n ACESutil.Log2_to_Lin_param.ctl を参考に作成。\n https://github.com/ampas/aces-dev/blob/master/transforms/ctl/utilities/ACESutil.Log2_to_Lin_param.ctl\n\n Log2空間の補足は shaper_func_linear_to_log2() の説明を参照\n\n Examples\n --------\n >>> x = np.array([0.0, 1.0])\n >>> shaper_func_log2_to_linear(\n ... x, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5)\n array([0.00198873782209, 16.2917402385])\n ' x_re_scale = ((x * (max_exposure - min_exposure)) + min_exposure) y = ((2.0 ** x_re_scale) * mid_gray) return y
5,381,534,480,915,387,000
ACESutil.Log2_to_Lin_param.ctl を参考に作成。 https://github.com/ampas/aces-dev/blob/master/transforms/ctl/utilities/ACESutil.Log2_to_Lin_param.ctl Log2空間の補足は shaper_func_linear_to_log2() の説明を参照 Examples -------- >>> x = np.array([0.0, 1.0]) >>> shaper_func_log2_to_linear( ... x, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5) array([0.00198873782209, 16.2917402385])
ty_lib/test_pattern_generator2.py
shaper_func_log2_to_linear
colour-science/sample_code
python
def shaper_func_log2_to_linear(x, mid_gray=0.18, min_exposure=(- 6.5), max_exposure=6.5): '\n ACESutil.Log2_to_Lin_param.ctl を参考に作成。\n https://github.com/ampas/aces-dev/blob/master/transforms/ctl/utilities/ACESutil.Log2_to_Lin_param.ctl\n\n Log2空間の補足は shaper_func_linear_to_log2() の説明を参照\n\n Examples\n --------\n >>> x = np.array([0.0, 1.0])\n >>> shaper_func_log2_to_linear(\n ... x, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5)\n array([0.00198873782209, 16.2917402385])\n ' x_re_scale = ((x * (max_exposure - min_exposure)) + min_exposure) y = ((2.0 ** x_re_scale) * mid_gray) return y
def draw_straight_line(img, pt1, pt2, color, thickness): '\n 直線を引く。OpenCV だと 8bit しか対応してないっぽいので自作。\n\n Parameters\n ----------\n img : array_like\n image data.\n pt1 : list(pos_h, pos_v)\n start point.\n pt2 : list(pos_h, pos_v)\n end point.\n color : array_like\n color\n thickness : int\n thickness.\n\n Returns\n -------\n array_like\n image data with line.\n\n Notes\n -----\n thickness のパラメータは pt1 の点から右下方向に効きます。\n pt1 を中心として太さではない事に注意。\n\n Examples\n --------\n >>> pt1 = (0, 0)\n >>> pt2 = (1920, 0)\n >>> color = (940, 940, 940)\n >>> thickness = 4\n >>> draw_straight_line(img, pt1, pt2, color, thickness)\n ' if ((pt1[0] != pt2[0]) and (pt1[1] != pt2[1])): raise ValueError('invalid pt1, pt2 parameters') if (pt1[0] == pt2[0]): thickness_direction = 'h' else: thickness_direction = 'v' if (thickness_direction == 'h'): for h_idx in range(thickness): img[pt1[1]:pt2[1], (pt1[0] + h_idx), :] = color elif (thickness_direction == 'v'): for v_idx in range(thickness): img[(pt1[1] + v_idx), pt1[0]:pt2[0], :] = color
-1,859,524,669,991,116,500
直線を引く。OpenCV だと 8bit しか対応してないっぽいので自作。 Parameters ---------- img : array_like image data. pt1 : list(pos_h, pos_v) start point. pt2 : list(pos_h, pos_v) end point. color : array_like color thickness : int thickness. Returns ------- array_like image data with line. Notes ----- thickness のパラメータは pt1 の点から右下方向に効きます。 pt1 を中心として太さではない事に注意。 Examples -------- >>> pt1 = (0, 0) >>> pt2 = (1920, 0) >>> color = (940, 940, 940) >>> thickness = 4 >>> draw_straight_line(img, pt1, pt2, color, thickness)
ty_lib/test_pattern_generator2.py
draw_straight_line
colour-science/sample_code
python
def draw_straight_line(img, pt1, pt2, color, thickness): '\n 直線を引く。OpenCV だと 8bit しか対応してないっぽいので自作。\n\n Parameters\n ----------\n img : array_like\n image data.\n pt1 : list(pos_h, pos_v)\n start point.\n pt2 : list(pos_h, pos_v)\n end point.\n color : array_like\n color\n thickness : int\n thickness.\n\n Returns\n -------\n array_like\n image data with line.\n\n Notes\n -----\n thickness のパラメータは pt1 の点から右下方向に効きます。\n pt1 を中心として太さではない事に注意。\n\n Examples\n --------\n >>> pt1 = (0, 0)\n >>> pt2 = (1920, 0)\n >>> color = (940, 940, 940)\n >>> thickness = 4\n >>> draw_straight_line(img, pt1, pt2, color, thickness)\n ' if ((pt1[0] != pt2[0]) and (pt1[1] != pt2[1])): raise ValueError('invalid pt1, pt2 parameters') if (pt1[0] == pt2[0]): thickness_direction = 'h' else: thickness_direction = 'v' if (thickness_direction == 'h'): for h_idx in range(thickness): img[pt1[1]:pt2[1], (pt1[0] + h_idx), :] = color elif (thickness_direction == 'v'): for v_idx in range(thickness): img[(pt1[1] + v_idx), pt1[0]:pt2[0], :] = color
def draw_outline(img, fg_color, outline_width): '\n img に対して外枠線を引く\n\n Parameters\n ----------\n img : array_like\n image data.\n fg_color : array_like\n color\n outline_width : int\n thickness.\n\n Returns\n -------\n array_like\n image data with line.\n\n Examples\n --------\n >>> img = np.zeros((1080, 1920, 3))\n >>> color = (940, 940, 940)\n >>> thickness = 2\n >>> draw_outline(img, color, thickness)\n ' width = img.shape[1] height = img.shape[0] pt1 = (0, 0) pt2 = (width, 0) draw_straight_line(img, pt1, pt2, fg_color, outline_width) pt1 = (0, 0) pt2 = (0, height) draw_straight_line(img, pt1, pt2, fg_color, outline_width) pt1 = ((width - outline_width), 0) pt2 = ((width - outline_width), height) draw_straight_line(img, pt1, pt2, fg_color, outline_width) pt1 = (0, (height - outline_width)) pt2 = (width, (height - outline_width)) draw_straight_line(img, pt1, pt2, fg_color, outline_width)
6,796,354,785,387,014,000
img に対して外枠線を引く Parameters ---------- img : array_like image data. fg_color : array_like color outline_width : int thickness. Returns ------- array_like image data with line. Examples -------- >>> img = np.zeros((1080, 1920, 3)) >>> color = (940, 940, 940) >>> thickness = 2 >>> draw_outline(img, color, thickness)
ty_lib/test_pattern_generator2.py
draw_outline
colour-science/sample_code
python
def draw_outline(img, fg_color, outline_width): '\n img に対して外枠線を引く\n\n Parameters\n ----------\n img : array_like\n image data.\n fg_color : array_like\n color\n outline_width : int\n thickness.\n\n Returns\n -------\n array_like\n image data with line.\n\n Examples\n --------\n >>> img = np.zeros((1080, 1920, 3))\n >>> color = (940, 940, 940)\n >>> thickness = 2\n >>> draw_outline(img, color, thickness)\n ' width = img.shape[1] height = img.shape[0] pt1 = (0, 0) pt2 = (width, 0) draw_straight_line(img, pt1, pt2, fg_color, outline_width) pt1 = (0, 0) pt2 = (0, height) draw_straight_line(img, pt1, pt2, fg_color, outline_width) pt1 = ((width - outline_width), 0) pt2 = ((width - outline_width), height) draw_straight_line(img, pt1, pt2, fg_color, outline_width) pt1 = (0, (height - outline_width)) pt2 = (width, (height - outline_width)) draw_straight_line(img, pt1, pt2, fg_color, outline_width)
def convert_luminance_to_color_value(luminance, transfer_function): '\n 輝度[cd/m2] から code value の RGB値に変換する。\n luminance の単位は [cd/m2]。無彩色である。\n\n Examples\n --------\n >>> convert_luminance_to_color_value(100, tf.GAMMA24)\n >>> [ 1.0 1.0 1.0 ]\n >>> convert_luminance_to_color_value(100, tf.ST2084)\n >>> [ 0.50807842 0.50807842 0.50807842 ]\n ' code_value = convert_luminance_to_code_value(luminance, transfer_function) return np.array([code_value, code_value, code_value])
-9,198,726,890,243,419,000
輝度[cd/m2] から code value の RGB値に変換する。 luminance の単位は [cd/m2]。無彩色である。 Examples -------- >>> convert_luminance_to_color_value(100, tf.GAMMA24) >>> [ 1.0 1.0 1.0 ] >>> convert_luminance_to_color_value(100, tf.ST2084) >>> [ 0.50807842 0.50807842 0.50807842 ]
ty_lib/test_pattern_generator2.py
convert_luminance_to_color_value
colour-science/sample_code
python
def convert_luminance_to_color_value(luminance, transfer_function): '\n 輝度[cd/m2] から code value の RGB値に変換する。\n luminance の単位は [cd/m2]。無彩色である。\n\n Examples\n --------\n >>> convert_luminance_to_color_value(100, tf.GAMMA24)\n >>> [ 1.0 1.0 1.0 ]\n >>> convert_luminance_to_color_value(100, tf.ST2084)\n >>> [ 0.50807842 0.50807842 0.50807842 ]\n ' code_value = convert_luminance_to_code_value(luminance, transfer_function) return np.array([code_value, code_value, code_value])
def convert_luminance_to_code_value(luminance, transfer_function): '\n 輝度[cd/m2] から code value に変換する。\n luminance の単位は [cd/m2]\n ' return tf.oetf_from_luminance(luminance, transfer_function)
5,017,816,043,957,604,000
輝度[cd/m2] から code value に変換する。 luminance の単位は [cd/m2]
ty_lib/test_pattern_generator2.py
convert_luminance_to_code_value
colour-science/sample_code
python
def convert_luminance_to_code_value(luminance, transfer_function): '\n 輝度[cd/m2] から code value に変換する。\n luminance の単位は [cd/m2]\n ' return tf.oetf_from_luminance(luminance, transfer_function)
def calc_rad_patch_idx2(outmost_num=5, current_num=3): '\n 以下のような、中心がGray、周りは CIELAB 空間の a*b*平面のカラーパッチの\n RGB値のリストを得る。\n https://user-images.githubusercontent.com/3609012/75444470-d3bc5600-59a6-11ea-962b-c315648782a9.png\n\n 得られたデータは並べ替えが済んでいないため、calc_rad_patch_idx2() で\n 得られる変換テーブルを使った変換が必要。\n 本関数はまさにその変換を行う。\n ' base = np.arange((outmost_num ** 2)).reshape((outmost_num, outmost_num)) t_idx = ((outmost_num - current_num) // 2) trimmed = base[t_idx:(t_idx + current_num), t_idx:(t_idx + current_num)] half_num = (current_num // 2) conv_idx = [] for idx in range(half_num): val = ((((current_num ** 2) // 2) + half_num) - (current_num * idx)) conv_idx.append(val) for idx in range(current_num)[::(- 1)]: conv_idx.append(idx) for idx in range(1, (current_num - 1)): conv_idx.append((idx * current_num)) for idx in range(current_num): val = (((current_num ** 2) - current_num) + idx) conv_idx.append(val) for idx in range(1, half_num): val = (((current_num ** 2) - 1) - (idx * current_num)) conv_idx.append(val) conv_idx = trimmed.flatten()[conv_idx] return conv_idx
-7,178,791,033,226,871,000
以下のような、中心がGray、周りは CIELAB 空間の a*b*平面のカラーパッチの RGB値のリストを得る。 https://user-images.githubusercontent.com/3609012/75444470-d3bc5600-59a6-11ea-962b-c315648782a9.png 得られたデータは並べ替えが済んでいないため、calc_rad_patch_idx2() で 得られる変換テーブルを使った変換が必要。 本関数はまさにその変換を行う。
ty_lib/test_pattern_generator2.py
calc_rad_patch_idx2
colour-science/sample_code
python
def calc_rad_patch_idx2(outmost_num=5, current_num=3): '\n 以下のような、中心がGray、周りは CIELAB 空間の a*b*平面のカラーパッチの\n RGB値のリストを得る。\n https://user-images.githubusercontent.com/3609012/75444470-d3bc5600-59a6-11ea-962b-c315648782a9.png\n\n 得られたデータは並べ替えが済んでいないため、calc_rad_patch_idx2() で\n 得られる変換テーブルを使った変換が必要。\n 本関数はまさにその変換を行う。\n ' base = np.arange((outmost_num ** 2)).reshape((outmost_num, outmost_num)) t_idx = ((outmost_num - current_num) // 2) trimmed = base[t_idx:(t_idx + current_num), t_idx:(t_idx + current_num)] half_num = (current_num // 2) conv_idx = [] for idx in range(half_num): val = ((((current_num ** 2) // 2) + half_num) - (current_num * idx)) conv_idx.append(val) for idx in range(current_num)[::(- 1)]: conv_idx.append(idx) for idx in range(1, (current_num - 1)): conv_idx.append((idx * current_num)) for idx in range(current_num): val = (((current_num ** 2) - current_num) + idx) conv_idx.append(val) for idx in range(1, half_num): val = (((current_num ** 2) - 1) - (idx * current_num)) conv_idx.append(val) conv_idx = trimmed.flatten()[conv_idx] return conv_idx
def _calc_rgb_from_same_lstar_radial_data(lstar, temp_chroma, current_num, color_space): '\n 以下のような、中心がGray、周りは CIELAB 空間の a*b*平面のカラーパッチの\n RGB値のリストを得る。\n https://user-images.githubusercontent.com/3609012/75444470-d3bc5600-59a6-11ea-962b-c315648782a9.png\n\n 得られたデータは並べ替えが済んでいないため、calc_rad_patch_idx2() で\n 得られる変換テーブルを使った変換が必要。\n ' current_patch_num = (((current_num - 1) * 4) if (current_num > 1) else 1) rad = np.linspace(0, (2 * np.pi), current_patch_num, endpoint=False) ll = (np.ones(current_patch_num) * lstar) aa = (np.cos(rad) * temp_chroma) bb = (np.sin(rad) * temp_chroma) lab = np.dstack((ll, aa, bb)) large_xyz = Lab_to_XYZ(lab) rgb = XYZ_to_RGB(large_xyz, D65_WHITE, D65_WHITE, color_space.XYZ_to_RGB_matrix) return np.clip(rgb, 0.0, 1.0)
5,056,633,246,826,132,000
以下のような、中心がGray、周りは CIELAB 空間の a*b*平面のカラーパッチの RGB値のリストを得る。 https://user-images.githubusercontent.com/3609012/75444470-d3bc5600-59a6-11ea-962b-c315648782a9.png 得られたデータは並べ替えが済んでいないため、calc_rad_patch_idx2() で 得られる変換テーブルを使った変換が必要。
ty_lib/test_pattern_generator2.py
_calc_rgb_from_same_lstar_radial_data
colour-science/sample_code
python
def _calc_rgb_from_same_lstar_radial_data(lstar, temp_chroma, current_num, color_space): '\n 以下のような、中心がGray、周りは CIELAB 空間の a*b*平面のカラーパッチの\n RGB値のリストを得る。\n https://user-images.githubusercontent.com/3609012/75444470-d3bc5600-59a6-11ea-962b-c315648782a9.png\n\n 得られたデータは並べ替えが済んでいないため、calc_rad_patch_idx2() で\n 得られる変換テーブルを使った変換が必要。\n ' current_patch_num = (((current_num - 1) * 4) if (current_num > 1) else 1) rad = np.linspace(0, (2 * np.pi), current_patch_num, endpoint=False) ll = (np.ones(current_patch_num) * lstar) aa = (np.cos(rad) * temp_chroma) bb = (np.sin(rad) * temp_chroma) lab = np.dstack((ll, aa, bb)) large_xyz = Lab_to_XYZ(lab) rgb = XYZ_to_RGB(large_xyz, D65_WHITE, D65_WHITE, color_space.XYZ_to_RGB_matrix) return np.clip(rgb, 0.0, 1.0)
def calc_same_lstar_radial_color_patch_data(lstar=58, chroma=32.5, outmost_num=9, color_space=BT709_COLOURSPACE, transfer_function=tf.GAMMA24): '\n 以下のような、中心がGray、周りは CIELAB 空間の a*b*平面のカラーパッチの\n RGB値のリストを得る。\n https://user-images.githubusercontent.com/3609012/75444470-d3bc5600-59a6-11ea-962b-c315648782a9.png\n\n 得られた RGB値のリストは最初のデータが画像左上の緑データ、\n 最後のデータが画像右下の紫データとなるよう既に**並べ替え**が行われている。\n\n よってパッチをプロットする場合はRGB値リストの先頭から順にデータを取り出し、\n 右下に向かって並べていけば良い。\n ' patch_num = (outmost_num ** 2) transfer_function = tf.GAMMA24 rgb_list = np.ones((patch_num, 3)) current_num_list = range(1, (outmost_num + 1), 2) chroma_list = np.linspace(0, chroma, len(current_num_list)) for (temp_chroma, current_num) in zip(chroma_list, current_num_list): current_patch_num = (((current_num - 1) * 4) if (current_num > 1) else 1) rgb = _calc_rgb_from_same_lstar_radial_data(lstar, temp_chroma, current_num, color_space) rgb = np.reshape(rgb, (current_patch_num, 3)) rgb = tf.oetf(rgb, transfer_function) conv_idx = calc_rad_patch_idx2(outmost_num=outmost_num, current_num=current_num) for idx in range(current_patch_num): rgb_list[conv_idx[idx]] = rgb[idx] return rgb_list
-4,732,374,722,857,101,000
以下のような、中心がGray、周りは CIELAB 空間の a*b*平面のカラーパッチの RGB値のリストを得る。 https://user-images.githubusercontent.com/3609012/75444470-d3bc5600-59a6-11ea-962b-c315648782a9.png 得られた RGB値のリストは最初のデータが画像左上の緑データ、 最後のデータが画像右下の紫データとなるよう既に**並べ替え**が行われている。 よってパッチをプロットする場合はRGB値リストの先頭から順にデータを取り出し、 右下に向かって並べていけば良い。
ty_lib/test_pattern_generator2.py
calc_same_lstar_radial_color_patch_data
colour-science/sample_code
python
def calc_same_lstar_radial_color_patch_data(lstar=58, chroma=32.5, outmost_num=9, color_space=BT709_COLOURSPACE, transfer_function=tf.GAMMA24): '\n 以下のような、中心がGray、周りは CIELAB 空間の a*b*平面のカラーパッチの\n RGB値のリストを得る。\n https://user-images.githubusercontent.com/3609012/75444470-d3bc5600-59a6-11ea-962b-c315648782a9.png\n\n 得られた RGB値のリストは最初のデータが画像左上の緑データ、\n 最後のデータが画像右下の紫データとなるよう既に**並べ替え**が行われている。\n\n よってパッチをプロットする場合はRGB値リストの先頭から順にデータを取り出し、\n 右下に向かって並べていけば良い。\n ' patch_num = (outmost_num ** 2) transfer_function = tf.GAMMA24 rgb_list = np.ones((patch_num, 3)) current_num_list = range(1, (outmost_num + 1), 2) chroma_list = np.linspace(0, chroma, len(current_num_list)) for (temp_chroma, current_num) in zip(chroma_list, current_num_list): current_patch_num = (((current_num - 1) * 4) if (current_num > 1) else 1) rgb = _calc_rgb_from_same_lstar_radial_data(lstar, temp_chroma, current_num, color_space) rgb = np.reshape(rgb, (current_patch_num, 3)) rgb = tf.oetf(rgb, transfer_function) conv_idx = calc_rad_patch_idx2(outmost_num=outmost_num, current_num=current_num) for idx in range(current_patch_num): rgb_list[conv_idx[idx]] = rgb[idx] return rgb_list
def get_accelerated_x_1x(sample_num=64): '\n 単調増加ではなく、加速度が 0→1→0 となるような x を作る\n\n Parameters\n ----------\n sample_num : int\n the number of the sample.\n\n Returns\n -------\n array_like\n accelerated value list\n\n Examples\n --------\n >>> x0 = np.linspace(0, 1, 8)\n >>> x1 = get_accelerated_x_1x(8)\n >>> print(x0)\n >>> [ 0. 0.142 0.285 0.428 0.571 0.714 0.857 1. ]\n >>> print(x1)\n >>> [ 0. 0.049 0.188 0.388 0.611 0.811 0.950 1. ]\n ' rad = np.linspace(((- 0.5) * np.pi), (0.5 * np.pi), sample_num) x = ((np.sin(rad) + 1) / 2) return x
-6,556,408,200,352,419,000
単調増加ではなく、加速度が 0→1→0 となるような x を作る Parameters ---------- sample_num : int the number of the sample. Returns ------- array_like accelerated value list Examples -------- >>> x0 = np.linspace(0, 1, 8) >>> x1 = get_accelerated_x_1x(8) >>> print(x0) >>> [ 0. 0.142 0.285 0.428 0.571 0.714 0.857 1. ] >>> print(x1) >>> [ 0. 0.049 0.188 0.388 0.611 0.811 0.950 1. ]
ty_lib/test_pattern_generator2.py
get_accelerated_x_1x
colour-science/sample_code
python
def get_accelerated_x_1x(sample_num=64): '\n 単調増加ではなく、加速度が 0→1→0 となるような x を作る\n\n Parameters\n ----------\n sample_num : int\n the number of the sample.\n\n Returns\n -------\n array_like\n accelerated value list\n\n Examples\n --------\n >>> x0 = np.linspace(0, 1, 8)\n >>> x1 = get_accelerated_x_1x(8)\n >>> print(x0)\n >>> [ 0. 0.142 0.285 0.428 0.571 0.714 0.857 1. ]\n >>> print(x1)\n >>> [ 0. 0.049 0.188 0.388 0.611 0.811 0.950 1. ]\n ' rad = np.linspace(((- 0.5) * np.pi), (0.5 * np.pi), sample_num) x = ((np.sin(rad) + 1) / 2) return x
def get_accelerated_x_2x(sample_num=64): '\n 単調増加ではなく、加速度が 0→1→0 となるような x を作る。\n 加速度が `get_accelerated_x_1x` の2倍!!\n\n Parameters\n ----------\n sample_num : int\n the number of the sample.\n\n Returns\n -------\n array_like\n accelerated value list\n\n Examples\n --------\n >>> x0 = np.linspace(0, 1, 8)\n >>> x2 = get_accelerated_x_2x(8)\n >>> print(x0)\n >>> [ 0. 0.142 0.285 0.428 0.571 0.714 0.857 1. ]\n >>> print(x2)\n >>> [ 0. 0.006 0.084 0.328 0.671 0.915 0.993 1. ]\n ' rad = np.linspace(((- 0.5) * np.pi), (0.5 * np.pi), sample_num) rad = ((np.sin(rad) * 0.5) * np.pi) x = ((np.sin(rad) + 1) / 2) return x
8,900,704,438,845,922,000
単調増加ではなく、加速度が 0→1→0 となるような x を作る。 加速度が `get_accelerated_x_1x` の2倍!! Parameters ---------- sample_num : int the number of the sample. Returns ------- array_like accelerated value list Examples -------- >>> x0 = np.linspace(0, 1, 8) >>> x2 = get_accelerated_x_2x(8) >>> print(x0) >>> [ 0. 0.142 0.285 0.428 0.571 0.714 0.857 1. ] >>> print(x2) >>> [ 0. 0.006 0.084 0.328 0.671 0.915 0.993 1. ]
ty_lib/test_pattern_generator2.py
get_accelerated_x_2x
colour-science/sample_code
python
def get_accelerated_x_2x(sample_num=64): '\n 単調増加ではなく、加速度が 0→1→0 となるような x を作る。\n 加速度が `get_accelerated_x_1x` の2倍!!\n\n Parameters\n ----------\n sample_num : int\n the number of the sample.\n\n Returns\n -------\n array_like\n accelerated value list\n\n Examples\n --------\n >>> x0 = np.linspace(0, 1, 8)\n >>> x2 = get_accelerated_x_2x(8)\n >>> print(x0)\n >>> [ 0. 0.142 0.285 0.428 0.571 0.714 0.857 1. ]\n >>> print(x2)\n >>> [ 0. 0.006 0.084 0.328 0.671 0.915 0.993 1. ]\n ' rad = np.linspace(((- 0.5) * np.pi), (0.5 * np.pi), sample_num) rad = ((np.sin(rad) * 0.5) * np.pi) x = ((np.sin(rad) + 1) / 2) return x
def get_accelerated_x_4x(sample_num=64): '\n 単調増加ではなく、加速度が 0→1→0 となるような x を作る。\n 加速度が `get_accelerated_x_1x` の4倍!!\n\n Parameters\n ----------\n sample_num : int\n the number of the sample.\n\n Returns\n -------\n array_like\n accelerated value list\n ' rad = np.linspace(((- 0.5) * np.pi), (0.5 * np.pi), sample_num) rad = ((np.sin(rad) * 0.5) * np.pi) rad = ((np.sin(rad) * 0.5) * np.pi) x = ((np.sin(rad) + 1) / 2) return x
1,836,093,452,671,727,900
単調増加ではなく、加速度が 0→1→0 となるような x を作る。 加速度が `get_accelerated_x_1x` の4倍!! Parameters ---------- sample_num : int the number of the sample. Returns ------- array_like accelerated value list
ty_lib/test_pattern_generator2.py
get_accelerated_x_4x
colour-science/sample_code
python
def get_accelerated_x_4x(sample_num=64): '\n 単調増加ではなく、加速度が 0→1→0 となるような x を作る。\n 加速度が `get_accelerated_x_1x` の4倍!!\n\n Parameters\n ----------\n sample_num : int\n the number of the sample.\n\n Returns\n -------\n array_like\n accelerated value list\n ' rad = np.linspace(((- 0.5) * np.pi), (0.5 * np.pi), sample_num) rad = ((np.sin(rad) * 0.5) * np.pi) rad = ((np.sin(rad) * 0.5) * np.pi) x = ((np.sin(rad) + 1) / 2) return x
def get_accelerated_x_8x(sample_num=64): '\n 単調増加ではなく、加速度が 0→1→0 となるような x を作る。\n 加速度が `get_accelerated_x_1x` の4倍!!\n\n Parameters\n ----------\n sample_num : int\n the number of the sample.\n\n Returns\n -------\n array_like\n accelerated value list\n ' rad = np.linspace(((- 0.5) * np.pi), (0.5 * np.pi), sample_num) rad = ((np.sin(rad) * 0.5) * np.pi) rad = ((np.sin(rad) * 0.5) * np.pi) rad = ((np.sin(rad) * 0.5) * np.pi) x = ((np.sin(rad) + 1) / 2) return x
4,805,954,959,950,164,000
単調増加ではなく、加速度が 0→1→0 となるような x を作る。 加速度が `get_accelerated_x_1x` の4倍!! Parameters ---------- sample_num : int the number of the sample. Returns ------- array_like accelerated value list
ty_lib/test_pattern_generator2.py
get_accelerated_x_8x
colour-science/sample_code
python
def get_accelerated_x_8x(sample_num=64): '\n 単調増加ではなく、加速度が 0→1→0 となるような x を作る。\n 加速度が `get_accelerated_x_1x` の4倍!!\n\n Parameters\n ----------\n sample_num : int\n the number of the sample.\n\n Returns\n -------\n array_like\n accelerated value list\n ' rad = np.linspace(((- 0.5) * np.pi), (0.5 * np.pi), sample_num) rad = ((np.sin(rad) * 0.5) * np.pi) rad = ((np.sin(rad) * 0.5) * np.pi) rad = ((np.sin(rad) * 0.5) * np.pi) x = ((np.sin(rad) + 1) / 2) return x
def generate_color_checker_rgb_value(color_space=BT709_COLOURSPACE, target_white=D65_WHITE): '\n Generate the 24 RGB values of the color checker.\n\n Parameters\n ----------\n color_space : color space\n color space object in `colour` module.\n\n target_white : array_like\n the xy values of the white point of target color space.\n\n Returns\n -------\n array_like\n 24 RGB values. This is linear. OETF is not applied.\n\n Examples\n --------\n >>> generate_color_checker_rgb_value(\n ... color_space=colour.models.BT709_COLOURSPACE,\n ... target_white=[0.3127, 0.3290])\n >>> [[ 0.17289286 0.08205728 0.05714562]\n >>> [ 0.5680292 0.29250401 0.21951748]\n >>> [ 0.10435534 0.19656108 0.32958666]\n >>> [ 0.1008804 0.14839018 0.05327639]\n >>> [ 0.22303549 0.2169701 0.43166537]\n >>> [ 0.10715338 0.513512 0.41415978]\n >>> [ 0.74639182 0.20020473 0.03081343]\n >>> [ 0.05947812 0.10659045 0.39897686]\n >>> [ 0.5673215 0.08485376 0.11945382]\n >>> [ 0.11177253 0.04285397 0.14166202]\n >>> [ 0.34250836 0.5062777 0.0557734 ]\n >>> [ 0.79262553 0.35803886 0.025485 ]\n >>> [ 0.01864598 0.05139665 0.28886469]\n >>> [ 0.054392 0.29876719 0.07187681]\n >>> [ 0.45628547 0.03075684 0.04092033]\n >>> [ 0.85379178 0.56503558 0.01475575]\n >>> [ 0.53533883 0.09006355 0.3047824 ]\n >>> [-0.03662977 0.24753781 0.39824679]\n >>> [ 0.91177068 0.91497623 0.89427332]\n >>> [ 0.57973934 0.59203191 0.59370647]\n >>> [ 0.35495537 0.36538027 0.36772001]\n >>> [ 0.19009594 0.19180133 0.19316719]\n >>> [ 0.08524707 0.08890587 0.09255774]\n >>> [ 0.03038879 0.03118623 0.03279615]]\n ' colour_checker_param = COLOURCHECKERS.get('ColorChecker 2005') (_name, data, whitepoint) = colour_checker_param temp_xyY = [] for key in data.keys(): temp_xyY.append(data[key]) temp_xyY = np.array(temp_xyY) large_xyz = xyY_to_XYZ(temp_xyY) rgb_white_point = D65_WHITE illuminant_XYZ = whitepoint illuminant_RGB = rgb_white_point chromatic_adaptation_transform = 'CAT02' large_xyz_to_rgb_matrix = 
color_space.XYZ_to_RGB_matrix rgb = XYZ_to_RGB(large_xyz, illuminant_XYZ, illuminant_RGB, large_xyz_to_rgb_matrix, chromatic_adaptation_transform) return rgb
6,887,916,821,628,261,000
Generate the 24 RGB values of the color checker. Parameters ---------- color_space : color space color space object in `colour` module. target_white : array_like the xy values of the white point of target color space. Returns ------- array_like 24 RGB values. This is linear. OETF is not applied. Examples -------- >>> generate_color_checker_rgb_value( ... color_space=colour.models.BT709_COLOURSPACE, ... target_white=[0.3127, 0.3290]) >>> [[ 0.17289286 0.08205728 0.05714562] >>> [ 0.5680292 0.29250401 0.21951748] >>> [ 0.10435534 0.19656108 0.32958666] >>> [ 0.1008804 0.14839018 0.05327639] >>> [ 0.22303549 0.2169701 0.43166537] >>> [ 0.10715338 0.513512 0.41415978] >>> [ 0.74639182 0.20020473 0.03081343] >>> [ 0.05947812 0.10659045 0.39897686] >>> [ 0.5673215 0.08485376 0.11945382] >>> [ 0.11177253 0.04285397 0.14166202] >>> [ 0.34250836 0.5062777 0.0557734 ] >>> [ 0.79262553 0.35803886 0.025485 ] >>> [ 0.01864598 0.05139665 0.28886469] >>> [ 0.054392 0.29876719 0.07187681] >>> [ 0.45628547 0.03075684 0.04092033] >>> [ 0.85379178 0.56503558 0.01475575] >>> [ 0.53533883 0.09006355 0.3047824 ] >>> [-0.03662977 0.24753781 0.39824679] >>> [ 0.91177068 0.91497623 0.89427332] >>> [ 0.57973934 0.59203191 0.59370647] >>> [ 0.35495537 0.36538027 0.36772001] >>> [ 0.19009594 0.19180133 0.19316719] >>> [ 0.08524707 0.08890587 0.09255774] >>> [ 0.03038879 0.03118623 0.03279615]]
ty_lib/test_pattern_generator2.py
generate_color_checker_rgb_value
colour-science/sample_code
python
def generate_color_checker_rgb_value(color_space=BT709_COLOURSPACE, target_white=D65_WHITE): '\n Generate the 24 RGB values of the color checker.\n\n Parameters\n ----------\n color_space : color space\n color space object in `colour` module.\n\n target_white : array_like\n the xy values of the white point of target color space.\n\n Returns\n -------\n array_like\n 24 RGB values. This is linear. OETF is not applied.\n\n Examples\n --------\n >>> generate_color_checker_rgb_value(\n ... color_space=colour.models.BT709_COLOURSPACE,\n ... target_white=[0.3127, 0.3290])\n >>> [[ 0.17289286 0.08205728 0.05714562]\n >>> [ 0.5680292 0.29250401 0.21951748]\n >>> [ 0.10435534 0.19656108 0.32958666]\n >>> [ 0.1008804 0.14839018 0.05327639]\n >>> [ 0.22303549 0.2169701 0.43166537]\n >>> [ 0.10715338 0.513512 0.41415978]\n >>> [ 0.74639182 0.20020473 0.03081343]\n >>> [ 0.05947812 0.10659045 0.39897686]\n >>> [ 0.5673215 0.08485376 0.11945382]\n >>> [ 0.11177253 0.04285397 0.14166202]\n >>> [ 0.34250836 0.5062777 0.0557734 ]\n >>> [ 0.79262553 0.35803886 0.025485 ]\n >>> [ 0.01864598 0.05139665 0.28886469]\n >>> [ 0.054392 0.29876719 0.07187681]\n >>> [ 0.45628547 0.03075684 0.04092033]\n >>> [ 0.85379178 0.56503558 0.01475575]\n >>> [ 0.53533883 0.09006355 0.3047824 ]\n >>> [-0.03662977 0.24753781 0.39824679]\n >>> [ 0.91177068 0.91497623 0.89427332]\n >>> [ 0.57973934 0.59203191 0.59370647]\n >>> [ 0.35495537 0.36538027 0.36772001]\n >>> [ 0.19009594 0.19180133 0.19316719]\n >>> [ 0.08524707 0.08890587 0.09255774]\n >>> [ 0.03038879 0.03118623 0.03279615]]\n ' colour_checker_param = COLOURCHECKERS.get('ColorChecker 2005') (_name, data, whitepoint) = colour_checker_param temp_xyY = [] for key in data.keys(): temp_xyY.append(data[key]) temp_xyY = np.array(temp_xyY) large_xyz = xyY_to_XYZ(temp_xyY) rgb_white_point = D65_WHITE illuminant_XYZ = whitepoint illuminant_RGB = rgb_white_point chromatic_adaptation_transform = 'CAT02' large_xyz_to_rgb_matrix = 
color_space.XYZ_to_RGB_matrix rgb = XYZ_to_RGB(large_xyz, illuminant_XYZ, illuminant_RGB, large_xyz_to_rgb_matrix, chromatic_adaptation_transform) return rgb
def make_color_checker_image(rgb, width=1920, padding_rate=0.01): '\n 6x4 の カラーチェッカーの画像を作る。\n Height は Width から自動計算される。padding_rate で少し値が変わる。\n ' h_patch_num = 6 v_patch_num = 4 each_padding = int(((width * padding_rate) + 0.5)) h_padding_total = (each_padding * (h_patch_num + 1)) h_patch_width_total = (width - h_padding_total) patch_height = (h_patch_width_total // h_patch_num) height = ((patch_height * v_patch_num) + (each_padding * (v_patch_num + 1))) patch_width_list = equal_devision(h_patch_width_total, h_patch_num) img = np.zeros((height, width, 3)) for v_idx in range(v_patch_num): h_pos_st = each_padding v_pos_st = (each_padding + (v_idx * (patch_height + each_padding))) for h_idx in range(h_patch_num): rgb_idx = ((v_idx * h_patch_num) + h_idx) pos = (h_pos_st, v_pos_st) patch_img = (np.ones((patch_height, patch_width_list[h_idx], 3)) * rgb[rgb_idx]) merge(img, patch_img, pos) h_pos_st += (patch_width_list[h_idx] + each_padding) return img
2,040,358,170,524,852,500
6x4 の カラーチェッカーの画像を作る。 Height は Width から自動計算される。padding_rate で少し値が変わる。
ty_lib/test_pattern_generator2.py
make_color_checker_image
colour-science/sample_code
python
def make_color_checker_image(rgb, width=1920, padding_rate=0.01): '\n 6x4 の カラーチェッカーの画像を作る。\n Height は Width から自動計算される。padding_rate で少し値が変わる。\n ' h_patch_num = 6 v_patch_num = 4 each_padding = int(((width * padding_rate) + 0.5)) h_padding_total = (each_padding * (h_patch_num + 1)) h_patch_width_total = (width - h_padding_total) patch_height = (h_patch_width_total // h_patch_num) height = ((patch_height * v_patch_num) + (each_padding * (v_patch_num + 1))) patch_width_list = equal_devision(h_patch_width_total, h_patch_num) img = np.zeros((height, width, 3)) for v_idx in range(v_patch_num): h_pos_st = each_padding v_pos_st = (each_padding + (v_idx * (patch_height + each_padding))) for h_idx in range(h_patch_num): rgb_idx = ((v_idx * h_patch_num) + h_idx) pos = (h_pos_st, v_pos_st) patch_img = (np.ones((patch_height, patch_width_list[h_idx], 3)) * rgb[rgb_idx]) merge(img, patch_img, pos) h_pos_st += (patch_width_list[h_idx] + each_padding) return img
def calc_st_pos_for_centering(bg_size, fg_size): '\n Calculate start postion for centering.\n\n Parameters\n ----------\n bg_size : touple(int)\n (width, height) of the background image.\n\n fg_size : touple(int)\n (width, height) of the foreground image.\n\n Returns\n -------\n touple (int)\n (st_pos_h, st_pos_v)\n\n Examples\n --------\n >>> calc_st_pos_for_centering(bg_size=(1920, 1080), fg_size=(640, 480))\n >>> (640, 300)\n ' bg_width = bg_size[0] bg_height = bg_size[1] fg_width = fg_size[0] fg_height = fg_size[1] st_pos_h = ((bg_width // 2) - (fg_width // 2)) st_pos_v = ((bg_height // 2) - (fg_height // 2)) return (st_pos_h, st_pos_v)
-4,828,180,079,277,697,000
Calculate start postion for centering. Parameters ---------- bg_size : touple(int) (width, height) of the background image. fg_size : touple(int) (width, height) of the foreground image. Returns ------- touple (int) (st_pos_h, st_pos_v) Examples -------- >>> calc_st_pos_for_centering(bg_size=(1920, 1080), fg_size=(640, 480)) >>> (640, 300)
ty_lib/test_pattern_generator2.py
calc_st_pos_for_centering
colour-science/sample_code
python
def calc_st_pos_for_centering(bg_size, fg_size): '\n Calculate start postion for centering.\n\n Parameters\n ----------\n bg_size : touple(int)\n (width, height) of the background image.\n\n fg_size : touple(int)\n (width, height) of the foreground image.\n\n Returns\n -------\n touple (int)\n (st_pos_h, st_pos_v)\n\n Examples\n --------\n >>> calc_st_pos_for_centering(bg_size=(1920, 1080), fg_size=(640, 480))\n >>> (640, 300)\n ' bg_width = bg_size[0] bg_height = bg_size[1] fg_width = fg_size[0] fg_height = fg_size[1] st_pos_h = ((bg_width // 2) - (fg_width // 2)) st_pos_v = ((bg_height // 2) - (fg_height // 2)) return (st_pos_h, st_pos_v)
def get_size_from_image(img): '\n `calc_st_pos_for_centering()` の引数計算が面倒だったので関数化。\n ' return (img.shape[1], img.shape[0])
2,285,655,585,279,918,300
`calc_st_pos_for_centering()` の引数計算が面倒だったので関数化。
ty_lib/test_pattern_generator2.py
get_size_from_image
colour-science/sample_code
python
def get_size_from_image(img): '\n \n ' return (img.shape[1], img.shape[0])
def _Args(parser, deprecate_maintenance_policy=False, container_mount_enabled=False): 'Add flags shared by all release tracks.' parser.display_info.AddFormat(instances_flags.DEFAULT_LIST_FORMAT) metadata_utils.AddMetadataArgs(parser) instances_flags.AddDiskArgs(parser, True, container_mount_enabled=container_mount_enabled) instances_flags.AddCreateDiskArgs(parser, container_mount_enabled=container_mount_enabled) instances_flags.AddCanIpForwardArgs(parser) instances_flags.AddAddressArgs(parser, instances=True) instances_flags.AddMachineTypeArgs(parser) instances_flags.AddMaintenancePolicyArgs(parser, deprecate=deprecate_maintenance_policy) instances_flags.AddNoRestartOnFailureArgs(parser) instances_flags.AddPreemptibleVmArgs(parser) instances_flags.AddServiceAccountAndScopeArgs(parser, False) instances_flags.AddTagsArgs(parser) instances_flags.AddCustomMachineTypeArgs(parser) instances_flags.AddNetworkArgs(parser) instances_flags.AddPrivateNetworkIpArgs(parser) instances_flags.AddKonletArgs(parser) instances_flags.AddPublicDnsArgs(parser, instance=True) instances_flags.AddPublicPtrArgs(parser, instance=True) instances_flags.AddImageArgs(parser) labels_util.AddCreateLabelsFlags(parser) parser.add_argument('--description', help='Specifies a textual description of the instances.') instances_flags.INSTANCES_ARG.AddArgument(parser, operation_type='create') CreateWithContainer.SOURCE_INSTANCE_TEMPLATE = instances_flags.MakeSourceInstanceTemplateArg() CreateWithContainer.SOURCE_INSTANCE_TEMPLATE.AddArgument(parser) parser.display_info.AddCacheUpdater(completers.InstancesCompleter)
-7,180,734,195,565,543,000
Add flags shared by all release tracks.
lib/surface/compute/instances/create_with_container.py
_Args
bshaffer/google-cloud-sdk
python
def _Args(parser, deprecate_maintenance_policy=False, container_mount_enabled=False): parser.display_info.AddFormat(instances_flags.DEFAULT_LIST_FORMAT) metadata_utils.AddMetadataArgs(parser) instances_flags.AddDiskArgs(parser, True, container_mount_enabled=container_mount_enabled) instances_flags.AddCreateDiskArgs(parser, container_mount_enabled=container_mount_enabled) instances_flags.AddCanIpForwardArgs(parser) instances_flags.AddAddressArgs(parser, instances=True) instances_flags.AddMachineTypeArgs(parser) instances_flags.AddMaintenancePolicyArgs(parser, deprecate=deprecate_maintenance_policy) instances_flags.AddNoRestartOnFailureArgs(parser) instances_flags.AddPreemptibleVmArgs(parser) instances_flags.AddServiceAccountAndScopeArgs(parser, False) instances_flags.AddTagsArgs(parser) instances_flags.AddCustomMachineTypeArgs(parser) instances_flags.AddNetworkArgs(parser) instances_flags.AddPrivateNetworkIpArgs(parser) instances_flags.AddKonletArgs(parser) instances_flags.AddPublicDnsArgs(parser, instance=True) instances_flags.AddPublicPtrArgs(parser, instance=True) instances_flags.AddImageArgs(parser) labels_util.AddCreateLabelsFlags(parser) parser.add_argument('--description', help='Specifies a textual description of the instances.') instances_flags.INSTANCES_ARG.AddArgument(parser, operation_type='create') CreateWithContainer.SOURCE_INSTANCE_TEMPLATE = instances_flags.MakeSourceInstanceTemplateArg() CreateWithContainer.SOURCE_INSTANCE_TEMPLATE.AddArgument(parser) parser.display_info.AddCacheUpdater(completers.InstancesCompleter)
@staticmethod def Args(parser): 'Register parser args.' _Args(parser) instances_flags.AddNetworkTierArgs(parser, instance=True) instances_flags.AddMinCpuPlatformArgs(parser, base.ReleaseTrack.GA)
469,272,078,714,869,000
Register parser args.
lib/surface/compute/instances/create_with_container.py
Args
bshaffer/google-cloud-sdk
python
@staticmethod def Args(parser): _Args(parser) instances_flags.AddNetworkTierArgs(parser, instance=True) instances_flags.AddMinCpuPlatformArgs(parser, base.ReleaseTrack.GA)
@staticmethod def Args(parser): 'Register parser args.' _Args(parser, container_mount_enabled=True) instances_flags.AddNetworkTierArgs(parser, instance=True) instances_flags.AddContainerMountDiskFlag(parser) instances_flags.AddLocalSsdArgsWithSize(parser) instances_flags.AddMinCpuPlatformArgs(parser, base.ReleaseTrack.BETA)
-3,262,381,641,413,971,000
Register parser args.
lib/surface/compute/instances/create_with_container.py
Args
bshaffer/google-cloud-sdk
python
@staticmethod def Args(parser): _Args(parser, container_mount_enabled=True) instances_flags.AddNetworkTierArgs(parser, instance=True) instances_flags.AddContainerMountDiskFlag(parser) instances_flags.AddLocalSsdArgsWithSize(parser) instances_flags.AddMinCpuPlatformArgs(parser, base.ReleaseTrack.BETA)
def deprecated_arg_names(arg_mapping: Mapping[(str, str)]): '\n Decorator which marks a functions keyword arguments as deprecated. It will\n result in a warning being emitted when the deprecated keyword argument is\n used, and the function being called with the new argument.\n\n Parameters\n ----------\n arg_mapping\n Mapping from deprecated argument name to current argument name.\n ' def decorator(func): @wraps(func) def func_wrapper(*args, **kwargs): warnings.simplefilter('always', DeprecationWarning) for (old, new) in arg_mapping.items(): if (old in kwargs): warnings.warn(f"Keyword argument '{old}' has been deprecated in favour of '{new}'. '{old}' will be removed in a future version.", category=DeprecationWarning, stacklevel=2) val = kwargs.pop(old) kwargs[new] = val warnings.simplefilter('default', DeprecationWarning) return func(*args, **kwargs) return func_wrapper return decorator
7,597,204,064,448,840,000
Decorator which marks a functions keyword arguments as deprecated. It will result in a warning being emitted when the deprecated keyword argument is used, and the function being called with the new argument. Parameters ---------- arg_mapping Mapping from deprecated argument name to current argument name.
scanpy/_utils.py
deprecated_arg_names
VolkerBergen/scanpy
python
def deprecated_arg_names(arg_mapping: Mapping[(str, str)]): '\n Decorator which marks a functions keyword arguments as deprecated. It will\n result in a warning being emitted when the deprecated keyword argument is\n used, and the function being called with the new argument.\n\n Parameters\n ----------\n arg_mapping\n Mapping from deprecated argument name to current argument name.\n ' def decorator(func): @wraps(func) def func_wrapper(*args, **kwargs): warnings.simplefilter('always', DeprecationWarning) for (old, new) in arg_mapping.items(): if (old in kwargs): warnings.warn(f"Keyword argument '{old}' has been deprecated in favour of '{new}'. '{old}' will be removed in a future version.", category=DeprecationWarning, stacklevel=2) val = kwargs.pop(old) kwargs[new] = val warnings.simplefilter('default', DeprecationWarning) return func(*args, **kwargs) return func_wrapper return decorator
def _doc_params(**kwds): ' Docstrings should start with "" in the first line for proper formatting.\n ' def dec(obj): obj.__orig_doc__ = obj.__doc__ obj.__doc__ = dedent(obj.__doc__).format_map(kwds) return obj return dec
4,215,397,845,106,700,000
Docstrings should start with "" in the first line for proper formatting.
scanpy/_utils.py
_doc_params
VolkerBergen/scanpy
python
def _doc_params(**kwds): ' \n ' def dec(obj): obj.__orig_doc__ = obj.__doc__ obj.__doc__ = dedent(obj.__doc__).format_map(kwds) return obj return dec
def _check_array_function_arguments(**kwargs): 'Checks for invalid arguments when an array is passed.\n\n Helper for functions that work on either AnnData objects or array-likes.\n ' invalid_args = [k for (k, v) in kwargs.items() if (v is not None)] if (len(invalid_args) > 0): raise TypeError(f'Arguments {invalid_args} are only valid if an AnnData object is passed.')
5,586,756,840,675,026,000
Checks for invalid arguments when an array is passed. Helper for functions that work on either AnnData objects or array-likes.
scanpy/_utils.py
_check_array_function_arguments
VolkerBergen/scanpy
python
def _check_array_function_arguments(**kwargs): 'Checks for invalid arguments when an array is passed.\n\n Helper for functions that work on either AnnData objects or array-likes.\n ' invalid_args = [k for (k, v) in kwargs.items() if (v is not None)] if (len(invalid_args) > 0): raise TypeError(f'Arguments {invalid_args} are only valid if an AnnData object is passed.')
def _check_use_raw(adata: AnnData, use_raw: Union[(None, bool)]) -> bool: '\n Normalize checking `use_raw`.\n\n My intentention here is to also provide a single place to throw a deprecation warning from in future.\n ' if (use_raw is not None): return use_raw elif (adata.raw is not None): return True else: return False
-2,252,908,537,909,248,500
Normalize checking `use_raw`. My intentention here is to also provide a single place to throw a deprecation warning from in future.
scanpy/_utils.py
_check_use_raw
VolkerBergen/scanpy
python
def _check_use_raw(adata: AnnData, use_raw: Union[(None, bool)]) -> bool: '\n Normalize checking `use_raw`.\n\n My intentention here is to also provide a single place to throw a deprecation warning from in future.\n ' if (use_raw is not None): return use_raw elif (adata.raw is not None): return True else: return False
def get_igraph_from_adjacency(adjacency, directed=None): 'Get igraph graph from adjacency matrix.' import igraph as ig (sources, targets) = adjacency.nonzero() weights = adjacency[(sources, targets)] if isinstance(weights, np.matrix): weights = weights.A1 g = ig.Graph(directed=directed) g.add_vertices(adjacency.shape[0]) g.add_edges(list(zip(sources, targets))) try: g.es['weight'] = weights except: pass if (g.vcount() != adjacency.shape[0]): logg.warning(f'The constructed graph has only {g.vcount()} nodes. Your adjacency matrix contained redundant nodes.') return g
-752,489,011,673,892,500
Get igraph graph from adjacency matrix.
scanpy/_utils.py
get_igraph_from_adjacency
VolkerBergen/scanpy
python
def get_igraph_from_adjacency(adjacency, directed=None): import igraph as ig (sources, targets) = adjacency.nonzero() weights = adjacency[(sources, targets)] if isinstance(weights, np.matrix): weights = weights.A1 g = ig.Graph(directed=directed) g.add_vertices(adjacency.shape[0]) g.add_edges(list(zip(sources, targets))) try: g.es['weight'] = weights except: pass if (g.vcount() != adjacency.shape[0]): logg.warning(f'The constructed graph has only {g.vcount()} nodes. Your adjacency matrix contained redundant nodes.') return g
def compute_association_matrix_of_groups(adata: AnnData, prediction: str, reference: str, normalization: Literal[('prediction', 'reference')]='prediction', threshold: float=0.01, max_n_names: Optional[int]=2): 'Compute overlaps between groups.\n\n See ``identify_groups`` for identifying the groups.\n\n Parameters\n ----------\n adata\n prediction\n Field name of adata.obs.\n reference\n Field name of adata.obs.\n normalization\n Whether to normalize with respect to the predicted groups or the\n reference groups.\n threshold\n Do not consider associations whose overlap is below this fraction.\n max_n_names\n Control how many reference names you want to be associated with per\n predicted name. Set to `None`, if you want all.\n\n Returns\n -------\n asso_names\n List of associated reference names\n (`max_n_names` for each predicted name).\n asso_matrix\n Matrix where rows correspond to the predicted labels and columns to the\n reference labels, entries are proportional to degree of association.\n ' if (normalization not in {'prediction', 'reference'}): raise ValueError('`normalization` needs to be either "prediction" or "reference".') sanitize_anndata(adata) cats = adata.obs[reference].cat.categories for cat in cats: if (cat in settings.categories_to_ignore): logg.info(f'Ignoring category {cat!r} as it’s in `settings.categories_to_ignore`.') asso_names = [] asso_matrix = [] for (ipred_group, pred_group) in enumerate(adata.obs[prediction].cat.categories): if ('?' 
in pred_group): pred_group = str(ipred_group) mask_pred = (adata.obs[prediction].values == pred_group) mask_pred_int = mask_pred.astype(np.int8) asso_matrix += [[]] for ref_group in adata.obs[reference].cat.categories: mask_ref = (adata.obs[reference].values == ref_group).astype(np.int8) mask_ref_or_pred = mask_ref.copy() mask_ref_or_pred[mask_pred] = 1 if (normalization == 'prediction'): ratio_contained = ((np.sum(mask_pred_int) - np.sum((mask_ref_or_pred - mask_ref))) / np.sum(mask_pred_int)) else: ratio_contained = ((np.sum(mask_ref) - np.sum((mask_ref_or_pred - mask_pred_int))) / np.sum(mask_ref)) asso_matrix[(- 1)] += [ratio_contained] name_list_pred = [(cats[i] if (cats[i] not in settings.categories_to_ignore) else '') for i in np.argsort(asso_matrix[(- 1)])[::(- 1)] if (asso_matrix[(- 1)][i] > threshold)] asso_names += ['\n'.join(name_list_pred[:max_n_names])] Result = namedtuple('compute_association_matrix_of_groups', ['asso_names', 'asso_matrix']) return Result(asso_names=asso_names, asso_matrix=np.array(asso_matrix))
-2,052,598,117,330,322,400
Compute overlaps between groups. See ``identify_groups`` for identifying the groups. Parameters ---------- adata prediction Field name of adata.obs. reference Field name of adata.obs. normalization Whether to normalize with respect to the predicted groups or the reference groups. threshold Do not consider associations whose overlap is below this fraction. max_n_names Control how many reference names you want to be associated with per predicted name. Set to `None`, if you want all. Returns ------- asso_names List of associated reference names (`max_n_names` for each predicted name). asso_matrix Matrix where rows correspond to the predicted labels and columns to the reference labels, entries are proportional to degree of association.
scanpy/_utils.py
compute_association_matrix_of_groups
VolkerBergen/scanpy
python
def compute_association_matrix_of_groups(adata: AnnData, prediction: str, reference: str, normalization: Literal[('prediction', 'reference')]='prediction', threshold: float=0.01, max_n_names: Optional[int]=2): 'Compute overlaps between groups.\n\n See ``identify_groups`` for identifying the groups.\n\n Parameters\n ----------\n adata\n prediction\n Field name of adata.obs.\n reference\n Field name of adata.obs.\n normalization\n Whether to normalize with respect to the predicted groups or the\n reference groups.\n threshold\n Do not consider associations whose overlap is below this fraction.\n max_n_names\n Control how many reference names you want to be associated with per\n predicted name. Set to `None`, if you want all.\n\n Returns\n -------\n asso_names\n List of associated reference names\n (`max_n_names` for each predicted name).\n asso_matrix\n Matrix where rows correspond to the predicted labels and columns to the\n reference labels, entries are proportional to degree of association.\n ' if (normalization not in {'prediction', 'reference'}): raise ValueError('`normalization` needs to be either "prediction" or "reference".') sanitize_anndata(adata) cats = adata.obs[reference].cat.categories for cat in cats: if (cat in settings.categories_to_ignore): logg.info(f'Ignoring category {cat!r} as it’s in `settings.categories_to_ignore`.') asso_names = [] asso_matrix = [] for (ipred_group, pred_group) in enumerate(adata.obs[prediction].cat.categories): if ('?' 
in pred_group): pred_group = str(ipred_group) mask_pred = (adata.obs[prediction].values == pred_group) mask_pred_int = mask_pred.astype(np.int8) asso_matrix += [[]] for ref_group in adata.obs[reference].cat.categories: mask_ref = (adata.obs[reference].values == ref_group).astype(np.int8) mask_ref_or_pred = mask_ref.copy() mask_ref_or_pred[mask_pred] = 1 if (normalization == 'prediction'): ratio_contained = ((np.sum(mask_pred_int) - np.sum((mask_ref_or_pred - mask_ref))) / np.sum(mask_pred_int)) else: ratio_contained = ((np.sum(mask_ref) - np.sum((mask_ref_or_pred - mask_pred_int))) / np.sum(mask_ref)) asso_matrix[(- 1)] += [ratio_contained] name_list_pred = [(cats[i] if (cats[i] not in settings.categories_to_ignore) else ) for i in np.argsort(asso_matrix[(- 1)])[::(- 1)] if (asso_matrix[(- 1)][i] > threshold)] asso_names += ['\n'.join(name_list_pred[:max_n_names])] Result = namedtuple('compute_association_matrix_of_groups', ['asso_names', 'asso_matrix']) return Result(asso_names=asso_names, asso_matrix=np.array(asso_matrix))
def identify_groups(ref_labels, pred_labels, return_overlaps=False): 'Which predicted label explains which reference label?\n\n A predicted label explains the reference label which maximizes the minimum\n of ``relative_overlaps_pred`` and ``relative_overlaps_ref``.\n\n Compare this with ``compute_association_matrix_of_groups``.\n\n Returns\n -------\n A dictionary of length ``len(np.unique(ref_labels))`` that stores for each\n reference label the predicted label that best explains it.\n\n If ``return_overlaps`` is ``True``, this will in addition return the overlap\n of the reference group with the predicted group; normalized with respect to\n the reference group size and the predicted group size, respectively.\n ' (ref_unique, ref_counts) = np.unique(ref_labels, return_counts=True) ref_dict = dict(zip(ref_unique, ref_counts)) (pred_unique, pred_counts) = np.unique(pred_labels, return_counts=True) pred_dict = dict(zip(pred_unique, pred_counts)) associated_predictions = {} associated_overlaps = {} for ref_label in ref_unique: (sub_pred_unique, sub_pred_counts) = np.unique(pred_labels[(ref_label == ref_labels)], return_counts=True) relative_overlaps_pred = [(sub_pred_counts[i] / pred_dict[n]) for (i, n) in enumerate(sub_pred_unique)] relative_overlaps_ref = [(sub_pred_counts[i] / ref_dict[ref_label]) for (i, n) in enumerate(sub_pred_unique)] relative_overlaps = np.c_[(relative_overlaps_pred, relative_overlaps_ref)] relative_overlaps_min = np.min(relative_overlaps, axis=1) pred_best_index = np.argsort(relative_overlaps_min)[::(- 1)] associated_predictions[ref_label] = sub_pred_unique[pred_best_index] associated_overlaps[ref_label] = relative_overlaps[pred_best_index] if return_overlaps: return (associated_predictions, associated_overlaps) else: return associated_predictions
8,182,894,112,265,649,000
Which predicted label explains which reference label? A predicted label explains the reference label which maximizes the minimum of ``relative_overlaps_pred`` and ``relative_overlaps_ref``. Compare this with ``compute_association_matrix_of_groups``. Returns ------- A dictionary of length ``len(np.unique(ref_labels))`` that stores for each reference label the predicted label that best explains it. If ``return_overlaps`` is ``True``, this will in addition return the overlap of the reference group with the predicted group; normalized with respect to the reference group size and the predicted group size, respectively.
scanpy/_utils.py
identify_groups
VolkerBergen/scanpy
python
def identify_groups(ref_labels, pred_labels, return_overlaps=False): 'Which predicted label explains which reference label?\n\n A predicted label explains the reference label which maximizes the minimum\n of ``relative_overlaps_pred`` and ``relative_overlaps_ref``.\n\n Compare this with ``compute_association_matrix_of_groups``.\n\n Returns\n -------\n A dictionary of length ``len(np.unique(ref_labels))`` that stores for each\n reference label the predicted label that best explains it.\n\n If ``return_overlaps`` is ``True``, this will in addition return the overlap\n of the reference group with the predicted group; normalized with respect to\n the reference group size and the predicted group size, respectively.\n ' (ref_unique, ref_counts) = np.unique(ref_labels, return_counts=True) ref_dict = dict(zip(ref_unique, ref_counts)) (pred_unique, pred_counts) = np.unique(pred_labels, return_counts=True) pred_dict = dict(zip(pred_unique, pred_counts)) associated_predictions = {} associated_overlaps = {} for ref_label in ref_unique: (sub_pred_unique, sub_pred_counts) = np.unique(pred_labels[(ref_label == ref_labels)], return_counts=True) relative_overlaps_pred = [(sub_pred_counts[i] / pred_dict[n]) for (i, n) in enumerate(sub_pred_unique)] relative_overlaps_ref = [(sub_pred_counts[i] / ref_dict[ref_label]) for (i, n) in enumerate(sub_pred_unique)] relative_overlaps = np.c_[(relative_overlaps_pred, relative_overlaps_ref)] relative_overlaps_min = np.min(relative_overlaps, axis=1) pred_best_index = np.argsort(relative_overlaps_min)[::(- 1)] associated_predictions[ref_label] = sub_pred_unique[pred_best_index] associated_overlaps[ref_label] = relative_overlaps[pred_best_index] if return_overlaps: return (associated_predictions, associated_overlaps) else: return associated_predictions
def sanitize_anndata(adata): 'Transform string annotations to categoricals.' adata._sanitize()
4,622,148,062,683,588,000
Transform string annotations to categoricals.
scanpy/_utils.py
sanitize_anndata
VolkerBergen/scanpy
python
def sanitize_anndata(adata): adata._sanitize()
def moving_average(a: np.ndarray, n: int): 'Moving average over one-dimensional array.\n\n Parameters\n ----------\n a\n One-dimensional array.\n n\n Number of entries to average over. n=2 means averaging over the currrent\n the previous entry.\n\n Returns\n -------\n An array view storing the moving average.\n ' ret = np.cumsum(a, dtype=float) ret[n:] = (ret[n:] - ret[:(- n)]) return (ret[(n - 1):] / n)
-7,559,639,221,853,936,000
Moving average over one-dimensional array. Parameters ---------- a One-dimensional array. n Number of entries to average over. n=2 means averaging over the currrent the previous entry. Returns ------- An array view storing the moving average.
scanpy/_utils.py
moving_average
VolkerBergen/scanpy
python
def moving_average(a: np.ndarray, n: int): 'Moving average over one-dimensional array.\n\n Parameters\n ----------\n a\n One-dimensional array.\n n\n Number of entries to average over. n=2 means averaging over the currrent\n the previous entry.\n\n Returns\n -------\n An array view storing the moving average.\n ' ret = np.cumsum(a, dtype=float) ret[n:] = (ret[n:] - ret[:(- n)]) return (ret[(n - 1):] / n)
def update_params(old_params: Mapping[(str, Any)], new_params: Mapping[(str, Any)], check=False) -> Dict[(str, Any)]: ' Update old_params with new_params.\n\n If check==False, this merely adds and overwrites the content of old_params.\n\n If check==True, this only allows updating of parameters that are already\n present in old_params.\n\n Parameters\n ----------\n old_params\n new_params\n check\n\n Returns\n -------\n updated_params\n ' updated_params = dict(old_params) if new_params: for (key, val) in new_params.items(): if ((key not in old_params) and check): raise ValueError((((("'" + key) + "' is not a valid parameter key, ") + 'consider one of \n') + str(list(old_params.keys())))) if (val is not None): updated_params[key] = val return updated_params
-392,503,575,934,761,200
Update old_params with new_params. If check==False, this merely adds and overwrites the content of old_params. If check==True, this only allows updating of parameters that are already present in old_params. Parameters ---------- old_params new_params check Returns ------- updated_params
scanpy/_utils.py
update_params
VolkerBergen/scanpy
python
def update_params(old_params: Mapping[(str, Any)], new_params: Mapping[(str, Any)], check=False) -> Dict[(str, Any)]: ' Update old_params with new_params.\n\n If check==False, this merely adds and overwrites the content of old_params.\n\n If check==True, this only allows updating of parameters that are already\n present in old_params.\n\n Parameters\n ----------\n old_params\n new_params\n check\n\n Returns\n -------\n updated_params\n ' updated_params = dict(old_params) if new_params: for (key, val) in new_params.items(): if ((key not in old_params) and check): raise ValueError((((("'" + key) + "' is not a valid parameter key, ") + 'consider one of \n') + str(list(old_params.keys())))) if (val is not None): updated_params[key] = val return updated_params
def check_nonnegative_integers(X: Union[(np.ndarray, sparse.spmatrix)]): 'Checks values of X to ensure it is count data' from numbers import Integral data = (X if isinstance(X, np.ndarray) else X.data) if np.signbit(data).any(): return False elif issubclass(data.dtype.type, Integral): return True elif np.any((~ np.equal(np.mod(data, 1), 0))): return False else: return True
2,527,016,762,427,564,000
Checks values of X to ensure it is count data
scanpy/_utils.py
check_nonnegative_integers
VolkerBergen/scanpy
python
def check_nonnegative_integers(X: Union[(np.ndarray, sparse.spmatrix)]): from numbers import Integral data = (X if isinstance(X, np.ndarray) else X.data) if np.signbit(data).any(): return False elif issubclass(data.dtype.type, Integral): return True elif np.any((~ np.equal(np.mod(data, 1), 0))): return False else: return True
def select_groups(adata, groups_order_subset='all', key='groups'): 'Get subset of groups in adata.obs[key].' groups_order = adata.obs[key].cat.categories if ((key + '_masks') in adata.uns): groups_masks = adata.uns[(key + '_masks')] else: groups_masks = np.zeros((len(adata.obs[key].cat.categories), adata.obs[key].values.size), dtype=bool) for (iname, name) in enumerate(adata.obs[key].cat.categories): if (adata.obs[key].cat.categories[iname] in adata.obs[key].values): mask = (adata.obs[key].cat.categories[iname] == adata.obs[key].values) else: mask = (str(iname) == adata.obs[key].values) groups_masks[iname] = mask groups_ids = list(range(len(groups_order))) if (groups_order_subset != 'all'): groups_ids = [] for name in groups_order_subset: groups_ids.append(np.where((adata.obs[key].cat.categories.values == name))[0][0]) if (len(groups_ids) == 0): groups_ids = np.where(np.in1d(np.arange(len(adata.obs[key].cat.categories)).astype(str), np.array(groups_order_subset)))[0] if (len(groups_ids) == 0): logg.debug(f'{np.array(groups_order_subset)} invalid! specify valid groups_order (or indices) from {adata.obs[key].cat.categories}') from sys import exit exit(0) groups_masks = groups_masks[groups_ids] groups_order_subset = adata.obs[key].cat.categories[groups_ids].values else: groups_order_subset = groups_order.values return (groups_order_subset, groups_masks)
-3,030,545,903,539,323,000
Get subset of groups in adata.obs[key].
scanpy/_utils.py
select_groups
VolkerBergen/scanpy
python
def select_groups(adata, groups_order_subset='all', key='groups'): groups_order = adata.obs[key].cat.categories if ((key + '_masks') in adata.uns): groups_masks = adata.uns[(key + '_masks')] else: groups_masks = np.zeros((len(adata.obs[key].cat.categories), adata.obs[key].values.size), dtype=bool) for (iname, name) in enumerate(adata.obs[key].cat.categories): if (adata.obs[key].cat.categories[iname] in adata.obs[key].values): mask = (adata.obs[key].cat.categories[iname] == adata.obs[key].values) else: mask = (str(iname) == adata.obs[key].values) groups_masks[iname] = mask groups_ids = list(range(len(groups_order))) if (groups_order_subset != 'all'): groups_ids = [] for name in groups_order_subset: groups_ids.append(np.where((adata.obs[key].cat.categories.values == name))[0][0]) if (len(groups_ids) == 0): groups_ids = np.where(np.in1d(np.arange(len(adata.obs[key].cat.categories)).astype(str), np.array(groups_order_subset)))[0] if (len(groups_ids) == 0): logg.debug(f'{np.array(groups_order_subset)} invalid! specify valid groups_order (or indices) from {adata.obs[key].cat.categories}') from sys import exit exit(0) groups_masks = groups_masks[groups_ids] groups_order_subset = adata.obs[key].cat.categories[groups_ids].values else: groups_order_subset = groups_order.values return (groups_order_subset, groups_masks)
def warn_with_traceback(message, category, filename, lineno, file=None, line=None): 'Get full tracebacks when warning is raised by setting\n\n warnings.showwarning = warn_with_traceback\n\n See also\n --------\n http://stackoverflow.com/questions/22373927/get-traceback-of-warnings\n ' import traceback traceback.print_stack() log = (file if hasattr(file, 'write') else sys.stderr) settings.write(warnings.formatwarning(message, category, filename, lineno, line))
3,395,872,396,392,298,500
Get full tracebacks when warning is raised by setting warnings.showwarning = warn_with_traceback See also -------- http://stackoverflow.com/questions/22373927/get-traceback-of-warnings
scanpy/_utils.py
warn_with_traceback
VolkerBergen/scanpy
python
def warn_with_traceback(message, category, filename, lineno, file=None, line=None): 'Get full tracebacks when warning is raised by setting\n\n warnings.showwarning = warn_with_traceback\n\n See also\n --------\n http://stackoverflow.com/questions/22373927/get-traceback-of-warnings\n ' import traceback traceback.print_stack() log = (file if hasattr(file, 'write') else sys.stderr) settings.write(warnings.formatwarning(message, category, filename, lineno, line))
def subsample(X: np.ndarray, subsample: int=1, seed: int=0) -> Tuple[(np.ndarray, np.ndarray)]: ' Subsample a fraction of 1/subsample samples from the rows of X.\n\n Parameters\n ----------\n X\n Data array.\n subsample\n 1/subsample is the fraction of data sampled, n = X.shape[0]/subsample.\n seed\n Seed for sampling.\n\n Returns\n -------\n Xsampled\n Subsampled X.\n rows\n Indices of rows that are stored in Xsampled.\n ' if ((subsample == 1) and (seed == 0)): return (X, np.arange(X.shape[0], dtype=int)) if (seed == 0): rows = np.arange(0, X.shape[0], subsample, dtype=int) n = rows.size Xsampled = np.array(X[rows]) else: if (seed < 0): raise ValueError(f'Invalid seed value < 0: {seed}') n = int((X.shape[0] / subsample)) np.random.seed(seed) (Xsampled, rows) = subsample_n(X, n=n) logg.debug(f'... subsampled to {n} of {X.shape[0]} data points') return (Xsampled, rows)
7,856,776,734,611,695,000
Subsample a fraction of 1/subsample samples from the rows of X. Parameters ---------- X Data array. subsample 1/subsample is the fraction of data sampled, n = X.shape[0]/subsample. seed Seed for sampling. Returns ------- Xsampled Subsampled X. rows Indices of rows that are stored in Xsampled.
scanpy/_utils.py
subsample
VolkerBergen/scanpy
python
def subsample(X: np.ndarray, subsample: int=1, seed: int=0) -> Tuple[(np.ndarray, np.ndarray)]: ' Subsample a fraction of 1/subsample samples from the rows of X.\n\n Parameters\n ----------\n X\n Data array.\n subsample\n 1/subsample is the fraction of data sampled, n = X.shape[0]/subsample.\n seed\n Seed for sampling.\n\n Returns\n -------\n Xsampled\n Subsampled X.\n rows\n Indices of rows that are stored in Xsampled.\n ' if ((subsample == 1) and (seed == 0)): return (X, np.arange(X.shape[0], dtype=int)) if (seed == 0): rows = np.arange(0, X.shape[0], subsample, dtype=int) n = rows.size Xsampled = np.array(X[rows]) else: if (seed < 0): raise ValueError(f'Invalid seed value < 0: {seed}') n = int((X.shape[0] / subsample)) np.random.seed(seed) (Xsampled, rows) = subsample_n(X, n=n) logg.debug(f'... subsampled to {n} of {X.shape[0]} data points') return (Xsampled, rows)
def subsample_n(X: np.ndarray, n: int=0, seed: int=0) -> Tuple[(np.ndarray, np.ndarray)]: 'Subsample n samples from rows of array.\n\n Parameters\n ----------\n X\n Data array.\n n\n Sample size.\n seed\n Seed for sampling.\n\n Returns\n -------\n Xsampled\n Subsampled X.\n rows\n Indices of rows that are stored in Xsampled.\n ' if (n < 0): raise ValueError('n must be greater 0') np.random.seed(seed) n = (X.shape[0] if ((n == 0) or (n > X.shape[0])) else n) rows = np.random.choice(X.shape[0], size=n, replace=False) Xsampled = X[rows] return (Xsampled, rows)
-170,607,391,929,341,630
Subsample n samples from rows of array. Parameters ---------- X Data array. n Sample size. seed Seed for sampling. Returns ------- Xsampled Subsampled X. rows Indices of rows that are stored in Xsampled.
scanpy/_utils.py
subsample_n
VolkerBergen/scanpy
python
def subsample_n(X: np.ndarray, n: int=0, seed: int=0) -> Tuple[(np.ndarray, np.ndarray)]: 'Subsample n samples from rows of array.\n\n Parameters\n ----------\n X\n Data array.\n n\n Sample size.\n seed\n Seed for sampling.\n\n Returns\n -------\n Xsampled\n Subsampled X.\n rows\n Indices of rows that are stored in Xsampled.\n ' if (n < 0): raise ValueError('n must be greater 0') np.random.seed(seed) n = (X.shape[0] if ((n == 0) or (n > X.shape[0])) else n) rows = np.random.choice(X.shape[0], size=n, replace=False) Xsampled = X[rows] return (Xsampled, rows)
def check_presence_download(filename: Path, backup_url): 'Check if file is present otherwise download.' if (not filename.is_file()): from .readwrite import _download _download(backup_url, filename)
5,616,465,864,957,180,000
Check if file is present otherwise download.
scanpy/_utils.py
check_presence_download
VolkerBergen/scanpy
python
def check_presence_download(filename: Path, backup_url): if (not filename.is_file()): from .readwrite import _download _download(backup_url, filename)
def lazy_import(full_name): 'Imports a module in a way that it’s only executed on member access' try: return sys.modules[full_name] except KeyError: spec = importlib.util.find_spec(full_name) module = importlib.util.module_from_spec(spec) loader = importlib.util.LazyLoader(spec.loader) loader.exec_module(module) return module
5,384,361,978,549,375,000
Imports a module in a way that it’s only executed on member access
scanpy/_utils.py
lazy_import
VolkerBergen/scanpy
python
def lazy_import(full_name): try: return sys.modules[full_name] except KeyError: spec = importlib.util.find_spec(full_name) module = importlib.util.module_from_spec(spec) loader = importlib.util.LazyLoader(spec.loader) loader.exec_module(module) return module
def _choose_graph(adata, obsp, neighbors_key): 'Choose connectivities from neighbbors or another obsp column' if ((obsp is not None) and (neighbors_key is not None)): raise ValueError("You can't specify both obsp, neighbors_key. Please select only one.") if (obsp is not None): return adata.obsp[obsp] else: neighbors = NeighborsView(adata, neighbors_key) if ('connectivities' not in neighbors): raise ValueError('You need to run `pp.neighbors` first to compute a neighborhood graph.') return neighbors['connectivities']
-3,498,210,811,662,967,300
Choose connectivities from neighbbors or another obsp column
scanpy/_utils.py
_choose_graph
VolkerBergen/scanpy
python
def _choose_graph(adata, obsp, neighbors_key): if ((obsp is not None) and (neighbors_key is not None)): raise ValueError("You can't specify both obsp, neighbors_key. Please select only one.") if (obsp is not None): return adata.obsp[obsp] else: neighbors = NeighborsView(adata, neighbors_key) if ('connectivities' not in neighbors): raise ValueError('You need to run `pp.neighbors` first to compute a neighborhood graph.') return neighbors['connectivities']
def user_display_name(user): '\n Returns the preferred display name for the given user object: the result of\n user.get_full_name() if implemented and non-empty, or user.get_username() otherwise.\n ' try: full_name = user.get_full_name().strip() if full_name: return full_name except AttributeError: pass try: return user.get_username() except AttributeError: return ''
1,981,022,365,203,509,000
Returns the preferred display name for the given user object: the result of user.get_full_name() if implemented and non-empty, or user.get_username() otherwise.
wagtail_review/text.py
user_display_name
icanbwell/wagtail-review
python
def user_display_name(user): '\n Returns the preferred display name for the given user object: the result of\n user.get_full_name() if implemented and non-empty, or user.get_username() otherwise.\n ' try: full_name = user.get_full_name().strip() if full_name: return full_name except AttributeError: pass try: return user.get_username() except AttributeError: return
def testDataProtectionOfficer(self): 'Test DataProtectionOfficer' pass
-2,687,668,233,648,560,600
Test DataProtectionOfficer
test/test_data_protection_officer.py
testDataProtectionOfficer
My-Data-My-Consent/python-sdk
python
def testDataProtectionOfficer(self): pass
def select_server(server_type, config): 'Select a server type using different possible strings.\n\n Right now this just returns `OptimizationServer`, but this\n function could be useful when there are multiple choices of\n server.\n\n Args:\n server_type (str): indicates server choice.\n config (dict): config parsed from YAML, passed so that\n parameters can be used to select a given server.\n ' return OptimizationServer
2,689,991,968,026,703,000
Select a server type using different possible strings. Right now this just returns `OptimizationServer`, but this function could be useful when there are multiple choices of server. Args: server_type (str): indicates server choice. config (dict): config parsed from YAML, passed so that parameters can be used to select a given server.
core/server.py
select_server
simra/msrflute
python
def select_server(server_type, config): 'Select a server type using different possible strings.\n\n Right now this just returns `OptimizationServer`, but this\n function could be useful when there are multiple choices of\n server.\n\n Args:\n server_type (str): indicates server choice.\n config (dict): config parsed from YAML, passed so that\n parameters can be used to select a given server.\n ' return OptimizationServer
def __init__(self, num_clients, model, optimizer, ss_scheduler, data_path, model_path, train_dataloader, val_dataloader, test_dataloader, config, config_server): "Implement Server's orchestration and aggregation.\n\n This is the main Server class, that actually implements orchestration\n and aggregation, inheriting from `federated.Server`, which deals with\n communication only.\n\n The `train` method is central in FLUTE, as it defines good part of what\n happens during training.\n\n Args:\n num_clients (int): total available clients.\n model (torch.nn.Module): neural network model.\n optimizer (torch.optim.Optimizer): optimizer.\n ss_scheduler: scheduled sampling scheduler.\n data_path (str): points to where data is.\n model_path (str): points to where pretrained model is.\n train_dataloader (torch.utils.data.DataLoader): dataloader for training\n val_dataloader (torch.utils.data.DataLoader): dataloader for validation\n test_dataloader (torch.utils.data.DataLoader): dataloader for test, can be None\n config (dict): JSON style configuration parameters\n config_server: deprecated, kept for API compatibility only.\n " super().__init__() self.client_idx_list = list(range(num_clients)) self.config = config server_config = config['server_config'] decoder_config = config.get('decoder_config', None) self.max_iteration = server_config['max_iteration'] self.do_clustering = server_config.get('clustering', False) self.num_clients_per_iteration = ([int(x) for x in server_config['num_clients_per_iteration'].split(',')] if isinstance(server_config['num_clients_per_iteration'], str) else [server_config['num_clients_per_iteration']]) self.val_freq = server_config['val_freq'] self.req_freq = server_config['rec_freq'] self.evaluation = Evaluation(config, model_path, self.process_testvalidate, val_dataloader, test_dataloader) self.metrics = {'best_val_loss': float('inf'), 'best_val_acc': 0.0, 'best_test_loss': float('inf'), 'best_test_acc': 0.0} self.model_backup_freq = 
server_config.get('model_backup_freq', 100) self.worker_trainer_config = server_config.get('trainer_config', {}) self.aggregate_median = server_config['aggregate_median'] self.initial_lr_client = server_config.get('initial_lr_client', (- 1.0)) self.lr_decay_factor = server_config.get('lr_decay_factor', 1.0) self.model_type = config['model_config']['model_type'] self.quant_thresh = config['client_config'].get('quant_thresh', None) self.quant_bits = config['client_config'].get('quant_bits', 10) self.list_of_train_data = config['client_config']['data_config']['train']['list_of_train_data'] self.data_path = data_path if ('train' in server_config['data_config']): max_grad_norm = server_config['data_config']['train'].get('max_grad_norm', None) else: max_grad_norm = None self.worker_trainer = ModelUpdater(model=model, optimizer=optimizer, ss_scheduler=ss_scheduler, train_dataloader=(train_dataloader if (train_dataloader is not None) else val_dataloader), val_dataloader=val_dataloader, max_grad_norm=max_grad_norm, anneal_config=server_config['annealing_config'], model_type=self.model_type, decoder_config=decoder_config) self.metrics['worker_trainer'] = self.worker_trainer self.server_replay_iterations = None self.server_trainer = None if (train_dataloader is not None): assert ('server_replay_config' in server_config), 'server_replay_config is not set' assert ('optimizer_config' in server_config['server_replay_config']), 'server-side replay training optimizer is not set' self.server_optimizer_config = server_config['server_replay_config']['optimizer_config'] self.server_trainer_config = server_config['server_replay_config'].get('trainer_config', {}) self.server_replay_iterations = server_config['server_replay_config']['server_iterations'] self.server_trainer = Trainer(model=model, optimizer=None, ss_scheduler=ss_scheduler, train_dataloader=train_dataloader, server_replay_config=server_config['server_replay_config'], val_dataloader=None, 
max_grad_norm=server_config['server_replay_config'].get('max_grad_norm', server_config['data_config']['train'].get('max_grad_norm', None)), anneal_config=server_config['server_replay_config'].get('annealing_config', None)) self.skip_model_update = False self.train_loss = 0.0 self.model_path = model_path self.best_model_criterion = server_config['best_model_criterion'] self.fall_back_to_best_model = server_config['fall_back_to_best_model'] self.last_model_path = os.path.join(self.model_path, 'latest_model.tar') self.best_model_path = os.path.join(self.model_path, 'best_val_{}_model.tar'.format(self.best_model_criterion)) self.log_path = os.path.join(self.model_path, 'status_log.json') self.cur_iter_no = 0 self.lr_weight = 1.0 self.losses = [] self.no_label_updates = 0 if server_config.get('resume_from_checkpoint', False): self.load_saved_status() self.decoder_config = decoder_config self.spm_model = server_config['data_config']['test'].get('spm_model', None) self.do_profiling = server_config.get('do_profiling', False) self.clients_in_parallel = config['client_config'].get('clients_in_parallel', None) StrategyClass = select_strategy(config['strategy']) self.strategy = StrategyClass('server', self.config, self.model_path) print_rank(f'Server successfully instantiated strategy {self.strategy}', loglevel=logging.DEBUG)
-5,509,703,606,496,693,000
Implement Server's orchestration and aggregation. This is the main Server class, that actually implements orchestration and aggregation, inheriting from `federated.Server`, which deals with communication only. The `train` method is central in FLUTE, as it defines good part of what happens during training. Args: num_clients (int): total available clients. model (torch.nn.Module): neural network model. optimizer (torch.optim.Optimizer): optimizer. ss_scheduler: scheduled sampling scheduler. data_path (str): points to where data is. model_path (str): points to where pretrained model is. train_dataloader (torch.utils.data.DataLoader): dataloader for training val_dataloader (torch.utils.data.DataLoader): dataloader for validation test_dataloader (torch.utils.data.DataLoader): dataloader for test, can be None config (dict): JSON style configuration parameters config_server: deprecated, kept for API compatibility only.
core/server.py
__init__
simra/msrflute
python
def __init__(self, num_clients, model, optimizer, ss_scheduler, data_path, model_path, train_dataloader, val_dataloader, test_dataloader, config, config_server): "Implement Server's orchestration and aggregation.\n\n This is the main Server class, that actually implements orchestration\n and aggregation, inheriting from `federated.Server`, which deals with\n communication only.\n\n The `train` method is central in FLUTE, as it defines good part of what\n happens during training.\n\n Args:\n num_clients (int): total available clients.\n model (torch.nn.Module): neural network model.\n optimizer (torch.optim.Optimizer): optimizer.\n ss_scheduler: scheduled sampling scheduler.\n data_path (str): points to where data is.\n model_path (str): points to where pretrained model is.\n train_dataloader (torch.utils.data.DataLoader): dataloader for training\n val_dataloader (torch.utils.data.DataLoader): dataloader for validation\n test_dataloader (torch.utils.data.DataLoader): dataloader for test, can be None\n config (dict): JSON style configuration parameters\n config_server: deprecated, kept for API compatibility only.\n " super().__init__() self.client_idx_list = list(range(num_clients)) self.config = config server_config = config['server_config'] decoder_config = config.get('decoder_config', None) self.max_iteration = server_config['max_iteration'] self.do_clustering = server_config.get('clustering', False) self.num_clients_per_iteration = ([int(x) for x in server_config['num_clients_per_iteration'].split(',')] if isinstance(server_config['num_clients_per_iteration'], str) else [server_config['num_clients_per_iteration']]) self.val_freq = server_config['val_freq'] self.req_freq = server_config['rec_freq'] self.evaluation = Evaluation(config, model_path, self.process_testvalidate, val_dataloader, test_dataloader) self.metrics = {'best_val_loss': float('inf'), 'best_val_acc': 0.0, 'best_test_loss': float('inf'), 'best_test_acc': 0.0} self.model_backup_freq = 
server_config.get('model_backup_freq', 100) self.worker_trainer_config = server_config.get('trainer_config', {}) self.aggregate_median = server_config['aggregate_median'] self.initial_lr_client = server_config.get('initial_lr_client', (- 1.0)) self.lr_decay_factor = server_config.get('lr_decay_factor', 1.0) self.model_type = config['model_config']['model_type'] self.quant_thresh = config['client_config'].get('quant_thresh', None) self.quant_bits = config['client_config'].get('quant_bits', 10) self.list_of_train_data = config['client_config']['data_config']['train']['list_of_train_data'] self.data_path = data_path if ('train' in server_config['data_config']): max_grad_norm = server_config['data_config']['train'].get('max_grad_norm', None) else: max_grad_norm = None self.worker_trainer = ModelUpdater(model=model, optimizer=optimizer, ss_scheduler=ss_scheduler, train_dataloader=(train_dataloader if (train_dataloader is not None) else val_dataloader), val_dataloader=val_dataloader, max_grad_norm=max_grad_norm, anneal_config=server_config['annealing_config'], model_type=self.model_type, decoder_config=decoder_config) self.metrics['worker_trainer'] = self.worker_trainer self.server_replay_iterations = None self.server_trainer = None if (train_dataloader is not None): assert ('server_replay_config' in server_config), 'server_replay_config is not set' assert ('optimizer_config' in server_config['server_replay_config']), 'server-side replay training optimizer is not set' self.server_optimizer_config = server_config['server_replay_config']['optimizer_config'] self.server_trainer_config = server_config['server_replay_config'].get('trainer_config', {}) self.server_replay_iterations = server_config['server_replay_config']['server_iterations'] self.server_trainer = Trainer(model=model, optimizer=None, ss_scheduler=ss_scheduler, train_dataloader=train_dataloader, server_replay_config=server_config['server_replay_config'], val_dataloader=None, 
max_grad_norm=server_config['server_replay_config'].get('max_grad_norm', server_config['data_config']['train'].get('max_grad_norm', None)), anneal_config=server_config['server_replay_config'].get('annealing_config', None)) self.skip_model_update = False self.train_loss = 0.0 self.model_path = model_path self.best_model_criterion = server_config['best_model_criterion'] self.fall_back_to_best_model = server_config['fall_back_to_best_model'] self.last_model_path = os.path.join(self.model_path, 'latest_model.tar') self.best_model_path = os.path.join(self.model_path, 'best_val_{}_model.tar'.format(self.best_model_criterion)) self.log_path = os.path.join(self.model_path, 'status_log.json') self.cur_iter_no = 0 self.lr_weight = 1.0 self.losses = [] self.no_label_updates = 0 if server_config.get('resume_from_checkpoint', False): self.load_saved_status() self.decoder_config = decoder_config self.spm_model = server_config['data_config']['test'].get('spm_model', None) self.do_profiling = server_config.get('do_profiling', False) self.clients_in_parallel = config['client_config'].get('clients_in_parallel', None) StrategyClass = select_strategy(config['strategy']) self.strategy = StrategyClass('server', self.config, self.model_path) print_rank(f'Server successfully instantiated strategy {self.strategy}', loglevel=logging.DEBUG)
def load_saved_status(self): 'Load checkpoint from disk' if os.path.exists(self.last_model_path): print_rank('Resuming from checkpoint model {}'.format(self.last_model_path)) self.worker_trainer.load(self.last_model_path, update_lr_scheduler=True, update_ss_scheduler=True) if (self.server_trainer is not None): self.server_trainer.model = self.worker_trainer.model if os.path.exists(self.log_path): with open(self.log_path, 'r') as logfp: elems = json.load(logfp) self.cur_iter_no = elems.get('i', 0) self.metrics['best_val_loss'] = elems.get('best_val_loss', float('inf')) self.metrics['best_val_acc'] = elems.get('best_val_acc', 0) self.metrics['best_test_loss'] = elems.get('best_test_loss', float('inf')) self.metrics['best_test_acc'] = elems.get('best_test_acc', 0) self.lr_weight = elems.get('weight', 1.0) self.no_label_updates = elems.get('num_label_updates', 0) print_rank(f'Resuming from status_log: cur_iter: {self.cur_iter_no}')
-5,987,352,651,717,467,000
Load checkpoint from disk
core/server.py
load_saved_status
simra/msrflute
python
def load_saved_status(self): if os.path.exists(self.last_model_path): print_rank('Resuming from checkpoint model {}'.format(self.last_model_path)) self.worker_trainer.load(self.last_model_path, update_lr_scheduler=True, update_ss_scheduler=True) if (self.server_trainer is not None): self.server_trainer.model = self.worker_trainer.model if os.path.exists(self.log_path): with open(self.log_path, 'r') as logfp: elems = json.load(logfp) self.cur_iter_no = elems.get('i', 0) self.metrics['best_val_loss'] = elems.get('best_val_loss', float('inf')) self.metrics['best_val_acc'] = elems.get('best_val_acc', 0) self.metrics['best_test_loss'] = elems.get('best_test_loss', float('inf')) self.metrics['best_test_acc'] = elems.get('best_test_acc', 0) self.lr_weight = elems.get('weight', 1.0) self.no_label_updates = elems.get('num_label_updates', 0) print_rank(f'Resuming from status_log: cur_iter: {self.cur_iter_no}')
def run(self): 'Trigger training.\n\n This is a simple wrapper to the `train` method.\n ' print_rank('server started') self.train() print_rank('server terminated')
5,204,790,440,284,381,000
Trigger training. This is a simple wrapper to the `train` method.
core/server.py
run
simra/msrflute
python
def run(self): 'Trigger training.\n\n This is a simple wrapper to the `train` method.\n ' print_rank('server started') self.train() print_rank('server terminated')
def train(self): 'Main method for training.' self.run_stats = {'secsPerClientRound': [], 'secsPerClient': [], 'secsPerClientTraining': [], 'secsPerClientSetup': [], 'secsPerClientFull': [], 'secsPerRoundHousekeeping': [], 'secsPerRoundTotal': [], 'mpiCosts': []} run.log('Max iterations', self.max_iteration) try: (self.worker_trainer.model.cuda() if torch.cuda.is_available() else None) eval_list = [] if (self.cur_iter_no == 0): if self.config['server_config']['initial_rec']: eval_list.append('test') if self.config['server_config']['initial_val']: eval_list.append('val') run.log('LR for agg. opt.', get_lr(self.worker_trainer.optimizer)) print_rank('Running {} at itr={}'.format(eval_list, self.cur_iter_no)) self.metrics = self.evaluation.run(eval_list, self.metrics, metric_logger=run.log) eval_list = [] print_rank('Saving Model Before Starting Training', loglevel=logging.INFO) for token in ['best_val_loss', 'best_val_acc', 'best_test_acc', 'latest']: self.worker_trainer.save(model_path=self.model_path, token=token, config=self.config['server_config']) self.worker_trainer.model.train() for i in range(self.cur_iter_no, self.max_iteration): begin = time.time() metrics_payload = {} def log_metric(k, v): metrics_payload[k] = v print_rank('==== iteration {}'.format(i)) log_metric('Current iteration', i) initial_lr = (self.initial_lr_client * self.lr_weight) print_rank('Client learning rate {}'.format(initial_lr)) self.worker_trainer.model.zero_grad() self.train_loss = [] server_data = (initial_lr, [p.data.to(torch.device('cpu')) for p in self.worker_trainer.model.parameters()]) if (len(self.num_clients_per_iteration) > 1): num_clients_curr_iter = random.randint(self.num_clients_per_iteration[0], self.num_clients_per_iteration[1]) else: num_clients_curr_iter = self.num_clients_per_iteration[0] log_metric('Clients for round', num_clients_curr_iter) if (self.quant_thresh is not None): self.config['client_config']['quant_thresh'] *= 
self.config['client_config'].get('quant_anneal', 1.0) self.quant_thresh = self.config['client_config']['quant_thresh'] log_metric('Quantization Thresh.', self.config['client_config']['quant_thresh']) sampled_idx_clients = (random.sample(self.client_idx_list, num_clients_curr_iter) if (num_clients_curr_iter > 0) else self.client_idx_list) sampled_clients = [Client(client_id, self.config, (self.config['client_config']['type'] == 'optimization'), None) for client_id in sampled_idx_clients] clients_begin = time.time() client_losses = [] client_mag_grads = [] client_mean_grads = [] client_var_grads = [] client_norm_grads = [] self.run_stats['secsPerClient'].append([]) self.run_stats['secsPerClientFull'].append([]) self.run_stats['secsPerClientTraining'].append([]) self.run_stats['secsPerClientSetup'].append([]) self.run_stats['mpiCosts'].append([]) apply_privacy_metrics = (self.config.get('privacy_metrics_config', None) and self.config['privacy_metrics_config']['apply_metrics']) adaptive_leakage = (apply_privacy_metrics and self.config['privacy_metrics_config'].get('adaptive_leakage_threshold', None)) if apply_privacy_metrics: privacy_metrics_stats = defaultdict(list) profiler = None if self.do_profiling: profiler = cProfile.Profile() profiler.enable() self.worker_trainer.model.zero_grad() for client_output in self.process_clients(sampled_clients, server_data, self.clients_in_parallel): client_timestamp = client_output['ts'] client_stats = client_output['cs'] client_loss = client_output['tl'] client_mag_grad = client_output['mg'] client_mean_grad = client_output['ng'] client_var_grad = client_output['vg'] client_norm_grad = client_output['rg'] client_payload = client_output['pl'] if apply_privacy_metrics: privacy_stats = client_output['ps'] for (metric, value) in privacy_stats.items(): privacy_metrics_stats[metric].append(value) self.run_stats['mpiCosts'][(- 1)].append((time.time() - client_timestamp)) payload_processed = 
self.strategy.process_individual_payload(self.worker_trainer, client_payload) if (not payload_processed): print_rank('Dropping client', loglevel=logging.DEBUG) num_clients_curr_iter -= 1 continue self.train_loss.append(client_loss) client_losses.append(client_loss) client_mag_grads.append(client_mag_grad.item()) client_mean_grads.append(client_mean_grad.item()) client_var_grads.append(client_var_grad.item()) client_norm_grads.append(client_norm_grad.item()) client_end = time.time() self.run_stats['secsPerClientFull'][(- 1)].append(client_stats['full cost']) self.run_stats['secsPerClientTraining'][(- 1)].append(client_stats['training']) self.run_stats['secsPerClientSetup'][(- 1)].append(client_stats['setup']) self.run_stats['secsPerClient'][(- 1)].append((client_end - clients_begin)) if self.do_profiling: profiler.disable() stats = pstats.Stats(profiler) stats.sort_stats('cumulative').print_stats() client_mag_grads = np.array(client_mag_grads) client_mean_grads = np.array(client_mean_grads) client_var_grads = np.array(client_var_grads) client_norm_grads = np.array(client_norm_grads) client_stats = (client_mag_grads, client_mean_grads, client_var_grads) dump_norm_stats = self.config.get('dump_norm_stats', False) if dump_norm_stats: with open(os.path.join(self.model_path, 'norm_stats.txt'), 'a', encoding='utf-8') as outF: outF.write('{}\n'.format(json.dumps(list(client_norm_grads)))) if apply_privacy_metrics: for (metric, values) in privacy_metrics_stats.items(): if (metric == 'Dropped clients'): log_metric(metric, sum(values)) else: log_metric(metric, max(values)) if (type(adaptive_leakage) is float): values = privacy_metrics_stats['Practical epsilon (Max leakage)'] new_threshold = list(sorted(values))[int((adaptive_leakage * len(values)))] print_rank('Updating leakage threshold to {}'.format(new_threshold)) self.config['privacy_metrics_config']['max_allowed_leakage'] = new_threshold end = time.time() self.run_stats['secsPerClientRound'].append((end - begin)) begin = 
end log_metric('Training loss', sum(self.train_loss)) self.losses = self.strategy.combine_payloads(worker_trainer=self.worker_trainer, curr_iter=i, num_clients_curr_iter=num_clients_curr_iter, client_stats=client_stats, logger=log_metric) if (self.server_trainer is not None): print_rank('Running replay iterations on server') if ('updatable_names' in self.server_trainer_config): set_component_wise_lr(self.worker_trainer.model, self.server_optimizer_config, self.server_trainer_config['updatable_names']) self.server_trainer.prepare_iteration(self.worker_trainer.model) self.server_trainer.train_desired_samples(self.server_replay_iterations) self.worker_trainer.model.load_state_dict(self.server_trainer.model.state_dict()) torch.cuda.empty_cache() print_rank('Run ss scheduler') self.worker_trainer.run_ss_scheduler() if (((i + 1) % self.val_freq) == 0): eval_list.append('val') if (((i + 1) % self.req_freq) == 0): eval_list.append('test') if (len(eval_list) > 0): print_rank('Running {} at itr={}'.format(eval_list, (i + 1))) self.metrics['worker_trainer'] = self.worker_trainer self.metrics = self.evaluation.run(eval_list, self.metrics, metric_logger=run.log) self.losses = self.evaluation.losses eval_list = [] if ('val' in eval_list): run.log('LR for agg. 
opt.', get_lr(self.worker_trainer.optimizer)) if (not (self.losses[0] < self.metrics['best_val_loss'])): self.lr_weight *= self.lr_decay_factor print_rank('LOG: Client weight of learning rate {}..'.format(self.lr_weight)) self.backup_models(i) self.fall_back_to_prev_best_status() update_json_log(self.log_path, {'i': (i + 1), 'best_val_loss': float(self.metrics['best_val_loss']), 'best_val_acc': float(self.metrics['best_val_acc']), 'best_test_loss': float(self.metrics['best_test_loss']), 'best_test_acc': float(self.metrics['best_test_acc']), 'weight': float(self.lr_weight), 'num_label_updates': int(self.no_label_updates)}) end = time.time() self.run_stats['secsPerRoundHousekeeping'].append((end - begin)) self.run_stats['secsPerRoundTotal'].append((self.run_stats['secsPerClientRound'][(- 1)] + self.run_stats['secsPerRoundHousekeeping'][(- 1)])) log_metric('secsPerRoundTotal', self.run_stats['secsPerRoundTotal'][(- 1)]) if self.do_profiling: log_metric('secsPerClientRound', self.run_stats['secsPerClientRound'][(- 1)]) log_metric('secsPerRoundHousekeeping', self.run_stats['secsPerRoundHousekeeping'][(- 1)]) metrics_for_stats = ['secsPerClient', 'secsPerClientTraining', 'secsPerClientFull', 'secsPerClientSetup', 'mpiCosts'] for metric in metrics_for_stats: log_metric(f'{metric}Mean', np.mean(self.run_stats[metric][(- 1)])) log_metric(f'{metric}Median', np.median(self.run_stats[metric][(- 1)])) log_metric(f'{metric}Max', max(self.run_stats[metric][(- 1)])) for k in self.run_stats: if (k in metrics_for_stats): print_rank('{}: {}'.format(k, max(self.run_stats[k][(- 1)])), loglevel=logging.DEBUG) else: print_rank('{}: {}'.format(k, self.run_stats[k][(- 1)]), loglevel=logging.DEBUG) for k in metrics_payload: run.log(k, metrics_payload[k]) finally: self.terminate_workers(terminate=(not self.do_clustering))
1,279,950,367,566,187,500
Main method for training.
core/server.py
train
simra/msrflute
python
def train(self): self.run_stats = {'secsPerClientRound': [], 'secsPerClient': [], 'secsPerClientTraining': [], 'secsPerClientSetup': [], 'secsPerClientFull': [], 'secsPerRoundHousekeeping': [], 'secsPerRoundTotal': [], 'mpiCosts': []} run.log('Max iterations', self.max_iteration) try: (self.worker_trainer.model.cuda() if torch.cuda.is_available() else None) eval_list = [] if (self.cur_iter_no == 0): if self.config['server_config']['initial_rec']: eval_list.append('test') if self.config['server_config']['initial_val']: eval_list.append('val') run.log('LR for agg. opt.', get_lr(self.worker_trainer.optimizer)) print_rank('Running {} at itr={}'.format(eval_list, self.cur_iter_no)) self.metrics = self.evaluation.run(eval_list, self.metrics, metric_logger=run.log) eval_list = [] print_rank('Saving Model Before Starting Training', loglevel=logging.INFO) for token in ['best_val_loss', 'best_val_acc', 'best_test_acc', 'latest']: self.worker_trainer.save(model_path=self.model_path, token=token, config=self.config['server_config']) self.worker_trainer.model.train() for i in range(self.cur_iter_no, self.max_iteration): begin = time.time() metrics_payload = {} def log_metric(k, v): metrics_payload[k] = v print_rank('==== iteration {}'.format(i)) log_metric('Current iteration', i) initial_lr = (self.initial_lr_client * self.lr_weight) print_rank('Client learning rate {}'.format(initial_lr)) self.worker_trainer.model.zero_grad() self.train_loss = [] server_data = (initial_lr, [p.data.to(torch.device('cpu')) for p in self.worker_trainer.model.parameters()]) if (len(self.num_clients_per_iteration) > 1): num_clients_curr_iter = random.randint(self.num_clients_per_iteration[0], self.num_clients_per_iteration[1]) else: num_clients_curr_iter = self.num_clients_per_iteration[0] log_metric('Clients for round', num_clients_curr_iter) if (self.quant_thresh is not None): self.config['client_config']['quant_thresh'] *= self.config['client_config'].get('quant_anneal', 1.0) self.quant_thresh = 
self.config['client_config']['quant_thresh'] log_metric('Quantization Thresh.', self.config['client_config']['quant_thresh']) sampled_idx_clients = (random.sample(self.client_idx_list, num_clients_curr_iter) if (num_clients_curr_iter > 0) else self.client_idx_list) sampled_clients = [Client(client_id, self.config, (self.config['client_config']['type'] == 'optimization'), None) for client_id in sampled_idx_clients] clients_begin = time.time() client_losses = [] client_mag_grads = [] client_mean_grads = [] client_var_grads = [] client_norm_grads = [] self.run_stats['secsPerClient'].append([]) self.run_stats['secsPerClientFull'].append([]) self.run_stats['secsPerClientTraining'].append([]) self.run_stats['secsPerClientSetup'].append([]) self.run_stats['mpiCosts'].append([]) apply_privacy_metrics = (self.config.get('privacy_metrics_config', None) and self.config['privacy_metrics_config']['apply_metrics']) adaptive_leakage = (apply_privacy_metrics and self.config['privacy_metrics_config'].get('adaptive_leakage_threshold', None)) if apply_privacy_metrics: privacy_metrics_stats = defaultdict(list) profiler = None if self.do_profiling: profiler = cProfile.Profile() profiler.enable() self.worker_trainer.model.zero_grad() for client_output in self.process_clients(sampled_clients, server_data, self.clients_in_parallel): client_timestamp = client_output['ts'] client_stats = client_output['cs'] client_loss = client_output['tl'] client_mag_grad = client_output['mg'] client_mean_grad = client_output['ng'] client_var_grad = client_output['vg'] client_norm_grad = client_output['rg'] client_payload = client_output['pl'] if apply_privacy_metrics: privacy_stats = client_output['ps'] for (metric, value) in privacy_stats.items(): privacy_metrics_stats[metric].append(value) self.run_stats['mpiCosts'][(- 1)].append((time.time() - client_timestamp)) payload_processed = self.strategy.process_individual_payload(self.worker_trainer, client_payload) if (not payload_processed): 
print_rank('Dropping client', loglevel=logging.DEBUG) num_clients_curr_iter -= 1 continue self.train_loss.append(client_loss) client_losses.append(client_loss) client_mag_grads.append(client_mag_grad.item()) client_mean_grads.append(client_mean_grad.item()) client_var_grads.append(client_var_grad.item()) client_norm_grads.append(client_norm_grad.item()) client_end = time.time() self.run_stats['secsPerClientFull'][(- 1)].append(client_stats['full cost']) self.run_stats['secsPerClientTraining'][(- 1)].append(client_stats['training']) self.run_stats['secsPerClientSetup'][(- 1)].append(client_stats['setup']) self.run_stats['secsPerClient'][(- 1)].append((client_end - clients_begin)) if self.do_profiling: profiler.disable() stats = pstats.Stats(profiler) stats.sort_stats('cumulative').print_stats() client_mag_grads = np.array(client_mag_grads) client_mean_grads = np.array(client_mean_grads) client_var_grads = np.array(client_var_grads) client_norm_grads = np.array(client_norm_grads) client_stats = (client_mag_grads, client_mean_grads, client_var_grads) dump_norm_stats = self.config.get('dump_norm_stats', False) if dump_norm_stats: with open(os.path.join(self.model_path, 'norm_stats.txt'), 'a', encoding='utf-8') as outF: outF.write('{}\n'.format(json.dumps(list(client_norm_grads)))) if apply_privacy_metrics: for (metric, values) in privacy_metrics_stats.items(): if (metric == 'Dropped clients'): log_metric(metric, sum(values)) else: log_metric(metric, max(values)) if (type(adaptive_leakage) is float): values = privacy_metrics_stats['Practical epsilon (Max leakage)'] new_threshold = list(sorted(values))[int((adaptive_leakage * len(values)))] print_rank('Updating leakage threshold to {}'.format(new_threshold)) self.config['privacy_metrics_config']['max_allowed_leakage'] = new_threshold end = time.time() self.run_stats['secsPerClientRound'].append((end - begin)) begin = end log_metric('Training loss', sum(self.train_loss)) self.losses = 
self.strategy.combine_payloads(worker_trainer=self.worker_trainer, curr_iter=i, num_clients_curr_iter=num_clients_curr_iter, client_stats=client_stats, logger=log_metric) if (self.server_trainer is not None): print_rank('Running replay iterations on server') if ('updatable_names' in self.server_trainer_config): set_component_wise_lr(self.worker_trainer.model, self.server_optimizer_config, self.server_trainer_config['updatable_names']) self.server_trainer.prepare_iteration(self.worker_trainer.model) self.server_trainer.train_desired_samples(self.server_replay_iterations) self.worker_trainer.model.load_state_dict(self.server_trainer.model.state_dict()) torch.cuda.empty_cache() print_rank('Run ss scheduler') self.worker_trainer.run_ss_scheduler() if (((i + 1) % self.val_freq) == 0): eval_list.append('val') if (((i + 1) % self.req_freq) == 0): eval_list.append('test') if (len(eval_list) > 0): print_rank('Running {} at itr={}'.format(eval_list, (i + 1))) self.metrics['worker_trainer'] = self.worker_trainer self.metrics = self.evaluation.run(eval_list, self.metrics, metric_logger=run.log) self.losses = self.evaluation.losses eval_list = [] if ('val' in eval_list): run.log('LR for agg. 
opt.', get_lr(self.worker_trainer.optimizer)) if (not (self.losses[0] < self.metrics['best_val_loss'])): self.lr_weight *= self.lr_decay_factor print_rank('LOG: Client weight of learning rate {}..'.format(self.lr_weight)) self.backup_models(i) self.fall_back_to_prev_best_status() update_json_log(self.log_path, {'i': (i + 1), 'best_val_loss': float(self.metrics['best_val_loss']), 'best_val_acc': float(self.metrics['best_val_acc']), 'best_test_loss': float(self.metrics['best_test_loss']), 'best_test_acc': float(self.metrics['best_test_acc']), 'weight': float(self.lr_weight), 'num_label_updates': int(self.no_label_updates)}) end = time.time() self.run_stats['secsPerRoundHousekeeping'].append((end - begin)) self.run_stats['secsPerRoundTotal'].append((self.run_stats['secsPerClientRound'][(- 1)] + self.run_stats['secsPerRoundHousekeeping'][(- 1)])) log_metric('secsPerRoundTotal', self.run_stats['secsPerRoundTotal'][(- 1)]) if self.do_profiling: log_metric('secsPerClientRound', self.run_stats['secsPerClientRound'][(- 1)]) log_metric('secsPerRoundHousekeeping', self.run_stats['secsPerRoundHousekeeping'][(- 1)]) metrics_for_stats = ['secsPerClient', 'secsPerClientTraining', 'secsPerClientFull', 'secsPerClientSetup', 'mpiCosts'] for metric in metrics_for_stats: log_metric(f'{metric}Mean', np.mean(self.run_stats[metric][(- 1)])) log_metric(f'{metric}Median', np.median(self.run_stats[metric][(- 1)])) log_metric(f'{metric}Max', max(self.run_stats[metric][(- 1)])) for k in self.run_stats: if (k in metrics_for_stats): print_rank('{}: {}'.format(k, max(self.run_stats[k][(- 1)])), loglevel=logging.DEBUG) else: print_rank('{}: {}'.format(k, self.run_stats[k][(- 1)]), loglevel=logging.DEBUG) for k in metrics_payload: run.log(k, metrics_payload[k]) finally: self.terminate_workers(terminate=(not self.do_clustering))
def backup_models(self, i): 'Save the current best models.\n\n Save CER model, the best loss model and the best WER model. This occurs\n at a specified period.\n\n Args:\n i: no. of iterations.\n ' self.worker_trainer.save(model_path=self.model_path, token='latest', config=self.config['server_config']) if ((i % self.model_backup_freq) == 0): self.worker_trainer.save(model_path=self.model_path, token='epoch{}'.format(i), config=self.config['server_config']) for bodyname in ['best_val_acc', 'best_val_loss', 'best_test_acc']: src_model_path = os.path.join(self.model_path, '{}_model.tar'.format(bodyname)) if os.path.exists(src_model_path): dst_model_path = os.path.join(self.model_path, 'epoch{}_{}_model.tar'.format(i, bodyname)) shutil.copyfile(src_model_path, dst_model_path) print_rank('Saved {}'.format(dst_model_path))
363,412,540,844,676,350
Save the current best models. Save CER model, the best loss model and the best WER model. This occurs at a specified period. Args: i: no. of iterations.
core/server.py
backup_models
simra/msrflute
python
def backup_models(self, i): 'Save the current best models.\n\n Save CER model, the best loss model and the best WER model. This occurs\n at a specified period.\n\n Args:\n i: no. of iterations.\n ' self.worker_trainer.save(model_path=self.model_path, token='latest', config=self.config['server_config']) if ((i % self.model_backup_freq) == 0): self.worker_trainer.save(model_path=self.model_path, token='epoch{}'.format(i), config=self.config['server_config']) for bodyname in ['best_val_acc', 'best_val_loss', 'best_test_acc']: src_model_path = os.path.join(self.model_path, '{}_model.tar'.format(bodyname)) if os.path.exists(src_model_path): dst_model_path = os.path.join(self.model_path, 'epoch{}_{}_model.tar'.format(i, bodyname)) shutil.copyfile(src_model_path, dst_model_path) print_rank('Saved {}'.format(dst_model_path))
def fall_back_to_prev_best_status(self): 'Go back to the past best status and switch to the recent best model.' if self.fall_back_to_best_model: print_rank('falling back to model {}'.format(self.best_model_path)) tmp_lr = get_lr(self.worker_trainer.optimizer) self.worker_trainer.load(self.best_model_path, update_lr_scheduler=False, update_ss_scheduler=False) for g in self.worker_trainer.optimizer.param_groups: g['lr'] = tmp_lr if (self.server_trainer is not None): self.server_trainer.model = self.worker_trainer.model
-3,130,464,585,995,101,700
Go back to the past best status and switch to the recent best model.
core/server.py
fall_back_to_prev_best_status
simra/msrflute
python
def fall_back_to_prev_best_status(self): if self.fall_back_to_best_model: print_rank('falling back to model {}'.format(self.best_model_path)) tmp_lr = get_lr(self.worker_trainer.optimizer) self.worker_trainer.load(self.best_model_path, update_lr_scheduler=False, update_ss_scheduler=False) for g in self.worker_trainer.optimizer.param_groups: g['lr'] = tmp_lr if (self.server_trainer is not None): self.server_trainer.model = self.worker_trainer.model
def get_http_authentication(private_key: RsaKey, private_key_id: str) -> HTTPSignatureHeaderAuth: '\n Get HTTP signature authentication for a request.\n ' key = private_key.exportKey() return HTTPSignatureHeaderAuth(headers=['(request-target)', 'user-agent', 'host', 'date'], algorithm='rsa-sha256', key=key, key_id=private_key_id)
6,171,740,104,618,513,000
Get HTTP signature authentication for a request.
federation/protocols/activitypub/signing.py
get_http_authentication
jaywink/federation
python
def get_http_authentication(private_key: RsaKey, private_key_id: str) -> HTTPSignatureHeaderAuth: '\n \n ' key = private_key.exportKey() return HTTPSignatureHeaderAuth(headers=['(request-target)', 'user-agent', 'host', 'date'], algorithm='rsa-sha256', key=key, key_id=private_key_id)
def verify_request_signature(request: RequestType, public_key: Union[(str, bytes)]): '\n Verify HTTP signature in request against a public key.\n ' key = encode_if_text(public_key) date_header = request.headers.get('Date') if (not date_header): raise ValueError('Rquest Date header is missing') ts = parse_http_date(date_header) dt = datetime.datetime.utcfromtimestamp(ts).replace(tzinfo=pytz.utc) past_delta = datetime.timedelta(hours=24) future_delta = datetime.timedelta(seconds=30) now = datetime.datetime.utcnow().replace(tzinfo=pytz.utc) if ((dt < (now - past_delta)) or (dt > (now + future_delta))): raise ValueError('Request Date is too far in future or past') HTTPSignatureHeaderAuth.verify(request, key_resolver=(lambda **kwargs: key))
3,867,310,073,539,097,000
Verify HTTP signature in request against a public key.
federation/protocols/activitypub/signing.py
verify_request_signature
jaywink/federation
python
def verify_request_signature(request: RequestType, public_key: Union[(str, bytes)]): '\n \n ' key = encode_if_text(public_key) date_header = request.headers.get('Date') if (not date_header): raise ValueError('Rquest Date header is missing') ts = parse_http_date(date_header) dt = datetime.datetime.utcfromtimestamp(ts).replace(tzinfo=pytz.utc) past_delta = datetime.timedelta(hours=24) future_delta = datetime.timedelta(seconds=30) now = datetime.datetime.utcnow().replace(tzinfo=pytz.utc) if ((dt < (now - past_delta)) or (dt > (now + future_delta))): raise ValueError('Request Date is too far in future or past') HTTPSignatureHeaderAuth.verify(request, key_resolver=(lambda **kwargs: key))
def __init__(self, selection): '\n Create a new :py:class:`Selection` from the given ``selection`` string.\n ' ptr = self.ffi.chfl_selection(selection.encode('utf8')) super(Selection, self).__init__(ptr, is_const=False)
8,582,937,328,836,864,000
Create a new :py:class:`Selection` from the given ``selection`` string.
chemfiles/selection.py
__init__
Luthaf/Chemharp-python
python
def __init__(self, selection): '\n \n ' ptr = self.ffi.chfl_selection(selection.encode('utf8')) super(Selection, self).__init__(ptr, is_const=False)
@property def size(self): "\n Get the size of this :py:class:`Selection`.\n\n The size of a selection is the number of atoms we are selecting\n together. This value is 1 for the 'atom' context, 2 for the 'pair' and\n 'bond' context, 3 for the 'three' and 'angles' contextes and 4 for the\n 'four' and 'dihedral' contextes.\n " size = c_uint64() self.ffi.chfl_selection_size(self.ptr, size) return size.value
-4,548,979,829,295,121,000
Get the size of this :py:class:`Selection`. The size of a selection is the number of atoms we are selecting together. This value is 1 for the 'atom' context, 2 for the 'pair' and 'bond' context, 3 for the 'three' and 'angles' contextes and 4 for the 'four' and 'dihedral' contextes.
chemfiles/selection.py
size
Luthaf/Chemharp-python
python
@property def size(self): "\n Get the size of this :py:class:`Selection`.\n\n The size of a selection is the number of atoms we are selecting\n together. This value is 1 for the 'atom' context, 2 for the 'pair' and\n 'bond' context, 3 for the 'three' and 'angles' contextes and 4 for the\n 'four' and 'dihedral' contextes.\n " size = c_uint64() self.ffi.chfl_selection_size(self.ptr, size) return size.value
@property def string(self): '\n Get the selection string used to create this :py:class:`Selection`.\n ' return _call_with_growing_buffer((lambda buffer, size: self.ffi.chfl_selection_string(self.ptr, buffer, size)), initial=128)
-8,501,303,544,896,859,000
Get the selection string used to create this :py:class:`Selection`.
chemfiles/selection.py
string
Luthaf/Chemharp-python
python
@property def string(self): '\n \n ' return _call_with_growing_buffer((lambda buffer, size: self.ffi.chfl_selection_string(self.ptr, buffer, size)), initial=128)
def evaluate(self, frame): '\n Evaluate a :py:class:`Selection` for a given :py:class:`Frame`, and\n return a list of matching atoms, either as a list of index or a list\n of tuples of indexes.\n ' matching = c_uint64() self.ffi.chfl_selection_evaluate(self.mut_ptr, frame.ptr, matching) matches = np.zeros(matching.value, chfl_match) self.ffi.chfl_selection_matches(self.mut_ptr, matches, matching) size = self.size result = [] for match in matches: assert (match[0] == size) atoms = match[1] if (size == 1): result.append(atoms[0]) elif (size == 2): result.append((atoms[0], atoms[1])) elif (size == 3): result.append((atoms[0], atoms[1], atoms[2])) elif (size == 4): result.append((atoms[0], atoms[1], atoms[2], atoms[3])) return result
2,673,721,999,087,359,000
Evaluate a :py:class:`Selection` for a given :py:class:`Frame`, and return a list of matching atoms, either as a list of index or a list of tuples of indexes.
chemfiles/selection.py
evaluate
Luthaf/Chemharp-python
python
def evaluate(self, frame): '\n Evaluate a :py:class:`Selection` for a given :py:class:`Frame`, and\n return a list of matching atoms, either as a list of index or a list\n of tuples of indexes.\n ' matching = c_uint64() self.ffi.chfl_selection_evaluate(self.mut_ptr, frame.ptr, matching) matches = np.zeros(matching.value, chfl_match) self.ffi.chfl_selection_matches(self.mut_ptr, matches, matching) size = self.size result = [] for match in matches: assert (match[0] == size) atoms = match[1] if (size == 1): result.append(atoms[0]) elif (size == 2): result.append((atoms[0], atoms[1])) elif (size == 3): result.append((atoms[0], atoms[1], atoms[2])) elif (size == 4): result.append((atoms[0], atoms[1], atoms[2], atoms[3])) return result
def _find_neighbor_and_lambda(neighbor_indices, neighbor_distances, core_distances, min_samples): '\n Find the nearest mutual reachability neighbor of a point, and compute\n the associated lambda value for the point, given the mutual reachability\n distance to a nearest neighbor.\n\n Parameters\n ----------\n neighbor_indices : array (2 * min_samples, )\n An array of raw distance based nearest neighbor indices.\n\n neighbor_distances : array (2 * min_samples, )\n An array of raw distances to the nearest neighbors.\n\n core_distances : array (n_samples, )\n An array of core distances for all points\n\n min_samples : int\n The min_samples value used to generate core distances.\n\n Returns\n -------\n neighbor : int\n The index into the full raw data set of the nearest mutual reachability\n distance neighbor of the point.\n\n lambda_ : float\n The lambda value at which this point joins/merges with `neighbor`.\n ' neighbor_core_distances = core_distances[neighbor_indices] point_core_distances = (neighbor_distances[min_samples] * np.ones(neighbor_indices.shape[0])) mr_distances = np.vstack((neighbor_core_distances, point_core_distances, neighbor_distances)).max(axis=0) nn_index = mr_distances.argmin() nearest_neighbor = neighbor_indices[nn_index] if (mr_distances[nn_index] > 0.0): lambda_ = (1.0 / mr_distances[nn_index]) else: lambda_ = np.finfo(np.double).max return (nearest_neighbor, lambda_)
-6,260,141,226,484,943,000
Find the nearest mutual reachability neighbor of a point, and compute the associated lambda value for the point, given the mutual reachability distance to a nearest neighbor. Parameters ---------- neighbor_indices : array (2 * min_samples, ) An array of raw distance based nearest neighbor indices. neighbor_distances : array (2 * min_samples, ) An array of raw distances to the nearest neighbors. core_distances : array (n_samples, ) An array of core distances for all points min_samples : int The min_samples value used to generate core distances. Returns ------- neighbor : int The index into the full raw data set of the nearest mutual reachability distance neighbor of the point. lambda_ : float The lambda value at which this point joins/merges with `neighbor`.
hdbscan/prediction.py
_find_neighbor_and_lambda
CKrawczyk/hdbscan
python
def _find_neighbor_and_lambda(neighbor_indices, neighbor_distances, core_distances, min_samples): '\n Find the nearest mutual reachability neighbor of a point, and compute\n the associated lambda value for the point, given the mutual reachability\n distance to a nearest neighbor.\n\n Parameters\n ----------\n neighbor_indices : array (2 * min_samples, )\n An array of raw distance based nearest neighbor indices.\n\n neighbor_distances : array (2 * min_samples, )\n An array of raw distances to the nearest neighbors.\n\n core_distances : array (n_samples, )\n An array of core distances for all points\n\n min_samples : int\n The min_samples value used to generate core distances.\n\n Returns\n -------\n neighbor : int\n The index into the full raw data set of the nearest mutual reachability\n distance neighbor of the point.\n\n lambda_ : float\n The lambda value at which this point joins/merges with `neighbor`.\n ' neighbor_core_distances = core_distances[neighbor_indices] point_core_distances = (neighbor_distances[min_samples] * np.ones(neighbor_indices.shape[0])) mr_distances = np.vstack((neighbor_core_distances, point_core_distances, neighbor_distances)).max(axis=0) nn_index = mr_distances.argmin() nearest_neighbor = neighbor_indices[nn_index] if (mr_distances[nn_index] > 0.0): lambda_ = (1.0 / mr_distances[nn_index]) else: lambda_ = np.finfo(np.double).max return (nearest_neighbor, lambda_)
def _extend_condensed_tree(tree, neighbor_indices, neighbor_distances, core_distances, min_samples): '\n Create a new condensed tree with an additional point added, allowing for\n computations as if this point had been part of the original tree. Note\n that this makes as little change to the tree as possible, with no\n re-optimizing/re-condensing so that the selected clusters remain\n effectively unchanged.\n\n Parameters\n ----------\n tree : structured array\n The raw format condensed tree to update.\n\n neighbor_indices : array (2 * min_samples, )\n An array of raw distance based nearest neighbor indices.\n\n neighbor_distances : array (2 * min_samples, )\n An array of raw distances to the nearest neighbors.\n\n core_distances : array (n_samples, )\n An array of core distances for all points\n\n min_samples : int\n The min_samples value used to generate core distances.\n\n Returns\n -------\n new_tree : structured array\n The original tree with an extra row providing the parent cluster\n and lambda information for a new point given index -1.\n ' tree_root = tree['parent'].min() (nearest_neighbor, lambda_) = _find_neighbor_and_lambda(neighbor_indices, neighbor_distances, core_distances, min_samples) neighbor_tree_row = get_tree_row_with_child(tree, nearest_neighbor) potential_cluster = neighbor_tree_row['parent'] if (neighbor_tree_row['lambda_val'] <= lambda_): new_tree_row = (potential_cluster, (- 1), 1, neighbor_tree_row['lambda_val']) else: while ((potential_cluster > tree_root) and (tree[(tree['child'] == potential_cluster)]['lambda_val'] >= lambda_)): potential_cluster = tree['parent'][(tree['child'] == potential_cluster)][0] new_tree_row = (potential_cluster, (- 1), 1, lambda_) return np.append(tree, new_tree_row)
2,089,607,605,473,284,400
Create a new condensed tree with an additional point added, allowing for computations as if this point had been part of the original tree. Note that this makes as little change to the tree as possible, with no re-optimizing/re-condensing so that the selected clusters remain effectively unchanged. Parameters ---------- tree : structured array The raw format condensed tree to update. neighbor_indices : array (2 * min_samples, ) An array of raw distance based nearest neighbor indices. neighbor_distances : array (2 * min_samples, ) An array of raw distances to the nearest neighbors. core_distances : array (n_samples, ) An array of core distances for all points min_samples : int The min_samples value used to generate core distances. Returns ------- new_tree : structured array The original tree with an extra row providing the parent cluster and lambda information for a new point given index -1.
hdbscan/prediction.py
_extend_condensed_tree
CKrawczyk/hdbscan
python
def _extend_condensed_tree(tree, neighbor_indices, neighbor_distances, core_distances, min_samples): '\n Create a new condensed tree with an additional point added, allowing for\n computations as if this point had been part of the original tree. Note\n that this makes as little change to the tree as possible, with no\n re-optimizing/re-condensing so that the selected clusters remain\n effectively unchanged.\n\n Parameters\n ----------\n tree : structured array\n The raw format condensed tree to update.\n\n neighbor_indices : array (2 * min_samples, )\n An array of raw distance based nearest neighbor indices.\n\n neighbor_distances : array (2 * min_samples, )\n An array of raw distances to the nearest neighbors.\n\n core_distances : array (n_samples, )\n An array of core distances for all points\n\n min_samples : int\n The min_samples value used to generate core distances.\n\n Returns\n -------\n new_tree : structured array\n The original tree with an extra row providing the parent cluster\n and lambda information for a new point given index -1.\n ' tree_root = tree['parent'].min() (nearest_neighbor, lambda_) = _find_neighbor_and_lambda(neighbor_indices, neighbor_distances, core_distances, min_samples) neighbor_tree_row = get_tree_row_with_child(tree, nearest_neighbor) potential_cluster = neighbor_tree_row['parent'] if (neighbor_tree_row['lambda_val'] <= lambda_): new_tree_row = (potential_cluster, (- 1), 1, neighbor_tree_row['lambda_val']) else: while ((potential_cluster > tree_root) and (tree[(tree['child'] == potential_cluster)]['lambda_val'] >= lambda_)): potential_cluster = tree['parent'][(tree['child'] == potential_cluster)][0] new_tree_row = (potential_cluster, (- 1), 1, lambda_) return np.append(tree, new_tree_row)
def _find_cluster_and_probability(tree, cluster_tree, neighbor_indices, neighbor_distances, core_distances, cluster_map, max_lambdas, min_samples): '\n Return the cluster label (of the original clustering) and membership\n probability of a new data point.\n\n Parameters\n ----------\n tree : CondensedTree\n The condensed tree associated with the clustering.\n\n cluster_tree : structured_array\n The raw form of the condensed tree with only cluster information (no\n data on individual points). This is significantly more compact.\n\n neighbor_indices : array (2 * min_samples, )\n An array of raw distance based nearest neighbor indices.\n\n neighbor_distances : array (2 * min_samples, )\n An array of raw distances to the nearest neighbors.\n\n core_distances : array (n_samples, )\n An array of core distances for all points\n\n cluster_map : dict\n A dictionary mapping cluster numbers in the condensed tree to labels\n in the final selected clustering.\n\n max_lambdas : dict\n A dictionary mapping cluster numbers in the condensed tree to the\n maximum lambda value seen in that cluster.\n\n min_samples : int\n The min_samples value used to generate core distances.\n ' raw_tree = tree._raw_tree tree_root = cluster_tree['parent'].min() (nearest_neighbor, lambda_) = _find_neighbor_and_lambda(neighbor_indices, neighbor_distances, core_distances, min_samples) neighbor_tree_row = get_tree_row_with_child(raw_tree, nearest_neighbor) potential_cluster = neighbor_tree_row['parent'] if (neighbor_tree_row['lambda_val'] > lambda_): while ((potential_cluster > tree_root) and (cluster_tree['lambda_val'][(cluster_tree['child'] == potential_cluster)] >= lambda_)): potential_cluster = cluster_tree['parent'][(cluster_tree['child'] == potential_cluster)][0] if (potential_cluster in cluster_map): cluster_label = cluster_map[potential_cluster] else: cluster_label = (- 1) if (cluster_label >= 0): max_lambda = max_lambdas[potential_cluster] if (max_lambda > 0.0): lambda_ = min(max_lambda, 
lambda_) prob = (lambda_ / max_lambda) else: prob = 1.0 else: prob = 0.0 return (cluster_label, prob)
-6,342,860,168,396,817,000
Return the cluster label (of the original clustering) and membership probability of a new data point. Parameters ---------- tree : CondensedTree The condensed tree associated with the clustering. cluster_tree : structured_array The raw form of the condensed tree with only cluster information (no data on individual points). This is significantly more compact. neighbor_indices : array (2 * min_samples, ) An array of raw distance based nearest neighbor indices. neighbor_distances : array (2 * min_samples, ) An array of raw distances to the nearest neighbors. core_distances : array (n_samples, ) An array of core distances for all points cluster_map : dict A dictionary mapping cluster numbers in the condensed tree to labels in the final selected clustering. max_lambdas : dict A dictionary mapping cluster numbers in the condensed tree to the maximum lambda value seen in that cluster. min_samples : int The min_samples value used to generate core distances.
hdbscan/prediction.py
_find_cluster_and_probability
CKrawczyk/hdbscan
python
def _find_cluster_and_probability(tree, cluster_tree, neighbor_indices, neighbor_distances, core_distances, cluster_map, max_lambdas, min_samples): '\n Return the cluster label (of the original clustering) and membership\n probability of a new data point.\n\n Parameters\n ----------\n tree : CondensedTree\n The condensed tree associated with the clustering.\n\n cluster_tree : structured_array\n The raw form of the condensed tree with only cluster information (no\n data on individual points). This is significantly more compact.\n\n neighbor_indices : array (2 * min_samples, )\n An array of raw distance based nearest neighbor indices.\n\n neighbor_distances : array (2 * min_samples, )\n An array of raw distances to the nearest neighbors.\n\n core_distances : array (n_samples, )\n An array of core distances for all points\n\n cluster_map : dict\n A dictionary mapping cluster numbers in the condensed tree to labels\n in the final selected clustering.\n\n max_lambdas : dict\n A dictionary mapping cluster numbers in the condensed tree to the\n maximum lambda value seen in that cluster.\n\n min_samples : int\n The min_samples value used to generate core distances.\n ' raw_tree = tree._raw_tree tree_root = cluster_tree['parent'].min() (nearest_neighbor, lambda_) = _find_neighbor_and_lambda(neighbor_indices, neighbor_distances, core_distances, min_samples) neighbor_tree_row = get_tree_row_with_child(raw_tree, nearest_neighbor) potential_cluster = neighbor_tree_row['parent'] if (neighbor_tree_row['lambda_val'] > lambda_): while ((potential_cluster > tree_root) and (cluster_tree['lambda_val'][(cluster_tree['child'] == potential_cluster)] >= lambda_)): potential_cluster = cluster_tree['parent'][(cluster_tree['child'] == potential_cluster)][0] if (potential_cluster in cluster_map): cluster_label = cluster_map[potential_cluster] else: cluster_label = (- 1) if (cluster_label >= 0): max_lambda = max_lambdas[potential_cluster] if (max_lambda > 0.0): lambda_ = min(max_lambda, 
lambda_) prob = (lambda_ / max_lambda) else: prob = 1.0 else: prob = 0.0 return (cluster_label, prob)
def approximate_predict(clusterer, points_to_predict): "Predict the cluster label of new points. The returned labels\n will be those of the original clustering found by ``clusterer``,\n and therefore are not (necessarily) the cluster labels that would\n be found by clustering the original data combined with\n ``points_to_predict``, hence the 'approximate' label.\n\n If you simply wish to assign new points to an existing clustering\n in the 'best' way possible, this is the function to use. If you\n want to predict how ``points_to_predict`` would cluster with\n the original data under HDBSCAN the most efficient existing approach\n is to simply recluster with the new point(s) added to the original dataset.\n\n Parameters\n ----------\n clusterer : HDBSCAN\n A clustering object that has been fit to the data and\n either had ``prediction_data=True`` set, or called the\n ``generate_prediction_data`` method after the fact.\n\n points_to_predict : array, or array-like (n_samples, n_features)\n The new data points to predict cluster labels for. They should\n have the same dimensionality as the original dataset over which\n clusterer was fit.\n\n Returns\n -------\n labels : array (n_samples,)\n The predicted labels of the ``points_to_predict``\n\n probabilities : array (n_samples,)\n The soft cluster scores for each of the ``points_to_predict``\n\n See Also\n --------\n :py:func:`hdbscan.predict.membership_vector`\n :py:func:`hdbscan.predict.all_points_membership_vectors`\n\n " if (clusterer.prediction_data_ is None): raise ValueError('Clusterer does not have prediction data! 
Try fitting with prediction_data=True set, or run generate_prediction_data on the clusterer') points_to_predict = np.asarray(points_to_predict) if (points_to_predict.shape[1] != clusterer.prediction_data_.raw_data.shape[1]): raise ValueError('New points dimension does not match fit data!') if (clusterer.prediction_data_.cluster_tree.shape[0] == 0): warn('Clusterer does not have any defined clusters, new data will be automatically predicted as noise.') labels = ((- 1) * np.ones(points_to_predict.shape[0], dtype=np.int32)) probabilities = np.zeros(points_to_predict.shape[0], dtype=np.float32) return (labels, probabilities) labels = np.empty(points_to_predict.shape[0], dtype=np.int) probabilities = np.empty(points_to_predict.shape[0], dtype=np.float64) min_samples = (clusterer.min_samples or clusterer.min_cluster_size) (neighbor_distances, neighbor_indices) = clusterer.prediction_data_.tree.query(points_to_predict, k=(2 * min_samples)) for i in range(points_to_predict.shape[0]): (label, prob) = _find_cluster_and_probability(clusterer.condensed_tree_, clusterer.prediction_data_.cluster_tree, neighbor_indices[i], neighbor_distances[i], clusterer.prediction_data_.core_distances, clusterer.prediction_data_.cluster_map, clusterer.prediction_data_.max_lambdas, min_samples) labels[i] = label probabilities[i] = prob return (labels, probabilities)
-9,133,352,897,079,680,000
Predict the cluster label of new points. The returned labels will be those of the original clustering found by ``clusterer``, and therefore are not (necessarily) the cluster labels that would be found by clustering the original data combined with ``points_to_predict``, hence the 'approximate' label. If you simply wish to assign new points to an existing clustering in the 'best' way possible, this is the function to use. If you want to predict how ``points_to_predict`` would cluster with the original data under HDBSCAN the most efficient existing approach is to simply recluster with the new point(s) added to the original dataset. Parameters ---------- clusterer : HDBSCAN A clustering object that has been fit to the data and either had ``prediction_data=True`` set, or called the ``generate_prediction_data`` method after the fact. points_to_predict : array, or array-like (n_samples, n_features) The new data points to predict cluster labels for. They should have the same dimensionality as the original dataset over which clusterer was fit. Returns ------- labels : array (n_samples,) The predicted labels of the ``points_to_predict`` probabilities : array (n_samples,) The soft cluster scores for each of the ``points_to_predict`` See Also -------- :py:func:`hdbscan.predict.membership_vector` :py:func:`hdbscan.predict.all_points_membership_vectors`
hdbscan/prediction.py
approximate_predict
CKrawczyk/hdbscan
python
def approximate_predict(clusterer, points_to_predict): "Predict the cluster label of new points. The returned labels\n will be those of the original clustering found by ``clusterer``,\n and therefore are not (necessarily) the cluster labels that would\n be found by clustering the original data combined with\n ``points_to_predict``, hence the 'approximate' label.\n\n If you simply wish to assign new points to an existing clustering\n in the 'best' way possible, this is the function to use. If you\n want to predict how ``points_to_predict`` would cluster with\n the original data under HDBSCAN the most efficient existing approach\n is to simply recluster with the new point(s) added to the original dataset.\n\n Parameters\n ----------\n clusterer : HDBSCAN\n A clustering object that has been fit to the data and\n either had ``prediction_data=True`` set, or called the\n ``generate_prediction_data`` method after the fact.\n\n points_to_predict : array, or array-like (n_samples, n_features)\n The new data points to predict cluster labels for. They should\n have the same dimensionality as the original dataset over which\n clusterer was fit.\n\n Returns\n -------\n labels : array (n_samples,)\n The predicted labels of the ``points_to_predict``\n\n probabilities : array (n_samples,)\n The soft cluster scores for each of the ``points_to_predict``\n\n See Also\n --------\n :py:func:`hdbscan.predict.membership_vector`\n :py:func:`hdbscan.predict.all_points_membership_vectors`\n\n " if (clusterer.prediction_data_ is None): raise ValueError('Clusterer does not have prediction data! 
Try fitting with prediction_data=True set, or run generate_prediction_data on the clusterer') points_to_predict = np.asarray(points_to_predict) if (points_to_predict.shape[1] != clusterer.prediction_data_.raw_data.shape[1]): raise ValueError('New points dimension does not match fit data!') if (clusterer.prediction_data_.cluster_tree.shape[0] == 0): warn('Clusterer does not have any defined clusters, new data will be automatically predicted as noise.') labels = ((- 1) * np.ones(points_to_predict.shape[0], dtype=np.int32)) probabilities = np.zeros(points_to_predict.shape[0], dtype=np.float32) return (labels, probabilities) labels = np.empty(points_to_predict.shape[0], dtype=np.int) probabilities = np.empty(points_to_predict.shape[0], dtype=np.float64) min_samples = (clusterer.min_samples or clusterer.min_cluster_size) (neighbor_distances, neighbor_indices) = clusterer.prediction_data_.tree.query(points_to_predict, k=(2 * min_samples)) for i in range(points_to_predict.shape[0]): (label, prob) = _find_cluster_and_probability(clusterer.condensed_tree_, clusterer.prediction_data_.cluster_tree, neighbor_indices[i], neighbor_distances[i], clusterer.prediction_data_.core_distances, clusterer.prediction_data_.cluster_map, clusterer.prediction_data_.max_lambdas, min_samples) labels[i] = label probabilities[i] = prob return (labels, probabilities)
def membership_vector(clusterer, points_to_predict): 'Predict soft cluster membership. The result produces a vector\n for each point in ``points_to_predict`` that gives a probability that\n the given point is a member of a cluster for each of the selected clusters\n of the ``clusterer``.\n\n Parameters\n ----------\n clusterer : HDBSCAN\n A clustering object that has been fit to the data and\n either had ``prediction_data=True`` set, or called the\n ``generate_prediction_data`` method after the fact.\n\n points_to_predict : array, or array-like (n_samples, n_features)\n The new data points to predict cluster labels for. They should\n have the same dimensionality as the original dataset over which\n clusterer was fit.\n\n Returns\n -------\n membership_vectors : array (n_samples, n_clusters)\n The probability that point ``i`` is a member of cluster ``j`` is\n in ``membership_vectors[i, j]``.\n\n See Also\n --------\n :py:func:`hdbscan.predict.predict`\n :py:func:`hdbscan.predict.all_points_membership_vectors`\n' clusters = np.array(sorted(list(clusterer.condensed_tree_._select_clusters()))).astype(np.intp) result = np.empty((points_to_predict.shape[0], clusters.shape[0]), dtype=np.float64) min_samples = (clusterer.min_samples or clusterer.min_cluster_size) (neighbor_distances, neighbor_indices) = clusterer.prediction_data_.tree.query(points_to_predict, k=(2 * min_samples)) for i in range(points_to_predict.shape[0]): (nearest_neighbor, lambda_) = _find_neighbor_and_lambda(neighbor_indices[i], neighbor_distances[i], clusterer.prediction_data_.core_distances, min_samples) neighbor_tree_row = get_tree_row_with_child(clusterer.condensed_tree_._raw_tree, nearest_neighbor) if (neighbor_tree_row['lambda_val'] <= lambda_): lambda_ = neighbor_tree_row['lambda_val'] distance_vec = dist_membership_vector(points_to_predict[i], clusterer.prediction_data_.exemplars, clusterer.prediction_data_.dist_metric) outlier_vec = outlier_membership_vector(nearest_neighbor, lambda_, clusters, 
clusterer.condensed_tree_._raw_tree, clusterer.prediction_data_.leaf_max_lambdas, clusterer.prediction_data_.cluster_tree) result[i] = ((distance_vec ** 0.5) * (outlier_vec ** 2.0)) result[i] /= result[i].sum() result[i] *= prob_in_some_cluster(nearest_neighbor, lambda_, clusters, clusterer.condensed_tree_._raw_tree, clusterer.prediction_data_.leaf_max_lambdas, clusterer.prediction_data_.cluster_tree) return result
1,341,451,579,952,617,700
Predict soft cluster membership. The result produces a vector for each point in ``points_to_predict`` that gives a probability that the given point is a member of a cluster for each of the selected clusters of the ``clusterer``. Parameters ---------- clusterer : HDBSCAN A clustering object that has been fit to the data and either had ``prediction_data=True`` set, or called the ``generate_prediction_data`` method after the fact. points_to_predict : array, or array-like (n_samples, n_features) The new data points to predict cluster labels for. They should have the same dimensionality as the original dataset over which clusterer was fit. Returns ------- membership_vectors : array (n_samples, n_clusters) The probability that point ``i`` is a member of cluster ``j`` is in ``membership_vectors[i, j]``. See Also -------- :py:func:`hdbscan.predict.predict` :py:func:`hdbscan.predict.all_points_membership_vectors`
hdbscan/prediction.py
membership_vector
CKrawczyk/hdbscan
python
def membership_vector(clusterer, points_to_predict): 'Predict soft cluster membership. The result produces a vector\n for each point in ``points_to_predict`` that gives a probability that\n the given point is a member of a cluster for each of the selected clusters\n of the ``clusterer``.\n\n Parameters\n ----------\n clusterer : HDBSCAN\n A clustering object that has been fit to the data and\n either had ``prediction_data=True`` set, or called the\n ``generate_prediction_data`` method after the fact.\n\n points_to_predict : array, or array-like (n_samples, n_features)\n The new data points to predict cluster labels for. They should\n have the same dimensionality as the original dataset over which\n clusterer was fit.\n\n Returns\n -------\n membership_vectors : array (n_samples, n_clusters)\n The probability that point ``i`` is a member of cluster ``j`` is\n in ``membership_vectors[i, j]``.\n\n See Also\n --------\n :py:func:`hdbscan.predict.predict`\n :py:func:`hdbscan.predict.all_points_membership_vectors`\n' clusters = np.array(sorted(list(clusterer.condensed_tree_._select_clusters()))).astype(np.intp) result = np.empty((points_to_predict.shape[0], clusters.shape[0]), dtype=np.float64) min_samples = (clusterer.min_samples or clusterer.min_cluster_size) (neighbor_distances, neighbor_indices) = clusterer.prediction_data_.tree.query(points_to_predict, k=(2 * min_samples)) for i in range(points_to_predict.shape[0]): (nearest_neighbor, lambda_) = _find_neighbor_and_lambda(neighbor_indices[i], neighbor_distances[i], clusterer.prediction_data_.core_distances, min_samples) neighbor_tree_row = get_tree_row_with_child(clusterer.condensed_tree_._raw_tree, nearest_neighbor) if (neighbor_tree_row['lambda_val'] <= lambda_): lambda_ = neighbor_tree_row['lambda_val'] distance_vec = dist_membership_vector(points_to_predict[i], clusterer.prediction_data_.exemplars, clusterer.prediction_data_.dist_metric) outlier_vec = outlier_membership_vector(nearest_neighbor, lambda_, clusters, 
clusterer.condensed_tree_._raw_tree, clusterer.prediction_data_.leaf_max_lambdas, clusterer.prediction_data_.cluster_tree) result[i] = ((distance_vec ** 0.5) * (outlier_vec ** 2.0)) result[i] /= result[i].sum() result[i] *= prob_in_some_cluster(nearest_neighbor, lambda_, clusters, clusterer.condensed_tree_._raw_tree, clusterer.prediction_data_.leaf_max_lambdas, clusterer.prediction_data_.cluster_tree) return result
def all_points_membership_vectors(clusterer): "Predict soft cluster membership vectors for all points in the\n original dataset the clusterer was trained on. This function is more\n efficient by making use of the fact that all points are already in the\n condensed tree, and processing in bulk.\n\n Parameters\n ----------\n clusterer : HDBSCAN\n A clustering object that has been fit to the data and\n either had ``prediction_data=True`` set, or called the\n ``generate_prediction_data`` method after the fact.\n This method does not work if the clusterer was trained\n with ``metric='precomputed'``.\n\n Returns\n -------\n membership_vectors : array (n_samples, n_clusters)\n The probability that point ``i`` of the original dataset is a member of\n cluster ``j`` is in ``membership_vectors[i, j]``.\n\n See Also\n --------\n :py:func:`hdbscan.predict.predict`\n :py:func:`hdbscan.predict.all_points_membership_vectors`\n " clusters = np.array(sorted(list(clusterer.condensed_tree_._select_clusters()))).astype(np.intp) all_points = clusterer.prediction_data_.raw_data if (clusters.size == 0): return np.zeros(all_points.shape[0]) distance_vecs = all_points_dist_membership_vector(all_points, clusterer.prediction_data_.exemplars, clusterer.prediction_data_.dist_metric) outlier_vecs = all_points_outlier_membership_vector(clusters, clusterer.condensed_tree_._raw_tree, clusterer.prediction_data_.leaf_max_lambdas, clusterer.prediction_data_.cluster_tree) in_cluster_probs = all_points_prob_in_some_cluster(clusters, clusterer.condensed_tree_._raw_tree, clusterer.prediction_data_.leaf_max_lambdas, clusterer.prediction_data_.cluster_tree) result = (distance_vecs * outlier_vecs) row_sums = result.sum(axis=1) result = (result / row_sums[:, np.newaxis]) result *= in_cluster_probs[:, np.newaxis] return result
8,111,365,759,064,287,000
Predict soft cluster membership vectors for all points in the original dataset the clusterer was trained on. This function is more efficient by making use of the fact that all points are already in the condensed tree, and processing in bulk. Parameters ---------- clusterer : HDBSCAN A clustering object that has been fit to the data and either had ``prediction_data=True`` set, or called the ``generate_prediction_data`` method after the fact. This method does not work if the clusterer was trained with ``metric='precomputed'``. Returns ------- membership_vectors : array (n_samples, n_clusters) The probability that point ``i`` of the original dataset is a member of cluster ``j`` is in ``membership_vectors[i, j]``. See Also -------- :py:func:`hdbscan.predict.predict` :py:func:`hdbscan.predict.all_points_membership_vectors`
hdbscan/prediction.py
all_points_membership_vectors
CKrawczyk/hdbscan
python
def all_points_membership_vectors(clusterer): "Predict soft cluster membership vectors for all points in the\n original dataset the clusterer was trained on. This function is more\n efficient by making use of the fact that all points are already in the\n condensed tree, and processing in bulk.\n\n Parameters\n ----------\n clusterer : HDBSCAN\n A clustering object that has been fit to the data and\n either had ``prediction_data=True`` set, or called the\n ``generate_prediction_data`` method after the fact.\n This method does not work if the clusterer was trained\n with ``metric='precomputed'``.\n\n Returns\n -------\n membership_vectors : array (n_samples, n_clusters)\n The probability that point ``i`` of the original dataset is a member of\n cluster ``j`` is in ``membership_vectors[i, j]``.\n\n See Also\n --------\n :py:func:`hdbscan.predict.predict`\n :py:func:`hdbscan.predict.all_points_membership_vectors`\n " clusters = np.array(sorted(list(clusterer.condensed_tree_._select_clusters()))).astype(np.intp) all_points = clusterer.prediction_data_.raw_data if (clusters.size == 0): return np.zeros(all_points.shape[0]) distance_vecs = all_points_dist_membership_vector(all_points, clusterer.prediction_data_.exemplars, clusterer.prediction_data_.dist_metric) outlier_vecs = all_points_outlier_membership_vector(clusters, clusterer.condensed_tree_._raw_tree, clusterer.prediction_data_.leaf_max_lambdas, clusterer.prediction_data_.cluster_tree) in_cluster_probs = all_points_prob_in_some_cluster(clusters, clusterer.condensed_tree_._raw_tree, clusterer.prediction_data_.leaf_max_lambdas, clusterer.prediction_data_.cluster_tree) result = (distance_vecs * outlier_vecs) row_sums = result.sum(axis=1) result = (result / row_sums[:, np.newaxis]) result *= in_cluster_probs[:, np.newaxis] return result
def index(request): '\n View for the static index page\n ' return render(request, 'public/home.html', _get_context('Home'))
5,071,290,183,332,569,000
View for the static index page
sigmapiweb/apps/PubSite/views.py
index
Jacobvs/sigmapi-web
python
def index(request): '\n \n ' return render(request, 'public/home.html', _get_context('Home'))
def about(request): '\n View for the static chapter history page.\n ' return render(request, 'public/about.html', _get_context('About'))
3,343,995,519,148,531,000
View for the static chapter history page.
sigmapiweb/apps/PubSite/views.py
about
Jacobvs/sigmapi-web
python
def about(request): '\n \n ' return render(request, 'public/about.html', _get_context('About'))
def activities(request): '\n View for the static chapter service page.\n ' return render(request, 'public/activities.html', _get_context('Service & Activities'))
5,797,297,894,108,019,000
View for the static chapter service page.
sigmapiweb/apps/PubSite/views.py
activities
Jacobvs/sigmapi-web
python
def activities(request): '\n \n ' return render(request, 'public/activities.html', _get_context('Service & Activities'))
def rush(request): '\n View for the static chapter service page.\n ' return render(request, 'public/rush.html', _get_context('Rush'))
2,282,172,684,523,212,000
View for the static chapter service page.
sigmapiweb/apps/PubSite/views.py
rush
Jacobvs/sigmapi-web
python
def rush(request): '\n \n ' return render(request, 'public/rush.html', _get_context('Rush'))
def campaign(request): '\n View for the campaign service page.\n ' class NoRebuildAuthSession(requests.Session): def rebuild_auth(self, prepared_request, response): '\n No code here means requests will always preserve the Authorization\n header when redirected.\n Be careful not to leak your credentials to untrusted hosts!\n ' url = 'https://api.givebutter.com/v1/transactions/' headers = {'Authorization': f'Bearer {settings.GIVEBUTTER_API_KEY}'} response = None session = NoRebuildAuthSession() try: r = session.get(url, headers=headers, timeout=0.75) if (r.status_code == 200): response = r.json() else: logger.error(f'ERROR in request: {r.status_code}') except requests.exceptions.Timeout: logger.warning('Connection to GiveButter API Timed out') except requests.ConnectionError: logger.warning('Connection to GiveButter API could not be resolved') except requests.exceptions.RequestException: logger.error('An unknown issue occurred while trying to retrieve GiveButter Donor List') ctx = _get_context('Campaign') if (response and ('data' in response)): response = response['data'] logger.debug(f'GiveButter API Response: {response}') successful_txs = [tx for tx in response if (tx['status'] == 'succeeded')] sorted_txs = sorted(successful_txs, key=(lambda tx: tx['amount']), reverse=True) transactions = [{'name': tx['giving_space']['name'], 'amount': tx['giving_space']['amount'], 'message': tx['giving_space']['message']} for tx in sorted_txs[:20]] ctx['transactions'] = transactions ctx['num_txs'] = len(successful_txs) return render(request, 'public/campaign.html', ctx)
8,055,564,660,194,777,000
View for the campaign service page.
sigmapiweb/apps/PubSite/views.py
campaign
Jacobvs/sigmapi-web
python
def campaign(request): '\n \n ' class NoRebuildAuthSession(requests.Session): def rebuild_auth(self, prepared_request, response): '\n No code here means requests will always preserve the Authorization\n header when redirected.\n Be careful not to leak your credentials to untrusted hosts!\n ' url = 'https://api.givebutter.com/v1/transactions/' headers = {'Authorization': f'Bearer {settings.GIVEBUTTER_API_KEY}'} response = None session = NoRebuildAuthSession() try: r = session.get(url, headers=headers, timeout=0.75) if (r.status_code == 200): response = r.json() else: logger.error(f'ERROR in request: {r.status_code}') except requests.exceptions.Timeout: logger.warning('Connection to GiveButter API Timed out') except requests.ConnectionError: logger.warning('Connection to GiveButter API could not be resolved') except requests.exceptions.RequestException: logger.error('An unknown issue occurred while trying to retrieve GiveButter Donor List') ctx = _get_context('Campaign') if (response and ('data' in response)): response = response['data'] logger.debug(f'GiveButter API Response: {response}') successful_txs = [tx for tx in response if (tx['status'] == 'succeeded')] sorted_txs = sorted(successful_txs, key=(lambda tx: tx['amount']), reverse=True) transactions = [{'name': tx['giving_space']['name'], 'amount': tx['giving_space']['amount'], 'message': tx['giving_space']['message']} for tx in sorted_txs[:20]] ctx['transactions'] = transactions ctx['num_txs'] = len(successful_txs) return render(request, 'public/campaign.html', ctx)
def permission_denied(request): '\n View for 403 (Permission Denied) error.\n ' return render(request, 'common/403.html', _get_context('Permission Denied'))
-3,855,063,181,969,753,600
View for 403 (Permission Denied) error.
sigmapiweb/apps/PubSite/views.py
permission_denied
Jacobvs/sigmapi-web
python
def permission_denied(request): '\n \n ' return render(request, 'common/403.html', _get_context('Permission Denied'))
def rebuild_auth(self, prepared_request, response): '\n No code here means requests will always preserve the Authorization\n header when redirected.\n Be careful not to leak your credentials to untrusted hosts!\n '
-5,803,225,964,835,695,000
No code here means requests will always preserve the Authorization header when redirected. Be careful not to leak your credentials to untrusted hosts!
sigmapiweb/apps/PubSite/views.py
rebuild_auth
Jacobvs/sigmapi-web
python
def rebuild_auth(self, prepared_request, response): '\n No code here means requests will always preserve the Authorization\n header when redirected.\n Be careful not to leak your credentials to untrusted hosts!\n '
def normalize(self, text: str) -> str: 'Normalize text.\n \n Args:\n text (str): text to be normalized\n ' for (normalize_fn, repl) in self._normalize: text = normalize_fn(text, repl) return text
6,344,072,354,142,538,000
Normalize text. Args: text (str): text to be normalized
prenlp/data/normalizer.py
normalize
awesome-archive/prenlp
python
def normalize(self, text: str) -> str: 'Normalize text.\n \n Args:\n text (str): text to be normalized\n ' for (normalize_fn, repl) in self._normalize: text = normalize_fn(text, repl) return text
def _init_normalize(self) -> None: "Initialize normalize function.\n If 'repl' is None, normalization is not applied to the pattern corresponding to 'repl'.\n " if (self.url_repl is not None): self._normalize.append((self._url_normalize, self.url_repl)) if (self.tag_repl is not None): self._normalize.append((self._tag_normalize, self.tag_repl)) if (self.emoji_repl is not None): self._normalize.append((self._emoji_normalize, self.emoji_repl)) if (self.email_repl is not None): self._normalize.append((self._email_normalize, self.email_repl)) if (self.tel_repl is not None): self._normalize.append((self._tel_normalize, self.tel_repl))
9,143,328,064,171,123,000
Initialize normalize function. If 'repl' is None, normalization is not applied to the pattern corresponding to 'repl'.
prenlp/data/normalizer.py
_init_normalize
awesome-archive/prenlp
python
def _init_normalize(self) -> None: "Initialize normalize function.\n If 'repl' is None, normalization is not applied to the pattern corresponding to 'repl'.\n " if (self.url_repl is not None): self._normalize.append((self._url_normalize, self.url_repl)) if (self.tag_repl is not None): self._normalize.append((self._tag_normalize, self.tag_repl)) if (self.emoji_repl is not None): self._normalize.append((self._emoji_normalize, self.emoji_repl)) if (self.email_repl is not None): self._normalize.append((self._email_normalize, self.email_repl)) if (self.tel_repl is not None): self._normalize.append((self._tel_normalize, self.tel_repl))