Dataset schema (column, type, and observed value range; string lengths in characters):

    body                    string    26 to 98.2k
    body_hash               int64     -9,222,864,604,528,158,000 to 9,221,803,474B
    docstring               string    1 to 16.8k
    path                    string    5 to 230
    name                    string    1 to 96
    repository_name         string    7 to 89
    lang                    string    1 distinct value (python)
    body_without_docstring  string    20 to 98.2k
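Each row below pairs a Python function (body) with a hash of that body, the extracted docstring, provenance metadata (path, name, repository_name, lang), and the body with the docstring stripped. A minimal sketch of consuming rows with this schema via the `datasets` library; the dataset name here is a hypothetical placeholder, not the dump's real Hub id:

    from datasets import load_dataset

    # 'user/python-functions' is an assumed placeholder name for illustration.
    ds = load_dataset('user/python-functions', split='train')
    for row in ds.select(range(3)):
        # Each row carries the fields listed in the schema above.
        print(row['repository_name'], row['path'], row['name'])
        print(row['docstring'][:80])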
def get_api_key(): ' Get secret api key from a file on filesystem ' paren_dir = os.path.dirname(os.path.realpath(__file__)) api_path = os.path.join(paren_dir, 'weather_api.txt') with open(api_path, 'r') as file: api_key = file.read().replace('\n', '') return api_key
239,451,994,047,830,600
Get secret api key from a file on filesystem
.config/polybar/weather/weather.py
get_api_key
NearHuscarl/dotfiles
python
def get_api_key(): ' ' paren_dir = os.path.dirname(os.path.realpath(__file__)) api_path = os.path.join(paren_dir, 'weather_api.txt') with open(api_path, 'r') as file: api_key = file.read().replace('\n', '') return api_key
def get_city_id(): ' Workaround to get city id based on my schedule ' region_code = {'TPHCM': 1580578, 'TPHCM2': 1566083, 'Hai Duong': 1581326, 'Tan An': 1567069} hour = int(datetime.datetime.now().strftime('%H')) weekday = datetime.datetime.now().strftime('%a') if (((hour >= 17) and (weekday == 'Fri')) or (weekday == 'Sat') or ((hour < 17) and (weekday == 'Sun'))): return region_code['Tan An'] return region_code['Hai Duong']
-7,807,401,822,778,362,000
Workaround to get city id based on my schedule
.config/polybar/weather/weather.py
get_city_id
NearHuscarl/dotfiles
python
def get_city_id(): ' ' region_code = {'TPHCM': 1580578, 'TPHCM2': 1566083, 'Hai Duong': 1581326, 'Tan An': 1567069} hour = int(datetime.datetime.now().strftime('%H')) weekday = datetime.datetime.now().strftime('%a') if (((hour >= 17) and (weekday == 'Fri')) or (weekday == 'Sat') or ((hour < 17) and (weekday == 'Sun'))): return region_code['Tan An'] return region_code['Hai Duong']
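get_city_id hard-codes a personal weekly schedule: Friday from 17:00 through Sunday before 17:00 maps to Tan An, everything else to Hai Duong. A small sketch of the same test against an explicit datetime instead of now(), which makes the branch easy to check; city_for is an illustrative helper, not part of the source:

    import datetime

    def city_for(dt):
        # Same weekend window as get_city_id, on an explicit datetime.
        hour, weekday = dt.hour, dt.strftime('%a')
        weekend = ((hour >= 17 and weekday == 'Fri') or weekday == 'Sat'
                   or (hour < 17 and weekday == 'Sun'))
        return 'Tan An' if weekend else 'Hai Duong'

    print(city_for(datetime.datetime(2024, 5, 3, 18)))  # Friday 18:00 -> 'Tan An'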
def update_weather(city_id, units, api_key): ' Update weather by using openweather api ' url = 'http://api.openweathermap.org/data/2.5/weather?id={}&appid={}&units={}' temp_unit = ('C' if (units == 'metric') else 'K') error_icon = color_polybar('\ue2c1', 'red') try: req = requests.get(url.format(city_id, api_key, units)) try: description = req.json()['weather'][0]['description'].capitalize() except ValueError: print(error_icon, flush=True) raise MyInternetIsShitty temp_value = round(req.json()['main']['temp']) temp = ((str(temp_value) + '°') + temp_unit) thermo_icon = color_polybar(get_thermo_icon(temp_value, units), 'main') weather_id = req.json()['weather'][0]['id'] weather_icon = color_polybar(get_weather_icon(weather_id), 'main') print('{} {} {} {}'.format(weather_icon, description, thermo_icon, temp), flush=True) except (HTTPError, Timeout, ConnectionError): print(error_icon, flush=True) raise MyInternetIsShitty
6,975,599,857,060,252,000
Update weather by using openweather api
.config/polybar/weather/weather.py
update_weather
NearHuscarl/dotfiles
python
def update_weather(city_id, units, api_key): ' ' url = 'http://api.openweathermap.org/data/2.5/weather?id={}&appid={}&units={}' temp_unit = ('C' if (units == 'metric') else 'K') error_icon = color_polybar('\ue2c1', 'red') try: req = requests.get(url.format(city_id, api_key, units)) try: description = req.json()['weather'][0]['description'].capitalize() except ValueError: print(error_icon, flush=True) raise MyInternetIsShitty temp_value = round(req.json()['main']['temp']) temp = ((str(temp_value) + '°') + temp_unit) thermo_icon = color_polybar(get_thermo_icon(temp_value, units), 'main') weather_id = req.json()['weather'][0]['id'] weather_icon = color_polybar(get_weather_icon(weather_id), 'main') print('{} {} {} {}'.format(weather_icon, description, thermo_icon, temp), flush=True) except (HTTPError, Timeout, ConnectionError): print(error_icon, flush=True) raise MyInternetIsShitty
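update_weather fills the OpenWeatherMap current-weather URL with a city id, API key, and units, then reads three fields out of the JSON payload. A standalone sketch of the same request; the key is a placeholder and the explicit timeout is an added assumption (the original call sets none):

    import requests

    URL = 'http://api.openweathermap.org/data/2.5/weather?id={}&appid={}&units={}'
    resp = requests.get(URL.format(1581326, 'YOUR_API_KEY', 'metric'), timeout=10)
    payload = resp.json()
    # The three fields the polybar script consumes:
    print(payload['weather'][0]['description'].capitalize())
    print(payload['weather'][0]['id'])     # numeric condition code, mapped to an icon
    print(round(payload['main']['temp']))  # rounded temperature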
def main(): ' main function ' arg = get_args() if (arg.log == 'debug'): set_up_logging() units = arg.unit api_key = get_api_key() city_id = get_city_id() while True: try: update_weather(city_id, units, api_key) except MyInternetIsShitty: logging.info(cb('update failed: ', 'red')) time.sleep(3) else: logging.info(cb('update success', 'green')) time.sleep(700)
-4,733,848,066,255,199,000
main function
.config/polybar/weather/weather.py
main
NearHuscarl/dotfiles
python
def main(): ' ' arg = get_args() if (arg.log == 'debug'): set_up_logging() units = arg.unit api_key = get_api_key() city_id = get_city_id() while True: try: update_weather(city_id, units, api_key) except MyInternetIsShitty: logging.info(cb('update failed: ', 'red')) time.sleep(3) else: logging.info(cb('update success', 'green')) time.sleep(700)
def run_server(server_port): 'Run the UDP pinger server\n ' with Socket(socket.AF_INET, socket.SOCK_DGRAM) as server_socket: server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) server_socket.bind(('', server_port)) print('Ping server ready on port', server_port) while True: (_, client_address) = server_socket.recvfrom(1024) server_socket.sendto(''.encode(), client_address) return 0
6,592,167,236,038,547,000
Run the UDP pinger server
ping/ping.py
run_server
akshayrb22/Kurose-and-Ross-socket-programming-exercises
python
def run_server(server_port): '\n ' with Socket(socket.AF_INET, socket.SOCK_DGRAM) as server_socket: server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) server_socket.bind(('', server_port)) print('Ping server ready on port', server_port) while True: (_, client_address) = server_socket.recvfrom(1024) server_socket.sendto(''.encode(), client_address) return 0
def run_client(server_address, server_port): 'Ping a UDP pinger server running at the given address\n ' raise NotImplementedError return 0
-7,235,053,255,711,612,000
Ping a UDP pinger server running at the given address
ping/ping.py
run_client
akshayrb22/Kurose-and-Ross-socket-programming-exercises
python
def run_client(server_address, server_port): '\n ' raise NotImplementedError return 0
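run_client is left as a stub that raises NotImplementedError (these rows come from a Kurose-and-Ross socket-programming exercise). A minimal sketch of one way such a client could work against the server above; the ping count, payload format, and 1-second timeout are illustrative assumptions:

    import socket
    import time

    def run_client_sketch(server_address, server_port, count=10):
        # Send UDP pings and print the round-trip time of each reply.
        with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as client_socket:
            client_socket.settimeout(1.0)  # treat slow replies as lost
            for seq in range(count):
                start = time.time()
                client_socket.sendto(f'ping {seq}'.encode(), (server_address, server_port))
                try:
                    client_socket.recvfrom(1024)
                    print(f'seq={seq} rtt={(time.time() - start) * 1000:.1f} ms')
                except socket.timeout:
                    print(f'seq={seq} request timed out')
        return 0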
def _calculate_reciprocal_rank(self, hypothesis_ids: np.ndarray, reference_id: int) -> float: 'Calculate the reciprocal rank for a given hypothesis and reference\n \n Params:\n hypothesis_ids: Iterator of hypothesis ids (as numpy array) ordered by its relevance\n reference_id: Reference id (as a integer) of the correct id of response\n Returns:\n reciprocal rank\n ' hypothesis_ids = np.asarray(hypothesis_ids) try: rank = (np.where((hypothesis_ids == reference_id))[0][0] + 1) except IndexError: rank = (self.max_rank + 1) if (rank > self.max_rank): reciprocal_rank = 0.0 else: reciprocal_rank = (1.0 / rank) return reciprocal_rank
-8,653,979,882,358,909,000
Calculate the reciprocal rank for a given hypothesis and reference Params: hypothesis_ids: Iterator of hypothesis ids (as numpy array) ordered by its relevance reference_id: Reference id (as a integer) of the correct id of response Returns: reciprocal rank
tasks/retriever/mrr.py
_calculate_reciprocal_rank
platiagro/tasks
python
def _calculate_reciprocal_rank(self, hypothesis_ids: np.ndarray, reference_id: int) -> float: 'Calculate the reciprocal rank for a given hypothesis and reference\n \n Params:\n hypothesis_ids: Iterator of hypothesis ids (as numpy array) ordered by its relevance\n reference_id: Reference id (as a integer) of the correct id of response\n Returns:\n reciprocal rank\n ' hypothesis_ids = np.asarray(hypothesis_ids) try: rank = (np.where((hypothesis_ids == reference_id))[0][0] + 1) except IndexError: rank = (self.max_rank + 1) if (rank > self.max_rank): reciprocal_rank = 0.0 else: reciprocal_rank = (1.0 / rank) return reciprocal_rank
def forward(self, batch_hypothesis_ids: List[np.ndarray], batch_reference_id: List[int]) -> float: 'Score the mean reciprocal rank for the batch\n \n Example from http://en.wikipedia.org/wiki/Mean_reciprocal_rank\n \n >>> batch_hypothesis_ids = [[1, 0, 2], [0, 2, 1], [1, 0, 2]]\n >>> batch_reference_id = [2, 2, 1]\n >>> mrr = MRR()\n >>> mrr(batch_hypothesis_ids, batch_reference_id)\n 0.61111111111111105\n\n Args:\n batch_hypothesis_ids: Batch of hypothesis ids (as numpy array) ordered by its relevance\n reference_id: Batch of reference id (as a integer) of the correct id of response\n Returns:\n Mean reciprocal rank (MRR)\n ' assert (len(batch_hypothesis_ids) == len(batch_reference_id)), 'Hypothesis batch and reference batch must have same length.' batch_size = len(batch_hypothesis_ids) mrr = 0 for (hypothesis_ids, reference_id) in zip(batch_hypothesis_ids, batch_reference_id): reciprocal_rank = self._calculate_reciprocal_rank(hypothesis_ids, reference_id) mrr += (reciprocal_rank / batch_size) return mrr
-4,246,488,428,252,922,400
Score the mean reciprocal rank for the batch Example from http://en.wikipedia.org/wiki/Mean_reciprocal_rank >>> batch_hypothesis_ids = [[1, 0, 2], [0, 2, 1], [1, 0, 2]] >>> batch_reference_id = [2, 2, 1] >>> mrr = MRR() >>> mrr(batch_hypothesis_ids, batch_reference_id) 0.61111111111111105 Args: batch_hypothesis_ids: Batch of hypothesis ids (as numpy array) ordered by its relevance reference_id: Batch of reference id (as a integer) of the correct id of response Returns: Mean reciprocal rank (MRR)
tasks/retriever/mrr.py
forward
platiagro/tasks
python
def forward(self, batch_hypothesis_ids: List[np.ndarray], batch_reference_id: List[int]) -> float: 'Score the mean reciprocal rank for the batch\n \n Example from http://en.wikipedia.org/wiki/Mean_reciprocal_rank\n \n >>> batch_hypothesis_ids = [[1, 0, 2], [0, 2, 1], [1, 0, 2]]\n >>> batch_reference_id = [2, 2, 1]\n >>> mrr = MRR()\n >>> mrr(batch_hypothesis_ids, batch_reference_id)\n 0.61111111111111105\n\n Args:\n batch_hypothesis_ids: Batch of hypothesis ids (as numpy array) ordered by its relevance\n reference_id: Batch of reference id (as a integer) of the correct id of response\n Returns:\n Mean reciprocal rank (MRR)\n ' assert (len(batch_hypothesis_ids) == len(batch_reference_id)), 'Hypothesis batch and reference batch must have same length.' batch_size = len(batch_hypothesis_ids) mrr = 0 for (hypothesis_ids, reference_id) in zip(batch_hypothesis_ids, batch_reference_id): reciprocal_rank = self._calculate_reciprocal_rank(hypothesis_ids, reference_id) mrr += (reciprocal_rank / batch_size) return mrr
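The doctest in the forward docstring can be reproduced by hand: reference 2 sits at rank 3 in [1, 0, 2] and rank 2 in [0, 2, 1], and reference 1 sits at rank 1 in [1, 0, 2], so the mean is (1/3 + 1/2 + 1/1) / 3 = 11/18, roughly 0.6111. A self-contained sketch of the same computation without the class plumbing:

    import numpy as np

    def mean_reciprocal_rank(batch_hypothesis_ids, batch_reference_id):
        # 1 / (1-based rank of the reference id in each hypothesis list),
        # averaged over the batch; contributes 0.0 when the reference is absent.
        total = 0.0
        for hypothesis_ids, reference_id in zip(batch_hypothesis_ids, batch_reference_id):
            matches = np.where(np.asarray(hypothesis_ids) == reference_id)[0]
            total += 1.0 / (matches[0] + 1) if matches.size else 0.0
        return total / len(batch_hypothesis_ids)

    assert abs(mean_reciprocal_rank([[1, 0, 2], [0, 2, 1], [1, 0, 2]], [2, 2, 1]) - 11 / 18) < 1e-9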
def get_oof_pred_proba(self, X, normalize=None, **kwargs): 'X should be the same X passed to `.fit`' y_oof_pred_proba = self._get_oof_pred_proba(X=X, **kwargs) if (normalize is None): normalize = self.normalize_pred_probas if normalize: y_oof_pred_proba = normalize_pred_probas(y_oof_pred_proba, self.problem_type) y_oof_pred_proba = y_oof_pred_proba.astype(np.float32) return y_oof_pred_proba
-8,436,748,996,598,062,000
X should be the same X passed to `.fit`
tabular/src/autogluon/tabular/models/knn/knn_model.py
get_oof_pred_proba
taesup-aws/autogluon
python
def get_oof_pred_proba(self, X, normalize=None, **kwargs): y_oof_pred_proba = self._get_oof_pred_proba(X=X, **kwargs) if (normalize is None): normalize = self.normalize_pred_probas if normalize: y_oof_pred_proba = normalize_pred_probas(y_oof_pred_proba, self.problem_type) y_oof_pred_proba = y_oof_pred_proba.astype(np.float32) return y_oof_pred_proba
def _fit_with_samples(self, X, y, time_limit, start_samples=10000, max_samples=None, sample_growth_factor=2, sample_time_growth_factor=8): '\n Fit model with samples of the data repeatedly, gradually increasing the amount of data until time_limit is reached or all data is used.\n\n X and y must already be preprocessed.\n\n Parameters\n ----------\n X : np.ndarray\n The training data features (preprocessed).\n y : Series\n The training data ground truth labels.\n time_limit : float, default = None\n Time limit in seconds to adhere to when fitting model.\n start_samples : int, default = 10000\n Number of samples to start with. This will be multiplied by sample_growth_factor after each model fit to determine the next number of samples.\n For example, if start_samples=10000, sample_growth_factor=2, then the number of samples per model fit would be [10000, 20000, 40000, 80000, ...]\n max_samples : int, default = None\n The maximum number of samples to use.\n If None or greater than the number of rows in X, then it is set equal to the number of rows in X.\n sample_growth_factor : float, default = 2\n The rate of growth in sample size between each model fit. If 2, then the sample size doubles after each fit.\n sample_time_growth_factor : float, default = 8\n The multiplier to the expected fit time of the next model. If `sample_time_growth_factor=8` and a model took 10 seconds to train, the next model fit will be expected to take 80 seconds.\n If an expected time is greater than the remaining time in `time_limit`, the model will not be trained and the method will return early.\n ' time_start = time.time() num_rows_samples = [] if (max_samples is None): num_rows_max = len(X) else: num_rows_max = min(len(X), max_samples) num_rows_cur = start_samples while True: num_rows_cur = min(num_rows_cur, num_rows_max) num_rows_samples.append(num_rows_cur) if (num_rows_cur == num_rows_max): break num_rows_cur *= sample_growth_factor num_rows_cur = math.ceil(num_rows_cur) if ((num_rows_cur * 1.5) >= num_rows_max): num_rows_cur = num_rows_max def sample_func(chunk, frac): n = max(math.ceil((len(chunk) * frac)), 1) return chunk.sample(n=n, replace=False, random_state=0) if (self.problem_type != REGRESSION): y_df = y.to_frame(name='label').reset_index(drop=True) else: y_df = None time_start_sample_loop = time.time() time_limit_left = (time_limit - (time_start_sample_loop - time_start)) model_type = self._get_model_type() idx = None for (i, samples) in enumerate(num_rows_samples): if (samples != num_rows_max): if (self.problem_type == REGRESSION): idx = np.random.choice(num_rows_max, size=samples, replace=False) else: idx = y_df.groupby('label', group_keys=False).apply(sample_func, frac=(samples / num_rows_max)).index X_samp = X[idx, :] y_samp = y.iloc[idx] else: X_samp = X y_samp = y idx = None self.model = model_type(**self._get_model_params()).fit(X_samp, y_samp) time_limit_left_prior = time_limit_left time_fit_end_sample = time.time() time_limit_left = (time_limit - (time_fit_end_sample - time_start)) time_fit_sample = (time_limit_left_prior - time_limit_left) time_required_for_next = (time_fit_sample * sample_time_growth_factor) logger.log(15, f' {round(time_fit_sample, 2)}s = Train Time (Using {samples}/{num_rows_max} rows) ({round(time_limit_left, 2)}s remaining time)') if ((time_required_for_next > time_limit_left) and (i != (len(num_rows_samples) - 1))): logger.log(20, f' Not enough time to train KNN model on all training rows. Fit {samples}/{num_rows_max} rows. (Training KNN model on {num_rows_samples[(i + 1)]} rows is expected to take {round(time_required_for_next, 2)}s)') break if (idx is not None): idx = set(idx) self._X_unused_index = [i for i in range(num_rows_max) if (i not in idx)] return self.model
7,872,693,222,056,237,000
Fit model with samples of the data repeatedly, gradually increasing the amount of data until time_limit is reached or all data is used. X and y must already be preprocessed. Parameters ---------- X : np.ndarray The training data features (preprocessed). y : Series The training data ground truth labels. time_limit : float, default = None Time limit in seconds to adhere to when fitting model. start_samples : int, default = 10000 Number of samples to start with. This will be multiplied by sample_growth_factor after each model fit to determine the next number of samples. For example, if start_samples=10000, sample_growth_factor=2, then the number of samples per model fit would be [10000, 20000, 40000, 80000, ...] max_samples : int, default = None The maximum number of samples to use. If None or greater than the number of rows in X, then it is set equal to the number of rows in X. sample_growth_factor : float, default = 2 The rate of growth in sample size between each model fit. If 2, then the sample size doubles after each fit. sample_time_growth_factor : float, default = 8 The multiplier to the expected fit time of the next model. If `sample_time_growth_factor=8` and a model took 10 seconds to train, the next model fit will be expected to take 80 seconds. If an expected time is greater than the remaining time in `time_limit`, the model will not be trained and the method will return early.
tabular/src/autogluon/tabular/models/knn/knn_model.py
_fit_with_samples
taesup-aws/autogluon
python
def _fit_with_samples(self, X, y, time_limit, start_samples=10000, max_samples=None, sample_growth_factor=2, sample_time_growth_factor=8): '\n Fit model with samples of the data repeatedly, gradually increasing the amount of data until time_limit is reached or all data is used.\n\n X and y must already be preprocessed.\n\n Parameters\n ----------\n X : np.ndarray\n The training data features (preprocessed).\n y : Series\n The training data ground truth labels.\n time_limit : float, default = None\n Time limit in seconds to adhere to when fitting model.\n start_samples : int, default = 10000\n Number of samples to start with. This will be multiplied by sample_growth_factor after each model fit to determine the next number of samples.\n For example, if start_samples=10000, sample_growth_factor=2, then the number of samples per model fit would be [10000, 20000, 40000, 80000, ...]\n max_samples : int, default = None\n The maximum number of samples to use.\n If None or greater than the number of rows in X, then it is set equal to the number of rows in X.\n sample_growth_factor : float, default = 2\n The rate of growth in sample size between each model fit. If 2, then the sample size doubles after each fit.\n sample_time_growth_factor : float, default = 8\n The multiplier to the expected fit time of the next model. If `sample_time_growth_factor=8` and a model took 10 seconds to train, the next model fit will be expected to take 80 seconds.\n If an expected time is greater than the remaining time in `time_limit`, the model will not be trained and the method will return early.\n ' time_start = time.time() num_rows_samples = [] if (max_samples is None): num_rows_max = len(X) else: num_rows_max = min(len(X), max_samples) num_rows_cur = start_samples while True: num_rows_cur = min(num_rows_cur, num_rows_max) num_rows_samples.append(num_rows_cur) if (num_rows_cur == num_rows_max): break num_rows_cur *= sample_growth_factor num_rows_cur = math.ceil(num_rows_cur) if ((num_rows_cur * 1.5) >= num_rows_max): num_rows_cur = num_rows_max def sample_func(chunk, frac): n = max(math.ceil((len(chunk) * frac)), 1) return chunk.sample(n=n, replace=False, random_state=0) if (self.problem_type != REGRESSION): y_df = y.to_frame(name='label').reset_index(drop=True) else: y_df = None time_start_sample_loop = time.time() time_limit_left = (time_limit - (time_start_sample_loop - time_start)) model_type = self._get_model_type() idx = None for (i, samples) in enumerate(num_rows_samples): if (samples != num_rows_max): if (self.problem_type == REGRESSION): idx = np.random.choice(num_rows_max, size=samples, replace=False) else: idx = y_df.groupby('label', group_keys=False).apply(sample_func, frac=(samples / num_rows_max)).index X_samp = X[idx, :] y_samp = y.iloc[idx] else: X_samp = X y_samp = y idx = None self.model = model_type(**self._get_model_params()).fit(X_samp, y_samp) time_limit_left_prior = time_limit_left time_fit_end_sample = time.time() time_limit_left = (time_limit - (time_fit_end_sample - time_start)) time_fit_sample = (time_limit_left_prior - time_limit_left) time_required_for_next = (time_fit_sample * sample_time_growth_factor) logger.log(15, f' {round(time_fit_sample, 2)}s = Train Time (Using {samples}/{num_rows_max} rows) ({round(time_limit_left, 2)}s remaining time)') if ((time_required_for_next > time_limit_left) and (i != (len(num_rows_samples) - 1))): logger.log(20, f' Not enough time to train KNN model on all training rows. Fit {samples}/{num_rows_max} rows. (Training KNN model on {num_rows_samples[(i + 1)]} rows is expected to take {round(time_required_for_next, 2)}s)') break if (idx is not None): idx = set(idx) self._X_unused_index = [i for i in range(num_rows_max) if (i not in idx)] return self.model
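The docstring above spells out the sampling schedule (start_samples scaled by sample_growth_factor after each fit, snapping to the full dataset once the next step lands within 1.5x of it). A small sketch that isolates just that schedule logic, mirroring the loop in the body:

    import math

    def sample_schedule(num_rows, start_samples=10000, sample_growth_factor=2):
        # Mirrors the schedule loop in _fit_with_samples: grow geometrically,
        # then jump straight to all rows once within 1.5x of the full dataset.
        schedule, num_rows_cur = [], start_samples
        while True:
            num_rows_cur = min(num_rows_cur, num_rows)
            schedule.append(num_rows_cur)
            if num_rows_cur == num_rows:
                break
            num_rows_cur = math.ceil(num_rows_cur * sample_growth_factor)
            if num_rows_cur * 1.5 >= num_rows:
                num_rows_cur = num_rows
        return schedule

    print(sample_schedule(100000))  # [10000, 20000, 40000, 100000]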
def naked_twins(values): "Eliminate values using the naked twins strategy.\n\n The naked twins strategy says that if you have two or more unallocated boxes\n in a unit and there are only two digits that can go in those two boxes, then\n those two digits can be eliminated from the possible assignments of all other\n boxes in the same unit.\n\n Parameters\n ----------\n values(dict)\n a dictionary of the form {'box_name': '123456789', ...}\n\n Returns\n -------\n dict\n The values dictionary with the naked twins eliminated from peers\n\n Notes\n -----\n Your solution can either process all pairs of naked twins from the input once,\n or it can continue processing pairs of naked twins until there are no such\n pairs remaining -- the project assistant test suite will accept either\n convention. However, it will not accept code that does not process all pairs\n of naked twins from the original input. (For example, if you start processing\n pairs of twins and eliminate another pair of twins before the second pair\n is processed then your code will fail the PA test suite.)\n\n The first convention is preferred for consistency with the other strategies,\n and because it is simpler (since the reduce_puzzle function already calls this\n strategy repeatedly).\n\n See Also\n --------\n Pseudocode for this algorithm on github:\n https://github.com/udacity/artificial-intelligence/blob/master/Projects/1_Sudoku/pseudocode.md\n " "\n out = values.copy()\n len_2_boxes = [box for box in values if len(values[box]) == 2]\n for boxA in len_2_boxes:\n boxAPeers = peers[boxA]\n for boxB in boxAPeers:\n if values[boxA] == values[boxB]:\n intersect = [val for val in boxAPeers if val in peers[boxB]]\n for peer in intersect:\n out[peer] = out[peer].replace(values[boxA], '')\n return out\n " out = values.copy() for boxA in values: boxAPeers = peers[boxA] for boxB in boxAPeers: if ((values[boxA] == values[boxB]) and (len(values[boxA]) == 2)): intersect = [val for val in boxAPeers if (val in peers[boxB])] for peer in intersect: for digit in values[boxA]: out[peer] = out[peer].replace(digit, '') return out
-1,691,231,728,503,389,700
Eliminate values using the naked twins strategy. The naked twins strategy says that if you have two or more unallocated boxes in a unit and there are only two digits that can go in those two boxes, then those two digits can be eliminated from the possible assignments of all other boxes in the same unit. Parameters ---------- values(dict) a dictionary of the form {'box_name': '123456789', ...} Returns ------- dict The values dictionary with the naked twins eliminated from peers Notes ----- Your solution can either process all pairs of naked twins from the input once, or it can continue processing pairs of naked twins until there are no such pairs remaining -- the project assistant test suite will accept either convention. However, it will not accept code that does not process all pairs of naked twins from the original input. (For example, if you start processing pairs of twins and eliminate another pair of twins before the second pair is processed then your code will fail the PA test suite.) The first convention is preferred for consistency with the other strategies, and because it is simpler (since the reduce_puzzle function already calls this strategy repeatedly). See Also -------- Pseudocode for this algorithm on github: https://github.com/udacity/artificial-intelligence/blob/master/Projects/1_Sudoku/pseudocode.md
Projects/1_Sudoku/solution.py
naked_twins
justinlnx/artificial-intelligence
python
def naked_twins(values): "Eliminate values using the naked twins strategy.\n\n The naked twins strategy says that if you have two or more unallocated boxes\n in a unit and there are only two digits that can go in those two boxes, then\n those two digits can be eliminated from the possible assignments of all other\n boxes in the same unit.\n\n Parameters\n ----------\n values(dict)\n a dictionary of the form {'box_name': '123456789', ...}\n\n Returns\n -------\n dict\n The values dictionary with the naked twins eliminated from peers\n\n Notes\n -----\n Your solution can either process all pairs of naked twins from the input once,\n or it can continue processing pairs of naked twins until there are no such\n pairs remaining -- the project assistant test suite will accept either\n convention. However, it will not accept code that does not process all pairs\n of naked twins from the original input. (For example, if you start processing\n pairs of twins and eliminate another pair of twins before the second pair\n is processed then your code will fail the PA test suite.)\n\n The first convention is preferred for consistency with the other strategies,\n and because it is simpler (since the reduce_puzzle function already calls this\n strategy repeatedly).\n\n See Also\n --------\n Pseudocode for this algorithm on github:\n https://github.com/udacity/artificial-intelligence/blob/master/Projects/1_Sudoku/pseudocode.md\n " "\n out = values.copy()\n len_2_boxes = [box for box in values if len(values[box]) == 2]\n for boxA in len_2_boxes:\n boxAPeers = peers[boxA]\n for boxB in boxAPeers:\n if values[boxA] == values[boxB]:\n intersect = [val for val in boxAPeers if val in peers[boxB]]\n for peer in intersect:\n out[peer] = out[peer].replace(values[boxA], '')\n return out\n " out = values.copy() for boxA in values: boxAPeers = peers[boxA] for boxB in boxAPeers: if ((values[boxA] == values[boxB]) and (len(values[boxA]) == 2)): intersect = [val for val in boxAPeers if (val in peers[boxB])] for peer in intersect: for digit in values[boxA]: out[peer] = out[peer].replace(digit, '') return out
def eliminate(values): "Apply the eliminate strategy to a Sudoku puzzle\n\n The eliminate strategy says that if a box has a value assigned, then none\n of the peers of that box can have the same value.\n\n Parameters\n ----------\n values(dict)\n a dictionary of the form {'box_name': '123456789', ...}\n\n Returns\n -------\n dict\n The values dictionary with the assigned values eliminated from peers\n " solved_values = [box for box in values.keys() if (len(values[box]) == 1)] for box in solved_values: digit = values[box] for peer in peers[box]: values[peer] = values[peer].replace(digit, '') return values
1,745,120,404,089,232,000
Apply the eliminate strategy to a Sudoku puzzle The eliminate strategy says that if a box has a value assigned, then none of the peers of that box can have the same value. Parameters ---------- values(dict) a dictionary of the form {'box_name': '123456789', ...} Returns ------- dict The values dictionary with the assigned values eliminated from peers
Projects/1_Sudoku/solution.py
eliminate
justinlnx/artificial-intelligence
python
def eliminate(values): "Apply the eliminate strategy to a Sudoku puzzle\n\n The eliminate strategy says that if a box has a value assigned, then none\n of the peers of that box can have the same value.\n\n Parameters\n ----------\n values(dict)\n a dictionary of the form {'box_name': '123456789', ...}\n\n Returns\n -------\n dict\n The values dictionary with the assigned values eliminated from peers\n " solved_values = [box for box in values.keys() if (len(values[box]) == 1)] for box in solved_values: digit = values[box] for peer in peers[box]: values[peer] = values[peer].replace(digit, '') return values
def only_choice(values): "Apply the only choice strategy to a Sudoku puzzle\n\n The only choice strategy says that if only one box in a unit allows a certain\n digit, then that box must be assigned that digit.\n\n Parameters\n ----------\n values(dict)\n a dictionary of the form {'box_name': '123456789', ...}\n\n Returns\n -------\n dict\n The values dictionary with all single-valued boxes assigned\n\n Notes\n -----\n You should be able to complete this function by copying your code from the classroom\n " for unit in unitlist: for digit in '123456789': dplaces = [box for box in unit if (digit in values[box])] if (len(dplaces) == 1): values[dplaces[0]] = digit return values
-4,383,931,250,168,897,500
Apply the only choice strategy to a Sudoku puzzle The only choice strategy says that if only one box in a unit allows a certain digit, then that box must be assigned that digit. Parameters ---------- values(dict) a dictionary of the form {'box_name': '123456789', ...} Returns ------- dict The values dictionary with all single-valued boxes assigned Notes ----- You should be able to complete this function by copying your code from the classroom
Projects/1_Sudoku/solution.py
only_choice
justinlnx/artificial-intelligence
python
def only_choice(values): "Apply the only choice strategy to a Sudoku puzzle\n\n The only choice strategy says that if only one box in a unit allows a certain\n digit, then that box must be assigned that digit.\n\n Parameters\n ----------\n values(dict)\n a dictionary of the form {'box_name': '123456789', ...}\n\n Returns\n -------\n dict\n The values dictionary with all single-valued boxes assigned\n\n Notes\n -----\n You should be able to complete this function by copying your code from the classroom\n " for unit in unitlist: for digit in '123456789': dplaces = [box for box in unit if (digit in values[box])] if (len(dplaces) == 1): values[dplaces[0]] = digit return values
def reduce_puzzle(values): "Reduce a Sudoku puzzle by repeatedly applying all constraint strategies\n\n Parameters\n ----------\n values(dict)\n a dictionary of the form {'box_name': '123456789', ...}\n\n Returns\n -------\n dict or False\n The values dictionary after continued application of the constraint strategies\n no longer produces any changes, or False if the puzzle is unsolvable \n " solved_values = [box for box in values.keys() if (len(values[box]) == 1)] stalled = False while (not stalled): solved_values_before = len([box for box in values.keys() if (len(values[box]) == 1)]) values = eliminate(values) values = only_choice(values) values = naked_twins(values) solved_values_after = len([box for box in values.keys() if (len(values[box]) == 1)]) stalled = (solved_values_before == solved_values_after) if len([box for box in values.keys() if (len(values[box]) == 0)]): return False return values
-3,851,804,040,853,470,000
Reduce a Sudoku puzzle by repeatedly applying all constraint strategies Parameters ---------- values(dict) a dictionary of the form {'box_name': '123456789', ...} Returns ------- dict or False The values dictionary after continued application of the constraint strategies no longer produces any changes, or False if the puzzle is unsolvable
Projects/1_Sudoku/solution.py
reduce_puzzle
justinlnx/artificial-intelligence
python
def reduce_puzzle(values): "Reduce a Sudoku puzzle by repeatedly applying all constraint strategies\n\n Parameters\n ----------\n values(dict)\n a dictionary of the form {'box_name': '123456789', ...}\n\n Returns\n -------\n dict or False\n The values dictionary after continued application of the constraint strategies\n no longer produces any changes, or False if the puzzle is unsolvable \n " solved_values = [box for box in values.keys() if (len(values[box]) == 1)] stalled = False while (not stalled): solved_values_before = len([box for box in values.keys() if (len(values[box]) == 1)]) values = eliminate(values) values = only_choice(values) values = naked_twins(values) solved_values_after = len([box for box in values.keys() if (len(values[box]) == 1)]) stalled = (solved_values_before == solved_values_after) if len([box for box in values.keys() if (len(values[box]) == 0)]): return False return values
def search(values): "Apply depth first search to solve Sudoku puzzles in order to solve puzzles\n that cannot be solved by repeated reduction alone.\n\n Parameters\n ----------\n values(dict)\n a dictionary of the form {'box_name': '123456789', ...}\n\n Returns\n -------\n dict or False\n The values dictionary with all boxes assigned or False\n\n Notes\n -----\n You should be able to complete this function by copying your code from the classroom\n and extending it to call the naked twins strategy.\n " 'Using depth-first search and propagation, try all possible values.' values = reduce_puzzle(values) if (values is False): return False if all(((len(values[s]) == 1) for s in boxes)): return values (n, s) = min(((len(values[s]), s) for s in boxes if (len(values[s]) > 1))) for value in values[s]: new_sudoku = values.copy() new_sudoku[s] = value attempt = search(new_sudoku) if attempt: return attempt
-5,391,375,916,073,540,000
Apply depth first search to solve Sudoku puzzles in order to solve puzzles that cannot be solved by repeated reduction alone. Parameters ---------- values(dict) a dictionary of the form {'box_name': '123456789', ...} Returns ------- dict or False The values dictionary with all boxes assigned or False Notes ----- You should be able to complete this function by copying your code from the classroom and extending it to call the naked twins strategy.
Projects/1_Sudoku/solution.py
search
justinlnx/artificial-intelligence
python
def search(values): "Apply depth first search to solve Sudoku puzzles in order to solve puzzles\n that cannot be solved by repeated reduction alone.\n\n Parameters\n ----------\n values(dict)\n a dictionary of the form {'box_name': '123456789', ...}\n\n Returns\n -------\n dict or False\n The values dictionary with all boxes assigned or False\n\n Notes\n -----\n You should be able to complete this function by copying your code from the classroom\n and extending it to call the naked twins strategy.\n " 'Using depth-first search and propagation, try all possible values.' values = reduce_puzzle(values) if (values is False): return False if all(((len(values[s]) == 1) for s in boxes)): return values (n, s) = min(((len(values[s]), s) for s in boxes if (len(values[s]) > 1))) for value in values[s]: new_sudoku = values.copy() new_sudoku[s] = value attempt = search(new_sudoku) if attempt: return attempt
def solve(grid): "Find the solution to a Sudoku puzzle using search and constraint propagation\n\n Parameters\n ----------\n grid(string)\n a string representing a sudoku grid.\n \n Ex. '2.............62....1....7...6..8...3...9...7...6..4...4....8....52.............3'\n\n Returns\n -------\n dict or False\n The dictionary representation of the final sudoku grid or False if no solution exists.\n " values = grid2values(grid) values = search(values) return values
7,617,055,493,705,177,000
Find the solution to a Sudoku puzzle using search and constraint propagation Parameters ---------- grid(string) a string representing a sudoku grid. Ex. '2.............62....1....7...6..8...3...9...7...6..4...4....8....52.............3' Returns ------- dict or False The dictionary representation of the final sudoku grid or False if no solution exists.
Projects/1_Sudoku/solution.py
solve
justinlnx/artificial-intelligence
python
def solve(grid): "Find the solution to a Sudoku puzzle using search and constraint propagation\n\n Parameters\n ----------\n grid(string)\n a string representing a sudoku grid.\n \n Ex. '2.............62....1....7...6..8...3...9...7...6..4...4....8....52.............3'\n\n Returns\n -------\n dict or False\n The dictionary representation of the final sudoku grid or False if no solution exists.\n " values = grid2values(grid) values = search(values) return values
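All five Sudoku rows above depend on module-level helpers (boxes, unitlist, peers, grid2values) that the dump does not carry. A sketch of the conventional definitions these functions assume, following the usual layout of this Udacity project (the full project also adds diagonal units, omitted here):

    def cross(a, b):
        return [s + t for s in a for t in b]

    rows, cols = 'ABCDEFGHI', '123456789'
    boxes = cross(rows, cols)  # 81 box names: 'A1'..'I9'
    row_units = [cross(r, cols) for r in rows]
    column_units = [cross(rows, c) for c in cols]
    square_units = [cross(rs, cs) for rs in ('ABC', 'DEF', 'GHI')
                                  for cs in ('123', '456', '789')]
    unitlist = row_units + column_units + square_units
    units = {box: [u for u in unitlist if box in u] for box in boxes}
    peers = {box: set(sum(units[box], [])) - {box} for box in boxes}

    def grid2values(grid):
        # '.' marks an empty box that may still hold any digit.
        return {box: (cols if ch == '.' else ch) for box, ch in zip(boxes, grid)}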
def basic_argument_parser(distributed=True, requires_config_file=True, requires_output_dir=True): ' Basic cli tool parser for Detectron2Go binaries ' parser = argparse.ArgumentParser(description='PyTorch Object Detection Training') parser.add_argument('--runner', type=str, default='d2go.runner.GeneralizedRCNNRunner', help='Full class name, i.e. (package.)module.class') parser.add_argument('--config-file', help='path to config file', default='', required=requires_config_file, metavar='FILE') parser.add_argument('--output-dir', help='When given, this will override the OUTPUT_DIR in the config-file', required=requires_output_dir, default=None, type=str) parser.add_argument('opts', help='Modify config options using the command-line', default=None, nargs=argparse.REMAINDER) if distributed: parser.add_argument('--num-processes', type=int, default=1, help='number of gpus per machine') parser.add_argument('--num-machines', type=int, default=1) parser.add_argument('--machine-rank', type=int, default=0, help='the rank of this machine (unique per machine)') parser.add_argument('--dist-url', default='file:///tmp/d2go_dist_file_{}'.format(time.time())) parser.add_argument('--dist-backend', type=str, default='NCCL') if (not requires_config_file): parser.add_argument('--datasets', type=str, nargs='+', required=True, help='cfg.DATASETS.TEST') parser.add_argument('--min_size', type=int, required=True, help='cfg.INPUT.MIN_SIZE_TEST') parser.add_argument('--max_size', type=int, required=True, help='cfg.INPUT.MAX_SIZE_TEST') return parser return parser
-3,745,655,481,647,895,000
Basic cli tool parser for Detectron2Go binaries
d2go/setup.py
basic_argument_parser
Dinesh101041/d2go
python
def basic_argument_parser(distributed=True, requires_config_file=True, requires_output_dir=True): ' ' parser = argparse.ArgumentParser(description='PyTorch Object Detection Training') parser.add_argument('--runner', type=str, default='d2go.runner.GeneralizedRCNNRunner', help='Full class name, i.e. (package.)module.class') parser.add_argument('--config-file', help='path to config file', default='', required=requires_config_file, metavar='FILE') parser.add_argument('--output-dir', help='When given, this will override the OUTPUT_DIR in the config-file', required=requires_output_dir, default=None, type=str) parser.add_argument('opts', help='Modify config options using the command-line', default=None, nargs=argparse.REMAINDER) if distributed: parser.add_argument('--num-processes', type=int, default=1, help='number of gpus per machine') parser.add_argument('--num-machines', type=int, default=1) parser.add_argument('--machine-rank', type=int, default=0, help='the rank of this machine (unique per machine)') parser.add_argument('--dist-url', default='file:///tmp/d2go_dist_file_{}'.format(time.time())) parser.add_argument('--dist-backend', type=str, default='NCCL') if (not requires_config_file): parser.add_argument('--datasets', type=str, nargs='+', required=True, help='cfg.DATASETS.TEST') parser.add_argument('--min_size', type=int, required=True, help='cfg.INPUT.MIN_SIZE_TEST') parser.add_argument('--max_size', type=int, required=True, help='cfg.INPUT.MAX_SIZE_TEST') return parser return parser
def create_cfg_from_cli_args(args, default_cfg): "\n Instead of loading from defaults.py, this binary only includes necessary\n configs building from scratch, and overrides them from args. There're two\n levels of config:\n _C: the config system used by this binary, which is a sub-set of training\n config, override by configurable_cfg. It can also be override by\n args.opts for convinience.\n configurable_cfg: common configs that user should explicitly specify\n in the args.\n " _C = CN() _C.INPUT = default_cfg.INPUT _C.DATASETS = default_cfg.DATASETS _C.DATALOADER = default_cfg.DATALOADER _C.TEST = default_cfg.TEST if hasattr(default_cfg, 'D2GO_DATA'): _C.D2GO_DATA = default_cfg.D2GO_DATA if hasattr(default_cfg, 'TENSORBOARD'): _C.TENSORBOARD = default_cfg.TENSORBOARD _C.MODEL = CN() _C.MODEL.META_ARCHITECTURE = default_cfg.MODEL.META_ARCHITECTURE _C.MODEL.MASK_ON = default_cfg.MODEL.MASK_ON _C.MODEL.KEYPOINT_ON = default_cfg.MODEL.KEYPOINT_ON _C.MODEL.LOAD_PROPOSALS = default_cfg.MODEL.LOAD_PROPOSALS assert (_C.MODEL.LOAD_PROPOSALS is False), "caffe2 model doesn't support" _C.OUTPUT_DIR = args.output_dir configurable_cfg = ['DATASETS.TEST', args.datasets, 'INPUT.MIN_SIZE_TEST', args.min_size, 'INPUT.MAX_SIZE_TEST', args.max_size] cfg = _C.clone() cfg.merge_from_list(configurable_cfg) cfg.merge_from_list(args.opts) return cfg
1,567,503,064,963,738,400
Instead of loading from defaults.py, this binary only includes necessary configs building from scratch, and overrides them from args. There're two levels of config: _C: the config system used by this binary, which is a sub-set of training config, override by configurable_cfg. It can also be override by args.opts for convinience. configurable_cfg: common configs that user should explicitly specify in the args.
d2go/setup.py
create_cfg_from_cli_args
Dinesh101041/d2go
python
def create_cfg_from_cli_args(args, default_cfg): "\n Instead of loading from defaults.py, this binary only includes necessary\n configs building from scratch, and overrides them from args. There're two\n levels of config:\n _C: the config system used by this binary, which is a sub-set of training\n config, override by configurable_cfg. It can also be override by\n args.opts for convinience.\n configurable_cfg: common configs that user should explicitly specify\n in the args.\n " _C = CN() _C.INPUT = default_cfg.INPUT _C.DATASETS = default_cfg.DATASETS _C.DATALOADER = default_cfg.DATALOADER _C.TEST = default_cfg.TEST if hasattr(default_cfg, 'D2GO_DATA'): _C.D2GO_DATA = default_cfg.D2GO_DATA if hasattr(default_cfg, 'TENSORBOARD'): _C.TENSORBOARD = default_cfg.TENSORBOARD _C.MODEL = CN() _C.MODEL.META_ARCHITECTURE = default_cfg.MODEL.META_ARCHITECTURE _C.MODEL.MASK_ON = default_cfg.MODEL.MASK_ON _C.MODEL.KEYPOINT_ON = default_cfg.MODEL.KEYPOINT_ON _C.MODEL.LOAD_PROPOSALS = default_cfg.MODEL.LOAD_PROPOSALS assert (_C.MODEL.LOAD_PROPOSALS is False), "caffe2 model doesn't support" _C.OUTPUT_DIR = args.output_dir configurable_cfg = ['DATASETS.TEST', args.datasets, 'INPUT.MIN_SIZE_TEST', args.min_size, 'INPUT.MAX_SIZE_TEST', args.max_size] cfg = _C.clone() cfg.merge_from_list(configurable_cfg) cfg.merge_from_list(args.opts) return cfg
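create_cfg_from_cli_args builds a yacs CfgNode (the CN alias) from scratch and then layers flat key/value overrides on top via merge_from_list, once for configurable_cfg and once for args.opts. A tiny isolated sketch of that override pattern, with made-up keys and values:

    from yacs.config import CfgNode as CN

    _C = CN()
    _C.INPUT = CN()
    _C.INPUT.MIN_SIZE_TEST = 800
    _C.INPUT.MAX_SIZE_TEST = 1333

    cfg = _C.clone()
    # Flat [key, value, key, value, ...] list, as in the function above.
    cfg.merge_from_list(['INPUT.MIN_SIZE_TEST', 640, 'INPUT.MAX_SIZE_TEST', 1024])
    print(cfg.INPUT.MIN_SIZE_TEST)  # 640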
def prepare_for_launch(args): '\n Load config, figure out working directory, create runner.\n - when args.config_file is empty, returned cfg will be the default one\n - returned output_dir will always be non empty, args.output_dir has higher\n priority than cfg.OUTPUT_DIR.\n ' print(args) runner = create_runner(args.runner) cfg = runner.get_default_cfg() if args.config_file: with PathManager.open(reroute_config_path(args.config_file), 'r') as f: print('Loaded config file {}:\n{}'.format(args.config_file, f.read())) cfg.merge_from_file(args.config_file) cfg.merge_from_list(args.opts) else: cfg = create_cfg_from_cli_args(args, default_cfg=cfg) cfg.freeze() assert (args.output_dir or args.config_file) output_dir = (args.output_dir or cfg.OUTPUT_DIR) return (cfg, output_dir, runner)
8,141,107,573,497,229,000
Load config, figure out working directory, create runner. - when args.config_file is empty, returned cfg will be the default one - returned output_dir will always be non empty, args.output_dir has higher priority than cfg.OUTPUT_DIR.
d2go/setup.py
prepare_for_launch
Dinesh101041/d2go
python
def prepare_for_launch(args): '\n Load config, figure out working directory, create runner.\n - when args.config_file is empty, returned cfg will be the default one\n - returned output_dir will always be non empty, args.output_dir has higher\n priority than cfg.OUTPUT_DIR.\n ' print(args) runner = create_runner(args.runner) cfg = runner.get_default_cfg() if args.config_file: with PathManager.open(reroute_config_path(args.config_file), 'r') as f: print('Loaded config file {}:\n{}'.format(args.config_file, f.read())) cfg.merge_from_file(args.config_file) cfg.merge_from_list(args.opts) else: cfg = create_cfg_from_cli_args(args, default_cfg=cfg) cfg.freeze() assert (args.output_dir or args.config_file) output_dir = (args.output_dir or cfg.OUTPUT_DIR) return (cfg, output_dir, runner)
def setup_after_launch(cfg, output_dir, runner): '\n Set things up after entering DDP, including\n - creating working directory\n - setting up logger\n - logging environment\n - initializing runner\n ' create_dir_on_global_main_process(output_dir) comm.synchronize() setup_loggers(output_dir) cfg.freeze() if (cfg.OUTPUT_DIR != output_dir): with temp_defrost(cfg): logger.warning('Override cfg.OUTPUT_DIR ({}) to be the same as output_dir {}'.format(cfg.OUTPUT_DIR, output_dir)) cfg.OUTPUT_DIR = output_dir logger.info('Initializing runner ...') runner = initialize_runner(runner, cfg) log_info(cfg, runner) dump_cfg(cfg, os.path.join(output_dir, 'config.yaml')) auto_scale_world_size(cfg, new_world_size=comm.get_world_size())
-8,754,067,670,595,316,000
Set things up after entering DDP, including - creating working directory - setting up logger - logging environment - initializing runner
d2go/setup.py
setup_after_launch
Dinesh101041/d2go
python
def setup_after_launch(cfg, output_dir, runner): '\n Set things up after entering DDP, including\n - creating working directory\n - setting up logger\n - logging environment\n - initializing runner\n ' create_dir_on_global_main_process(output_dir) comm.synchronize() setup_loggers(output_dir) cfg.freeze() if (cfg.OUTPUT_DIR != output_dir): with temp_defrost(cfg): logger.warning('Override cfg.OUTPUT_DIR ({}) to be the same as output_dir {}'.format(cfg.OUTPUT_DIR, output_dir)) cfg.OUTPUT_DIR = output_dir logger.info('Initializing runner ...') runner = initialize_runner(runner, cfg) log_info(cfg, runner) dump_cfg(cfg, os.path.join(output_dir, 'config.yaml')) auto_scale_world_size(cfg, new_world_size=comm.get_world_size())
def __init__(self, main_window, palette): "\n Creates a new window for user to input\n which regions to add to scene.\n\n Arguments:\n ----------\n\n main_window: reference to the App's main window\n palette: main_window's palette, used to style widgets\n " super().__init__() self.setWindowTitle('Add brain regions') self.ui() self.main_window = main_window self.setStyleSheet(update_css(style, palette))
2,832,295,261,314,470,000
Creates a new window for user to input which regions to add to scene. Arguments: ---------- main_window: reference to the App's main window palette: main_window's palette, used to style widgets
brainrender_gui/widgets/add_regions.py
__init__
brainglobe/bg-brainrender-gui
python
def __init__(self, main_window, palette): "\n Creates a new window for user to input\n which regions to add to scene.\n\n Arguments:\n ----------\n\n main_window: reference to the App's main window\n palette: main_window's palette, used to style widgets\n " super().__init__() self.setWindowTitle('Add brain regions') self.ui() self.main_window = main_window self.setStyleSheet(update_css(style, palette))
def ui(self): "\n Define UI's elements\n " self.setGeometry(self.left, self.top, self.width, self.height) layout = QVBoxLayout() label = QLabel(self) label.setObjectName('PopupLabel') label.setText(self.label_msg) self.textbox = QLineEdit(self) alpha_label = QLabel(self) alpha_label.setObjectName('PopupLabel') alpha_label.setText('Alpha') self.alpha_textbox = QLineEdit(self) self.alpha_textbox.setText(str(1.0)) color_label = QLabel(self) color_label.setObjectName('PopupLabel') color_label.setText('Color') self.color_textbox = QLineEdit(self) self.color_textbox.setText('atlas') self.button = QPushButton('Add regions', self) self.button.clicked.connect(self.on_click) self.button.setObjectName('RegionsButton') layout.addWidget(label) layout.addWidget(self.textbox) layout.addWidget(alpha_label) layout.addWidget(self.alpha_textbox) layout.addWidget(color_label) layout.addWidget(self.color_textbox) layout.addWidget(self.button) self.setLayout(layout) self.show()
-7,489,549,448,365,388,000
Define UI's elements
brainrender_gui/widgets/add_regions.py
ui
brainglobe/bg-brainrender-gui
python
def ui(self): "\n \n " self.setGeometry(self.left, self.top, self.width, self.height) layout = QVBoxLayout() label = QLabel(self) label.setObjectName('PopupLabel') label.setText(self.label_msg) self.textbox = QLineEdit(self) alpha_label = QLabel(self) alpha_label.setObjectName('PopupLabel') alpha_label.setText('Alpha') self.alpha_textbox = QLineEdit(self) self.alpha_textbox.setText(str(1.0)) color_label = QLabel(self) color_label.setObjectName('PopupLabel') color_label.setText('Color') self.color_textbox = QLineEdit(self) self.color_textbox.setText('atlas') self.button = QPushButton('Add regions', self) self.button.clicked.connect(self.on_click) self.button.setObjectName('RegionsButton') layout.addWidget(label) layout.addWidget(self.textbox) layout.addWidget(alpha_label) layout.addWidget(self.alpha_textbox) layout.addWidget(color_label) layout.addWidget(self.color_textbox) layout.addWidget(self.button) self.setLayout(layout) self.show()
def on_click(self): "\n On click or 'Enter' get the regions\n from the input and call the add_regions\n method of the main window\n " regions = self.textbox.text().split(' ') self.main_window.add_regions(regions, self.alpha_textbox.text(), self.color_textbox.text()) self.close()
-1,581,329,918,703,527,000
On click or 'Enter' get the regions from the input and call the add_regions method of the main window
brainrender_gui/widgets/add_regions.py
on_click
brainglobe/bg-brainrender-gui
python
def on_click(self): "\n On click or 'Enter' get the regions\n from the input and call the add_regions\n method of the main window\n " regions = self.textbox.text().split(' ') self.main_window.add_regions(regions, self.alpha_textbox.text(), self.color_textbox.text()) self.close()
def update_user_data(): 'Update user_data to enable or disable Telemetry.\n\n If employment data has been changed Telemetry might be switched on\n automatically. The opt-in decision is taken for the new employee. Non employees\n will have an option to enable data collection.\n ' is_employee_changed = user_data.set_user_data() if (not is_employee_changed): return if user_data.is_employee: logger.warning('Enabled collecting MozPhab usage data.\nSee https://moz-conduit.readthedocs.io/en/latest/mozphab-data-collection.html') config.telemetry_enabled = True else: opt_in = (prompt('Would you like to allow MozPhab to collect usage data?', ['Yes', 'No']) == 'Yes') if opt_in: config.telemetry_enabled = True else: logger.info('MozPhab usage data collection disabled.\nSee https://moz-conduit.readthedocs.io/en/latest/mozphab-data-collection.html') config.telemetry_enabled = False config.write()
4,639,693,185,802,704,000
Update user_data to enable or disable Telemetry. If employment data has been changed Telemetry might be switched on automatically. The opt-in decision is taken for the new employee. Non employees will have an option to enable data collection.
mozphab/telemetry.py
update_user_data
cgsheeh/review
python
def update_user_data(): 'Update user_data to enable or disable Telemetry.\n\n If employment data has been changed Telemetry might be switched on\n automatically. The opt-in decision is taken for the new employee. Non employees\n will have an option to enable data collection.\n ' is_employee_changed = user_data.set_user_data() if (not is_employee_changed): return if user_data.is_employee: logger.warning('Enabled collecting MozPhab usage data.\nSee https://moz-conduit.readthedocs.io/en/latest/mozphab-data-collection.html') config.telemetry_enabled = True else: opt_in = (prompt('Would you like to allow MozPhab to collect usage data?', ['Yes', 'No']) == 'Yes') if opt_in: config.telemetry_enabled = True else: logger.info('MozPhab usage data collection disabled.\nSee https://moz-conduit.readthedocs.io/en/latest/mozphab-data-collection.html') config.telemetry_enabled = False config.write()
def __init__(self): 'Initiate Glean, load pings and metrics.' import glean logging.getLogger('glean').setLevel(logging.DEBUG) logger.debug('Initializing Glean...') glean.Glean.initialize(application_id='MozPhab', application_version=MOZPHAB_VERSION, upload_enabled=True, configuration=glean.Configuration(), data_dir=(Path(environment.MOZBUILD_PATH) / 'telemetry-data')) self._pings = glean.load_pings((environment.MOZPHAB_MAIN_DIR / 'pings.yaml')) self._metrics = glean.load_metrics((environment.MOZPHAB_MAIN_DIR / 'metrics.yaml'))
-7,231,570,573,987,132,000
Initiate Glean, load pings and metrics.
mozphab/telemetry.py
__init__
cgsheeh/review
python
def __init__(self): import glean logging.getLogger('glean').setLevel(logging.DEBUG) logger.debug('Initializing Glean...') glean.Glean.initialize(application_id='MozPhab', application_version=MOZPHAB_VERSION, upload_enabled=True, configuration=glean.Configuration(), data_dir=(Path(environment.MOZBUILD_PATH) / 'telemetry-data')) self._pings = glean.load_pings((environment.MOZPHAB_MAIN_DIR / 'pings.yaml')) self._metrics = glean.load_metrics((environment.MOZPHAB_MAIN_DIR / 'metrics.yaml'))
def _set_os(self): 'Collect human readable information about the OS version.\n\n For Linux it is setting a distribution name and version.\n ' (system, node, release, version, machine, processor) = platform.uname() if (system == 'Linux'): (distribution_name, distribution_number, _) = distro.linux_distribution(full_distribution_name=False) distribution_version = ' '.join([distribution_name, distribution_number]) elif (system == 'Windows'): (_release, distribution_version, _csd, _ptype) = platform.win32_ver() elif (system == 'Darwin'): (distribution_version, _versioninfo, _machine) = platform.mac_ver() else: distribution_version = release self.environment.distribution_version.set(distribution_version)
-6,868,257,696,406,971,000
Collect human readable information about the OS version. For Linux it is setting a distribution name and version.
mozphab/telemetry.py
_set_os
cgsheeh/review
python
def _set_os(self): 'Collect human readable information about the OS version.\n\n For Linux it is setting a distribution name and version.\n ' (system, node, release, version, machine, processor) = platform.uname() if (system == 'Linux'): (distribution_name, distribution_number, _) = distro.linux_distribution(full_distribution_name=False) distribution_version = ' '.join([distribution_name, distribution_number]) elif (system == 'Windows'): (_release, distribution_version, _csd, _ptype) = platform.win32_ver() elif (system == 'Darwin'): (distribution_version, _versioninfo, _machine) = platform.mac_ver() else: distribution_version = release self.environment.distribution_version.set(distribution_version)
def set_metrics(self, args): 'Sets metrics common to all commands.' self.usage.command.set(args.command) self._set_os() self._set_python() self.usage.override_switch.set((getattr(args, 'force_vcs', False) or getattr(args, 'force', False))) self.usage.command_time.start() self.user.installation.set(user_data.installation_id) self.user.id.set(user_data.user_code)
-1,575,089,079,134,722,300
Sets metrics common to all commands.
mozphab/telemetry.py
set_metrics
cgsheeh/review
python
def set_metrics(self, args): self.usage.command.set(args.command) self._set_os() self._set_python() self.usage.override_switch.set((getattr(args, 'force_vcs', False) or getattr(args, 'force', False))) self.usage.command_time.start() self.user.installation.set(user_data.installation_id) self.user.id.set(user_data.user_code)
def binary_image_to_lut_indices(x): '\n Convert a binary image to an index image that can be used with a lookup table\n to perform morphological operations. Non-zero elements in the image are interpreted\n as 1, zero elements as 0\n\n :param x: a 2D NumPy array.\n :return: a 2D NumPy array, same shape as x\n ' if (x.ndim != 2): raise ValueError('x should have 2 dimensions, not {}'.format(x.ndim)) if (x.dtype != np.bool): x = (x != 0) x = np.pad(x, [(1, 1), (1, 1)], mode='constant') lut_indices = (((((((((x[:(- 2), :(- 2)] * NEIGH_MASK_NORTH_WEST) + (x[:(- 2), 1:(- 1)] * NEIGH_MASK_NORTH)) + (x[:(- 2), 2:] * NEIGH_MASK_NORTH_EAST)) + (x[1:(- 1), :(- 2)] * NEIGH_MASK_WEST)) + (x[1:(- 1), 1:(- 1)] * NEIGH_MASK_CENTRE)) + (x[1:(- 1), 2:] * NEIGH_MASK_EAST)) + (x[2:, :(- 2)] * NEIGH_MASK_SOUTH_WEST)) + (x[2:, 1:(- 1)] * NEIGH_MASK_SOUTH)) + (x[2:, 2:] * NEIGH_MASK_SOUTH_EAST)) return lut_indices.astype(np.int32)
-7,441,921,039,338,985,000
Convert a binary image to an index image that can be used with a lookup table to perform morphological operations. Non-zero elements in the image are interpreted as 1, zero elements as 0 :param x: a 2D NumPy array. :return: a 2D NumPy array, same shape as x
Benchmarking/bsds500/bsds/thin.py
binary_image_to_lut_indices
CipiOrhei/eecvf
python
def binary_image_to_lut_indices(x): '\n Convert a binary image to an index image that can be used with a lookup table\n to perform morphological operations. Non-zero elements in the image are interpreted\n as 1, zero elements as 0\n\n :param x: a 2D NumPy array.\n :return: a 2D NumPy array, same shape as x\n ' if (x.ndim != 2): raise ValueError('x should have 2 dimensions, not {}'.format(x.ndim)) if (x.dtype != np.bool): x = (x != 0) x = np.pad(x, [(1, 1), (1, 1)], mode='constant') lut_indices = (((((((((x[:(- 2), :(- 2)] * NEIGH_MASK_NORTH_WEST) + (x[:(- 2), 1:(- 1)] * NEIGH_MASK_NORTH)) + (x[:(- 2), 2:] * NEIGH_MASK_NORTH_EAST)) + (x[1:(- 1), :(- 2)] * NEIGH_MASK_WEST)) + (x[1:(- 1), 1:(- 1)] * NEIGH_MASK_CENTRE)) + (x[1:(- 1), 2:] * NEIGH_MASK_EAST)) + (x[2:, :(- 2)] * NEIGH_MASK_SOUTH_WEST)) + (x[2:, 1:(- 1)] * NEIGH_MASK_SOUTH)) + (x[2:, 2:] * NEIGH_MASK_SOUTH_EAST)) return lut_indices.astype(np.int32)
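binary_image_to_lut_indices packs each pixel's 3x3 neighbourhood into a 9-bit index, which is why apply_lut insists on 512-entry tables and identity_lut allocates np.zeros((512,)). The NEIGH_MASK_* constants, MASKS, and _LUT_INDS are not included in these rows; the sketch below shows one consistent assignment, with the exact bit order being an assumption (any permutation works as long as LUT construction and indexing agree):

    import numpy as np

    # Assumed: one distinct bit per neighbourhood position, ordered
    # counter-clockwise from east as in the lut_masks_zero/lut_masks_one docstrings.
    (NEIGH_MASK_EAST, NEIGH_MASK_NORTH_EAST, NEIGH_MASK_NORTH,
     NEIGH_MASK_NORTH_WEST, NEIGH_MASK_WEST, NEIGH_MASK_SOUTH_WEST,
     NEIGH_MASK_SOUTH, NEIGH_MASK_SOUTH_EAST, NEIGH_MASK_CENTRE) = [1 << i for i in range(9)]

    # MASKS[1..8] follow the 1-based neighbour numbering; MASKS[0] is padding.
    MASKS = [0, NEIGH_MASK_EAST, NEIGH_MASK_NORTH_EAST, NEIGH_MASK_NORTH,
             NEIGH_MASK_NORTH_WEST, NEIGH_MASK_WEST, NEIGH_MASK_SOUTH_WEST,
             NEIGH_MASK_SOUTH, NEIGH_MASK_SOUTH_EAST]

    _LUT_INDS = np.arange(512)  # every possible 9-bit neighbourhood pattern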
def apply_lut(x, lut): '\n Perform a morphological operation on the binary image x using the supplied lookup table\n :param x:\n :param lut:\n :return:\n ' if (lut.ndim != 1): raise ValueError('lut should have 1 dimension, not {}'.format(lut.ndim)) if (lut.shape[0] != 512): raise ValueError('lut should have 512 entries, not {}'.format(lut.shape[0])) lut_indices = binary_image_to_lut_indices(x) return lut[lut_indices]
-4,490,145,918,969,152,000
Perform a morphological operation on the binary image x using the supplied lookup table :param x: :param lut: :return:
Benchmarking/bsds500/bsds/thin.py
apply_lut
CipiOrhei/eecvf
python
def apply_lut(x, lut): '\n Perform a morphological operation on the binary image x using the supplied lookup table\n :param x:\n :param lut:\n :return:\n ' if (lut.ndim != 1): raise ValueError('lut should have 1 dimension, not {}'.format(lut.ndim)) if (lut.shape[0] != 512): raise ValueError('lut should have 512 entries, not {}'.format(lut.shape[0])) lut_indices = binary_image_to_lut_indices(x) return lut[lut_indices]
def identity_lut(): '\n Create identity lookup table\n :return:\n ' lut = np.zeros((512,), dtype=bool) inds = np.arange(512) lut[((inds & NEIGH_MASK_CENTRE) != 0)] = True return lut
-3,448,551,723,326,318,600
Create identity lookup table :return:
Benchmarking/bsds500/bsds/thin.py
identity_lut
CipiOrhei/eecvf
python
def identity_lut(): '\n Create identity lookup table\n :return:\n ' lut = np.zeros((512,), dtype=bool) inds = np.arange(512) lut[((inds & NEIGH_MASK_CENTRE) != 0)] = True return lut
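A quick sanity check of the two records above: applying the identity LUT must leave the image unchanged. A minimal sketch, assuming the functions are importable as bsds.thin (inferred from the path field of these records):

import numpy as np
from bsds.thin import apply_lut, identity_lut  # assumed import path

x = np.zeros((5, 5), dtype=bool)
x[1:4, 2] = True                  # a short vertical bar
y = apply_lut(x, identity_lut())
assert np.array_equal(x, y)       # the identity LUT is a no-op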
def _lut_mutate_mask(lut): '\n Get a mask that shows which neighbourhood shapes result in changes to the image\n :param lut: lookup table\n :return: mask indicating which lookup indices result in changes\n ' return (lut != identity_lut())
-1,491,527,051,737,313,000
Get a mask that shows which neighbourhood shapes result in changes to the image :param lut: lookup table :return: mask indicating which lookup indices result in changes
Benchmarking/bsds500/bsds/thin.py
_lut_mutate_mask
CipiOrhei/eecvf
python
def _lut_mutate_mask(lut): '\n Get a mask that shows which neighbourhood shapes result in changes to the image\n :param lut: lookup table\n :return: mask indicating which lookup indices result in changes\n ' return (lut != identity_lut())
def lut_masks_zero(neigh): '\n Create a LUT index mask for which the specified neighbour is 0\n :param neigh: neighbour index; counter-clockwise from 1 starting at the eastern neighbour\n :return: a LUT index mask\n ' if (neigh > 8): neigh -= 8 return ((_LUT_INDS & MASKS[neigh]) == 0)
7,111,937,062,312,660,000
Create a LUT index mask for which the specified neighbour is 0 :param neigh: neighbour index; counter-clockwise from 1 starting at the eastern neighbour :return: a LUT index mask
Benchmarking/bsds500/bsds/thin.py
lut_masks_zero
CipiOrhei/eecvf
python
def lut_masks_zero(neigh): '\n Create a LUT index mask for which the specified neighbour is 0\n :param neigh: neighbour index; counter-clockwise from 1 starting at the eastern neighbour\n :return: a LUT index mask\n ' if (neigh > 8): neigh -= 8 return ((_LUT_INDS & MASKS[neigh]) == 0)
def lut_masks_one(neigh): '\n Create a LUT index mask for which the specified neighbour is 1\n :param neigh: neighbour index; counter-clockwise from 1 starting at the eastern neighbour\n :return: a LUT index mask\n ' if (neigh > 8): neigh -= 8 return ((_LUT_INDS & MASKS[neigh]) != 0)
6,568,589,080,645,123,000
Create a LUT index mask for which the specified neighbour is 1 :param neigh: neighbour index; counter-clockwise from 1 starting at the eastern neighbour :return: a LUT index mask
Benchmarking/bsds500/bsds/thin.py
lut_masks_one
CipiOrhei/eecvf
python
def lut_masks_one(neigh): '\n Create a LUT index mask for which the specified neighbour is 1\n :param neigh: neighbour index; counter-clockwise from 1 starting at the eastern neighbour\n :return: a LUT index mask\n ' if (neigh > 8): neigh -= 8 return ((_LUT_INDS & MASKS[neigh]) != 0)
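lut_masks_zero and lut_masks_one depend on two module globals, _LUT_INDS and MASKS, that do not appear in these records. A plausible minimal definition consistent with how they are used (the concrete bit values are illustrative assumptions only):

import numpy as np

_LUT_INDS = np.arange(512)                   # one entry per 3x3 neighbourhood
MASKS = {i + 1: (1 << i) for i in range(8)}  # bit for neighbour i; 1 = east, counter-clockwise

With these in place, lut_masks_one(1) is a boolean vector of length 512 that marks the 256 LUT entries whose eastern neighbour is set.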
def _thin_cond_g1(): '\n Thinning morphological operation; condition G1\n :return: a LUT index mask\n ' b = np.zeros(512, dtype=int) for i in range(1, 5): b += (lut_masks_zero(((2 * i) - 1)) & (lut_masks_one((2 * i)) | lut_masks_one(((2 * i) + 1)))) return (b == 1)
7,932,152,981,081,950,000
Thinning morphological operation; condition G1 :return: a LUT index mask
Benchmarking/bsds500/bsds/thin.py
_thin_cond_g1
CipiOrhei/eecvf
python
def _thin_cond_g1(): '\n Thinning morphological operation; condition G1\n :return: a LUT index mask\n ' b = np.zeros(512, dtype=int) for i in range(1, 5): b += (lut_masks_zero(((2 * i) - 1)) & (lut_masks_one((2 * i)) | lut_masks_one(((2 * i) + 1)))) return (b == 1)
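_thin_cond_g1 is recognisable as condition G1 of a Guo-Hall-style thinning step. Writing $x_1, \dots, x_8$ for the neighbours (counter-clockwise from the east, indices taken modulo 8, which is what the `neigh -= 8` wrap-around provides), the code computes

\[ G_1:\quad \sum_{i=1}^{4} \overline{x}_{2i-1} \wedge \left( x_{2i} \vee x_{2i+1} \right) \;=\; 1 . \]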
def _thin_cond_g2(): '\n Thinning morphological operation; condition G2\n :return: a LUT index mask\n ' n1 = np.zeros(512, dtype=int) n2 = np.zeros(512, dtype=int) for k in range(1, 5): n1 += (lut_masks_one(((2 * k) - 1)) | lut_masks_one((2 * k))) n2 += (lut_masks_one((2 * k)) | lut_masks_one(((2 * k) + 1))) m = np.minimum(n1, n2) return ((m >= 2) & (m <= 3))
5,711,260,385,655,939,000
Thinning morphological operation; condition G2 :return: a LUT index mask
Benchmarking/bsds500/bsds/thin.py
_thin_cond_g2
CipiOrhei/eecvf
python
def _thin_cond_g2(): '\n Thinning morphological operation; condition G2\n :return: a LUT index mask\n ' n1 = np.zeros(512, dtype=int) n2 = np.zeros(512, dtype=int) for k in range(1, 5): n1 += (lut_masks_one(((2 * k) - 1)) | lut_masks_one((2 * k))) n2 += (lut_masks_one((2 * k)) | lut_masks_one(((2 * k) + 1))) m = np.minimum(n1, n2) return ((m >= 2) & (m <= 3))
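Likewise, _thin_cond_g2 matches condition G2: with

\[ n_1 = \sum_{k=1}^{4} x_{2k-1} \vee x_{2k}, \qquad n_2 = \sum_{k=1}^{4} x_{2k} \vee x_{2k+1}, \]

the mask selects the neighbourhoods satisfying $2 \le \min(n_1, n_2) \le 3$.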
def _thin_cond_g3(): '\n Thinning morphological operation; condition G3\n :return: a LUT index mask\n ' return ((((lut_masks_one(2) | lut_masks_one(3)) | lut_masks_zero(8)) & lut_masks_one(1)) == 0)
-1,797,199,284,587,221,000
Thinning morphological operation; condition G3 :return: a LUT index mask
Benchmarking/bsds500/bsds/thin.py
_thin_cond_g3
CipiOrhei/eecvf
python
def _thin_cond_g3(): '\n Thinning morphological operation; condition G3\n :return: a LUT index mask\n ' return ((((lut_masks_one(2) | lut_masks_one(3)) | lut_masks_zero(8)) & lut_masks_one(1)) == 0)
def _thin_cond_g3_prime(): "\n Thinning morphological operation; condition G3'\n :return: a LUT index mask\n " return ((((lut_masks_one(6) | lut_masks_one(7)) | lut_masks_zero(4)) & lut_masks_one(5)) == 0)
7,209,364,479,417,253,000
Thinning morphological operation; condition G3' :return: a LUT index mask
Benchmarking/bsds500/bsds/thin.py
_thin_cond_g3_prime
CipiOrhei/eecvf
python
def _thin_cond_g3_prime(): "\n Thinning morphological operation; condition G3'\n :return: a LUT index mask\n " return ((((lut_masks_one(6) | lut_masks_one(7)) | lut_masks_zero(4)) & lut_masks_one(5)) == 0)
def _thin_iter_1_lut(): '\n Thinning morphological operation; lookup table for iteration 1\n :return: lookup table\n ' lut = identity_lut() cond = ((_thin_cond_g1() & _thin_cond_g2()) & _thin_cond_g3()) lut[cond] = False return lut
5,085,434,141,869,963,000
Thinning morphological operation; lookup table for iteration 1 :return: lookup table
Benchmarking/bsds500/bsds/thin.py
_thin_iter_1_lut
CipiOrhei/eecvf
python
def _thin_iter_1_lut(): '\n Thinning morphological operation; lookup table for iteration 1\n :return: lookup table\n ' lut = identity_lut() cond = ((_thin_cond_g1() & _thin_cond_g2()) & _thin_cond_g3()) lut[cond] = False return lut
def _thin_iter_2_lut(): '\n Thinning morphological operation; lookup table for iteration 2\n :return: lookup table\n ' lut = identity_lut() cond = ((_thin_cond_g1() & _thin_cond_g2()) & _thin_cond_g3_prime()) lut[cond] = False return lut
-103,154,475,881,035,140
Thinning morphological operation; lookup table for iteration 2 :return: lookup table
Benchmarking/bsds500/bsds/thin.py
_thin_iter_2_lut
CipiOrhei/eecvf
python
def _thin_iter_2_lut(): '\n Thinning morphological operation; lookup table for iteration 2\n :return: lookup table\n ' lut = identity_lut() cond = ((_thin_cond_g1() & _thin_cond_g2()) & _thin_cond_g3_prime()) lut[cond] = False return lut
def binary_thin(x, max_iter=None): '\n Binary thinning morphological operation\n\n :param x: a binary image, or an image that is to be converted to a binary image\n :param max_iter: maximum number of iterations; default is `None`, which results in an infinite\n number of iterations (note that `binary_thin` will automatically terminate when no more changes occur)\n :return:\n ' thin1 = _thin_iter_1_lut() thin2 = _thin_iter_2_lut() thin1_mut = _lut_mutate_mask(thin1) thin2_mut = _lut_mutate_mask(thin2) iter_count = 0 while ((max_iter is None) or (iter_count < max_iter)): lut_indices = binary_image_to_lut_indices(x) x_mut = thin1_mut[lut_indices] if (x_mut.sum() == 0): break x = thin1[lut_indices] lut_indices = binary_image_to_lut_indices(x) x_mut = thin2_mut[lut_indices] if (x_mut.sum() == 0): break x = thin2[lut_indices] iter_count += 1 return x
3,673,415,387,885,628,400
Binary thinning morphological operation :param x: a binary image, or an image that is to be converted to a binary image :param max_iter: maximum number of iterations; default is `None`, which results in an infinite number of iterations (note that `binary_thin` will automatically terminate when no more changes occur) :return:
Benchmarking/bsds500/bsds/thin.py
binary_thin
CipiOrhei/eecvf
python
def binary_thin(x, max_iter=None): '\n Binary thinning morphological operation\n\n :param x: a binary image, or an image that is to be converted to a binary image\n :param max_iter: maximum number of iterations; default is `None`, which results in an infinite\n number of iterations (note that `binary_thin` will automatically terminate when no more changes occur)\n :return:\n ' thin1 = _thin_iter_1_lut() thin2 = _thin_iter_2_lut() thin1_mut = _lut_mutate_mask(thin1) thin2_mut = _lut_mutate_mask(thin2) iter_count = 0 while ((max_iter is None) or (iter_count < max_iter)): lut_indices = binary_image_to_lut_indices(x) x_mut = thin1_mut[lut_indices] if (x_mut.sum() == 0): break x = thin1[lut_indices] lut_indices = binary_image_to_lut_indices(x) x_mut = thin2_mut[lut_indices] if (x_mut.sum() == 0): break x = thin2[lut_indices] iter_count += 1 return x
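An end-to-end sketch of the thinning routine on a small test image; the import path is inferred from the path field and the input values are made up:

import numpy as np
from bsds.thin import binary_thin  # assumed import path

x = np.zeros((7, 9), dtype=bool)
x[2:5, 1:8] = True                 # a 3-pixel-thick horizontal bar
thin = binary_thin(x)              # runs until no pixel changes
print(thin.astype(int))            # expected: roughly a 1-pixel-wide line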
def play(self): "\n # 1. Create a deck of 52 cards\n # 2. Shuffle the deck\n # 3. Ask the Player for their bet\n # 4. Make sure that the Player's bet does not exceed their available chips\n # 5. Deal two cards to the Dealer and two cards to the Player\n # 6. Show only one of the Dealer's cards, the other remains hidden\n # 7. Show both of the Player's cards\n # 8. Ask the Player if they wish to Hit, and take another card\n # 9. If the Player's hand doesn't Bust (go over 21), ask if they'd like to Hit again.\n # 10. If a Player Stands, play the Dealer's hand.\n # The dealer will always Hit until the Dealer's value meets or exceeds 17\n # 11. Determine the winner and adjust the Player's chips accordingly\n # 12. Ask the Player if they'd like to play again\n " print('--NEW GAME---') self.playing = True self.deck.shuffle() dealer_hand = Hand() player_hand = Hand() player_hand.add_card(self.deck.deal_card()) dealer_hand.add_card(self.deck.deal_card()) player_hand.add_card(self.deck.deal_card()) dealer_hand.add_card(self.deck.deal_card()) self.take_bet() self.show_some(player_hand, dealer_hand) while self.playing: self.hit_or_stand(player_hand) self.show_some(player_hand, dealer_hand) if (player_hand.value > 21): self.player_busts(player_hand, dealer_hand) break if (player_hand.value <= 21): while (dealer_hand.value < 17): self.hit(dealer_hand) self.show_all_cards(player_hand, dealer_hand) if (dealer_hand.value > 21): self.dealer_busts(player_hand, dealer_hand) elif (player_hand.value > dealer_hand.value): self.player_wins(player_hand, dealer_hand) elif (player_hand.value < dealer_hand.value): self.dealer_wins(player_hand, dealer_hand) else: self.push(player_hand, dealer_hand)
7,772,900,167,371,930,000
# 1. Create a deck of 52 cards # 2. Shuffle the deck # 3. Ask the Player for their bet # 4. Make sure that the Player's bet does not exceed their available chips # 5. Deal two cards to the Dealer and two cards to the Player # 6. Show only one of the Dealer's cards, the other remains hidden # 7. Show both of the Player's cards # 8. Ask the Player if they wish to Hit, and take another card # 9. If the Player's hand doesn't Bust (go over 21), ask if they'd like to Hit again. # 10. If a Player Stands, play the Dealer's hand. # The dealer will always Hit until the Dealer's value meets or exceeds 17 # 11. Determine the winner and adjust the Player's chips accordingly # 12. Ask the Player if they'd like to play again
BlackJack.py
play
tse4a/Python-Challenge
python
def play(self): "\n # 1. Create a deck of 52 cards\n # 2. Shuffle the deck\n # 3. Ask the Player for their bet\n # 4. Make sure that the Player's bet does not exceed their available chips\n # 5. Deal two cards to the Dealer and two cards to the Player\n # 6. Show only one of the Dealer's cards, the other remains hidden\n # 7. Show both of the Player's cards\n # 8. Ask the Player if they wish to Hit, and take another card\n # 9. If the Player's hand doesn't Bust (go over 21), ask if they'd like to Hit again.\n # 10. If a Player Stands, play the Dealer's hand.\n # The dealer will always Hit until the Dealer's value meets or exceeds 17\n # 11. Determine the winner and adjust the Player's chips accordingly\n # 12. Ask the Player if they'd like to play again\n " print('--NEW GAME---') self.playing = True self.deck.shuffle() dealer_hand = Hand() player_hand = Hand() player_hand.add_card(self.deck.deal_card()) dealer_hand.add_card(self.deck.deal_card()) player_hand.add_card(self.deck.deal_card()) dealer_hand.add_card(self.deck.deal_card()) self.take_bet() self.show_some(player_hand, dealer_hand) while self.playing: self.hit_or_stand(player_hand) self.show_some(player_hand, dealer_hand) if (player_hand.value > 21): self.player_busts(player_hand, dealer_hand) break if (player_hand.value <= 21): while (dealer_hand.value < 17): self.hit(dealer_hand) self.show_all_cards(player_hand, dealer_hand) if (dealer_hand.value > 21): self.dealer_busts(player_hand, dealer_hand) elif (player_hand.value > dealer_hand.value): self.player_wins(player_hand, dealer_hand) elif (player_hand.value < dealer_hand.value): self.dealer_wins(player_hand, dealer_hand) else: self.push(player_hand, dealer_hand)
def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions]=None, account_name: Optional[pulumi.Input[str]]=None, active_directories: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ActiveDirectoryArgs']]]]]=None, location: Optional[pulumi.Input[str]]=None, resource_group_name: Optional[pulumi.Input[str]]=None, tags: Optional[pulumi.Input[Mapping[(str, pulumi.Input[str])]]]=None, __props__=None, __name__=None, __opts__=None): "\n NetApp account resource\n\n :param str resource_name: The name of the resource.\n :param pulumi.ResourceOptions opts: Options for the resource.\n :param pulumi.Input[str] account_name: The name of the NetApp account\n :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ActiveDirectoryArgs']]]] active_directories: Active Directories\n :param pulumi.Input[str] location: Resource location\n :param pulumi.Input[str] resource_group_name: The name of the resource group.\n :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags\n " if (__name__ is not None): warnings.warn('explicit use of __name__ is deprecated', DeprecationWarning) resource_name = __name__ if (__opts__ is not None): warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning) opts = __opts__ if (opts is None): opts = pulumi.ResourceOptions() if (not isinstance(opts, pulumi.ResourceOptions)): raise TypeError('Expected resource options to be a ResourceOptions instance') if (opts.version is None): opts.version = _utilities.get_version() if (opts.id is None): if (__props__ is not None): raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource') __props__ = dict() __props__['account_name'] = account_name __props__['active_directories'] = active_directories __props__['location'] = location if ((resource_group_name is None) and (not opts.urn)): raise TypeError("Missing required property 'resource_group_name'") __props__['resource_group_name'] = resource_group_name __props__['tags'] = tags __props__['name'] = None __props__['provisioning_state'] = None __props__['type'] = None alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_='azure-nextgen:netapp/v20200901:Account'), pulumi.Alias(type_='azure-native:netapp:Account'), pulumi.Alias(type_='azure-nextgen:netapp:Account'), pulumi.Alias(type_='azure-native:netapp/latest:Account'), pulumi.Alias(type_='azure-nextgen:netapp/latest:Account'), pulumi.Alias(type_='azure-native:netapp/v20170815:Account'), pulumi.Alias(type_='azure-nextgen:netapp/v20170815:Account'), pulumi.Alias(type_='azure-native:netapp/v20190501:Account'), pulumi.Alias(type_='azure-nextgen:netapp/v20190501:Account'), pulumi.Alias(type_='azure-native:netapp/v20190601:Account'), pulumi.Alias(type_='azure-nextgen:netapp/v20190601:Account'), pulumi.Alias(type_='azure-native:netapp/v20190701:Account'), pulumi.Alias(type_='azure-nextgen:netapp/v20190701:Account'), pulumi.Alias(type_='azure-native:netapp/v20190801:Account'), pulumi.Alias(type_='azure-nextgen:netapp/v20190801:Account'), pulumi.Alias(type_='azure-native:netapp/v20191001:Account'), pulumi.Alias(type_='azure-nextgen:netapp/v20191001:Account'), pulumi.Alias(type_='azure-native:netapp/v20191101:Account'), pulumi.Alias(type_='azure-nextgen:netapp/v20191101:Account'), pulumi.Alias(type_='azure-native:netapp/v20200201:Account'), pulumi.Alias(type_='azure-nextgen:netapp/v20200201:Account'), pulumi.Alias(type_='azure-native:netapp/v20200301:Account'), 
pulumi.Alias(type_='azure-nextgen:netapp/v20200301:Account'), pulumi.Alias(type_='azure-native:netapp/v20200501:Account'), pulumi.Alias(type_='azure-nextgen:netapp/v20200501:Account'), pulumi.Alias(type_='azure-native:netapp/v20200601:Account'), pulumi.Alias(type_='azure-nextgen:netapp/v20200601:Account'), pulumi.Alias(type_='azure-native:netapp/v20200701:Account'), pulumi.Alias(type_='azure-nextgen:netapp/v20200701:Account'), pulumi.Alias(type_='azure-native:netapp/v20200801:Account'), pulumi.Alias(type_='azure-nextgen:netapp/v20200801:Account'), pulumi.Alias(type_='azure-native:netapp/v20201101:Account'), pulumi.Alias(type_='azure-nextgen:netapp/v20201101:Account')]) opts = pulumi.ResourceOptions.merge(opts, alias_opts) super(Account, __self__).__init__('azure-native:netapp/v20200901:Account', resource_name, __props__, opts)
-3,839,363,611,189,158,000
NetApp account resource :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] account_name: The name of the NetApp account :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ActiveDirectoryArgs']]]] active_directories: Active Directories :param pulumi.Input[str] location: Resource location :param pulumi.Input[str] resource_group_name: The name of the resource group. :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags
sdk/python/pulumi_azure_native/netapp/v20200901/account.py
__init__
pulumi-bot/pulumi-azure-native
python
def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions]=None, account_name: Optional[pulumi.Input[str]]=None, active_directories: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ActiveDirectoryArgs']]]]]=None, location: Optional[pulumi.Input[str]]=None, resource_group_name: Optional[pulumi.Input[str]]=None, tags: Optional[pulumi.Input[Mapping[(str, pulumi.Input[str])]]]=None, __props__=None, __name__=None, __opts__=None): "\n NetApp account resource\n\n :param str resource_name: The name of the resource.\n :param pulumi.ResourceOptions opts: Options for the resource.\n :param pulumi.Input[str] account_name: The name of the NetApp account\n :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ActiveDirectoryArgs']]]] active_directories: Active Directories\n :param pulumi.Input[str] location: Resource location\n :param pulumi.Input[str] resource_group_name: The name of the resource group.\n :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags\n " if (__name__ is not None): warnings.warn('explicit use of __name__ is deprecated', DeprecationWarning) resource_name = __name__ if (__opts__ is not None): warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning) opts = __opts__ if (opts is None): opts = pulumi.ResourceOptions() if (not isinstance(opts, pulumi.ResourceOptions)): raise TypeError('Expected resource options to be a ResourceOptions instance') if (opts.version is None): opts.version = _utilities.get_version() if (opts.id is None): if (__props__ is not None): raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource') __props__ = dict() __props__['account_name'] = account_name __props__['active_directories'] = active_directories __props__['location'] = location if ((resource_group_name is None) and (not opts.urn)): raise TypeError("Missing required property 'resource_group_name'") __props__['resource_group_name'] = resource_group_name __props__['tags'] = tags __props__['name'] = None __props__['provisioning_state'] = None __props__['type'] = None alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_='azure-nextgen:netapp/v20200901:Account'), pulumi.Alias(type_='azure-native:netapp:Account'), pulumi.Alias(type_='azure-nextgen:netapp:Account'), pulumi.Alias(type_='azure-native:netapp/latest:Account'), pulumi.Alias(type_='azure-nextgen:netapp/latest:Account'), pulumi.Alias(type_='azure-native:netapp/v20170815:Account'), pulumi.Alias(type_='azure-nextgen:netapp/v20170815:Account'), pulumi.Alias(type_='azure-native:netapp/v20190501:Account'), pulumi.Alias(type_='azure-nextgen:netapp/v20190501:Account'), pulumi.Alias(type_='azure-native:netapp/v20190601:Account'), pulumi.Alias(type_='azure-nextgen:netapp/v20190601:Account'), pulumi.Alias(type_='azure-native:netapp/v20190701:Account'), pulumi.Alias(type_='azure-nextgen:netapp/v20190701:Account'), pulumi.Alias(type_='azure-native:netapp/v20190801:Account'), pulumi.Alias(type_='azure-nextgen:netapp/v20190801:Account'), pulumi.Alias(type_='azure-native:netapp/v20191001:Account'), pulumi.Alias(type_='azure-nextgen:netapp/v20191001:Account'), pulumi.Alias(type_='azure-native:netapp/v20191101:Account'), pulumi.Alias(type_='azure-nextgen:netapp/v20191101:Account'), pulumi.Alias(type_='azure-native:netapp/v20200201:Account'), pulumi.Alias(type_='azure-nextgen:netapp/v20200201:Account'), pulumi.Alias(type_='azure-native:netapp/v20200301:Account'), 
pulumi.Alias(type_='azure-nextgen:netapp/v20200301:Account'), pulumi.Alias(type_='azure-native:netapp/v20200501:Account'), pulumi.Alias(type_='azure-nextgen:netapp/v20200501:Account'), pulumi.Alias(type_='azure-native:netapp/v20200601:Account'), pulumi.Alias(type_='azure-nextgen:netapp/v20200601:Account'), pulumi.Alias(type_='azure-native:netapp/v20200701:Account'), pulumi.Alias(type_='azure-nextgen:netapp/v20200701:Account'), pulumi.Alias(type_='azure-native:netapp/v20200801:Account'), pulumi.Alias(type_='azure-nextgen:netapp/v20200801:Account'), pulumi.Alias(type_='azure-native:netapp/v20201101:Account'), pulumi.Alias(type_='azure-nextgen:netapp/v20201101:Account')]) opts = pulumi.ResourceOptions.merge(opts, alias_opts) super(Account, __self__).__init__('azure-native:netapp/v20200901:Account', resource_name, __props__, opts)
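Given the constructor above, a minimal Pulumi program that provisions a NetApp account might look like the following; the resource and group names are placeholders:

import pulumi
import pulumi_azure_native as azure_native

account = azure_native.netapp.v20200901.Account(
    'account',
    account_name='myNetAppAccount',           # hypothetical account name
    resource_group_name='myResourceGroup',    # required; a TypeError is raised if omitted
    location='eastus',
    tags={'env': 'dev'},
)

pulumi.export('account_name', account.name)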
@staticmethod def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions]=None) -> 'Account': "\n Get an existing Account resource's state with the given name, id, and optional extra\n properties used to qualify the lookup.\n\n :param str resource_name: The unique name of the resulting resource.\n :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.\n :param pulumi.ResourceOptions opts: Options for the resource.\n " opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) __props__ = dict() __props__['active_directories'] = None __props__['location'] = None __props__['name'] = None __props__['provisioning_state'] = None __props__['tags'] = None __props__['type'] = None return Account(resource_name, opts=opts, __props__=__props__)
329,630,109,003,327,500
Get an existing Account resource's state with the given name, id, and optional extra properties used to qualify the lookup. :param str resource_name: The unique name of the resulting resource. :param pulumi.Input[str] id: The unique provider ID of the resource to lookup. :param pulumi.ResourceOptions opts: Options for the resource.
sdk/python/pulumi_azure_native/netapp/v20200901/account.py
get
pulumi-bot/pulumi-azure-native
python
@staticmethod def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions]=None) -> 'Account': "\n Get an existing Account resource's state with the given name, id, and optional extra\n properties used to qualify the lookup.\n\n :param str resource_name: The unique name of the resulting resource.\n :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.\n :param pulumi.ResourceOptions opts: Options for the resource.\n " opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) __props__ = dict() __props__['active_directories'] = None __props__['location'] = None __props__['name'] = None __props__['provisioning_state'] = None __props__['tags'] = None __props__['type'] = None return Account(resource_name, opts=opts, __props__=__props__)
@property @pulumi.getter(name='activeDirectories') def active_directories(self) -> pulumi.Output[Optional[Sequence['outputs.ActiveDirectoryResponse']]]: '\n Active Directories\n ' return pulumi.get(self, 'active_directories')
6,275,772,879,752,033,000
Active Directories
sdk/python/pulumi_azure_native/netapp/v20200901/account.py
active_directories
pulumi-bot/pulumi-azure-native
python
@property @pulumi.getter(name='activeDirectories') def active_directories(self) -> pulumi.Output[Optional[Sequence['outputs.ActiveDirectoryResponse']]]: '\n \n ' return pulumi.get(self, 'active_directories')
@property @pulumi.getter def location(self) -> pulumi.Output[str]: '\n Resource location\n ' return pulumi.get(self, 'location')
2,974,713,878,710,662,000
Resource location
sdk/python/pulumi_azure_native/netapp/v20200901/account.py
location
pulumi-bot/pulumi-azure-native
python
@property @pulumi.getter def location(self) -> pulumi.Output[str]: '\n \n ' return pulumi.get(self, 'location')
@property @pulumi.getter def name(self) -> pulumi.Output[str]: '\n Resource name\n ' return pulumi.get(self, 'name')
387,709,723,693,576,260
Resource name
sdk/python/pulumi_azure_native/netapp/v20200901/account.py
name
pulumi-bot/pulumi-azure-native
python
@property @pulumi.getter def name(self) -> pulumi.Output[str]: '\n \n ' return pulumi.get(self, 'name')
@property @pulumi.getter(name='provisioningState') def provisioning_state(self) -> pulumi.Output[str]: '\n Azure lifecycle management\n ' return pulumi.get(self, 'provisioning_state')
5,814,604,552,307,744,000
Azure lifecycle management
sdk/python/pulumi_azure_native/netapp/v20200901/account.py
provisioning_state
pulumi-bot/pulumi-azure-native
python
@property @pulumi.getter(name='provisioningState') def provisioning_state(self) -> pulumi.Output[str]: '\n \n ' return pulumi.get(self, 'provisioning_state')
@property @pulumi.getter def tags(self) -> pulumi.Output[Optional[Mapping[(str, str)]]]: '\n Resource tags\n ' return pulumi.get(self, 'tags')
-1,239,552,863,427,208,400
Resource tags
sdk/python/pulumi_azure_native/netapp/v20200901/account.py
tags
pulumi-bot/pulumi-azure-native
python
@property @pulumi.getter def tags(self) -> pulumi.Output[Optional[Mapping[(str, str)]]]: '\n \n ' return pulumi.get(self, 'tags')
@property @pulumi.getter def type(self) -> pulumi.Output[str]: '\n Resource type\n ' return pulumi.get(self, 'type')
8,967,421,614,257,702,000
Resource type
sdk/python/pulumi_azure_native/netapp/v20200901/account.py
type
pulumi-bot/pulumi-azure-native
python
@property @pulumi.getter def type(self) -> pulumi.Output[str]: '\n \n ' return pulumi.get(self, 'type')
def parse_input(input, inflv, starttime, endtime): 'Read simulations data from input file.\n\n Arguments:\n input -- prefix of file containing neutrino fluxes\n inflv -- neutrino flavor to consider\n starttime -- start time set by user via command line option (or None)\n endtime -- end time set by user via command line option (or None)\n ' f = h5py.File(input, 'r') for (t, r) in f['sim_data']['shock_radius']: if (r > 1): tbounce = (t * 1000) break starttime = get_starttime(starttime, ((1000 * f['sim_data']['shock_radius'][0][0]) - tbounce)) endtime = get_endtime(endtime, ((1000 * f['sim_data']['shock_radius'][(- 1)][0]) - tbounce)) global flux flux = {} path = {'e': 'nue_data', 'eb': 'nuae_data', 'x': 'nux_data', 'xb': 'nux_data'}[inflv] for (i, (t, lum)) in enumerate(f[path]['lum']): t = ((1000 * t) - tbounce) if ((t < (starttime - 30)) or (t > (endtime + 30))): continue lum *= (1e+51 * 624.151) mean_e = f[path]['avg_energy'][i][1] mean_e_sq = (f[path]['rms_energy'][i][1] ** 2) flux[t] = (mean_e, mean_e_sq, lum) f.close() return (starttime, endtime, sorted(flux.keys()))
6,570,633,104,090,349,000
Read simulation data from the input file. Arguments: input -- path to a file containing neutrino fluxes inflv -- neutrino flavor to consider starttime -- start time set by user via command line option (or None) endtime -- end time set by user via command line option (or None)
sntools/formats/warren2020.py
parse_input
arfon/sntools
python
def parse_input(input, inflv, starttime, endtime): 'Read simulation data from the input file.\n\n Arguments:\n input -- path to a file containing neutrino fluxes\n inflv -- neutrino flavor to consider\n starttime -- start time set by user via command line option (or None)\n endtime -- end time set by user via command line option (or None)\n ' f = h5py.File(input, 'r') for (t, r) in f['sim_data']['shock_radius']: if (r > 1): tbounce = (t * 1000) break starttime = get_starttime(starttime, ((1000 * f['sim_data']['shock_radius'][0][0]) - tbounce)) endtime = get_endtime(endtime, ((1000 * f['sim_data']['shock_radius'][(- 1)][0]) - tbounce)) global flux flux = {} path = {'e': 'nue_data', 'eb': 'nuae_data', 'x': 'nux_data', 'xb': 'nux_data'}[inflv] for (i, (t, lum)) in enumerate(f[path]['lum']): t = ((1000 * t) - tbounce) if ((t < (starttime - 30)) or (t > (endtime + 30))): continue lum *= (1e+51 * 624.151) mean_e = f[path]['avg_energy'][i][1] mean_e_sq = (f[path]['rms_energy'][i][1] ** 2) flux[t] = (mean_e, mean_e_sq, lum) f.close() return (starttime, endtime, sorted(flux.keys()))
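A minimal call sketch for the record above; the file name is a placeholder, and get_starttime/get_endtime are assumed to be helpers from the surrounding sntools package:

# Read electron-neutrino fluxes, letting the simulation bounds pick the time window.
starttime, endtime, times = parse_input('warren2020_sim.h5', 'e', None, None)
print(starttime, endtime, len(times))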
def testEzsignformfieldResponseCompound(self): 'Test EzsignformfieldResponseCompound' pass
-4,861,070,669,607,094,000
Test EzsignformfieldResponseCompound
test/test_ezsignformfield_response_compound.py
testEzsignformfieldResponseCompound
eZmaxinc/eZmax-SDK-python
python
def testEzsignformfieldResponseCompound(self): pass
@patch('regulations.apps.get_app_template_dirs') def test_precompute_custom_templates(self, get_app_template_dirs): 'Verify that custom templates are found' get_app_template_dirs.return_value = [self.tmpdir] open(os.path.join(self.tmpdir, '123-45-a.html'), 'w').close() open(os.path.join(self.tmpdir, 'other.html'), 'w').close() RegulationsConfig.precompute_custom_templates() self.assertEqual(RegulationsConfig.custom_tpls['123-45-a'], 'regulations/custom_nodes/123-45-a.html') self.assertEqual(RegulationsConfig.custom_tpls['other'], 'regulations/custom_nodes/other.html') self.assertFalse(('another' in RegulationsConfig.custom_tpls))
-4,249,644,129,594,510,300
Verify that custom templates are found
regulations/tests/apps_tests.py
test_precompute_custom_templates
CMSgov/cmcs-eregulations
python
@patch('regulations.apps.get_app_template_dirs') def test_precompute_custom_templates(self, get_app_template_dirs): get_app_template_dirs.return_value = [self.tmpdir] open(os.path.join(self.tmpdir, '123-45-a.html'), 'w').close() open(os.path.join(self.tmpdir, 'other.html'), 'w').close() RegulationsConfig.precompute_custom_templates() self.assertEqual(RegulationsConfig.custom_tpls['123-45-a'], 'regulations/custom_nodes/123-45-a.html') self.assertEqual(RegulationsConfig.custom_tpls['other'], 'regulations/custom_nodes/other.html') self.assertFalse(('another' in RegulationsConfig.custom_tpls))
def uvc_return_mapping(x_sol, data, tol=1e-08, maximum_iterations=1000): " Implements the time integration of the updated Voce-Chaboche material model.\n\n :param np.array x_sol: Updated Voce-Chaboche model parameters.\n :param pd.DataFrame data: stress-strain data.\n :param float tol: Local Newton tolerance.\n :param int maximum_iterations: maximum iterations in local Newton procedure, raises RuntimeError if exceeded.\n :return dict: History of: stress ('stress'), strain ('strain'), the total error ('error') calculated by the\n updated Voce-Chaboche model, number of iterations for convergence at each increment ('num_its').\n " if (len(x_sol) < 8): raise RuntimeError('No backstresses or using original V-C params.') n_param_per_back = 2 n_basic_param = 6 E = (x_sol[0] * 1.0) sy_0 = (x_sol[1] * 1.0) Q = (x_sol[2] * 1.0) b = (x_sol[3] * 1.0) D = (x_sol[4] * 1.0) a = (x_sol[5] * 1.0) n_backstresses = int(((len(x_sol) - n_basic_param) / n_param_per_back)) c_k = [] gamma_k = [] for i in range(0, n_backstresses): c_k.append(x_sol[(n_basic_param + (n_param_per_back * i))]) gamma_k.append(x_sol[((n_basic_param + 1) + (n_param_per_back * i))]) alpha_components = np.zeros(n_backstresses, dtype=object) strain = 0.0 stress = 0.0 ep_eq = 0.0 error = 0.0 sum_abs_de = 0.0 stress_sim = 0.0 stress_test = 0.0 area_test = 0.0 stress_track = [] strain_track = [] strain_inc_track = [] iteration_track = [] loading = np.diff(data['e_true']) for (increment_number, strain_inc) in enumerate(loading): strain += strain_inc alpha = np.sum(alpha_components) yield_stress = ((sy_0 + (Q * (1.0 - np.exp(((- b) * ep_eq))))) - (D * (1.0 - np.exp(((- a) * ep_eq))))) trial_stress = (stress + (E * strain_inc)) relative_stress = (trial_stress - alpha) flow_dir = np.sign(relative_stress) yield_condition = (np.abs(relative_stress) - yield_stress) if (yield_condition > tol): is_converged = False else: is_converged = True stress_sim_1 = (stress_sim * 1.0) stress_test_1 = (stress_test * 1.0) ep_eq_init = ep_eq alpha_init = alpha consist_param = 0.0 number_of_iterations = 0 while ((is_converged is False) and (number_of_iterations < maximum_iterations)): number_of_iterations += 1 yield_stress = ((sy_0 + (Q * (1.0 - np.exp(((- b) * ep_eq))))) - (D * (1.0 - np.exp(((- a) * ep_eq))))) iso_modulus = (((Q * b) * np.exp(((- b) * ep_eq))) - ((D * a) * np.exp(((- a) * ep_eq)))) alpha = 0.0 kin_modulus = 0.0 for i in range(0, n_backstresses): e_k = np.exp(((- gamma_k[i]) * (ep_eq - ep_eq_init))) alpha += (((flow_dir * c_k[i]) / gamma_k[i]) + ((alpha_components[i] - ((flow_dir * c_k[i]) / gamma_k[i])) * e_k)) kin_modulus += ((c_k[i] * e_k) - (((flow_dir * gamma_k[i]) * e_k) * alpha_components[i])) delta_alpha = (alpha - alpha_init) numerator = (np.abs(relative_stress) - (((consist_param * E) + yield_stress) + (flow_dir * delta_alpha))) denominator = (- ((E + iso_modulus) + kin_modulus)) consist_param = (consist_param - (numerator / denominator)) ep_eq = (ep_eq_init + consist_param) if (np.abs(numerator) < tol): is_converged = True stress = (trial_stress - ((E * flow_dir) * consist_param)) for i in range(0, n_backstresses): e_k = np.exp(((- gamma_k[i]) * (ep_eq - ep_eq_init))) alpha_components[i] = (((flow_dir * c_k[i]) / gamma_k[i]) + ((alpha_components[i] - ((flow_dir * c_k[i]) / gamma_k[i])) * e_k)) stress_track.append(stress) strain_track.append(strain) strain_inc_track.append(strain_inc) iteration_track.append(number_of_iterations) stress_sim = (stress * 1.0) stress_test = data['Sigma_true'].iloc[(increment_number + 1)] sum_abs_de += 
np.abs(strain_inc) area_test += ((np.abs(strain_inc) * ((stress_test ** 2) + (stress_test_1 ** 2))) / 2.0) error += ((np.abs(strain_inc) * (((stress_sim - stress_test) ** 2) + ((stress_sim_1 - stress_test_1) ** 2))) / 2.0) if (number_of_iterations >= maximum_iterations): print('Increment number = ', increment_number) print('Parameters = ', x_sol) print('Numerator = ', numerator) raise RuntimeError((('Return mapping did not converge in ' + str(maximum_iterations)) + ' iterations.')) area = (area_test / sum_abs_de) error = (error / sum_abs_de) return {'stress': stress_track, 'strain': strain_track, 'error': error, 'num_its': iteration_track, 'area': area}
-8,363,361,874,546,954,000
Implements the time integration of the updated Voce-Chaboche material model. :param np.array x_sol: Updated Voce-Chaboche model parameters. :param pd.DataFrame data: stress-strain data. :param float tol: Local Newton tolerance. :param int maximum_iterations: maximum iterations in local Newton procedure, raises RuntimeError if exceeded. :return dict: History of: stress ('stress'), strain ('strain'), the total error ('error') calculated by the updated Voce-Chaboche model, number of iterations for convergence at each increment ('num_its').
RESSPyLab/uvc_model.py
uvc_return_mapping
AlbanoCastroSousa/RESSPyLab
python
def uvc_return_mapping(x_sol, data, tol=1e-08, maximum_iterations=1000): " Implements the time integration of the updated Voce-Chaboche material model.\n\n :param np.array x_sol: Updated Voce-Chaboche model parameters.\n :param pd.DataFrame data: stress-strain data.\n :param float tol: Local Newton tolerance.\n :param int maximum_iterations: maximum iterations in local Newton procedure, raises RuntimeError if exceeded.\n :return dict: History of: stress ('stress'), strain ('strain'), the total error ('error') calculated by the\n updated Voce-Chaboche model, number of iterations for convergence at each increment ('num_its').\n " if (len(x_sol) < 8): raise RuntimeError('No backstresses or using original V-C params.') n_param_per_back = 2 n_basic_param = 6 E = (x_sol[0] * 1.0) sy_0 = (x_sol[1] * 1.0) Q = (x_sol[2] * 1.0) b = (x_sol[3] * 1.0) D = (x_sol[4] * 1.0) a = (x_sol[5] * 1.0) n_backstresses = int(((len(x_sol) - n_basic_param) / n_param_per_back)) c_k = [] gamma_k = [] for i in range(0, n_backstresses): c_k.append(x_sol[(n_basic_param + (n_param_per_back * i))]) gamma_k.append(x_sol[((n_basic_param + 1) + (n_param_per_back * i))]) alpha_components = np.zeros(n_backstresses, dtype=object) strain = 0.0 stress = 0.0 ep_eq = 0.0 error = 0.0 sum_abs_de = 0.0 stress_sim = 0.0 stress_test = 0.0 area_test = 0.0 stress_track = [] strain_track = [] strain_inc_track = [] iteration_track = [] loading = np.diff(data['e_true']) for (increment_number, strain_inc) in enumerate(loading): strain += strain_inc alpha = np.sum(alpha_components) yield_stress = ((sy_0 + (Q * (1.0 - np.exp(((- b) * ep_eq))))) - (D * (1.0 - np.exp(((- a) * ep_eq))))) trial_stress = (stress + (E * strain_inc)) relative_stress = (trial_stress - alpha) flow_dir = np.sign(relative_stress) yield_condition = (np.abs(relative_stress) - yield_stress) if (yield_condition > tol): is_converged = False else: is_converged = True stress_sim_1 = (stress_sim * 1.0) stress_test_1 = (stress_test * 1.0) ep_eq_init = ep_eq alpha_init = alpha consist_param = 0.0 number_of_iterations = 0 while ((is_converged is False) and (number_of_iterations < maximum_iterations)): number_of_iterations += 1 yield_stress = ((sy_0 + (Q * (1.0 - np.exp(((- b) * ep_eq))))) - (D * (1.0 - np.exp(((- a) * ep_eq))))) iso_modulus = (((Q * b) * np.exp(((- b) * ep_eq))) - ((D * a) * np.exp(((- a) * ep_eq)))) alpha = 0.0 kin_modulus = 0.0 for i in range(0, n_backstresses): e_k = np.exp(((- gamma_k[i]) * (ep_eq - ep_eq_init))) alpha += (((flow_dir * c_k[i]) / gamma_k[i]) + ((alpha_components[i] - ((flow_dir * c_k[i]) / gamma_k[i])) * e_k)) kin_modulus += ((c_k[i] * e_k) - (((flow_dir * gamma_k[i]) * e_k) * alpha_components[i])) delta_alpha = (alpha - alpha_init) numerator = (np.abs(relative_stress) - (((consist_param * E) + yield_stress) + (flow_dir * delta_alpha))) denominator = (- ((E + iso_modulus) + kin_modulus)) consist_param = (consist_param - (numerator / denominator)) ep_eq = (ep_eq_init + consist_param) if (np.abs(numerator) < tol): is_converged = True stress = (trial_stress - ((E * flow_dir) * consist_param)) for i in range(0, n_backstresses): e_k = np.exp(((- gamma_k[i]) * (ep_eq - ep_eq_init))) alpha_components[i] = (((flow_dir * c_k[i]) / gamma_k[i]) + ((alpha_components[i] - ((flow_dir * c_k[i]) / gamma_k[i])) * e_k)) stress_track.append(stress) strain_track.append(strain) strain_inc_track.append(strain_inc) iteration_track.append(number_of_iterations) stress_sim = (stress * 1.0) stress_test = data['Sigma_true'].iloc[(increment_number + 1)] sum_abs_de += 
np.abs(strain_inc) area_test += ((np.abs(strain_inc) * ((stress_test ** 2) + (stress_test_1 ** 2))) / 2.0) error += ((np.abs(strain_inc) * (((stress_sim - stress_test) ** 2) + ((stress_sim_1 - stress_test_1) ** 2))) / 2.0) if (number_of_iterations >= maximum_iterations): print('Increment number = ', increment_number) print('Parameters = ', x_sol) print('Numerator = ', numerator) raise RuntimeError((('Return mapping did not converge in ' + str(maximum_iterations)) + ' iterations.')) area = (area_test / sum_abs_de) error = (error / sum_abs_de) return {'stress': stress_track, 'strain': strain_track, 'error': error, 'num_its': iteration_track, 'area': area}
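The data argument must carry the two columns named in the docstring, 'e_true' and 'Sigma_true'. A minimal sketch with illustrative, uncalibrated parameters for a model with one backstress, i.e. the eight entries E, sy_0, Q, b, D, a, C_1, gamma_1 (the import path is assumed):

import numpy as np
import pandas as pd
from RESSPyLab.uvc_model import uvc_return_mapping  # assumed import path

x = np.array([200e3, 350.0, 100.0, 15.0, 50.0, 200.0, 20e3, 150.0])  # made-up values

strain = np.linspace(0.0, 0.02, 201)                        # monotonic tension history
data = pd.DataFrame({'e_true': strain,
                     'Sigma_true': np.zeros_like(strain)})  # dummy test stresses

out = uvc_return_mapping(x, data)
print(out['stress'][-1], max(out['num_its']))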
def sim_curve_uvc(x_sol, test_clean): ' Returns the stress-strain approximation of the updated Voce-Chaboche material model to a given strain input.\n\n :param np.array x_sol: Voce-Chaboche model parameters\n :param DataFrame test_clean: stress-strain data\n :return DataFrame: Voce-Chaboche approximation\n\n The strain column in the DataFrame is labeled "e_true" and the stress column is labeled "Sigma_true".\n ' model_output = uvc_return_mapping(x_sol, test_clean) strain = np.append([0.0], model_output['strain']) stress = np.append([0.0], model_output['stress']) sim_curve = pd.DataFrame(np.array([strain, stress]).transpose(), columns=['e_true', 'Sigma_true']) return sim_curve
3,410,126,839,265,906,700
Returns the stress-strain approximation of the updated Voce-Chaboche material model to a given strain input. :param np.array x_sol: Voce-Chaboche model parameters :param DataFrame test_clean: stress-strain data :return DataFrame: Voce-Chaboche approximation The strain column in the DataFrame is labeled "e_true" and the stress column is labeled "Sigma_true".
RESSPyLab/uvc_model.py
sim_curve_uvc
AlbanoCastroSousa/RESSPyLab
python
def sim_curve_uvc(x_sol, test_clean): ' Returns the stress-strain approximation of the updated Voce-Chaboche material model to a given strain input.\n\n :param np.array x_sol: Voce-Chaboche model parameters\n :param DataFrame test_clean: stress-strain data\n :return DataFrame: Voce-Chaboche approximation\n\n The strain column in the DataFrame is labeled "e_true" and the stress column is labeled "Sigma_true".\n ' model_output = uvc_return_mapping(x_sol, test_clean) strain = np.append([0.0], model_output['strain']) stress = np.append([0.0], model_output['stress']) sim_curve = pd.DataFrame(np.array([strain, stress]).transpose(), columns=['e_true', 'Sigma_true']) return sim_curve
def error_single_test_uvc(x_sol, test_clean): ' Returns the relative error between a test and its approximation using the updated Voce-Chaboche material model.\n\n :param np.array x_sol: Voce-Chaboche model parameters\n :param DataFrame test_clean: stress-strain data\n :return float: relative error\n\n The strain column in the DataFrame is labeled "e_true" and the stress column is labeled "Sigma_true".\n ' model_output = uvc_return_mapping(x_sol, test_clean) return model_output['error']
-6,505,289,781,695,587,000
Returns the relative error between a test and its approximation using the updated Voce-Chaboche material model. :param np.array x_sol: Voce-Chaboche model parameters :param DataFrame test_clean: stress-strain data :return float: relative error The strain column in the DataFrame is labeled "e_true" and the stress column is labeled "Sigma_true".
RESSPyLab/uvc_model.py
error_single_test_uvc
AlbanoCastroSousa/RESSPyLab
python
def error_single_test_uvc(x_sol, test_clean): ' Returns the relative error between a test and its approximation using the updated Voce-Chaboche material model.\n\n :param np.array x_sol: Voce-Chaboche model parameters\n :param DataFrame test_clean: stress-strain data\n :return float: relative error\n\n The strain column in the DataFrame is labeled "e_true" and the stress column is labeled "Sigma_true".\n ' model_output = uvc_return_mapping(x_sol, test_clean) return model_output['error']
def normalized_error_single_test_uvc(x_sol, test_clean): ' Returns the error and the total area of a test and its approximation using the updated Voce-Chaboche\n material model.\n\n :param np.array x_sol: Voce-Chaboche model parameters\n :param DataFrame test_clean: stress-strain data\n :return list: (float) total error, (float) total area\n\n The strain column in the DataFrame is labeled "e_true" and the stress column is labeled "Sigma_true".\n ' model_output = uvc_return_mapping(x_sol, test_clean) return [model_output['error'], model_output['area']]
1,769,212,009,327,486,500
Returns the error and the total area of a test and its approximation using the updated Voce-Chaboche material model. :param np.array x_sol: Voce-Chaboche model parameters :param DataFrame test_clean: stress-strain data :return list: (float) total error, (float) total area The strain column in the DataFrame is labeled "e_true" and the stress column is labeled "Sigma_true".
RESSPyLab/uvc_model.py
normalized_error_single_test_uvc
AlbanoCastroSousa/RESSPyLab
python
def normalized_error_single_test_uvc(x_sol, test_clean): ' Returns the error and the total area of a test and its approximation using the updated Voce-Chaboche\n material model.\n\n :param np.array x_sol: Voce-Chaboche model parameters\n :param DataFrame test_clean: stress-strain data\n :return list: (float) total error, (float) total area\n\n The strain column in the DataFrame is labeled "e_true" and the stress column is labeled "Sigma_true".\n ' model_output = uvc_return_mapping(x_sol, test_clean) return [model_output['error'], model_output['area']]
def calc_phi_total(x, data): ' Returns the sum of the normalized relative error of the updated Voce-Chaboche material model given x.\n\n :param np.array x: Updated Voce-Chaboche material model parameters.\n :param list data: (pd.DataFrame) Stress-strain history for each test considered.\n :return float: Normalized error value expressed as a percent (raw value * 100).\n\n The normalized error is defined in de Sousa and Lignos (2017).\n ' error_total = 0.0 area_total = 0.0 for d in data: (error, area) = normalized_error_single_test_uvc(x, d) error_total += error area_total += area return (np.sqrt((error_total / area_total)) * 100.0)
-7,501,822,167,166,433,000
Returns the sum of the normalized relative error of the updated Voce-Chaboche material model given x. :param np.array x: Updated Voce-Chaboche material model parameters. :param list data: (pd.DataFrame) Stress-strain history for each test considered. :return float: Normalized error value expressed as a percent (raw value * 100). The normalized error is defined in de Sousa and Lignos (2017).
RESSPyLab/uvc_model.py
calc_phi_total
AlbanoCastroSousa/RESSPyLab
python
def calc_phi_total(x, data): ' Returns the sum of the normalized relative error of the updated Voce-Chaboche material model given x.\n\n :param np.array x: Updated Voce-Chaboche material model parameters.\n :param list data: (pd.DataFrame) Stress-strain history for each test considered.\n :return float: Normalized error value expressed as a percent (raw value * 100).\n\n The normalized error is defined in de Sousa and Lignos (2017).\n ' error_total = 0.0 area_total = 0.0 for d in data: (error, area) = normalized_error_single_test_uvc(x, d) error_total += error area_total += area return (np.sqrt((error_total / area_total)) * 100.0)
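In symbols: with $\varepsilon_i$ and $A_i$ the per-test squared error and squared area returned by normalized_error_single_test_uvc, calc_phi_total evaluates

\[ \bar{\varphi} \;=\; 100 \sqrt{ \frac{\sum_i \varepsilon_i}{\sum_i A_i} } , \]

i.e. the normalized error of de Sousa and Lignos (2017) expressed as a percentage.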
def test_total_area(x, data): ' Returns the total squared area underneath all the tests.\n\n :param np.array x: Updated Voce-Chaboche material model parameters.\n :param list data: (pd.DataFrame) Stress-strain history for each test considered.\n :return float: Total squared area.\n ' area_total = 0.0 for d in data: (_, area) = normalized_error_single_test_uvc(x, d) area_total += area return area_total
-5,041,924,756,357,932,000
Returns the total squared area underneath all the tests. :param np.array x: Updated Voce-Chaboche material model parameters. :param list data: (pd.DataFrame) Stress-strain history for each test considered. :return float: Total squared area.
RESSPyLab/uvc_model.py
test_total_area
AlbanoCastroSousa/RESSPyLab
python
def test_total_area(x, data): ' Returns the total squared area underneath all the tests.\n\n :param np.array x: Updated Voce-Chaboche material model parameters.\n :param list data: (pd.DataFrame) Stress-strain history for each test considered.\n :return float: Total squared area.\n ' area_total = 0.0 for d in data: (_, area) = normalized_error_single_test_uvc(x, d) area_total += area return area_total
def uvc_get_hessian(x, data): ' Returns the Hessian of the material model error function for a given set of test data evaluated at x.\n\n :param np.array x: Updated Voce-Chaboche material model parameters.\n :param list data: (pd.DataFrame) Stress-strain history for each test considered.\n :return np.array: Hessian matrix of the error function.\n ' def f(xi): val = 0.0 for d in data: val += error_single_test_uvc(xi, d) return val hess_fun = nda.Hessian(f) return hess_fun(x)
-5,182,262,053,579,384,000
Returns the Hessian of the material model error function for a given set of test data evaluated at x. :param np.array x: Updated Voce-Chaboche material model parameters. :param list data: (pd.DataFrame) Stress-strain history for each test considered. :return np.array: Hessian matrix of the error function.
RESSPyLab/uvc_model.py
uvc_get_hessian
AlbanoCastroSousa/RESSPyLab
python
def uvc_get_hessian(x, data): ' Returns the Hessian of the material model error function for a given set of test data evaluated at x.\n\n :param np.array x: Updated Voce-Chaboche material model parameters.\n :param list data: (pd.DataFrame) Stress-strain history for each test considered.\n :return np.array: Hessian matrix of the error function.\n ' def f(xi): val = 0.0 for d in data: val += error_single_test_uvc(xi, d) return val hess_fun = nda.Hessian(f) return hess_fun(x)
def uvc_consistency_metric(x_base, x_sample, data): ' Returns the xi_2 consistency metric from de Sousa and Lignos 2019 using the updated Voce-Chaboche model.\n\n :param np.array x_base: Updated Voce-Chaboche material model parameters from the base case.\n :param np.array x_sample: Updated Voce-Chaboche material model parameters from the sample case.\n :param list data: (pd.DataFrame) Stress-strain history for each test considered.\n :return float: Increase in quadratic approximation from the base to the sample case.\n ' x_diff = (x_sample - x_base) hess_base = uvc_get_hessian(x_base, data) numerator = np.dot(x_diff, hess_base.dot(x_diff)) denominator = test_total_area(x_base, data) return np.sqrt((numerator / denominator))
7,123,153,927,627,399,000
Returns the xi_2 consistency metric from de Sousa and Lignos 2019 using the updated Voce-Chaboche model. :param np.array x_base: Updated Voce-Chaboche material model parameters from the base case. :param np.array x_sample: Updated Voce-Chaboche material model parameters from the sample case. :param list data: (pd.DataFrame) Stress-strain history for each test considered. :return float: Increase in quadratic approximation from the base to the sample case.
RESSPyLab/uvc_model.py
uvc_consistency_metric
AlbanoCastroSousa/RESSPyLab
python
def uvc_consistency_metric(x_base, x_sample, data): ' Returns the xi_2 consistency metric from de Sousa and Lignos 2019 using the updated Voce-Chaboche model.\n\n :param np.array x_base: Updated Voce-Chaboche material model parameters from the base case.\n :param np.array x_sample: Updated Voce-Chaboche material model parameters from the sample case.\n :param list data: (pd.DataFrame) Stress-strain history for each test considered.\n :return float: Increase in quadratic approximation from the base to the sample case.\n ' x_diff = (x_sample - x_base) hess_base = uvc_get_hessian(x_base, data) numerator = np.dot(x_diff, hess_base.dot(x_diff)) denominator = test_total_area(x_base, data) return np.sqrt((numerator / denominator))
def uvc_tangent_modulus(x_sol, data, tol=1e-08, maximum_iterations=1000): ' Returns the tangent modulus at each strain step.\n\n :param np.array x_sol: Updated Voce-Chaboche model parameters.\n :param pd.DataFrame data: stress-strain data.\n :param float tol: Local Newton tolerance.\n :param int maximum_iterations: maximum iterations in local Newton procedure, raises RuntimeError if exceeded.\n :return np.ndarray: Tangent modulus array.\n ' if (len(x_sol) < 8): raise RuntimeError('No backstresses or using original V-C params.') n_param_per_back = 2 n_basic_param = 6 E = (x_sol[0] * 1.0) sy_0 = (x_sol[1] * 1.0) Q = (x_sol[2] * 1.0) b = (x_sol[3] * 1.0) D = (x_sol[4] * 1.0) a = (x_sol[5] * 1.0) n_backstresses = int(((len(x_sol) - n_basic_param) / n_param_per_back)) c_k = [] gamma_k = [] for i in range(0, n_backstresses): c_k.append(x_sol[(n_basic_param + (n_param_per_back * i))]) gamma_k.append(x_sol[((n_basic_param + 1) + (n_param_per_back * i))]) alpha_components = np.zeros(n_backstresses, dtype=object) strain = 0.0 stress = 0.0 ep_eq = 0.0 stress_track = [] strain_track = [] strain_inc_track = [] iteration_track = [] tangent_track = [] loading = np.diff(data['e_true']) for (increment_number, strain_inc) in enumerate(loading): strain += strain_inc alpha = np.sum(alpha_components) yield_stress = ((sy_0 + (Q * (1.0 - np.exp(((- b) * ep_eq))))) - (D * (1.0 - np.exp(((- a) * ep_eq))))) trial_stress = (stress + (E * strain_inc)) relative_stress = (trial_stress - alpha) flow_dir = np.sign(relative_stress) yield_condition = (np.abs(relative_stress) - yield_stress) if (yield_condition > tol): is_converged = False else: is_converged = True ep_eq_init = ep_eq alpha_init = alpha consist_param = 0.0 number_of_iterations = 0 while ((is_converged is False) and (number_of_iterations < maximum_iterations)): number_of_iterations += 1 yield_stress = ((sy_0 + (Q * (1.0 - np.exp(((- b) * ep_eq))))) - (D * (1.0 - np.exp(((- a) * ep_eq))))) iso_modulus = (((Q * b) * np.exp(((- b) * ep_eq))) - ((D * a) * np.exp(((- a) * ep_eq)))) alpha = 0.0 kin_modulus = 0.0 for i in range(0, n_backstresses): e_k = np.exp(((- gamma_k[i]) * (ep_eq - ep_eq_init))) alpha += (((flow_dir * c_k[i]) / gamma_k[i]) + ((alpha_components[i] - ((flow_dir * c_k[i]) / gamma_k[i])) * e_k)) kin_modulus += ((c_k[i] * e_k) - (((flow_dir * gamma_k[i]) * e_k) * alpha_components[i])) delta_alpha = (alpha - alpha_init) numerator = (np.abs(relative_stress) - (((consist_param * E) + yield_stress) + (flow_dir * delta_alpha))) denominator = (- ((E + iso_modulus) + kin_modulus)) consist_param = (consist_param - (numerator / denominator)) ep_eq = (ep_eq_init + consist_param) if (np.abs(numerator) < tol): is_converged = True stress = (trial_stress - ((E * flow_dir) * consist_param)) for i in range(0, n_backstresses): e_k = np.exp(((- gamma_k[i]) * (ep_eq - ep_eq_init))) alpha_components[i] = (((flow_dir * c_k[i]) / gamma_k[i]) + ((alpha_components[i] - ((flow_dir * c_k[i]) / gamma_k[i])) * e_k)) stress_track.append(stress) strain_track.append(strain) strain_inc_track.append(strain_inc) iteration_track.append(number_of_iterations) if (number_of_iterations > 0): h_prime = 0.0 for i in range(0, n_backstresses): h_prime += (c_k[i] - ((flow_dir * gamma_k[i]) * alpha_components[i])) k_prime = (((Q * b) * np.exp(((- b) * ep_eq))) - ((D * a) * np.exp(((- a) * ep_eq)))) tangent_track.append(((E * (k_prime + h_prime)) / ((E + k_prime) + h_prime))) else: tangent_track.append(E) return np.append([0.0], np.array(tangent_track))
5,687,772,783,232,525,000
Returns the tangent modulus at each strain step. :param np.array x_sol: Updated Voce-Chaboche model parameters. :param pd.DataFrame data: stress-strain data. :param float tol: Local Newton tolerance. :param int maximum_iterations: maximum iterations in local Newton procedure, raises RuntimeError if exceeded. :return np.ndarray: Tangent modulus array.
RESSPyLab/uvc_model.py
uvc_tangent_modulus
AlbanoCastroSousa/RESSPyLab
python
def uvc_tangent_modulus(x_sol, data, tol=1e-08, maximum_iterations=1000): ' Returns the tangent modulus at each strain step.\n\n :param np.array x_sol: Updated Voce-Chaboche model parameters.\n :param pd.DataFrame data: stress-strain data.\n :param float tol: Local Newton tolerance.\n :param int maximum_iterations: maximum iterations in local Newton procedure, raises RuntimeError if exceeded.\n :return np.ndarray: Tangent modulus array.\n ' if (len(x_sol) < 8): raise RuntimeError('No backstresses or using original V-C params.') n_param_per_back = 2 n_basic_param = 6 E = (x_sol[0] * 1.0) sy_0 = (x_sol[1] * 1.0) Q = (x_sol[2] * 1.0) b = (x_sol[3] * 1.0) D = (x_sol[4] * 1.0) a = (x_sol[5] * 1.0) n_backstresses = int(((len(x_sol) - n_basic_param) / n_param_per_back)) c_k = [] gamma_k = [] for i in range(0, n_backstresses): c_k.append(x_sol[(n_basic_param + (n_param_per_back * i))]) gamma_k.append(x_sol[((n_basic_param + 1) + (n_param_per_back * i))]) alpha_components = np.zeros(n_backstresses, dtype=object) strain = 0.0 stress = 0.0 ep_eq = 0.0 stress_track = [] strain_track = [] strain_inc_track = [] iteration_track = [] tangent_track = [] loading = np.diff(data['e_true']) for (increment_number, strain_inc) in enumerate(loading): strain += strain_inc alpha = np.sum(alpha_components) yield_stress = ((sy_0 + (Q * (1.0 - np.exp(((- b) * ep_eq))))) - (D * (1.0 - np.exp(((- a) * ep_eq))))) trial_stress = (stress + (E * strain_inc)) relative_stress = (trial_stress - alpha) flow_dir = np.sign(relative_stress) yield_condition = (np.abs(relative_stress) - yield_stress) if (yield_condition > tol): is_converged = False else: is_converged = True ep_eq_init = ep_eq alpha_init = alpha consist_param = 0.0 number_of_iterations = 0 while ((is_converged is False) and (number_of_iterations < maximum_iterations)): number_of_iterations += 1 yield_stress = ((sy_0 + (Q * (1.0 - np.exp(((- b) * ep_eq))))) - (D * (1.0 - np.exp(((- a) * ep_eq))))) iso_modulus = (((Q * b) * np.exp(((- b) * ep_eq))) - ((D * a) * np.exp(((- a) * ep_eq)))) alpha = 0.0 kin_modulus = 0.0 for i in range(0, n_backstresses): e_k = np.exp(((- gamma_k[i]) * (ep_eq - ep_eq_init))) alpha += (((flow_dir * c_k[i]) / gamma_k[i]) + ((alpha_components[i] - ((flow_dir * c_k[i]) / gamma_k[i])) * e_k)) kin_modulus += ((c_k[i] * e_k) - (((flow_dir * gamma_k[i]) * e_k) * alpha_components[i])) delta_alpha = (alpha - alpha_init) numerator = (np.abs(relative_stress) - (((consist_param * E) + yield_stress) + (flow_dir * delta_alpha))) denominator = (- ((E + iso_modulus) + kin_modulus)) consist_param = (consist_param - (numerator / denominator)) ep_eq = (ep_eq_init + consist_param) if (np.abs(numerator) < tol): is_converged = True stress = (trial_stress - ((E * flow_dir) * consist_param)) for i in range(0, n_backstresses): e_k = np.exp(((- gamma_k[i]) * (ep_eq - ep_eq_init))) alpha_components[i] = (((flow_dir * c_k[i]) / gamma_k[i]) + ((alpha_components[i] - ((flow_dir * c_k[i]) / gamma_k[i])) * e_k)) stress_track.append(stress) strain_track.append(strain) strain_inc_track.append(strain_inc) iteration_track.append(number_of_iterations) if (number_of_iterations > 0): h_prime = 0.0 for i in range(0, n_backstresses): h_prime += (c_k[i] - ((flow_dir * gamma_k[i]) * alpha_components[i])) k_prime = (((Q * b) * np.exp(((- b) * ep_eq))) - ((D * a) * np.exp(((- a) * ep_eq)))) tangent_track.append(((E * (k_prime + h_prime)) / ((E + k_prime) + h_prime))) else: tangent_track.append(E) return np.append([0.0], np.array(tangent_track))
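In the plastic branch above, the value stored in tangent_track follows the classical elastoplastic modulus E_t = E (K' + H') / (E + K' + H'). A minimal sketch of that formula, with illustrative (hypothetical) moduli:

def elastoplastic_tangent(E, k_prime, h_prime):
    # E: elastic modulus, k_prime: isotropic hardening modulus,
    # h_prime: kinematic hardening modulus; matches the expression
    # appended to tangent_track in the plastic branch above.
    return E * (k_prime + h_prime) / (E + k_prime + h_prime)

# With hardening much smaller than E, the tangent is far below E;
# elastic steps keep E_t = E.
print(elastoplastic_tangent(200000.0, 1000.0, 2000.0))  # ~2955.7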
def get_wiki_references(url, outfile=None): 'get_wiki_references.\n Extracts references from predefined sections of wiki page\n Uses `urlscan`, `refextract`, `doi`, `wikipedia`, and `re` (for ArXiv URLs)\n\n :param url: URL of wiki article to scrape\n :param outfile: File to write extracted references to\n ' def _check(l): return (((not l['doi']) or (l['doi'] == l['refs'][(- 1)]['doi'])) and ((not l['arxiv']) or (l['arxiv'] == l['refs'][(- 1)]['arxiv']))) page = wiki.page(get_wiki_page_id(url)) sections = get_wiki_sections(page.content) lines = sum([get_wiki_lines(s, predicate=any) for s in sections.values()], []) links = sum([wikiparse.parse(s).external_links for s in sections.values()], []) summary = sum([[{'raw': l, 'links': urlscan.parse_text_urls(l), 'refs': refextract.extract_references_from_string(l), 'doi': doi.find_doi_in_text(l), 'arxiv': (m.group(1) if ((m := arxiv_url_regex.search(l)) is not None) else None)} for l in get_wiki_lines(s, predicate=any)] for s in sections.values()], []) failed = [ld for ld in summary if (not _check(ld))] if any(failed): logger.warning('Consistency check failed for the following lines: {}'.format(failed)) return _serialize(summary, outfile)
1,990,428,418,421,912,600
get_wiki_references. Extracts references from predefined sections of wiki page Uses `urlscan`, `refextract`, `doi`, `wikipedia`, and `re` (for ArXiv URLs) :param url: URL of wiki article to scrape :param outfile: File to write extracted references to
scraper/apis/wikipedia.py
get_wiki_references
antimike/citation-scraper
python
def get_wiki_references(url, outfile=None): 'get_wiki_references.\n Extracts references from predefined sections of wiki page\n Uses `urlscan`, `refextract`, `doi`, `wikipedia`, and `re` (for ArXiv URLs)\n\n :param url: URL of wiki article to scrape\n :param outfile: File to write extracted references to\n ' def _check(l): return (((not l['doi']) or (l['doi'] == l['refs'][(- 1)]['doi'])) and ((not l['arxiv']) or (l['arxiv'] == l['refs'][(- 1)]['arxiv']))) page = wiki.page(get_wiki_page_id(url)) sections = get_wiki_sections(page.content) lines = sum([get_wiki_lines(s, predicate=any) for s in sections.values()], []) links = sum([wikiparse.parse(s).external_links for s in sections.values()], []) summary = sum([[{'raw': l, 'links': urlscan.parse_text_urls(l), 'refs': refextract.extract_references_from_string(l), 'doi': doi.find_doi_in_text(l), 'arxiv': (m.group(1) if ((m := arxiv_url_regex.search(l)) is not None) else None)} for l in get_wiki_lines(s, predicate=any)] for s in sections.values()], []) failed = [ld for ld in summary if (not _check(ld))] if any(failed): logger.warning('Consistency check failed for the following lines: {}'.format(failed)) return _serialize(summary, outfile)
def asm_and_link_one_file(asm_path: str, work_dir: str) -> str: 'Assemble and link file at asm_path in work_dir.\n\n Returns the path to the resulting ELF\n\n ' otbn_as = os.path.join(UTIL_DIR, 'otbn-as') otbn_ld = os.path.join(UTIL_DIR, 'otbn-ld') obj_path = os.path.join(work_dir, 'tst.o') elf_path = os.path.join(work_dir, 'tst') subprocess.run([otbn_as, '-o', obj_path, asm_path], check=True) subprocess.run([otbn_ld, '-o', elf_path, obj_path], check=True) return elf_path
-372,252,728,031,894,140
Assemble and link file at asm_path in work_dir. Returns the path to the resulting ELF
hw/ip/otbn/dv/otbnsim/test/testutil.py
asm_and_link_one_file
OneToughMonkey/opentitan
python
def asm_and_link_one_file(asm_path: str, work_dir: str) -> str: 'Assemble and link file at asm_path in work_dir.\n\n Returns the path to the resulting ELF\n\n ' otbn_as = os.path.join(UTIL_DIR, 'otbn-as') otbn_ld = os.path.join(UTIL_DIR, 'otbn-ld') obj_path = os.path.join(work_dir, 'tst.o') elf_path = os.path.join(work_dir, 'tst') subprocess.run([otbn_as, '-o', obj_path, asm_path], check=True) subprocess.run([otbn_ld, '-o', elf_path, obj_path], check=True) return elf_path
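A hedged usage sketch, assuming the OTBN toolchain is present under UTIL_DIR and that 'prog.s' (a hypothetical name) is a valid OTBN assembly file:

import tempfile

# Assemble and link one source file in a throwaway work directory;
# the returned path points at the linked ELF ('tst') inside it.
with tempfile.TemporaryDirectory() as work_dir:
    elf_path = asm_and_link_one_file('prog.s', work_dir)
    print(elf_path)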
def find_two_smallest(L: List[float]) -> Tuple[(int, int)]: ' (see above) ' smallest = min(L) min1 = L.index(smallest) L.remove(smallest) next_smallest = min(L) min2 = L.index(next_smallest) L.insert(min1, smallest) if (min1 <= min2): min2 += 1 return (min1, min2)
-1,861,280,632,368,825,900
(see above)
chapter12/examples/example02.py
find_two_smallest
YordanIH/Intro_to_CS_w_Python
python
def find_two_smallest(L: List[float]) -> Tuple[(int, int)]: ' ' smallest = min(L) min1 = L.index(smallest) L.remove(smallest) next_smallest = min(L) min2 = L.index(next_smallest) L.insert(min1, smallest) if (min1 <= min2): min2 += 1 return (min1, min2)
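The temporary removal of the smallest value can shift the index of the second smallest, which is why min2 is incremented when min1 <= min2; a quick check:

# 96.0 is smallest at index 6; after removing it, 102.0 sits at index 6,
# so min2 must be shifted to 7 once 96.0 is re-inserted.
values = [809.0, 834.0, 477.0, 478.0, 307.0, 122.0, 96.0, 102.0, 324.0]
print(find_two_smallest(values))  # (6, 7)
print(values)                     # list restored to its original order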
def paddedInt(i): "\n return a string that contains `i`, left-padded with 0's up to PAD_LEN digits\n " i_str = str(i) pad = (PAD_LEN - len(i_str)) return ((pad * '0') + i_str)
-4,372,382,450,324,855,300
return a string that contains `i`, left-padded with 0's up to PAD_LEN digits
credstash.py
paddedInt
traveloka/credstash
python
def paddedInt(i): "\n \n " i_str = str(i) pad = (PAD_LEN - len(i_str)) return ((pad * '0') + i_str)
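Assuming PAD_LEN is the module-level constant (19 in credstash), this is equivalent to str.zfill; the fixed width keeps lexicographic and numeric ordering of versions in agreement:

PAD_LEN = 19  # assumed to match the module constant

# Both forms left-pad with zeros up to PAD_LEN digits.
assert str(1).zfill(PAD_LEN) == '0000000000000000001'
assert str(42).zfill(PAD_LEN) == '0000000000000000042'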
def getHighestVersion(name, region='us-east-1', table='credential-store'): '\n Return the highest version of `name` in the table\n ' dynamodb = boto3.resource('dynamodb', region_name=region) secrets = dynamodb.Table(table) response = secrets.query(Limit=1, ScanIndexForward=False, ConsistentRead=True, KeyConditionExpression=boto3.dynamodb.conditions.Key('name').eq(name), ProjectionExpression='version') if (response['Count'] == 0): return 0 return response['Items'][0]['version']
6,380,276,000,185,197,000
Return the highest version of `name` in the table
credstash.py
getHighestVersion
traveloka/credstash
python
def getHighestVersion(name, region='us-east-1', table='credential-store'): '\n \n ' dynamodb = boto3.resource('dynamodb', region_name=region) secrets = dynamodb.Table(table) response = secrets.query(Limit=1, ScanIndexForward=False, ConsistentRead=True, KeyConditionExpression=boto3.dynamodb.conditions.Key('name').eq(name), ProjectionExpression='version') if (response['Count'] == 0): return 0 return response['Items'][0]['version']
def listSecrets(region='us-east-1', table='credential-store'): '\n do a full-table scan of the credential-store,\n and return the names and versions of every credential\n ' dynamodb = boto3.resource('dynamodb', region_name=region) secrets = dynamodb.Table(table) response = secrets.scan(ProjectionExpression='#N, version', ExpressionAttributeNames={'#N': 'name'}) return response['Items']
-3,835,120,575,174,796,300
do a full-table scan of the credential-store, and return the names and versions of every credential
credstash.py
listSecrets
traveloka/credstash
python
def listSecrets(region='us-east-1', table='credential-store'): '\n do a full-table scan of the credential-store,\n and return the names and versions of every credential\n ' dynamodb = boto3.resource('dynamodb', region_name=region) secrets = dynamodb.Table(table) response = secrets.scan(ProjectionExpression='#N, version', ExpressionAttributeNames={'#N': 'name'}) return response['Items']
def putSecret(name, secret, version, kms_key='alias/credstash', region='us-east-1', table='credential-store', context=None): '\n put a secret called `name` into the secret-store,\n protected by the key kms_key\n ' if (not context): context = {} kms = boto3.client('kms', region_name=region) try: kms_response = kms.generate_data_key(KeyId=kms_key, EncryptionContext=context, NumberOfBytes=64) except: raise KmsError(('Could not generate key using KMS key %s' % kms_key)) data_key = kms_response['Plaintext'][:32] hmac_key = kms_response['Plaintext'][32:] wrapped_key = kms_response['CiphertextBlob'] enc_ctr = Counter.new(128) encryptor = AES.new(data_key, AES.MODE_CTR, counter=enc_ctr) c_text = encryptor.encrypt(secret) hmac = HMAC(hmac_key, msg=c_text, digestmod=SHA256) b64hmac = hmac.hexdigest() dynamodb = boto3.resource('dynamodb', region_name=region) secrets = dynamodb.Table(table) data = {} data['name'] = name data['version'] = (version if (version != '') else paddedInt(1)) data['key'] = b64encode(wrapped_key).decode('utf-8') data['contents'] = b64encode(c_text).decode('utf-8') data['hmac'] = b64hmac return secrets.put_item(Item=data, ConditionExpression=Attr('name').not_exists())
-7,699,812,481,823,265,000
put a secret called `name` into the secret-store, protected by the key kms_key
credstash.py
putSecret
traveloka/credstash
python
def putSecret(name, secret, version, kms_key='alias/credstash', region='us-east-1', table='credential-store', context=None): '\n put a secret called `name` into the secret-store,\n protected by the key kms_key\n ' if (not context): context = {} kms = boto3.client('kms', region_name=region) try: kms_response = kms.generate_data_key(KeyId=kms_key, EncryptionContext=context, NumberOfBytes=64) except: raise KmsError(('Could not generate key using KMS key %s' % kms_key)) data_key = kms_response['Plaintext'][:32] hmac_key = kms_response['Plaintext'][32:] wrapped_key = kms_response['CiphertextBlob'] enc_ctr = Counter.new(128) encryptor = AES.new(data_key, AES.MODE_CTR, counter=enc_ctr) c_text = encryptor.encrypt(secret) hmac = HMAC(hmac_key, msg=c_text, digestmod=SHA256) b64hmac = hmac.hexdigest() dynamodb = boto3.resource('dynamodb', region_name=region) secrets = dynamodb.Table(table) data = {} data['name'] = name data['version'] = (version if (version != ) else paddedInt(1)) data['key'] = b64encode(wrapped_key).decode('utf-8') data['contents'] = b64encode(c_text).decode('utf-8') data['hmac'] = b64hmac return secrets.put_item(Item=data, ConditionExpression=Attr('name').not_exists())
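A usage sketch, assuming AWS credentials are configured and both the default 'credential-store' table and a KMS key aliased 'credstash' already exist (the secret name is illustrative):

# Passing version='' stores paddedInt(1); the ConditionExpression makes
# the call fail if an item with this name already exists.
putSecret('db.password', 's3cr3t', '', region='us-east-1')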
def getAllSecrets(version='', region='us-east-1', table='credential-store', context=None): '\n fetch and decrypt all secrets\n ' output = {} secrets = listSecrets(region, table) for credential in set([x['name'] for x in secrets]): try: output[credential] = getSecret(credential, version, region, table, context) except: pass return output
7,797,601,393,189,596,000
fetch and decrypt all secrets
credstash.py
getAllSecrets
traveloka/credstash
python
def getAllSecrets(version=, region='us-east-1', table='credential-store', context=None): '\n \n ' output = {} secrets = listSecrets(region, table) for credential in set([x['name'] for x in secrets]): try: output[credential] = getSecret(credential, version, region, table, context) except: pass return output
def getSecret(name, version='', region='us-east-1', table='credential-store', context=None): '\n fetch and decrypt the secret called `name`\n ' if (not context): context = {} dynamodb = boto3.resource('dynamodb', region_name=region) secrets = dynamodb.Table(table) if (version == ''): response = secrets.query(Limit=1, ScanIndexForward=False, ConsistentRead=True, KeyConditionExpression=boto3.dynamodb.conditions.Key('name').eq(name)) if (response['Count'] == 0): raise ItemNotFound(("Item {'name': '%s'} couldn't be found." % name)) material = response['Items'][0] else: response = secrets.get_item(Key={'name': name, 'version': version}) if ('Item' not in response): raise ItemNotFound(("Item {'name': '%s', 'version': '%s'} couldn't be found." % (name, version))) material = response['Item'] kms = boto3.client('kms', region_name=region) try: kms_response = kms.decrypt(CiphertextBlob=b64decode(material['key']), EncryptionContext=context) except botocore.exceptions.ClientError as e: if (e.response['Error']['Code'] == 'InvalidCiphertextException'): if (context is None): msg = 'Could not decrypt hmac key with KMS. The credential may require that an encryption context be provided to decrypt it.' else: msg = 'Could not decrypt hmac key with KMS. The encryption context provided may not match the one used when the credential was stored.' else: msg = ('Decryption error %s' % e) raise KmsError(msg) except Exception as e: raise KmsError(('Decryption error %s' % e)) key = kms_response['Plaintext'][:32] hmac_key = kms_response['Plaintext'][32:] hmac = HMAC(hmac_key, msg=b64decode(material['contents']), digestmod=SHA256) if (hmac.hexdigest() != material['hmac']): raise IntegrityError(('Computed HMAC on %s does not match stored HMAC' % name)) dec_ctr = Counter.new(128) decryptor = AES.new(key, AES.MODE_CTR, counter=dec_ctr) plaintext = decryptor.decrypt(b64decode(material['contents'])).decode('utf-8') return plaintext
622,606,273,363,065,900
fetch and decrypt the secret called `name`
credstash.py
getSecret
traveloka/credstash
python
def getSecret(name, version=, region='us-east-1', table='credential-store', context=None): '\n \n ' if (not context): context = {} dynamodb = boto3.resource('dynamodb', region_name=region) secrets = dynamodb.Table(table) if (version == ): response = secrets.query(Limit=1, ScanIndexForward=False, ConsistentRead=True, KeyConditionExpression=boto3.dynamodb.conditions.Key('name').eq(name)) if (response['Count'] == 0): raise ItemNotFound(("Item {'name': '%s'} couldn't be found." % name)) material = response['Items'][0] else: response = secrets.get_item(Key={'name': name, 'version': version}) if ('Item' not in response): raise ItemNotFound(("Item {'name': '%s', 'version': '%s'} couldn't be found." % (name, version))) material = response['Item'] kms = boto3.client('kms', region_name=region) try: kms_response = kms.decrypt(CiphertextBlob=b64decode(material['key']), EncryptionContext=context) except botocore.exceptions.ClientError as e: if (e.response['Error']['Code'] == 'InvalidCiphertextException'): if (context is None): msg = 'Could not decrypt hmac key with KMS. The credential may require that an encryption context be provided to decrypt it.' else: msg = 'Could not decrypt hmac key with KMS. The encryption context provided may not match the one used when the credential was stored.' else: msg = ('Decryption error %s' % e) raise KmsError(msg) except Exception as e: raise KmsError(('Decryption error %s' % e)) key = kms_response['Plaintext'][:32] hmac_key = kms_response['Plaintext'][32:] hmac = HMAC(hmac_key, msg=b64decode(material['contents']), digestmod=SHA256) if (hmac.hexdigest() != material['hmac']): raise IntegrityError(('Computed HMAC on %s does not match stored HMAC' % name)) dec_ctr = Counter.new(128) decryptor = AES.new(key, AES.MODE_CTR, counter=dec_ctr) plaintext = decryptor.decrypt(b64decode(material['contents'])).decode('utf-8') return plaintext
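The matching read path, under the same assumptions; with version='' the descending query with Limit=1 returns the highest stored version:

# Fetch and decrypt the latest version of the secret stored above.
plaintext = getSecret('db.password', region='us-east-1')
print(plaintext)  # 's3cr3t'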
def createDdbTable(region='us-east-1', table='credential-store'): '\n create the secret store table in DDB in the specified region\n ' dynamodb = boto3.resource('dynamodb', region_name=region) if (table in (t.name for t in dynamodb.tables.all())): print('Credential Store table already exists') return print('Creating table...') response = dynamodb.create_table(TableName=table, KeySchema=[{'AttributeName': 'name', 'KeyType': 'HASH'}, {'AttributeName': 'version', 'KeyType': 'RANGE'}], AttributeDefinitions=[{'AttributeName': 'name', 'AttributeType': 'S'}, {'AttributeName': 'version', 'AttributeType': 'S'}], ProvisionedThroughput={'ReadCapacityUnits': 1, 'WriteCapacityUnits': 1}) print('Waiting for table to be created...') client = boto3.client('dynamodb', region_name=region) client.get_waiter('table_exists').wait(TableName=table) print('Table has been created. Go read the README about how to create your KMS key')
5,070,826,915,824,553,000
create the secret store table in DDB in the specified region
credstash.py
createDdbTable
traveloka/credstash
python
def createDdbTable(region='us-east-1', table='credential-store'): '\n \n ' dynamodb = boto3.resource('dynamodb', region_name=region) if (table in (t.name for t in dynamodb.tables.all())): print('Credential Store table already exists') return print('Creating table...') response = dynamodb.create_table(TableName=table, KeySchema=[{'AttributeName': 'name', 'KeyType': 'HASH'}, {'AttributeName': 'version', 'KeyType': 'RANGE'}], AttributeDefinitions=[{'AttributeName': 'name', 'AttributeType': 'S'}, {'AttributeName': 'version', 'AttributeType': 'S'}], ProvisionedThroughput={'ReadCapacityUnits': 1, 'WriteCapacityUnits': 1}) print('Waiting for table to be created...') client = boto3.client('dynamodb', region_name=region) client.get_waiter('table_exists').wait(TableName=table) print('Table has been created. Go read the README about how to create your KMS key')
def make_layer(self, out_channels, num_blocks, stride, expand_ratio): 'Stack InvertedResidual blocks to build a layer for MobileNetV2.\n\n Args:\n out_channels (int): out_channels of block.\n num_blocks (int): number of blocks.\n stride (int): stride of the first block. Default: 1\n expand_ratio (int): Expand the number of channels of the\n hidden layer in InvertedResidual by this ratio. Default: 6.\n ' layers = [] for i in range(num_blocks): if (i >= 1): stride = 1 layers.append(InvertedResidual(self.in_channels, out_channels, stride, expand_ratio=expand_ratio, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, act_cfg=self.act_cfg, with_cp=self.with_cp)) self.in_channels = out_channels return nn.Sequential(*layers)
6,643,845,954,223,003,000
Stack InvertedResidual blocks to build a layer for MobileNetV2. Args: out_channels (int): out_channels of block. num_blocks (int): number of blocks. stride (int): stride of the first block. Default: 1 expand_ratio (int): Expand the number of channels of the hidden layer in InvertedResidual by this ratio. Default: 6.
mmcls/models/backbones/mobilenet_v2.py
make_layer
ChaseMonsterAway/mmclassification
python
def make_layer(self, out_channels, num_blocks, stride, expand_ratio): 'Stack InvertedResidual blocks to build a layer for MobileNetV2.\n\n Args:\n out_channels (int): out_channels of block.\n num_blocks (int): number of blocks.\n stride (int): stride of the first block. Default: 1\n expand_ratio (int): Expand the number of channels of the\n hidden layer in InvertedResidual by this ratio. Default: 6.\n ' layers = [] for i in range(num_blocks): if (i >= 1): stride = 1 layers.append(InvertedResidual(self.in_channels, out_channels, stride, expand_ratio=expand_ratio, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, act_cfg=self.act_cfg, with_cp=self.with_cp)) self.in_channels = out_channels return nn.Sequential(*layers)
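A standalone sketch of the same stacking rule, stride applied only to the first block, using a plain convolution as a stand-in for InvertedResidual:

import torch.nn as nn

def make_layer_sketch(in_channels, out_channels, num_blocks, stride):
    # Only block 0 gets the requested stride (e.g. for downsampling);
    # subsequent blocks use stride 1 and the already-updated channel count.
    layers = []
    for i in range(num_blocks):
        s = stride if i == 0 else 1
        layers.append(nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=s, padding=1))
        in_channels = out_channels
    return nn.Sequential(*layers)

layer = make_layer_sketch(32, 64, num_blocks=3, stride=2)  # downsamples once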
def __init__(self, code): "Initialize a PDBFile object with a pdb file of interest\n\n Parameters\n ----------\n code : the pdb code of interest\n Any valid PDB code can be passed into PDBFile.\n\n Examples\n --------\n >>> pdb_file = PDBFile('1rcy') \n \n " self.code = code.lower()
835,532,312,311,867,000
Initialize a PDBFile object with a pdb file of interest

Parameters
----------
code : the pdb code of interest
    Any valid PDB code can be passed into PDBFile.

Examples
--------
>>> pdb_file = PDBFile('1rcy')
scalene-triangle/libs/PDB_filegetter.py
__init__
dsw7/BridgingInteractions
python
def __init__(self, code): "Initialize a PDBFile object with a pdb file of interest\n\n Parameters\n ----------\n code : the pdb code of interest\n Any valid PDB code can be passed into PDBFile.\n\n Examples\n --------\n >>> pdb_file = PDBFile('1rcy') \n \n " self.code = code.lower()
def fetch_from_PDB(self): "\n Connects to PDB FTP server, downloads a .gz file of interest,\n decompresses the .gz file into .ent and then dumps a copy of\n the pdb{code}.ent file into cwd.\n\n Parameters\n ----------\n None\n\n Examples\n --------\n \n >>> inst = PDBFile('1rcy')\n >>> path_to_file = inst.fetch_from_PDB()\n >>> print(path_to_file)\n \n " subdir = self.code[1:3] infile = 'pdb{}.ent.gz'.format(self.code) decompressed = infile.strip('.gz') fullpath = ROOT.format(subdir, infile) try: urlcleanup() urlretrieve(fullpath, infile) except Exception: return 'URLError' else: with gzip.open(infile, 'rb') as gz: with open(decompressed, 'wb') as out: out.writelines(gz) remove(infile) return path.join(getcwd(), decompressed)
5,381,435,870,021,593,000
Connects to PDB FTP server, downloads a .gz file of interest, decompresses the .gz file into .ent and then dumps a copy of the pdb{code}.ent file into cwd. Parameters ---------- None Examples -------- >>> inst = PDBFile('1rcy') >>> path_to_file = inst.fetch_from_PDB() >>> print(path_to_file)
scalene-triangle/libs/PDB_filegetter.py
fetch_from_PDB
dsw7/BridgingInteractions
python
def fetch_from_PDB(self): "\n Connects to PDB FTP server, downloads a .gz file of interest,\n decompresses the .gz file into .ent and then dumps a copy of\n the pdb{code}.ent file into cwd.\n\n Parameters\n ----------\n None\n\n Examples\n --------\n \n >>> inst = PDBFile('1rcy')\n >>> path_to_file = inst.fetch_from_PDB()\n >>> print(path_to_file)\n \n " subdir = self.code[1:3] infile = 'pdb{}.ent.gz'.format(self.code) decompressed = infile.strip('.gz') fullpath = ROOT.format(subdir, infile) try: urlcleanup() urlretrieve(fullpath, infile) except Exception: return 'URLError' else: with gzip.open(infile, 'rb') as gz: with open(decompressed, 'wb') as out: out.writelines(gz) remove(infile) return path.join(getcwd(), decompressed)
def clear(self): "\n Deletes file from current working directory after the file has\n been processed by some algorithm.\n\n Parameters\n ----------\n None\n\n Examples\n --------\n >>> inst = PDBFile('1rcy')\n >>> path_to_file = inst.fetch_from_PDB()\n >>> print(path_to_file) # process the file using some algorithm\n >>> inst.clear()\n \n " filename = 'pdb{}.ent'.format(self.code) try: remove(path.join(getcwd(), filename)) except FileNotFoundError: print('Cannot delete file. Does not exist.')
8,477,879,807,243,158,000
Deletes file from current working directory after the file has been processed by some algorithm. Parameters ---------- None Examples -------- >>> inst = PDBFile('1rcy') >>> path_to_file = inst.fetch_from_PDB() >>> print(path_to_file) # process the file using some algorithm >>> inst.clear()
scalene-triangle/libs/PDB_filegetter.py
clear
dsw7/BridgingInteractions
python
def clear(self): "\n Deletes file from current working directory after the file has\n been processed by some algorithm.\n\n Parameters\n ----------\n None\n\n Examples\n --------\n >>> inst = PDBFile('1rcy')\n >>> path_to_file = inst.fetch_from_PDB()\n >>> print(path_to_file) # process the file using some algorithm\n >>> inst.clear()\n \n " filename = 'pdb{}.ent'.format(self.code) try: remove(path.join(getcwd(), filename)) except FileNotFoundError: print('Cannot delete file. Does not exist.')
def gen_captcha_text_image(self, img_name): '\n 返回一个验证码的array形式和对应的字符串标签\n :return:tuple (str, numpy.array)\n ' label = img_name.split('_')[0] img_file = os.path.join(self.img_path, img_name) captcha_image = Image.open(img_file) captcha_array = np.array(captcha_image) return (label, captcha_array)
7,944,805,907,609,061,000
Return a captcha image as a numpy array together with its string label
:return: tuple (str, numpy.array)
train_model.py
gen_captcha_text_image
shineyjg/cnn_captcha
python
def gen_captcha_text_image(self, img_name): '\n Return a captcha image as a numpy array together with its string label\n :return: tuple (str, numpy.array)\n ' label = img_name.split('_')[0] img_file = os.path.join(self.img_path, img_name) captcha_image = Image.open(img_file) captcha_array = np.array(captcha_image) return (label, captcha_array)
@staticmethod def convert2gray(img): '\n 图片转为灰度图,如果是3通道图则计算,单通道图则直接返回\n :param img:\n :return:\n ' if (len(img.shape) > 2): (r, g, b) = (img[:, :, 0], img[:, :, 1], img[:, :, 2]) gray = (((0.2989 * r) + (0.587 * g)) + (0.114 * b)) return gray else: return img
611,634,753,502,825,900
Convert an image to grayscale: compute it for 3-channel images, return single-channel images directly
:param img:
:return:
train_model.py
convert2gray
shineyjg/cnn_captcha
python
@staticmethod def convert2gray(img): '\n Convert an image to grayscale: compute it for 3-channel images, return single-channel images directly\n :param img:\n :return:\n ' if (len(img.shape) > 2): (r, g, b) = (img[:, :, 0], img[:, :, 1], img[:, :, 2]) gray = (((0.2989 * r) + (0.587 * g)) + (0.114 * b)) return gray else: return img
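The weights are the ITU-R BT.601 luma coefficients; a standalone copy for a quick check:

import numpy as np

def convert2gray(img):
    # Same logic as the staticmethod above, copied for a runnable example.
    if len(img.shape) > 2:
        r, g, b = img[:, :, 0], img[:, :, 1], img[:, :, 2]
        return 0.2989 * r + 0.587 * g + 0.114 * b
    return img

print(convert2gray(np.array([[[1.0, 0.0, 0.0]]])))  # [[0.2989]] for pure red
print(convert2gray(np.eye(2)))                      # 2D input passes through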
def text2vec(self, text): '\n 转标签为oneHot编码\n :param text: str\n :return: numpy.array\n ' text_len = len(text) if (text_len > self.max_captcha): raise ValueError('验证码最长{}个字符'.format(self.max_captcha)) vector = np.zeros((self.max_captcha * self.char_set_len)) for (i, ch) in enumerate(text): idx = ((i * self.char_set_len) + self.char_set.index(ch)) vector[idx] = 1 return vector
-1,980,550,115,108,716,800
Convert a label to its one-hot encoding
:param text: str
:return: numpy.array
train_model.py
text2vec
shineyjg/cnn_captcha
python
def text2vec(self, text): '\n Convert a label to its one-hot encoding\n :param text: str\n :return: numpy.array\n ' text_len = len(text) if (text_len > self.max_captcha): raise ValueError('Captcha text is at most {} characters'.format(self.max_captcha)) vector = np.zeros((self.max_captcha * self.char_set_len)) for (i, ch) in enumerate(text): idx = ((i * self.char_set_len) + self.char_set.index(ch)) vector[idx] = 1 return vector
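A standalone sketch of the same one-hot layout, assuming a 4-character captcha over the digit alphabet:

import numpy as np

char_set = list('0123456789')  # assumed alphabet
max_captcha, char_set_len = 4, len(char_set)

def text2vec_sketch(text):
    # Character i occupies slots [i*char_set_len, (i+1)*char_set_len);
    # exactly one slot per character is set to 1.
    vector = np.zeros(max_captcha * char_set_len)
    for i, ch in enumerate(text):
        vector[i * char_set_len + char_set.index(ch)] = 1
    return vector

print(text2vec_sketch('2019').reshape(max_captcha, char_set_len))  # one 1 per row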
def get_converter(from_unit, to_unit): 'Like Unit._get_converter, except returns None if no scaling is needed,\n i.e., if the inferred scale is unity.' try: scale = from_unit._to(to_unit) except UnitsError: return from_unit._apply_equivalencies(from_unit, to_unit, get_current_unit_registry().equivalencies) except AttributeError: raise UnitTypeError("Unit '{0}' cannot be converted to '{1}'".format(from_unit, to_unit)) if (scale == 1.0): return None else: return (lambda val: (scale * val))
6,356,987,915,934,134,000
Like Unit._get_converter, except returns None if no scaling is needed, i.e., if the inferred scale is unity.
astropy/units/quantity_helper/helpers.py
get_converter
PriyankaH21/astropy
python
def get_converter(from_unit, to_unit): 'Like Unit._get_converter, except returns None if no scaling is needed,\n i.e., if the inferred scale is unity.' try: scale = from_unit._to(to_unit) except UnitsError: return from_unit._apply_equivalencies(from_unit, to_unit, get_current_unit_registry().equivalencies) except AttributeError: raise UnitTypeError("Unit '{0}' cannot be converted to '{1}'".format(from_unit, to_unit)) if (scale == 1.0): return None else: return (lambda val: (scale * val))
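A hedged usage sketch; the import path follows the file shown above and is internal astropy API, so it may move between versions:

from astropy import units as u
from astropy.units.quantity_helper.helpers import get_converter

converter = get_converter(u.km, u.m)  # scale 1000 -> a callable is returned
print(converter(2.5))                 # 2500.0
print(get_converter(u.m, u.m))        # scale is unity -> None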
def _raw_fetch(url, logger): '\n Fetch remote data and return the text output.\n\n :param url: The URL to fetch the data from\n :param logger: A logger instance to use.\n :return: Raw text data, None otherwise\n ' ret_data = None try: req = requests.get(url) if (req.status_code == requests.codes.ok): ret_data = req.text except requests.exceptions.ConnectionError as error: logger.warning(error.request) return ret_data
-894,493,403,224,933,800
Fetch remote data and return the text output. :param url: The URL to fetch the data from :param logger: A logger instance to use. :return: Raw text data, None otherwise
atkinson/dlrn/http_data.py
_raw_fetch
jpichon/atkinson
python
def _raw_fetch(url, logger): '\n Fetch remote data and return the text output.\n\n :param url: The URL to fetch the data from\n :param logger: A logger instance to use.\n :return: Raw text data, None otherwise\n ' ret_data = None try: req = requests.get(url) if (req.status_code == requests.codes.ok): ret_data = req.text except requests.exceptions.ConnectionError as error: logger.warning(error.request) return ret_data
def _fetch_yaml(url, logger): '\n Fetch remote data and process the text as yaml.\n\n :param url: The URL to fetch the data from\n :param logger: A logger instance to use.\n :return: Parsed yaml data in the form of a dictionary\n ' ret_data = None raw_data = _raw_fetch(url, logger) if (raw_data is not None): ret_data = yaml.safe_load(raw_data) return ret_data
-3,088,369,978,945,365,500
Fetch remote data and process the text as yaml. :param url: The URL to fetch the data from :param logger: A logger instance to use. :return: Parsed yaml data in the form of a dictionary
atkinson/dlrn/http_data.py
_fetch_yaml
jpichon/atkinson
python
def _fetch_yaml(url, logger): '\n Fetch remote data and process the text as yaml.\n\n :param url: The URL to fetch the data from\n :param logger: A logger instance to use.\n :return: Parsed yaml data in the form of a dictionary\n ' ret_data = None raw_data = _raw_fetch(url, logger) if (raw_data is not None): ret_data = yaml.safe_load(raw_data) return ret_data
def dlrn_http_factory(host, config_file=None, link_name=None, logger=getLogger()): '\n Create a DlrnData instance based on a host.\n\n :param host: A host name string to build instances\n :param config_file: A dlrn config file(s) to use in addition to\n the default.\n :param link_name: A dlrn symlink to use. This overrides the config files\n link parameter.\n :param logger: An atkinson logger to use. Default is the base logger.\n :return: A DlrnData instance\n ' manager = None files = ['dlrn.yml'] if (config_file is not None): if isinstance(config_file, list): files.extend(config_file) else: files.append(config_file) local_path = os.path.realpath(os.path.dirname(__file__)) manager = ConfigManager(filenames=files, paths=local_path) if (manager is None): return None config = manager.config if (host not in config): return None link = config[host]['link'] if (link_name is not None): link = link_name return DlrnHttpData(config[host]['url'], config[host]['release'], link_name=link, logger=logger)
-4,437,842,762,096,356,400
Create a DlrnData instance based on a host. :param host: A host name string to build instances :param config_file: A dlrn config file(s) to use in addition to the default. :param link_name: A dlrn symlink to use. This overrides the config files link parameter. :param logger: An atkinson logger to use. Default is the base logger. :return: A DlrnData instance
atkinson/dlrn/http_data.py
dlrn_http_factory
jpichon/atkinson
python
def dlrn_http_factory(host, config_file=None, link_name=None, logger=getLogger()): '\n Create a DlrnData instance based on a host.\n\n :param host: A host name string to build instances\n :param config_file: A dlrn config file(s) to use in addition to\n the default.\n :param link_name: A dlrn symlink to use. This overrides the config files\n link parameter.\n :param logger: An atkinson logger to use. Default is the base logger.\n :return: A DlrnData instance\n ' manager = None files = ['dlrn.yml'] if (config_file is not None): if isinstance(config_file, list): files.extend(config_file) else: files.append(config_file) local_path = os.path.realpath(os.path.dirname(__file__)) manager = ConfigManager(filenames=files, paths=local_path) if (manager is None): return None config = manager.config if (host not in config): return None link = config[host]['link'] if (link_name is not None): link = link_name return DlrnHttpData(config[host]['url'], config[host]['release'], link_name=link, logger=logger)
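A usage sketch; 'centos-master' is a hypothetical host key that would have to exist in the dlrn.yml configuration:

# Build a DlrnHttpData for a configured host, overriding its symlink;
# the factory returns None when the host key is missing from the config.
dlrn = dlrn_http_factory('centos-master', link_name='consistent')
if dlrn is not None:
    print(dlrn.commit)         # name, dist-git hash, commit hash, extended hash
    print(len(dlrn.versions))  # one entry per package in versions.csv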
def __init__(self, url, release, link_name='current', logger=getLogger()): '\n Class constructor\n\n :param url: The URL to the host to obtain data.\n :param releases: The release name to use for lookup.\n :param link_name: The name of the dlrn symlink to fetch data from.\n :param logger: An atkinson logger to use. Default is the base logger.\n ' self.url = os.path.join(url, release) self.release = release self._logger = logger self._link_name = link_name self._commit_data = {} self._fetch_commit()
-1,853,492,324,126,466,600
Class constructor

:param url: The URL to the host to obtain data.
:param release: The release name to use for lookup.
:param link_name: The name of the dlrn symlink to fetch data from.
:param logger: An atkinson logger to use. Default is the base logger.
atkinson/dlrn/http_data.py
__init__
jpichon/atkinson
python
def __init__(self, url, release, link_name='current', logger=getLogger()): '\n Class constructor\n\n :param url: The URL to the host to obtain data.\n :param release: The release name to use for lookup.\n :param link_name: The name of the dlrn symlink to fetch data from.\n :param logger: An atkinson logger to use. Default is the base logger.\n ' self.url = os.path.join(url, release) self.release = release self._logger = logger self._link_name = link_name self._commit_data = {} self._fetch_commit()
def _fetch_commit(self): '\n Fetch the commit data from dlrn\n ' full_url = os.path.join(self.url, self._link_name, 'commit.yaml') data = _fetch_yaml(full_url, self._logger) if ((data is not None) and ('commits' in data)): pkg = data['commits'][0] if (pkg['status'] == 'SUCCESS'): self._commit_data = {'name': pkg['project_name'], 'dist_hash': pkg['distro_hash'], 'commit_hash': pkg['commit_hash'], 'extended_hash': pkg.get('extended_hash')} else: msg = '{0} has a status of error'.format(str(pkg)) self._logger.warning(msg)
6,997,459,630,592,828,000
Fetch the commit data from dlrn
atkinson/dlrn/http_data.py
_fetch_commit
jpichon/atkinson
python
def _fetch_commit(self): '\n \n ' full_url = os.path.join(self.url, self._link_name, 'commit.yaml') data = _fetch_yaml(full_url, self._logger) if ((data is not None) and ('commits' in data)): pkg = data['commits'][0] if (pkg['status'] == 'SUCCESS'): self._commit_data = {'name': pkg['project_name'], 'dist_hash': pkg['distro_hash'], 'commit_hash': pkg['commit_hash'], 'extended_hash': pkg.get('extended_hash')} else: msg = '{0} has a status of error'.format(str(pkg)) self._logger.warning(msg)
def _build_url(self): '\n Generate a url given a commit hash and distgit hash to match the format\n base/AB/CD/ABCD123_XYZ987 where ABCD123 is the commit hash and XYZ987\n is a portion of the distgit hash.\n\n :return: A string with the full URL.\n ' first = self._commit_data['commit_hash'][0:2] second = self._commit_data['commit_hash'][2:4] third = self._commit_data['commit_hash'] for key in ['dist_hash', 'extended_hash']: if (self._commit_data.get(key, 'None') != 'None'): third += ('_' + self._commit_data[key][0:8]) return os.path.join(self.url, first, second, third)
-3,125,452,940,105,935,000
Generate a url given a commit hash and distgit hash to match the format base/AB/CD/ABCD123_XYZ987 where ABCD123 is the commit hash and XYZ987 is a portion of the distgit hash. :return: A string with the full URL.
atkinson/dlrn/http_data.py
_build_url
jpichon/atkinson
python
def _build_url(self): '\n Generate a url given a commit hash and distgit hash to match the format\n base/AB/CD/ABCD123_XYZ987 where ABCD123 is the commit hash and XYZ987\n is a portion of the distgit hash.\n\n :return: A string with the full URL.\n ' first = self._commit_data['commit_hash'][0:2] second = self._commit_data['commit_hash'][2:4] third = self._commit_data['commit_hash'] for key in ['dist_hash', 'extended_hash']: if (self._commit_data.get(key, 'None') != 'None'): third += ('_' + self._commit_data[key][0:8]) return os.path.join(self.url, first, second, third)
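A worked example of the layout with hypothetical hashes:

commit_hash = 'abcd1234ef567890'  # hypothetical
dist_hash = 'xyz9876543210fed'    # hypothetical

# base/AB/CD/<commit hash>_<first 8 chars of dist hash>
third = commit_hash + '_' + dist_hash[0:8]
print('/'.join(['base', commit_hash[0:2], commit_hash[2:4], third]))
# -> base/ab/cd/abcd1234ef567890_xyz98765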
@property def commit(self): '\n Get the dlrn commit information\n\n :return: A dictionary of name, dist-git hash, commit hash and\n extended hash.\n An empty dictionary is returned if no successful commit was found.\n ' return self._commit_data
-1,729,170,792,126,949,000
Get the dlrn commit information

:return: A dictionary of name, dist-git hash, commit hash and
         extended hash.
         An empty dictionary is returned if no successful commit was found.
atkinson/dlrn/http_data.py
commit
jpichon/atkinson
python
@property def commit(self): '\n Get the dlrn commit information\n\n :return: A dictionary of name, dist-git hash, commit hash and\n extended hash.\n An empty dictionary is returned if no successful commit was found.\n ' return self._commit_data
@property def versions(self): '\n Get the version data for the versions.csv file and return the\n data in a dictionary\n\n :return: A dictionary of packages with commit and dist-git hashes\n ' ret_dict = {} full_url = os.path.join(self._build_url(), 'versions.csv') data = _raw_fetch(full_url, self._logger) if (data is not None): data = data.replace(' ', '_') split_data = data.split() reader = csv.DictReader(split_data) for row in reader: ret_dict[row['Project']] = {'source': row['Source_Sha'], 'state': row['Status'], 'distgit': row['Dist_Sha'], 'nvr': row['Pkg_NVR']} else: msg = 'Could not fetch {0}'.format(full_url) self._logger.error(msg) return ret_dict
-7,811,259,190,884,229,000
Get the version data from the versions.csv file and return the
data in a dictionary

:return: A dictionary of packages with commit and dist-git hashes
atkinson/dlrn/http_data.py
versions
jpichon/atkinson
python
@property def versions(self): '\n Get the version data from the versions.csv file and return the\n data in a dictionary\n\n :return: A dictionary of packages with commit and dist-git hashes\n ' ret_dict = {} full_url = os.path.join(self._build_url(), 'versions.csv') data = _raw_fetch(full_url, self._logger) if (data is not None): data = data.replace(' ', '_') split_data = data.split() reader = csv.DictReader(split_data) for row in reader: ret_dict[row['Project']] = {'source': row['Source_Sha'], 'state': row['Status'], 'distgit': row['Dist_Sha'], 'nvr': row['Pkg_NVR']} else: msg = 'Could not fetch {0}'.format(full_url) self._logger.error(msg) return ret_dict
def compute_train_val_test(X, y, model, scale=False, test_size=0.2, time_series=False, random_state=123, n_folds=5, regr=True): "\n Compute the training-validation-test scores for the given model on the given dataset.\n\n The training and test scores are simply computed by splitting the dataset into the training and test sets. The validation\n score is performed applying the cross validation on the training set.\n\n Parameters\n ----------\n X: np.array\n Two-dimensional np.array, containing the explanatory features of the dataset.\n y: np.array\n Mono dimensional np.array, containing the response feature of the dataset.\n model: sklearn.base.BaseEstimator\n Model to evaluate.\n scale: bool\n Indicates whether to scale or not the features in `X`.\n (The scaling is performed using the sklearn MinMaxScaler).\n test_size: float\n Decimal number between 0 and 1, which indicates the proportion of the test set.\n time_series: bool\n Indicates if the given dataset is a time series dataset (i.e. datasets indexed by days).\n (This affects the computing of the scores).\n random_state: int\n Used in the training-test splitting of the dataset.\n n_folds: int\n Indicates how many folds are made in order to compute the k-fold cross validation.\n (It's used only if `time_series` is False).\n regr: bool\n Indicates if it's either a regression or a classification problem.\n\n Returns\n ----------\n train_score: float\n val_score: float\n test_score: float\n\n Notes\n ----------\n - If `regr` is True, the returned scores are errors, computed using the MSE formula (i.e. Mean Squared Error).\n Otherwise, the returned scores are accuracy measures.\n - If `time_series` is False, the training-test splitting of the dataset is made randomly. In addition, the cross\n validation strategy performed is the classic k-fold cross validation: the number of folds is specified by `n_folds`.\n Otherwise, if `time_series` is True, the training-test sets are obtained simply by splitting the dataset into two\n contiguous parts. In addition, the cross validation strategy performed is the sklearn TimeSeriesSplit.\n " if regr: scoring = 'neg_mean_squared_error' else: scoring = 'accuracy' if (not time_series): (X_train_80, X_test, y_train_80, y_test) = train_test_split(X, y, test_size=test_size, random_state=random_state) else: train_len = int((X.shape[0] * (1 - test_size))) X_train_80 = X[:train_len] y_train_80 = y[:train_len] X_test = X[train_len:] y_test = y[train_len:] if scale: scaler = MinMaxScaler() scaler.fit(X_train_80) X_train_80 = scaler.transform(X_train_80) X_test = scaler.transform(X_test) if (not time_series): cv = n_folds else: cv = TimeSeriesSplit(n_splits=n_folds) scores = cross_val_score(model, X_train_80, y_train_80, cv=cv, scoring=scoring) val_score = scores.mean() if regr: val_score = (- val_score) model.fit(X_train_80, y_train_80) train_score = 0 test_score = 0 if regr: train_score = mean_squared_error(y_true=y_train_80, y_pred=model.predict(X_train_80)) test_score = mean_squared_error(y_true=y_test, y_pred=model.predict(X_test)) else: train_score = accuracy_score(y_true=y_train_80, y_pred=model.predict(X_train_80)) test_score = accuracy_score(y_true=y_test, y_pred=model.predict(X_test)) return (train_score, val_score, test_score)
-5,066,045,042,697,431,000
Compute the training-validation-test scores for the given model on the given dataset. The training and test scores are simply computed by splitting the dataset into the training and test sets. The validation score is performed applying the cross validation on the training set. Parameters ---------- X: np.array Two-dimensional np.array, containing the explanatory features of the dataset. y: np.array Mono dimensional np.array, containing the response feature of the dataset. model: sklearn.base.BaseEstimator Model to evaluate. scale: bool Indicates whether to scale or not the features in `X`. (The scaling is performed using the sklearn MinMaxScaler). test_size: float Decimal number between 0 and 1, which indicates the proportion of the test set. time_series: bool Indicates if the given dataset is a time series dataset (i.e. datasets indexed by days). (This affects the computing of the scores). random_state: int Used in the training-test splitting of the dataset. n_folds: int Indicates how many folds are made in order to compute the k-fold cross validation. (It's used only if `time_series` is False). regr: bool Indicates if it's either a regression or a classification problem. Returns ---------- train_score: float val_score: float test_score: float Notes ---------- - If `regr` is True, the returned scores are errors, computed using the MSE formula (i.e. Mean Squared Error). Otherwise, the returned scores are accuracy measures. - If `time_series` is False, the training-test splitting of the dataset is made randomly. In addition, the cross validation strategy performed is the classic k-fold cross validation: the number of folds is specified by `n_folds`. Otherwise, if `time_series` is True, the training-test sets are obtained simply by splitting the dataset into two contiguous parts. In addition, the cross validation strategy performed is the sklearn TimeSeriesSplit.
model_selection.py
compute_train_val_test
EnricoPittini/model-selection
python
def compute_train_val_test(X, y, model, scale=False, test_size=0.2, time_series=False, random_state=123, n_folds=5, regr=True): "\n Compute the training-validation-test scores for the given model on the given dataset.\n\n The training and test scores are simply computed by splitting the dataset into the training and test sets. The validation\n score is performed applying the cross validation on the training set.\n\n Parameters\n ----------\n X: np.array\n Two-dimensional np.array, containing the explanatory features of the dataset.\n y: np.array\n Mono dimensional np.array, containing the response feature of the dataset.\n model: sklearn.base.BaseEstimator\n Model to evaluate.\n scale: bool\n Indicates whether to scale or not the features in `X`.\n (The scaling is performed using the sklearn MinMaxScaler).\n test_size: float\n Decimal number between 0 and 1, which indicates the proportion of the test set.\n time_series: bool\n Indicates if the given dataset is a time series dataset (i.e. datasets indexed by days).\n (This affects the computing of the scores).\n random_state: int\n Used in the training-test splitting of the dataset.\n n_folds: int\n Indicates how many folds are made in order to compute the k-fold cross validation.\n (It's used only if `time_series` is False).\n regr: bool\n Indicates if it's either a regression or a classification problem.\n\n Returns\n ----------\n train_score: float\n val_score: float\n test_score: float\n\n Notes\n ----------\n - If `regr` is True, the returned scores are errors, computed using the MSE formula (i.e. Mean Squared Error).\n Otherwise, the returned scores are accuracy measures.\n - If `time_series` is False, the training-test splitting of the dataset is made randomly. In addition, the cross\n validation strategy performed is the classic k-fold cross validation: the number of folds is specified by `n_folds`.\n Otherwise, if `time_series` is True, the training-test sets are obtained simply by splitting the dataset into two\n contiguous parts. In addition, the cross validation strategy performed is the sklearn TimeSeriesSplit.\n " if regr: scoring = 'neg_mean_squared_error' else: scoring = 'accuracy' if (not time_series): (X_train_80, X_test, y_train_80, y_test) = train_test_split(X, y, test_size=test_size, random_state=random_state) else: train_len = int((X.shape[0] * (1 - test_size))) X_train_80 = X[:train_len] y_train_80 = y[:train_len] X_test = X[train_len:] y_test = y[train_len:] if scale: scaler = MinMaxScaler() scaler.fit(X_train_80) X_train_80 = scaler.transform(X_train_80) X_test = scaler.transform(X_test) if (not time_series): cv = n_folds else: cv = TimeSeriesSplit(n_splits=n_folds) scores = cross_val_score(model, X_train_80, y_train_80, cv=cv, scoring=scoring) val_score = scores.mean() if regr: val_score = (- val_score) model.fit(X_train_80, y_train_80) train_score = 0 test_score = 0 if regr: train_score = mean_squared_error(y_true=y_train_80, y_pred=model.predict(X_train_80)) test_score = mean_squared_error(y_true=y_test, y_pred=model.predict(X_test)) else: train_score = accuracy_score(y_true=y_train_80, y_pred=model.predict(X_train_80)) test_score = accuracy_score(y_true=y_test, y_pred=model.predict(X_test)) return (train_score, val_score, test_score)
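A usage sketch on a synthetic regression problem, assuming scikit-learn is available:

from sklearn.datasets import make_regression
from sklearn.linear_model import LinearRegression

# Training MSE, 5-fold CV MSE and held-out test MSE for a linear model.
X, y = make_regression(n_samples=200, n_features=5, noise=10.0, random_state=0)
train_mse, val_mse, test_mse = compute_train_val_test(X, y, LinearRegression(), scale=True, regr=True)
print(train_mse, val_mse, test_mse)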
def compute_bias_variance_error(X, y, model, scale=False, N_TESTS=20, sample_size=0.67): '\n Compute the bias^2-variance-error scores for the given model on the given dataset.\n\n These measures are computed in an approximate way, using `N_TESTS` random samples of size `sample_size` from the\n dataset.\n\n Parameters\n ----------\n X: np.array\n Two-dimensional np.array, containing the explanatory features of the dataset.\n y: np.array\n Mono dimensional np.array, containing the response feature of the dataset.\n model: sklearn.base.BaseEstimator\n Model to evaluate.\n scale: bool\n Indicates whether to scale or not the features in `X`.\n (The scaling is performed using the sklearn MinMaxScaler).\n N_TESTS: int\n Number of samples that are made in order to compute the measures.\n sample_size: float\n Decimal number between 0 and 1, which indicates the proportion of the sample.\n\n Returns\n ----------\n bias: float\n variance: float\n error: float\n ' if scale: scaler = MinMaxScaler() scaler.fit(X) X = scaler.transform(X) vector_ypred = [] for i in range(N_TESTS): (Xs, ys) = resample(X, y, n_samples=int((sample_size * len(y)))) model.fit(Xs, ys) vector_ypred.append(list(model.predict(X))) vector_ypred = np.array(vector_ypred) vector_bias = ((y - np.mean(vector_ypred, axis=0)) ** 2) vector_variance = np.var(vector_ypred, axis=0) vector_error = (np.sum(((vector_ypred - y) ** 2), axis=0) / N_TESTS) bias = np.mean(vector_bias) variance = np.mean(vector_variance) error = np.mean(vector_error) return (bias, variance, error)
1,135,176,463,303,326,600
Compute the bias^2-variance-error scores for the given model on the given dataset. These measures are computed in an approximate way, using `N_TESTS` random samples of size `sample_size` from the dataset. Parameters ---------- X: np.array Two-dimensional np.array, containing the explanatory features of the dataset. y: np.array Mono dimensional np.array, containing the response feature of the dataset. model: sklearn.base.BaseEstimator Model to evaluate. scale: bool Indicates whether to scale or not the features in `X`. (The scaling is performed using the sklearn MinMaxScaler). N_TESTS: int Number of samples that are made in order to compute the measures. sample_size: float Decimal number between 0 and 1, which indicates the proportion of the sample. Returns ---------- bias: float variance: float error: float
model_selection.py
compute_bias_variance_error
EnricoPittini/model-selection
python
def compute_bias_variance_error(X, y, model, scale=False, N_TESTS=20, sample_size=0.67): '\n Compute the bias^2-variance-error scores for the given model on the given dataset.\n\n These measures are computed in an approximate way, using `N_TESTS` random samples of size `sample_size` from the\n dataset.\n\n Parameters\n ----------\n X: np.array\n Two-dimensional np.array, containing the explanatory features of the dataset.\n y: np.array\n Mono dimensional np.array, containing the response feature of the dataset.\n model: sklearn.base.BaseEstimator\n Model to evaluate.\n scale: bool\n Indicates whether to scale or not the features in `X`.\n (The scaling is performed using the sklearn MinMaxScaler).\n N_TESTS: int\n Number of samples that are made in order to compute the measures.\n sample_size: float\n Decimal number between 0 and 1, which indicates the proportion of the sample.\n\n Returns\n ----------\n bias: float\n variance: float\n error: float\n ' if scale: scaler = MinMaxScaler() scaler.fit(X) X = scaler.transform(X) vector_ypred = [] for i in range(N_TESTS): (Xs, ys) = resample(X, y, n_samples=int((sample_size * len(y)))) model.fit(Xs, ys) vector_ypred.append(list(model.predict(X))) vector_ypred = np.array(vector_ypred) vector_bias = ((y - np.mean(vector_ypred, axis=0)) ** 2) vector_variance = np.var(vector_ypred, axis=0) vector_error = (np.sum(((vector_ypred - y) ** 2), axis=0) / N_TESTS) bias = np.mean(vector_bias) variance = np.mean(vector_variance) error = np.mean(vector_error) return (bias, variance, error)
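On the same kind of synthetic data the decomposition can be inspected directly; with squared-error loss the bootstrap error approximately tracks bias^2 + variance:

from sklearn.datasets import make_regression
from sklearn.tree import DecisionTreeRegressor

X, y = make_regression(n_samples=200, n_features=5, noise=10.0, random_state=0)
# An unpruned tree typically shows low bias and high variance here.
bias, variance, error = compute_bias_variance_error(X, y, DecisionTreeRegressor(), N_TESTS=20)
print(bias, variance, error)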
def plot_predictions(X, y, model, scale=False, test_size=0.2, plot_type=0, xvalues=None, xlabel='Index', title='Actual vs Predicted values', figsize=(6, 6)): "\n Plot the predictions made by the given model on the given dataset, versus its actual values.\n\n The dataset is split into training-test sets: the former is used to train the `model`, on the latter the predictions are\n made.\n\n Parameters\n ----------\n X: np.array\n Two-dimensional np.array, containing the explanatory features of the dataset.\n y: np.array\n Mono dimensional np.array, containing the response feature of the dataset.\n model: sklearn.base.BaseEstimator\n Model used to make the predictions.\n scale: bool\n Indicates whether to scale or not the features in `X`.\n (The scaling is performed using the sklearn MinMaxScaler).\n test_size: float\n Decimal number between 0 and 1, which indicates the proportion of the test set.\n plot_type: int\n Indicates the type of the plot.\n - 0 -> In the same plot two different curves are drawn: the first has on the x axis `xvalues` and on the y axis\n the actual values (i.e. `y`); the second has on the x axis `xvalues` and on the y axis the computed\n predicted values.\n - 1 -> On the x axis the actual values are put, on the y axis the predicted ones.\n xvalues: list (in general, iterable)\n Values that have to be put in the x axis of the plot.\n (It's used only if `plot_type` is 0).\n xlabel: str\n Label of the x axis of the plot.\n (It's used only if `plot_type` is 0).\n title: str\n Title of the plot.\n figsize: tuple\n Two dimensions of the plot.\n\n Returns\n ----------\n matplotlib.axes.Axes\n The matplotlib Axes where the plot has been made.\n\n Notes\n ----------\n The splitting of the datasets into the training-test sets is simply made by dividing the dataset into two contiguous\n sequences.\n I.e. it is the same technique used usually when the dataset is a time series dataset. (This is done in order to simplify\n the visualization).\n For this reason, typically this function is applied on time series datasets.\n " train_len = int((X.shape[0] * (1 - test_size))) X_train_80 = X[:train_len] y_train_80 = y[:train_len] X_test = X[train_len:] y_test = y[train_len:] if scale: scaler = MinMaxScaler() scaler.fit(X_train_80) X_train_80 = scaler.transform(X_train_80) X_test = scaler.transform(X_test) model.fit(X_train_80, y_train_80) predictions = model.predict(X_test) (fig, ax) = plt.subplots(figsize=figsize) if (plot_type == 0): if (xvalues is None): xvalues = range(len(X)) ax.plot(xvalues, y, 'o:', label='actual values') ax.plot(xvalues[train_len:], predictions, 'o:', label='predicted values') ax.legend() elif (plot_type == 1): ax.plot(y[train_len:], predictions, 'o') ax.plot([0, 1], [0, 1], 'r-', transform=ax.transAxes) xlabel = 'Actual values' ax.set_ylabel('Predicted values') ax.set_xlabel(xlabel) ax.set_title(title) ax.grid() return ax
6,549,853,644,879,781,000
Plot the predictions made by the given model on the given dataset, versus its actual values. The dataset is split into training-test sets: the former is used to train the `model`, on the latter the predictions are made. Parameters ---------- X: np.array Two-dimensional np.array, containing the explanatory features of the dataset. y: np.array Mono dimensional np.array, containing the response feature of the dataset. model: sklearn.base.BaseEstimator Model used to make the predictions. scale: bool Indicates whether to scale or not the features in `X`. (The scaling is performed using the sklearn MinMaxScaler). test_size: float Decimal number between 0 and 1, which indicates the proportion of the test set. plot_type: int Indicates the type of the plot. - 0 -> In the same plot two different curves are drawn: the first has on the x axis `xvalues` and on the y axis the actual values (i.e. `y`); the second has on the x axis `xvalues` and on the y axis the computed predicted values. - 1 -> On the x axis the actual values are put, on the y axis the predicted ones. xvalues: list (in general, iterable) Values that have to be put in the x axis of the plot. (It's used only if `plot_type` is 0). xlabel: str Label of the x axis of the plot. (It's used only if `plot_type` is 0). title: str Title of the plot. figsize: tuple Two dimensions of the plot. Returns ---------- matplotlib.axes.Axes The matplotlib Axes where the plot has been made. Notes ---------- The splitting of the datasets into the training-test sets is simply made by dividing the dataset into two contiguous sequences. I.e. it is the same technique used usually when the dataset is a time series dataset. (This is done in order to simplify the visualization). For this reason, typically this function is applied on time series datasets.
model_selection.py
plot_predictions
EnricoPittini/model-selection
python
def plot_predictions(X, y, model, scale=False, test_size=0.2, plot_type=0, xvalues=None, xlabel='Index', title='Actual vs Predicted values', figsize=(6, 6)): "\n Plot the predictions made by the given model on the given dataset, versus its actual values.\n\n The dataset is split into training-test sets: the former is used to train the `model`, on the latter the predictions are\n made.\n\n Parameters\n ----------\n X: np.array\n Two-dimensional np.array, containing the explanatory features of the dataset.\n y: np.array\n Mono dimensional np.array, containing the response feature of the dataset.\n model: sklearn.base.BaseEstimator\n Model used to make the predictions.\n scale: bool\n Indicates whether to scale or not the features in `X`.\n (The scaling is performed using the sklearn MinMaxScaler).\n test_size: float\n Decimal number between 0 and 1, which indicates the proportion of the test set.\n plot_type: int\n Indicates the type of the plot.\n - 0 -> In the same plot two different curves are drawn: the first has on the x axis `xvalues` and on the y axis\n the actual values (i.e. `y`); the second has on the x axis `xvalues` and on the y axis the computed\n predicted values.\n - 1 -> On the x axis the actual values are put, on the y axis the predicted ones.\n xvalues: list (in general, iterable)\n Values that have to be put in the x axis of the plot.\n (It's used only if `plot_type` is 0).\n xlabel: str\n Label of the x axis of the plot.\n (It's used only if `plot_type` is 0).\n title: str\n Title of the plot.\n figsize: tuple\n Two dimensions of the plot.\n\n Returns\n ----------\n matplotlib.axes.Axes\n The matplotlib Axes where the plot has been made.\n\n Notes\n ----------\n The splitting of the datasets into the training-test sets is simply made by dividing the dataset into two contiguous\n sequences.\n I.e. it is the same technique used usually when the dataset is a time series dataset. (This is done in order to simplify\n the visualization).\n For this reason, typically this function is applied on time series datasets.\n " train_len = int((X.shape[0] * (1 - test_size))) X_train_80 = X[:train_len] y_train_80 = y[:train_len] X_test = X[train_len:] y_test = y[train_len:] if scale: scaler = MinMaxScaler() scaler.fit(X_train_80) X_train_80 = scaler.transform(X_train_80) X_test = scaler.transform(X_test) model.fit(X_train_80, y_train_80) predictions = model.predict(X_test) (fig, ax) = plt.subplots(figsize=figsize) if (plot_type == 0): if (xvalues is None): xvalues = range(len(X)) ax.plot(xvalues, y, 'o:', label='actual values') ax.plot(xvalues[train_len:], predictions, 'o:', label='predicted values') ax.legend() elif (plot_type == 1): ax.plot(y[train_len:], predictions, 'o') ax.plot([0, 1], [0, 1], 'r-', transform=ax.transAxes) xlabel = 'Actual values' ax.set_ylabel('Predicted values') ax.set_xlabel(xlabel) ax.set_title(title) ax.grid() return ax
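A minimal usage sketch for plot_predictions follows; the synthetic data and the assumption that the module is importable as model_selection are illustrative, not part of the record:
import numpy as np
from sklearn.linear_model import LinearRegression
from model_selection import plot_predictions  # assumed import path

# Hypothetical time-series-like data: 100 samples, 3 explanatory features.
rng = np.random.default_rng(0)
X = rng.random((100, 3))
y = X @ np.array([1.5, -2.0, 0.5]) + rng.normal(scale=0.1, size=100)

# The last 20% (a contiguous block) is held out, predicted, and drawn
# against the actual values on the same axes.
ax = plot_predictions(X, y, LinearRegression(), scale=True, test_size=0.2,
                      plot_type=0, title='Linear model: actual vs predicted')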
def _plot_TrainVal_values(xvalues, train_val_scores, plot_train, xlabel, title, figsize=(6, 6), bar=False): "\n Plot the given list of training-validation scores.\n\n This function is an auxiliary function for the model selection functions. It's meant to be private in the\n module.\n\n Parameters\n ----------\n xvalues: list (in general iterable)\n Values to put in the x axis of the plot.\n train_val_scores: np.array\n Two dimensional np.array, containing two columns: the first contains the training scores, the second the validation\n scores.\n Basically, it is a list of training-validation scores.\n plot_train: bool\n Indicates whether to plot also the training scores or to plot only the validation ones.\n xlabel: str\n Label of the x axis.\n title: str\n Title of the plot.\n figsize: tuple\n Two dimensions of the plot.\n bar: bool\n Indicates whether to plot the scores using bars or using points.\n If `bar` is True, `xvalues` must contain strings (i.e. labels).\n Returns\n ----------\n matplotlib.axes.Axes\n The matplotlib Axes where the plot has been made.\n " (fig, ax) = plt.subplots(figsize=figsize) if (not bar): if plot_train: ax.plot(xvalues, train_val_scores[:, 0], 'o:', label='Train') ax.plot(xvalues, train_val_scores[:, 1], 'o:', label='Validation') elif plot_train: x = np.arange(len(xvalues)) width = 0.35 ax.bar((x - (width / 2)), train_val_scores[:, 0], width=width, label='Train') ax.bar((x + (width / 2)), train_val_scores[:, 1], width=width, label='Validation') ax.set_xticks(x) ax.set_xticklabels(xvalues) else: ax.bar(xvalues, train_val_scores[:, 1], label='Validation') ax.set_xlabel(xlabel) ax.set_title(title) ax.grid() ax.legend() return ax
-2,627,312,043,539,120,600
Plot the given list of training-validation scores. This function is an auxiliary function for the model selection functions. It's meant to be private in the module. Parameters ---------- xvalues: list (in general iterable) Values to put in the x axis of the plot. train_val_scores: np.array Two dimensional np.array, containing two columns: the first contains the training scores, the second the validation scores. Basically, it is a list of training-validation scores. plot_train: bool Indicates whether to plot also the training scores or to plot only the validation ones. xlabel: str Label of the x axis. title: str Title of the plot. figsize: tuple Two dimensions of the plot. bar: bool Indicates whether to plot the scores using bars or using points. If `bar` is True, `xvalues` must contain strings (i.e. labels). Returns ---------- matplotlib.axes.Axes The matplotlib Axes where the plot has been made.
model_selection.py
_plot_TrainVal_values
EnricoPittini/model-selection
python
def _plot_TrainVal_values(xvalues, train_val_scores, plot_train, xlabel, title, figsize=(6, 6), bar=False): "\n Plot the given list of training-validation scores.\n\n This function is an auxiliary function for the model selection functions. It's meant to be private in the\n module.\n\n Parameters\n ----------\n xvalues: list (in general iterable)\n Values to put in the x axis of the plot.\n train_val_scores: np.array\n Two dimensional np.array, containing two columns: the first contains the training scores, the second the validation\n scores.\n Basically, it is a list of training-validation scores.\n plot_train: bool\n Indicates whether to plot also the training scores or to plot only the validation ones.\n xlabel: str\n Label of the x axis.\n title: str\n Title of the plot.\n figsize: tuple\n Two dimensions of the plot.\n bar: bool\n Indicates whether to plot the scores using bars or using points.\n If `bar` is True, `xvalues` must contain strings (i.e. labels).\n Returns\n ----------\n matplotlib.axes.Axes\n The matplotlib Axes where the plot has been made.\n " (fig, ax) = plt.subplots(figsize=figsize) if (not bar): if plot_train: ax.plot(xvalues, train_val_scores[:, 0], 'o:', label='Train') ax.plot(xvalues, train_val_scores[:, 1], 'o:', label='Validation') elif plot_train: x = np.arange(len(xvalues)) width = 0.35 ax.bar((x - (width / 2)), train_val_scores[:, 0], width=width, label='Train') ax.bar((x + (width / 2)), train_val_scores[:, 1], width=width, label='Validation') ax.set_xticks(x) ax.set_xticklabels(xvalues) else: ax.bar(xvalues, train_val_scores[:, 1], label='Validation') ax.set_xlabel(xlabel) ax.set_title(title) ax.grid() ax.legend() return ax
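Since _plot_TrainVal_values is private, it is normally reached through the public validation functions; still, a sketch of the input shape it expects (all values hypothetical) may help:
import numpy as np

# Column 0 = training scores, column 1 = validation scores,
# one row per tested hyperparameter value.
train_val_scores = np.array([[0.10, 0.25],
                             [0.08, 0.18],
                             [0.05, 0.21]])
ax = _plot_TrainVal_values(xvalues=[2, 4, 8], train_val_scores=train_val_scores,
                           plot_train=True, xlabel='max_depth',
                           title='Train vs validation score')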
def hyperparameter_validation(X, y, model, hyperparameter, hyperparameter_values, scale=False, test_size=0.2, time_series=False, random_state=123, n_folds=5, regr=True, plot=False, plot_train=False, xvalues=None, xlabel=None, title='Hyperparameter validation', figsize=(6, 6)): "\n Select the best value for the specified hyperparameter of the specified model on the given dataset.\n\n In other words, perform the tuning of the `hyperparameter` among the values in `hyperparameter_values`.\n\n This selection is made using the validation score (i.e. the best hyperparameter value is the one with the best validation\n score).\n The validation score is computed by splitting the dataset into the training-test sets and then by applying the cross\n validation on the training set.\n Additionally, the training and test scores are also computed.\n\n Optionally, the validation scores of the `hyperparameter_values` can be plotted, making a graphical visualization of the\n selection.\n\n Parameters\n ----------\n X: np.array\n Two-dimensional np.array, containing the explanatory features of the dataset.\n y: np.array\n Mono dimensional np.array, containing the response feature of the dataset.\n model: sklearn.base.BaseEstimator\n Model which has the specified `hyperparameter`.\n hyperparameter: str\n The name of the hyperparameter that has to be validated.\n hyperparameter_values: list\n List of values for `hyperparameter` that have to be taken into account in the selection.\n scale: bool\n Indicates whether to scale or not the features in `X`.\n (The scaling is performed using the sklearn MinMaxScaler).\n test_size: float\n Decimal number between 0 and 1, which indicates the proportion of the test set.\n time_series: bool\n Indicates if the given dataset is a time series dataset (i.e. dataset indexed by days).\n (This affects the computing of the validation score).\n random_state: int\n Used in the training-test splitting of the dataset.\n n_folds: int\n Indicates how many folds are made in order to compute the k-fold cross validation.\n (It's used only if `time_series` is False).\n regr: bool\n Indicates if it's either a regression or a classification problem.\n plot: bool\n Indicates whether to plot or not the validation score values.\n plot_train: bool\n Indicates whether to plot also the training scores.\n (It's considered only if `plot` is True).\n xvalues: list (in general, iterable)\n Values that have to be put in the x axis of the plot.\n xlabel: str\n Label of the x axis of the plot.\n title: str\n Title of the plot.\n figsize: tuple\n Two dimensions of the plot.\n\n Returns\n ----------\n train_val_scores: np.array\n Two dimensional np.array, containing two columns: the first contains the training scores, the second the validation\n scores.\n It has as many rows as the number of values in `hyperparameter_values` (i.e. number of values to be tested).\n best_index: int\n Index of `hyperparameter_values` that indicates which is the best hyperparameter value.\n test_score: float\n Test score associated with the best hyperparameter value.\n ax: matplotlib.axes.Axes\n The matplotlib Axes where the plot has been made.\n If `plot` is False, then it is None.\n\n Notes\n ----------\n - If `regr` is True, the validation scores are errors (MSE, i.e. Mean Squared Errors): this means that the best\n hyperparameter value is the one associated with the minimum validation score.\n Otherwise, the validation scores are accuracies: this means that the best hyperparameter value is the one associated\n with the maximum validation score.\n - If `time_series` is False, the training-test splitting of the dataset is made randomly. In addition, the cross\n validation strategy performed is the classic k-fold cross validation: the number of folds is specified by `n_folds`.\n Otherwise, if `time_series` is True, the training-test sets are simply obtained by splitting the dataset into two\n contiguous parts. In addition, the cross validation strategy performed is the sklearn TimeSeriesSplit.\n " param_grid = {hyperparameter: hyperparameter_values} (params, train_val_scores, best_index, test_score) = hyperparameters_validation(X, y, model, param_grid, scale=scale, test_size=test_size, time_series=time_series, random_state=random_state, n_folds=n_folds, regr=regr) ax = None if plot: if (not xvalues): xvalues = hyperparameter_values if (not xlabel): xlabel = hyperparameter ax = _plot_TrainVal_values(xvalues, train_val_scores, plot_train, xlabel, title, figsize) return (train_val_scores, best_index, test_score, ax)
-3,417,247,821,585,341,400
Select the best value for the specified hyperparameter of the specified model on the given dataset. In other words, perform the tuning of the `hyperparameter` among the values in `hyperparameter_values`. This selection is made using the validation score (i.e. the best hyperparameter value is the one with the best validation score). The validation score is computed by splitting the dataset into the training-test sets and then by applying the cross validation on the training set. Additionally, the training and test scores are also computed. Optionally, the validation scores of the `hyperparameter_values` can be plotted, making a graphical visualization of the selection. Parameters ---------- X: np.array Two-dimensional np.array, containing the explanatory features of the dataset. y: np.array Mono dimensional np.array, containing the response feature of the dataset. model: sklearn.base.BaseEstimator Model which has the specified `hyperparameter`. hyperparameter: str The name of the hyperparameter that has to be validated. hyperparameter_values: list List of values for `hyperparameter` that have to be taken into account in the selection. scale: bool Indicates whether to scale or not the features in `X`. (The scaling is performed using the sklearn MinMaxScaler). test_size: float Decimal number between 0 and 1, which indicates the proportion of the test set. time_series: bool Indicates if the given dataset is a time series dataset (i.e. dataset indexed by days). (This affects the computing of the validation score). random_state: int Used in the training-test splitting of the dataset. n_folds: int Indicates how many folds are made in order to compute the k-fold cross validation. (It's used only if `time_series` is False). regr: bool Indicates if it's either a regression or a classification problem. plot: bool Indicates whether to plot or not the validation score values. plot_train: bool Indicates whether to plot also the training scores. (It's considered only if `plot` is True). xvalues: list (in general, iterable) Values that have to be put in the x axis of the plot. xlabel: str Label of the x axis of the plot. title: str Title of the plot. figsize: tuple Two dimensions of the plot. Returns ---------- train_val_scores: np.array Two dimensional np.array, containing two columns: the first contains the training scores, the second the validation scores. It has as many rows as the number of values in `hyperparameter_values` (i.e. number of values to be tested). best_index: int Index of `hyperparameter_values` that indicates which is the best hyperparameter value. test_score: float Test score associated with the best hyperparameter value. ax: matplotlib.axes.Axes The matplotlib Axes where the plot has been made. If `plot` is False, then it is None. Notes ---------- - If `regr` is True, the validation scores are errors (MSE, i.e. Mean Squared Errors): this means that the best hyperparameter value is the one associated with the minimum validation score. Otherwise, the validation scores are accuracies: this means that the best hyperparameter value is the one associated with the maximum validation score. - If `time_series` is False, the training-test splitting of the dataset is made randomly. In addition, the cross validation strategy performed is the classic k-fold cross validation: the number of folds is specified by `n_folds`. Otherwise, if `time_series` is True, the training-test sets are simply obtained by splitting the dataset into two contiguous parts. In addition, the cross validation strategy performed is the sklearn TimeSeriesSplit.
model_selection.py
hyperparameter_validation
EnricoPittini/model-selection
python
def hyperparameter_validation(X, y, model, hyperparameter, hyperparameter_values, scale=False, test_size=0.2, time_series=False, random_state=123, n_folds=5, regr=True, plot=False, plot_train=False, xvalues=None, xlabel=None, title='Hyperparameter validation', figsize=(6, 6)): "\n Select the best value for the specified hyperparameter of the specified model on the given dataset.\n\n In other words, perform the tuning of the `hyperparameter` among the values in `hyperparameter_values`.\n\n This selection is made using the validation score (i.e. the best hyperparameter value is the one with the best validation\n score).\n The validation score is computed by splitting the dataset into the training-test sets and then by applying the cross\n validation on the training set.\n Additionally, the training and test scores are also computed.\n\n Optionally, the validation scores of the `hyperparameter_values` can be plotted, making a graphical visualization of the\n selection.\n\n Parameters\n ----------\n X: np.array\n Two-dimensional np.array, containing the explanatory features of the dataset.\n y: np.array\n Mono dimensional np.array, containing the response feature of the dataset.\n model: sklearn.base.BaseEstimator\n Model which has the specified `hyperparameter`.\n hyperparameter: str\n The name of the hyperparameter that has to be validated.\n hyperparameter_values: list\n List of values for `hyperparameter` that have to be taken into account in the selection.\n scale: bool\n Indicates whether to scale or not the features in `X`.\n (The scaling is performed using the sklearn MinMaxScaler).\n test_size: float\n Decimal number between 0 and 1, which indicates the proportion of the test set.\n time_series: bool\n Indicates if the given dataset is a time series dataset (i.e. dataset indexed by days).\n (This affects the computing of the validation score).\n random_state: int\n Used in the training-test splitting of the dataset.\n n_folds: int\n Indicates how many folds are made in order to compute the k-fold cross validation.\n (It's used only if `time_series` is False).\n regr: bool\n Indicates if it's either a regression or a classification problem.\n plot: bool\n Indicates whether to plot or not the validation score values.\n plot_train: bool\n Indicates whether to plot also the training scores.\n (It's considered only if `plot` is True).\n xvalues: list (in general, iterable)\n Values that have to be put in the x axis of the plot.\n xlabel: str\n Label of the x axis of the plot.\n title: str\n Title of the plot.\n figsize: tuple\n Two dimensions of the plot.\n\n Returns\n ----------\n train_val_scores: np.array\n Two dimensional np.array, containing two columns: the first contains the training scores, the second the validation\n scores.\n It has as many rows as the number of values in `hyperparameter_values` (i.e. number of values to be tested).\n best_index: int\n Index of `hyperparameter_values` that indicates which is the best hyperparameter value.\n test_score: float\n Test score associated with the best hyperparameter value.\n ax: matplotlib.axes.Axes\n The matplotlib Axes where the plot has been made.\n If `plot` is False, then it is None.\n\n Notes\n ----------\n - If `regr` is True, the validation scores are errors (MSE, i.e. Mean Squared Errors): this means that the best\n hyperparameter value is the one associated with the minimum validation score.\n Otherwise, the validation scores are accuracies: this means that the best hyperparameter value is the one associated\n with the maximum validation score.\n - If `time_series` is False, the training-test splitting of the dataset is made randomly. In addition, the cross\n validation strategy performed is the classic k-fold cross validation: the number of folds is specified by `n_folds`.\n Otherwise, if `time_series` is True, the training-test sets are simply obtained by splitting the dataset into two\n contiguous parts. In addition, the cross validation strategy performed is the sklearn TimeSeriesSplit.\n " param_grid = {hyperparameter: hyperparameter_values} (params, train_val_scores, best_index, test_score) = hyperparameters_validation(X, y, model, param_grid, scale=scale, test_size=test_size, time_series=time_series, random_state=random_state, n_folds=n_folds, regr=regr) ax = None if plot: if (not xvalues): xvalues = hyperparameter_values if (not xlabel): xlabel = hyperparameter ax = _plot_TrainVal_values(xvalues, train_val_scores, plot_train, xlabel, title, figsize) return (train_val_scores, best_index, test_score, ax)
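A minimal sketch of tuning a single hyperparameter with the function above (the synthetic data and the model_selection import path are assumptions):
import numpy as np
from sklearn.tree import DecisionTreeRegressor
from model_selection import hyperparameter_validation  # assumed import path

rng = np.random.default_rng(0)
X = rng.random((200, 4))
y = np.sin(6 * X[:, 0]) + rng.normal(scale=0.1, size=200)

# Try several tree depths; the one with the lowest validation MSE wins.
depths = [2, 4, 6, 8]
train_val_scores, best_index, test_score, ax = hyperparameter_validation(
    X, y, DecisionTreeRegressor(random_state=0),
    hyperparameter='max_depth', hyperparameter_values=depths,
    regr=True, plot=True, plot_train=True)
print('best max_depth:', depths[best_index], '| test MSE:', test_score)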
def hyperparameters_validation(X, y, model, param_grid, scale=False, test_size=0.2, time_series=False, random_state=123, n_folds=5, regr=True): "\n Select the best combination of values for the specified hyperparameters of the specified model on the given dataset.\n\n In other words, perform the tuning of multiple hyperparameters.\n The parameter `param_grid` is a dictionary that indicates which are the specified hyperparameters and what are the\n associated values to test.\n\n All the possible combinations of values are tested, in an exhaustive way (i.e. grid search).\n\n This selection is made using the validation score (i.e. the best combination of hyperparameters values is the one with\n the best validation score).\n The validation score is computed by splitting the dataset into the training-test sets and then by applying the cross\n validation on the training set.\n Additionally, the training and test scores are also computed.\n\n Parameters\n ----------\n X: np.array\n Two-dimensional np.array, containing the explanatory features of the dataset.\n y: np.array\n Mono dimensional np.array, containing the response feature of the dataset.\n model: sklearn.base.BaseEstimator\n Model which has the specified hyperparameters.\n param_grid: dict\n Dictionary which has as keys the names of the specified hyperparameters and as values the associated list of\n values to test.\n scale: bool\n Indicates whether to scale or not the features in `X`.\n (The scaling is performed using the sklearn MinMaxScaler).\n test_size: float\n Decimal number between 0 and 1, which indicates the proportion of the test set.\n time_series: bool\n Indicates if the given dataset is a time series dataset (i.e. dataframe indexed by days).\n (This affects the computing of the validation score).\n random_state: int\n Used in the training-test splitting of the dataset.\n n_folds: int\n Indicates how many folds are made in order to compute the k-fold cross validation.\n (It's used only if `time_series` is False).\n regr: bool\n Indicates if it's either a regression or a classification problem.\n\n Returns\n ----------\n params: list\n List which enumerates all the possible combinations of hyperparameters values.\n It's a list of dictionaries: each dictionary represents a specific combination of hyperparameters values. (It's a\n dictionary which has as keys the hyperparameters names and as values the specific associated values of that combination).\n train_val_scores: np.array\n Two dimensional np.array, containing two columns: the first contains the training scores, the second the validation\n scores.\n It has as many rows as the number of possible combinations of the hyperparameters values.\n (It has as many rows as the elements of `params`).\n best_index: int\n Index of `params` that indicates which is the best combination of hyperparameters values.\n test_score: float\n Test score associated with the best combination of hyperparameters values.\n\n Notes\n ----------\n - If `regr` is True, the validation scores are errors (MSE, i.e. Mean Squared Errors): this means that the best\n combination of hyperparameters values is the one associated with the minimum validation score.\n Otherwise, the validation scores are accuracies: this means that the best combination of hyperparameters values is the\n one associated with the maximum validation score.\n - If `time_series` is False, the training-test splitting of the dataset is made randomly. In addition, the cross\n validation strategy performed is the classic k-fold cross validation: the number of folds is specified by `n_folds`.\n Otherwise, if `time_series` is True, the training-test sets are simply obtained by splitting the dataset into two\n contiguous parts. In addition, the cross validation strategy performed is the sklearn TimeSeriesSplit.\n " if regr: scoring = 'neg_mean_squared_error' else: scoring = 'accuracy' if (not time_series): (X_train_80, X_test, y_train_80, y_test) = train_test_split(X, y, test_size=test_size, random_state=random_state) else: train_len = int((X.shape[0] * (1 - test_size))) X_train_80 = X[:train_len] y_train_80 = y[:train_len] X_test = X[train_len:] y_test = y[train_len:] if scale: scaler = MinMaxScaler() scaler.fit(X_train_80) X_train_80 = scaler.transform(X_train_80) X_test = scaler.transform(X_test) if (not time_series): cv = n_folds else: cv = TimeSeriesSplit(n_splits=n_folds) grid_search = GridSearchCV(model, param_grid, scoring=scoring, cv=cv, return_train_score=True) grid_search.fit(X_train_80, y_train_80) params = grid_search.cv_results_['params'] train_scores = grid_search.cv_results_['mean_train_score'] val_scores = grid_search.cv_results_['mean_test_score'] best_index = grid_search.best_index_ best_model = grid_search.best_estimator_ if regr: train_scores = (train_scores * (- 1)) val_scores = (val_scores * (- 1)) train_val_scores = np.concatenate((train_scores.reshape((- 1), 1), val_scores.reshape((- 1), 1)), axis=1) best_model.fit(X_train_80, y_train_80) test_score = 0 if regr: test_score = mean_squared_error(y_true=y_test, y_pred=best_model.predict(X_test)) else: test_score = accuracy_score(y_true=y_test, y_pred=best_model.predict(X_test)) return (params, train_val_scores, best_index, test_score)
-5,705,085,024,780,375,000
Select the best combination of values for the specified hyperparameters of the specified model on the given dataset. In other words, perform the tuning of multiple hyperparameters. The parameter `param_grid` is a dictionary that indicates which are the specified hyperparameters and what are the associated values to test. All the possible combinations of values are tested, in an exhaustive way (i.e. grid search). This selection is made using the validation score (i.e. the best combination of hyperparameters values is the one with the best validation score). The validation score is computed by splitting the dataset into the training-test sets and then by applying the cross validation on the training set. Additionally, the training and test scores are also computed. Parameters ---------- X: np.array Two-dimensional np.array, containing the explanatory features of the dataset. y: np.array Mono dimensional np.array, containing the response feature of the dataset. model: sklearn.base.BaseEstimator Model which has the specified hyperparameters. param_grid: dict Dictionary which has as keys the names of the specified hyperparameters and as values the associated list of values to test. scale: bool Indicates whether to scale or not the features in `X`. (The scaling is performed using the sklearn MinMaxScaler). test_size: float Decimal number between 0 and 1, which indicates the proportion of the test set. time_series: bool Indicates if the given dataset is a time series dataset (i.e. dataframe indexed by days). (This affects the computing of the validation score). random_state: int Used in the training-test splitting of the dataset. n_folds: int Indicates how many folds are made in order to compute the k-fold cross validation. (It's used only if `time_series` is False). regr: bool Indicates if it's either a regression or a classification problem. Returns ---------- params: list List which enumerates all the possible combinations of hyperparameters values. It's a list of dictionaries: each dictionary represents a specific combination of hyperparameters values. (It's a dictionary which has as keys the hyperparameters names and as values the specific associated values of that combination). train_val_scores: np.array Two dimensional np.array, containing two columns: the first contains the training scores, the second the validation scores. It has as many rows as the number of possible combinations of the hyperparameters values. (It has as many rows as the elements of `params`). best_index: int Index of `params` that indicates which is the best combination of hyperparameters values. test_score: float Test score associated with the best combination of hyperparameters values. Notes ---------- - If `regr` is True, the validation scores are errors (MSE, i.e. Mean Squared Errors): this means that the best combination of hyperparameters values is the one associated with the minimum validation score. Otherwise, the validation scores are accuracies: this means that the best combination of hyperparameters values is the one associated with the maximum validation score. - If `time_series` is False, the training-test splitting of the dataset is made randomly. In addition, the cross validation strategy performed is the classic k-fold cross validation: the number of folds is specified by `n_folds`. Otherwise, if `time_series` is True, the training-test sets are simply obtained by splitting the dataset into two contiguous parts. In addition, the cross validation strategy performed is the sklearn TimeSeriesSplit.
model_selection.py
hyperparameters_validation
EnricoPittini/model-selection
python
def hyperparameters_validation(X, y, model, param_grid, scale=False, test_size=0.2, time_series=False, random_state=123, n_folds=5, regr=True): "\n Select the best combination of values for the specified hyperparameters of the specified model on the given dataset.\n\n In other words, perform the tuning of multiple hyperparameters.\n The parameter `param_grid` is a dictionary that indicates which are the specified hyperparameters and what are the\n associated values to test.\n\n All the possible combinations of values are tested, in an exhaustive way (i.e. grid search).\n\n This selection is made using the validation score (i.e. the best combination of hyperparameters values is the one with\n the best validation score).\n The validation score is computed by splitting the dataset into the training-test sets and then by applying the cross\n validation on the training set.\n Additionally, the training and test scores are also computed.\n\n Parameters\n ----------\n X: np.array\n Two-dimensional np.array, containing the explanatory features of the dataset.\n y: np.array\n Mono dimensional np.array, containing the response feature of the dataset.\n model: sklearn.base.BaseEstimator\n Model which has the specified hyperparameters.\n param_grid: dict\n Dictionary which has as keys the names of the specified hyperparameters and as values the associated list of\n values to test.\n scale: bool\n Indicates whether to scale or not the features in `X`.\n (The scaling is performed using the sklearn MinMaxScaler).\n test_size: float\n Decimal number between 0 and 1, which indicates the proportion of the test set.\n time_series: bool\n Indicates if the given dataset is a time series dataset (i.e. dataframe indexed by days).\n (This affects the computing of the validation score).\n random_state: int\n Used in the training-test splitting of the dataset.\n n_folds: int\n Indicates how many folds are made in order to compute the k-fold cross validation.\n (It's used only if `time_series` is False).\n regr: bool\n Indicates if it's either a regression or a classification problem.\n\n Returns\n ----------\n params: list\n List which enumerates all the possible combinations of hyperparameters values.\n It's a list of dictionaries: each dictionary represents a specific combination of hyperparameters values. (It's a\n dictionary which has as keys the hyperparameters names and as values the specific associated values of that combination).\n train_val_scores: np.array\n Two dimensional np.array, containing two columns: the first contains the training scores, the second the validation\n scores.\n It has as many rows as the number of possible combinations of the hyperparameters values.\n (It has as many rows as the elements of `params`).\n best_index: int\n Index of `params` that indicates which is the best combination of hyperparameters values.\n test_score: float\n Test score associated with the best combination of hyperparameters values.\n\n Notes\n ----------\n - If `regr` is True, the validation scores are errors (MSE, i.e. Mean Squared Errors): this means that the best\n combination of hyperparameters values is the one associated with the minimum validation score.\n Otherwise, the validation scores are accuracies: this means that the best combination of hyperparameters values is the\n one associated with the maximum validation score.\n - If `time_series` is False, the training-test splitting of the dataset is made randomly. In addition, the cross\n validation strategy performed is the classic k-fold cross validation: the number of folds is specified by `n_folds`.\n Otherwise, if `time_series` is True, the training-test sets are simply obtained by splitting the dataset into two\n contiguous parts. In addition, the cross validation strategy performed is the sklearn TimeSeriesSplit.\n " if regr: scoring = 'neg_mean_squared_error' else: scoring = 'accuracy' if (not time_series): (X_train_80, X_test, y_train_80, y_test) = train_test_split(X, y, test_size=test_size, random_state=random_state) else: train_len = int((X.shape[0] * (1 - test_size))) X_train_80 = X[:train_len] y_train_80 = y[:train_len] X_test = X[train_len:] y_test = y[train_len:] if scale: scaler = MinMaxScaler() scaler.fit(X_train_80) X_train_80 = scaler.transform(X_train_80) X_test = scaler.transform(X_test) if (not time_series): cv = n_folds else: cv = TimeSeriesSplit(n_splits=n_folds) grid_search = GridSearchCV(model, param_grid, scoring=scoring, cv=cv, return_train_score=True) grid_search.fit(X_train_80, y_train_80) params = grid_search.cv_results_['params'] train_scores = grid_search.cv_results_['mean_train_score'] val_scores = grid_search.cv_results_['mean_test_score'] best_index = grid_search.best_index_ best_model = grid_search.best_estimator_ if regr: train_scores = (train_scores * (- 1)) val_scores = (val_scores * (- 1)) train_val_scores = np.concatenate((train_scores.reshape((- 1), 1), val_scores.reshape((- 1), 1)), axis=1) best_model.fit(X_train_80, y_train_80) test_score = 0 if regr: test_score = mean_squared_error(y_true=y_test, y_pred=best_model.predict(X_test)) else: test_score = accuracy_score(y_true=y_test, y_pred=best_model.predict(X_test)) return (params, train_val_scores, best_index, test_score)
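A minimal grid-search sketch with hyperparameters_validation (the data and the model_selection import path are assumptions):
import numpy as np
from sklearn.linear_model import Ridge
from model_selection import hyperparameters_validation  # assumed import path

rng = np.random.default_rng(0)
X, y = rng.random((150, 5)), rng.random(150)

# Every combination of the two hyperparameters below is tested (grid search).
param_grid = {'alpha': [0.1, 1.0, 10.0], 'fit_intercept': [True, False]}
params, train_val_scores, best_index, test_score = hyperparameters_validation(
    X, y, Ridge(), param_grid, scale=True, regr=True)
print('best combination:', params[best_index], '| test MSE:', test_score)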
def models_validation(X, y, model_paramGrid_list, scale_list=None, test_size=0.2, time_series=False, random_state=123, n_folds=5, regr=True, plot=False, plot_train=False, xvalues=None, xlabel='Models', title='Models validation', figsize=(6, 6)): "\n Select the best model on the given dataset.\n\n The parameter `model_paramGrid_list` is the list of the models to test. It also contains, for each model, the grid of\n hyperparameters that have to be tested on that model (i.e. the grid which contains the values to test for each\n specified hyperparameter of the model).\n (That grid has the same structure as the `param_grid` parameter of the function `hyperparameters_validation`. See\n `hyperparameters_validation`).\n\n For each specified model, the best combination of hyperparameters values is selected in an exhaustive way (i.e. grid\n search).\n Actually, the function `hyperparameters_validation` is used.\n (See `hyperparameters_validation`).\n\n The selection of the best model is made using the validation score (i.e. the best model is the one with the best\n validation score).\n The validation score is computed by splitting the dataset into the training-test sets and then by applying the cross\n validation on the training set.\n Additionally, the training and test scores are also computed.\n\n Optionally, the validation scores of the different models can be plotted, making a graphical visualization of the\n selection.\n\n Parameters\n ----------\n X: np.array\n Two-dimensional np.array, containing the explanatory features of the dataset.\n y: np.array\n Mono dimensional np.array, containing the response feature of the dataset.\n model_paramGrid_list: list\n List that specifies the models and the relative grids of hyperparameters to be tested.\n It's a list of triples (i.e. tuples), where each triple represents a model:\n - the first element is a string, which is a mnemonic name of that model;\n - the second element is the sklearn model;\n - the third element is the grid of hyperparameters to test for that model. It's a dictionary, with the same\n structure of the parameter `param_grid` of the function `hyperparameters_validation`.\n scale_list: list or bool\n List of booleans, which has as many elements as the models to test (i.e. as the elements of the\n `model_paramGrid_list` list).\n This list indicates, for each different model, if the features in `X` have to be scaled or not.\n `scale_list` can be None or False: in this case the `X` features aren't scaled for any model. `scale_list` can be\n True: in this case the `X` features are scaled for all the models.\n test_size: float\n Decimal number between 0 and 1, which indicates the proportion of the test set.\n time_series: bool\n Indicates if the given dataset is a time series dataset (i.e. dataset indexed by days).\n (This affects the computing of the validation score).\n random_state: int\n Used in the training-test splitting of the dataset.\n n_folds: int\n Indicates how many folds are made in order to compute the k-fold cross validation.\n (It's used only if `time_series` is False).\n regr: bool\n Indicates if it's either a regression or a classification problem.\n plot: bool\n Indicates whether to plot or not the validation score values.\n plot_train: bool\n Indicates whether to plot also the training scores.\n (It's considered only if `plot` is True).\n xvalues: list (in general, iterable)\n Values that have to be put in the x axis of the plot.\n xlabel: str\n Label of the x axis of the plot.\n title: str\n Title of the plot.\n figsize: tuple\n Two dimensions of the plot.\n\n Returns\n ----------\n models_train_val_score: np.array\n Two dimensional np.array, containing two columns: the first contains the training scores, the second the validation\n scores.\n It has as many rows as the number of models to test (i.e. number of elements in the `model_paramGrid_list` list).\n models_best_params: list\n List which indicates, for each model, the best combination of the hyperparameters values for that model.\n It has as many elements as the models to test (i.e. as the elements of the `model_paramGrid_list` list), and it\n contains dictionaries: each dictionary represents the best combination of the hyperparameters values for the\n associated model.\n best_index: int\n Index of `model_paramGrid_list` that indicates which is the best model.\n test_score: float\n Test score associated with the best model.\n ax: matplotlib.axes.Axes\n The matplotlib Axes where the plot has been made.\n If `plot` is False, then it is None.\n\n See also\n ----------\n hyperparameters_validation:\n select the best combination of values for the specified hyperparameters of the specified model on the given dataset.\n\n Notes\n ----------\n - If `regr` is True, the validation scores are errors (MSE, i.e. Mean Squared Errors): this means that the best\n model is the one associated with the minimum validation score.\n Otherwise, the validation scores are accuracies: this means that the best model is the one associated with the\n maximum validation score.\n - If `time_series` is False, the training-test splitting of the dataset is made randomly. In addition, the cross\n validation strategy performed is the classic k-fold cross validation: the number of folds is specified by `n_folds`.\n Otherwise, if `time_series` is True, the training-test sets are simply obtained by splitting the dataset into two\n contiguous parts. In addition, the cross validation strategy performed is the sklearn TimeSeriesSplit.\n " if (not scale_list): scale_list = ([False] * len(model_paramGrid_list)) elif (scale_list is True): scale_list = ([True] * len(model_paramGrid_list)) models_train_val_score = [] models_best_params = [] models_test_score = [] for (i, triple) in enumerate(model_paramGrid_list): (model, param_grid) = triple[1:] (params, train_val_scores, best_index, test_score) = hyperparameters_validation(X, y, model, param_grid, scale=scale_list[i], test_size=test_size, time_series=time_series, random_state=random_state, n_folds=n_folds, regr=regr) models_train_val_score.append(tuple(train_val_scores[best_index])) models_best_params.append(params[best_index]) models_test_score.append(test_score) models_train_val_score = np.array(models_train_val_score) if regr: best_index = np.argmin(models_train_val_score, axis=0)[1] else: best_index = np.argmax(models_train_val_score, axis=0)[1] test_score = models_test_score[best_index] ax = None if plot: if (not xvalues): xvalues = [model_paramGrid_list[i][0] for i in range(len(model_paramGrid_list))] ax = _plot_TrainVal_values(xvalues, models_train_val_score, plot_train, xlabel, title, figsize, bar=True) return (models_train_val_score, models_best_params, best_index, test_score, ax)
-7,523,235,934,046,416,000
Select the best model on the given dataset. The parameter `model_paramGrid_list` is the list of the models to test. It also contains, for each model, the grid of hyperparameters that have to be tested on that model (i.e. the grid which contains the values to test for each specified hyperparameter of the model). (That grid has the same structure as the `param_grid` parameter of the function `hyperparameters_validation`. See `hyperparameters_validation`). For each specified model, the best combination of hyperparameters values is selected in an exhaustive way (i.e. grid search). Actually, the function `hyperparameters_validation` is used. (See `hyperparameters_validation`). The selection of the best model is made using the validation score (i.e. the best model is the one with the best validation score). The validation score is computed by splitting the dataset into the training-test sets and then by applying the cross validation on the training set. Additionally, the training and test scores are also computed. Optionally, the validation scores of the different models can be plotted, making a graphical visualization of the selection. Parameters ---------- X: np.array Two-dimensional np.array, containing the explanatory features of the dataset. y: np.array Mono dimensional np.array, containing the response feature of the dataset. model_paramGrid_list: list List that specifies the models and the relative grids of hyperparameters to be tested. It's a list of triples (i.e. tuples), where each triple represents a model: - the first element is a string, which is a mnemonic name of that model; - the second element is the sklearn model; - the third element is the grid of hyperparameters to test for that model. It's a dictionary, with the same structure of the parameter `param_grid` of the function `hyperparameters_validation`. scale_list: list or bool List of booleans, which has as many elements as the models to test (i.e. as the elements of the `model_paramGrid_list` list). This list indicates, for each different model, if the features in `X` have to be scaled or not. `scale_list` can be None or False: in this case the `X` features aren't scaled for any model. `scale_list` can be True: in this case the `X` features are scaled for all the models. test_size: float Decimal number between 0 and 1, which indicates the proportion of the test set. time_series: bool Indicates if the given dataset is a time series dataset (i.e. dataset indexed by days). (This affects the computing of the validation score). random_state: int Used in the training-test splitting of the dataset. n_folds: int Indicates how many folds are made in order to compute the k-fold cross validation. (It's used only if `time_series` is False). regr: bool Indicates if it's either a regression or a classification problem. plot: bool Indicates whether to plot or not the validation score values. plot_train: bool Indicates whether to plot also the training scores. (It's considered only if `plot` is True). xvalues: list (in general, iterable) Values that have to be put in the x axis of the plot. xlabel: str Label of the x axis of the plot. title: str Title of the plot. figsize: tuple Two dimensions of the plot. Returns ---------- models_train_val_score: np.array Two dimensional np.array, containing two columns: the first contains the training scores, the second the validation scores. It has as many rows as the number of models to test (i.e. number of elements in the `model_paramGrid_list` list). models_best_params: list List which indicates, for each model, the best combination of the hyperparameters values for that model. It has as many elements as the models to test (i.e. as the elements of the `model_paramGrid_list` list), and it contains dictionaries: each dictionary represents the best combination of the hyperparameters values for the associated model. best_index: int Index of `model_paramGrid_list` that indicates which is the best model. test_score: float Test score associated with the best model. ax: matplotlib.axes.Axes The matplotlib Axes where the plot has been made. If `plot` is False, then it is None. See also ---------- hyperparameters_validation: select the best combination of values for the specified hyperparameters of the specified model on the given dataset. Notes ---------- - If `regr` is True, the validation scores are errors (MSE, i.e. Mean Squared Errors): this means that the best model is the one associated with the minimum validation score. Otherwise, the validation scores are accuracies: this means that the best model is the one associated with the maximum validation score. - If `time_series` is False, the training-test splitting of the dataset is made randomly. In addition, the cross validation strategy performed is the classic k-fold cross validation: the number of folds is specified by `n_folds`. Otherwise, if `time_series` is True, the training-test sets are simply obtained by splitting the dataset into two contiguous parts. In addition, the cross validation strategy performed is the sklearn TimeSeriesSplit.
model_selection.py
models_validation
EnricoPittini/model-selection
python
def models_validation(X, y, model_paramGrid_list, scale_list=None, test_size=0.2, time_series=False, random_state=123, n_folds=5, regr=True, plot=False, plot_train=False, xvalues=None, xlabel='Models', title='Models validation', figsize=(6, 6)): "\n Select the best model on the given dataset.\n\n The parameter `model_paramGrid_list` is the list of the models to test. It also contains, for each model, the grid of\n hyperparameters that have to be tested on that model (i.e. the grid which contains the values to test for each\n specified hyperparameter of the model).\n (That grid has the same structure as the `param_grid` parameter of the function `hyperparameters_validation`. See\n `hyperparameters_validation`).\n\n For each specified model, the best combination of hyperparameters values is selected in an exhaustive way (i.e. grid\n search).\n Actually, the function `hyperparameters_validation` is used.\n (See `hyperparameters_validation`).\n\n The selection of the best model is made using the validation score (i.e. the best model is the one with the best\n validation score).\n The validation score is computed by splitting the dataset into the training-test sets and then by applying the cross\n validation on the training set.\n Additionally, the training and test scores are also computed.\n\n Optionally, the validation scores of the different models can be plotted, making a graphical visualization of the\n selection.\n\n Parameters\n ----------\n X: np.array\n Two-dimensional np.array, containing the explanatory features of the dataset.\n y: np.array\n Mono dimensional np.array, containing the response feature of the dataset.\n model_paramGrid_list: list\n List that specifies the models and the relative grids of hyperparameters to be tested.\n It's a list of triples (i.e. tuples), where each triple represents a model:\n - the first element is a string, which is a mnemonic name of that model;\n - the second element is the sklearn model;\n - the third element is the grid of hyperparameters to test for that model. It's a dictionary, with the same\n structure of the parameter `param_grid` of the function `hyperparameters_validation`.\n scale_list: list or bool\n List of booleans, which has as many elements as the models to test (i.e. as the elements of the\n `model_paramGrid_list` list).\n This list indicates, for each different model, if the features in `X` have to be scaled or not.\n `scale_list` can be None or False: in this case the `X` features aren't scaled for any model. `scale_list` can be\n True: in this case the `X` features are scaled for all the models.\n test_size: float\n Decimal number between 0 and 1, which indicates the proportion of the test set.\n time_series: bool\n Indicates if the given dataset is a time series dataset (i.e. dataset indexed by days).\n (This affects the computing of the validation score).\n random_state: int\n Used in the training-test splitting of the dataset.\n n_folds: int\n Indicates how many folds are made in order to compute the k-fold cross validation.\n (It's used only if `time_series` is False).\n regr: bool\n Indicates if it's either a regression or a classification problem.\n plot: bool\n Indicates whether to plot or not the validation score values.\n plot_train: bool\n Indicates whether to plot also the training scores.\n (It's considered only if `plot` is True).\n xvalues: list (in general, iterable)\n Values that have to be put in the x axis of the plot.\n xlabel: str\n Label of the x axis of the plot.\n title: str\n Title of the plot.\n figsize: tuple\n Two dimensions of the plot.\n\n Returns\n ----------\n models_train_val_score: np.array\n Two dimensional np.array, containing two columns: the first contains the training scores, the second the validation\n scores.\n It has as many rows as the number of models to test (i.e. number of elements in the `model_paramGrid_list` list).\n models_best_params: list\n List which indicates, for each model, the best combination of the hyperparameters values for that model.\n It has as many elements as the models to test (i.e. as the elements of the `model_paramGrid_list` list), and it\n contains dictionaries: each dictionary represents the best combination of the hyperparameters values for the\n associated model.\n best_index: int\n Index of `model_paramGrid_list` that indicates which is the best model.\n test_score: float\n Test score associated with the best model.\n ax: matplotlib.axes.Axes\n The matplotlib Axes where the plot has been made.\n If `plot` is False, then it is None.\n\n See also\n ----------\n hyperparameters_validation:\n select the best combination of values for the specified hyperparameters of the specified model on the given dataset.\n\n Notes\n ----------\n - If `regr` is True, the validation scores are errors (MSE, i.e. Mean Squared Errors): this means that the best\n model is the one associated with the minimum validation score.\n Otherwise, the validation scores are accuracies: this means that the best model is the one associated with the\n maximum validation score.\n - If `time_series` is False, the training-test splitting of the dataset is made randomly. In addition, the cross\n validation strategy performed is the classic k-fold cross validation: the number of folds is specified by `n_folds`.\n Otherwise, if `time_series` is True, the training-test sets are simply obtained by splitting the dataset into two\n contiguous parts. In addition, the cross validation strategy performed is the sklearn TimeSeriesSplit.\n " if (not scale_list): scale_list = ([False] * len(model_paramGrid_list)) elif (scale_list is True): scale_list = ([True] * len(model_paramGrid_list)) models_train_val_score = [] models_best_params = [] models_test_score = [] for (i, triple) in enumerate(model_paramGrid_list): (model, param_grid) = triple[1:] (params, train_val_scores, best_index, test_score) = hyperparameters_validation(X, y, model, param_grid, scale=scale_list[i], test_size=test_size, time_series=time_series, random_state=random_state, n_folds=n_folds, regr=regr) models_train_val_score.append(tuple(train_val_scores[best_index])) models_best_params.append(params[best_index]) models_test_score.append(test_score) models_train_val_score = np.array(models_train_val_score) if regr: best_index = np.argmin(models_train_val_score, axis=0)[1] else: best_index = np.argmax(models_train_val_score, axis=0)[1] test_score = models_test_score[best_index] ax = None if plot: if (not xvalues): xvalues = [model_paramGrid_list[i][0] for i in range(len(model_paramGrid_list))] ax = _plot_TrainVal_values(xvalues, models_train_val_score, plot_train, xlabel, title, figsize, bar=True) return (models_train_val_score, models_best_params, best_index, test_score, ax)
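A minimal sketch comparing two model families with models_validation, each with its own grid (the data and the model_selection import path are assumptions):
import numpy as np
from sklearn.linear_model import Ridge
from sklearn.neighbors import KNeighborsRegressor
from model_selection import models_validation  # assumed import path

rng = np.random.default_rng(0)
X, y = rng.random((150, 5)), rng.random(150)

model_paramGrid_list = [
    ('ridge', Ridge(), {'alpha': [0.1, 1.0, 10.0]}),
    ('knn', KNeighborsRegressor(), {'n_neighbors': [3, 5, 9]}),
]
# Scale only for kNN, which is distance based; Ridge is left unscaled here.
scores, best_params, best_index, test_score, ax = models_validation(
    X, y, model_paramGrid_list, scale_list=[False, True], regr=True, plot=True)
print('best model:', model_paramGrid_list[best_index][0],
      '| its best params:', best_params[best_index])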
def datasets_hyperparameter_validation(dataset_list, model, hyperparameter, hyperparameter_values, scale=False, test_size=0.2, time_series=False, random_state=123, n_folds=5, regr=True, plot=False, plot_train=False, xvalues=None, xlabel='Datasets', title='Datasets validation', figsize=(6, 6), verbose=False, figsize_verbose=(6, 6)): "\n Select the best dataset and the best value for the specified hyperparameter of the specified model (i.e. select the best\n couple dataset-hyperparameter value).\n\n For each dataset in `dataset_list`, all the specified values `hyperparameter_values` are tested for the specified\n `hyperparameter` of `model`.\n In other words, on each dataset the tuning of `hyperparameter` is performed: in fact, on each dataset, the function\n `hyperparameter_validation` is applied. (See `hyperparameter_validation`).\n In the end, the best couple dataset-hyperparameter value is selected.\n\n Despite the fact that a couple dataset-hyperparameter value is selected, the main viewpoint is focused with respect to\n the datasets. It's a validation focused on the datasets.\n In fact, first of all, for each dataset the hyperparameter tuning is performed: in this way the best value is selected\n and its relative score is associated with the dataset (i.e. it's the score of the dataset). (In other words, on each\n dataset the function `hyperparameter_validation` is applied). Finally, after that, the best dataset is selected.\n It's a two-levels selection.\n\n This selection is made using the validation score (i.e. the best couple dataset-hyperparameter value is the one with the\n best validation score).\n The validation score is computed by splitting each dataset into the training-test sets and then by applying the cross\n validation on the training set.\n Additionally, the training and test scores are also computed.\n\n Optionally, the validation scores of the datasets can be plotted, making a graphical visualization of the dataset\n selection. This is the 'main' plot.\n Moreover, still optionally, the 'secondary' plots can be done: for each dataset, the validation scores of the\n `hyperparameter_values` are plotted, making a graphical visualization of the hyperparameter tuning on that dataset.\n (As the plot made by the `hyperparameter_validation` function).\n\n Parameters\n ----------\n dataset_list: list\n List of couples, where each couple is a dataset.\n - The first element is X, the two-dimensional np.array containing the explanatory features of the dataset.\n - The second element is y, the mono dimensional np.array containing the response feature of the dataset.\n model: sklearn.base.BaseEstimator\n Model which has the specified `hyperparameter`.\n hyperparameter: str\n The name of the hyperparameter that has to be validated.\n hyperparameter_values: list\n List of values for `hyperparameter` that have to be taken into account in the selection.\n scale: bool\n Indicates whether to scale or not the features in 'X' (for all the datasets).\n (The scaling is performed using the sklearn MinMaxScaler).\n test_size: float\n Decimal number between 0 and 1, which indicates the proportion of the test set (for each dataset).\n time_series: bool\n Indicates if the given datasets are time series dataset (i.e. datasets indexed by days).\n (This affects the computing of the validation scores).\n random_state: int\n Used in the training-test splitting of the datasets.\n n_folds: int\n Indicates how many folds are made in order to compute the k-fold cross validation.\n (It's used only if `time_series` is False).\n regr: bool\n Indicates if it's either a regression or a classification problem.\n plot: bool\n Indicates whether to plot or not the validation score values of the datasets (i.e. this is the 'main' plot).\n plot_train: bool\n Indicates whether to plot also the training scores (both in the 'main' and 'secondary' plots).\n xvalues: list (in general, iterable)\n Values that have to be put in the x axis of the 'main' plot.\n xlabel: str\n Label of the x axis of the 'main' plot.\n title: str\n Title of the 'main' plot.\n figsize: tuple\n Two dimensions of the 'main' plot.\n verbose: bool\n If True, for each dataset are plotted the validation scores of the hyperparameter tuning (these are the 'secondary'\n plots).\n (See 'hyperparameter_validation').\n figsize_verbose: tuple\n Two dimensions of the 'secondary' plots.\n\n Returns\n ----------\n datasets_train_val_score: np.array\n Two dimensional np.array, containing two columns: the first contains the training scores, the second the validation\n scores.\n It has as many rows as the number of datasets to test, i.e. as the number of elements in `dataset_list`.\n datasets_best_hyperparameter_value: list\n List which has as many elements as the number of the datasets (i.e. as the number of elements in `dataset_list`). For\n each dataset, it contains the best `hyperparameter` value on that dataset.\n best_index: int\n Index of `dataset_list` that indicates which is the best dataset.\n test_score: float\n Test score associated with the best couple dataset-hyperparameter value.\n axes: list\n List of the matplotlib Axes where the plots have been made.\n Firstly, the 'secondary' plots are put (if any). And, as last, the 'main' plot is put (if any).\n If no plot has been made, `axes` is an empty list.\n\n See also\n ----------\n hyperparameter_validation:\n select the best value for the specified hyperparameter of the specified model on the given dataset.\n\n Notes\n ----------\n - If `regr` is True, the validation scores are errors (MSE, i.e. Mean Squared Errors): this means that the best\n couple dataset-hyperparameter value is the one associated with the minimum validation score.\n Otherwise, the validation scores are accuracies: this means that the best couple is the one associated with the\n maximum validation score.\n - If `time_series` is False, the training-test splitting of each dataset is made randomly. In addition, the cross\n validation strategy performed is the classic k-fold cross validation: the number of folds is specified by `n_folds`.\n Otherwise, if `time_series` is True, the training-test sets are simply obtained by splitting each dataset into two\n contiguous parts. In addition, the cross validation strategy performed is the sklearn TimeSeriesSplit.\n " datasets_train_val_score = [] datasets_best_hyperparameter_value = [] datasets_test_score = [] axes = [] for (i, dataset) in enumerate(dataset_list): (X, y) = dataset (train_val_scores, best_index, test_score, ax) = hyperparameter_validation(X, y, model, hyperparameter, hyperparameter_values, scale=scale, test_size=test_size, time_series=time_series, random_state=random_state, n_folds=n_folds, regr=regr, plot=verbose, plot_train=plot_train, xvalues=hyperparameter_values, xlabel=hyperparameter, title=(('Dataset ' + str(i)) + ' : hyperparameter validation'), figsize=figsize_verbose) datasets_train_val_score.append(tuple(train_val_scores[best_index, :])) datasets_best_hyperparameter_value.append(hyperparameter_values[best_index]) datasets_test_score.append(test_score) if ax: axes.append(ax) datasets_train_val_score = np.array(datasets_train_val_score) if regr: best_index = np.argmin(datasets_train_val_score, axis=0)[1] else: best_index = np.argmax(datasets_train_val_score, axis=0)[1] test_score = datasets_test_score[best_index] if plot: if (not xvalues): xvalues = range(len(dataset_list)) ax = _plot_TrainVal_values(xvalues, datasets_train_val_score, plot_train, xlabel, title, figsize, bar=True) axes.append(ax) return (datasets_train_val_score, datasets_best_hyperparameter_value, best_index, test_score, axes)
-8,298,506,111,670,680,000
Select the best dataset and the best value for the specified hyperparameter of the specified model (i.e. select the best dataset-hyperparameter value couple). For each dataset in `dataset_list`, all the specified `hyperparameter_values` are tested for the specified `hyperparameter` of `model`. In other words, the tuning of `hyperparameter` is performed on each dataset: in fact, the function `hyperparameter_validation` is applied on each dataset. (See `hyperparameter_validation`). In the end, the best dataset-hyperparameter value couple is selected. Although a dataset-hyperparameter value couple is selected, the main viewpoint is the datasets: it's a validation focused on the datasets. First, the hyperparameter tuning is performed on each dataset: in this way the best value is selected and its score is associated with the dataset (i.e. it becomes the score of the dataset). Then, the best dataset is selected. It's a two-level selection. This selection is made using the validation score (i.e. the best dataset-hyperparameter value couple is the one with the best validation score). The validation score is computed by splitting each dataset into training and test sets and then applying cross validation on the training set. Additionally, the training and test scores are also computed. Optionally, the validation scores of the datasets can be plotted, giving a graphical visualization of the dataset selection. This is the 'main' plot. Moreover, still optionally, the 'secondary' plots can be made: for each dataset, the validation scores of the `hyperparameter_values` are plotted, giving a graphical visualization of the hyperparameter tuning on that dataset. (Like the plot made by the `hyperparameter_validation` function). Parameters ---------- dataset_list: list List of couples, where each couple is a dataset. - The first element is X, the two-dimensional np.array containing the explanatory features of the dataset. - The second element is y, the one-dimensional np.array containing the response feature of the dataset. model: sklearn.base.BaseEstimator Model which has the specified `hyperparameter`. hyperparameter: str The name of the hyperparameter that has to be validated. hyperparameter_values: list List of values for `hyperparameter` that have to be taken into account in the selection. scale: bool Indicates whether or not to scale the features in 'X' (for all the datasets). (The scaling is performed using the sklearn MinMaxScaler). test_size: float Decimal number between 0 and 1, which indicates the proportion of the test set (for each dataset). time_series: bool Indicates if the given datasets are time series datasets (i.e. datasets indexed by days). (This affects the computing of the validation scores). random_state: int Used in the training-test splitting of the datasets. n_folds: int Indicates how many folds are made in order to compute the k-fold cross validation. (It's used only if `time_series` is False). regr: bool Indicates whether it's a regression or a classification problem. plot: bool Indicates whether or not to plot the validation score values of the datasets (i.e. this is the 'main' plot). plot_train: bool Indicates whether to also plot the training scores (both in the 'main' and 'secondary' plots). xvalues: list (in general, iterable) Values that have to be put on the x axis of the 'main' plot. 
xlabel: str Label of the x axis of the 'main' plot. title: str Title of the 'main' plot. figsize: tuple Two dimensions of the 'main' plot. verbose: bool If True, the validation scores of the hyperparameter tuning are plotted for each dataset (these are the 'secondary' plots). (See 'hyperparameter_validation'). figsize_verbose: tuple Two dimensions of the 'secondary' plots. Returns ---------- datasets_train_val_score: np.array Two-dimensional np.array with two columns: the first contains the training scores, the second the validation scores. It has as many rows as the number of datasets to test, i.e. as the number of elements in `dataset_list`. datasets_best_hyperparameter_value: list List which has as many elements as the number of datasets (i.e. as the number of elements in `dataset_list`). For each dataset, it contains the best `hyperparameter` value on that dataset. best_index: int Index of `dataset_list` that indicates which is the best dataset. test_score: float Test score associated with the best dataset-hyperparameter value couple. axes: list List of the matplotlib Axes where the plots have been made. The 'secondary' plots come first (if any), followed by the 'main' plot (if any). If no plot has been made, `axes` is an empty list. See also ---------- hyperparameter_validation: select the best value for the specified hyperparameter of the specified model on the given dataset. Notes ---------- - If `regr` is True, the validation scores are errors (MSE, i.e. Mean Squared Errors): this means that the best dataset-hyperparameter value couple is the one associated with the minimum validation score. Otherwise, the validation scores are accuracies: this means that the best couple is the one associated with the maximum validation score. - If `time_series` is False, the training-test splitting of each dataset is made randomly. In addition, the cross validation strategy performed is the classic k-fold cross validation: the number of folds is specified by `n_folds`. Otherwise, if `time_series` is True, the training and test sets are simply obtained by splitting each dataset into two contiguous parts. In addition, the cross validation strategy performed is the sklearn TimeSeriesSplit.
model_selection.py
datasets_hyperparameter_validation
EnricoPittini/model-selection
python
def datasets_hyperparameter_validation(dataset_list, model, hyperparameter, hyperparameter_values, scale=False, test_size=0.2, time_series=False, random_state=123, n_folds=5, regr=True, plot=False, plot_train=False, xvalues=None, xlabel='Datasets', title='Datasets validation', figsize=(6, 6), verbose=False, figsize_verbose=(6, 6)): " " datasets_train_val_score = [] datasets_best_hyperparameter_value = [] datasets_test_score = [] axes = [] for (i, dataset) in enumerate(dataset_list): (X, y) = dataset (train_val_scores, best_index, test_score, ax) = hyperparameter_validation(X, y, model, hyperparameter, hyperparameter_values, scale=scale, test_size=test_size, time_series=time_series, random_state=random_state, n_folds=n_folds, regr=regr, plot=verbose, plot_train=plot_train, xvalues=hyperparameter_values, xlabel=hyperparameter, title=(('Dataset ' + str(i)) + ' : hyperparameter validation'), figsize=figsize_verbose) datasets_train_val_score.append(tuple(train_val_scores[best_index, :])) datasets_best_hyperparameter_value.append(hyperparameter_values[best_index]) datasets_test_score.append(test_score) if ax: axes.append(ax) datasets_train_val_score = np.array(datasets_train_val_score) if regr: best_index = np.argmin(datasets_train_val_score, axis=0)[1] else: best_index = np.argmax(datasets_train_val_score, axis=0)[1] test_score = datasets_test_score[best_index] if plot: if (not xvalues): xvalues = range(len(dataset_list)) ax = _plot_TrainVal_values(xvalues, datasets_train_val_score, plot_train, xlabel, title, figsize, bar=True) axes.append(ax) return (datasets_train_val_score, datasets_best_hyperparameter_value, best_index, test_score, axes)
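A minimal usage sketch of `datasets_hyperparameter_validation`, assuming `model_selection.py` is importable and scikit-learn is installed; the synthetic datasets and the Ridge `alpha` values are purely illustrative, not part of the original record:

import numpy as np
from sklearn.linear_model import Ridge
from model_selection import datasets_hyperparameter_validation  # assumed import path

rng = np.random.default_rng(0)
X = rng.normal(size=(200, 6))
y = X[:, 0] - 2 * X[:, 1] + rng.normal(scale=0.1, size=200)
# Two candidate datasets: the full feature set and a reduced one.
dataset_list = [(X, y), (X[:, :3], y)]

scores, best_values, best_ds, test_score, axes = datasets_hyperparameter_validation(
    dataset_list, Ridge(), 'alpha', [0.01, 0.1, 1.0, 10.0], regr=True)
print(best_ds, best_values[best_ds], test_score)  # best dataset index, its best alpha, its test MSE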
def datasets_hyperparameters_validation(dataset_list, model, param_grid, scale=False, test_size=0.2, time_series=False, random_state=123, n_folds=5, regr=True, plot=False, plot_train=False, xvalues=None, xlabel='Datasets', title='Datasets validation', figsize=(6, 6)): "\n Select the best dataset and the best combination of values for the specified hyperparameters of the specified model (i.e.\n select the best couple dataset-combination of hyperparameters values).\n\n For each dataset in `dataset_list`, all the possible combinations of the hyperparameters values for `model` (specified\n with `param_grid`) are tested.\n In other words, on each dataset the tuning of the specified hyperparameters is performed in an exhaustive way: in fact,\n on each dataset, the function `hyperparameters_validation` is applied. (See `hyperparameters_validation`).\n In the end, the best couple dataset-combination of hyperparameters values is selected.\n\n Despite the fact that a couple dataset-combination of hyperparameters values is selected, the main viewpoint is focused\n with respect to the datasets. It's a validation focused on the datasets.\n In fact, first of all, for each dataset the hyperparameters tuning is performed: in this way the best combination of\n values is selected and its relative score is associated with the dataset (i.e. it's the score of the dataset). (In other\n words, on each dataset the function `hyperparameters_validation` is applied). Finally, after that, the best dataset is\n selected. It's a two-levels selection.\n\n This selection is made using the validation score (i.e. the best couple dataset-combination of hyperparameters values, is\n the one with best validation score).\n The validation score is computed by splitting each dataset into the training-test sets and then by applying the cross\n validation on the training set.\n Additionally, the training and test scores are also computed.\n\n Optionally, the validation scores of the datasets can be plotted, making a graphical visualization of the dataset\n selection.\n\n Parameters\n ----------\n dataset_list: list\n List of couple, where each couple is a dataset.\n - The first element is X, the two-dimensional np.array containing the explanatory features of the dataset.\n - The second element is y, the mono dimensional np.array containing the response feature of the dataset.\n model: sklearn.base.BaseEstimator\n Model which has the specified hyperparameters.\n param_grid: dict\n Dictionary which has as keys the names of the specified hyperparameters and as values the associated list of\n values to test.\n scale: bool\n Indicates whether to scale or not the features in 'X' (for all the datasets).\n (The scaling is performed using the sklearn MinMaxScaler).\n test_size: float\n Decimal number between 0 and 1, which indicates the proportion of the test set (for each dataset).\n time_series: bool\n Indicates if the given datasets are time series datasets (i.e. 
datasets indexed by days).\n (This affects the computing of the validation score).\n random_state: int\n Used in the training-test splitting of the datasets.\n n_folds: int\n Indicates how many folds are made in order to compute the k-fold cross validation.\n (It's used only if `time_series` is False).\n regr: bool\n Indicates if it's either a regression or a classification problem.\n plot: bool\n Indicates whether to plot or not the validation score values of the datasets.\n plot_train: bool\n Indicates whether to plot also the training scores.\n (It's considered only if `plot` is True).\n xvalues: list (in general, iterable)\n Values that have to be put in the x axis of the plot.\n xlabel: str\n Label of the x axis of the plot.\n title: str\n Title of the plot.\n figsize: tuple\n Two dimensions of the plot.\n\n Returns\n ----------\n datasets_train_val_score: np.array\n Two dimensional np.array, containing two columns: the first contains the training scores, the second the validation\n scores.\n It has as many rows as the number of datasets to test, i.e. as the number of elements in `dataset_list`.\n datasets_best_params: list\n List which has as many elements as the number of the datasets (i.e. as the number of elements in `dataset_list`). For\n each dataset, it contains the best combination of hyperparameters values on that dataset.\n Each combination is represented as a dictionary, with keys the hyperparameters names and values the associated\n values.\n best_index: int\n Index of `dataset_list` that indicates which is the best dataset.\n test_score: float\n Test score associated with the best couple dataset-combination of hyperparameters values.\n ax: matplotlib.axes.Axes\n The matplotlib Axes where the plot has been made.\n\n See also\n ----------\n hyperparameters_validation:\n select the best combination of values for the specified hyperparameters of the specified model on the given dataset.\n\n Notes\n ----------\n - If `regr` is True, the validation scores are errors (MSE, i.e. Mean Squared Errors): this means that the best\n couple dataset-combination of hyperparameters values is the one associated with the minimum validation score.\n Otherwise, the validation scores are accuracies: this means that the best couple is the one associated with the\n maximum validation score.\n - If `time_series` is False, the training-test splitting of each dataset is made randomly. In addition, the cross\n validation strategy performed is the classic k-fold cross validation: the number of folds is specified by `n_folds`.\n Otherwise, if `time_series` is True, the training-test sets are simply obtained by splitting each dataset into two\n contiguous parts. 
In addition, the cross validation strategy performed is the sklearn TimeSeriesSplit.\n " datasets_train_val_score = [] datasets_best_params = [] datasets_test_score = [] for (X, y) in dataset_list: (params, train_val_scores, best_index, test_score) = hyperparameters_validation(X, y, model, param_grid, scale=scale, test_size=test_size, time_series=time_series, random_state=random_state, n_folds=n_folds, regr=regr) datasets_train_val_score.append(tuple(train_val_scores[best_index, :])) datasets_best_params.append(params[best_index]) datasets_test_score.append(test_score) datasets_train_val_score = np.array(datasets_train_val_score) if regr: best_index = np.argmin(datasets_train_val_score, axis=0)[1] else: best_index = np.argmax(datasets_train_val_score, axis=0)[1] test_score = datasets_test_score[best_index] ax = None if plot: if (not xvalues): xvalues = range(len(dataset_list)) ax = _plot_TrainVal_values(xvalues, datasets_train_val_score, plot_train, xlabel, title, figsize, bar=True) return (datasets_train_val_score, datasets_best_params, best_index, test_score, ax)
-182,712,746,719,899,140
Select the best dataset and the best combination of values for the specified hyperparameters of the specified model (i.e. select the best dataset-hyperparameters combination couple). For each dataset in `dataset_list`, all the possible combinations of the hyperparameters values for `model` (specified with `param_grid`) are tested. In other words, the tuning of the specified hyperparameters is performed exhaustively on each dataset: in fact, the function `hyperparameters_validation` is applied on each dataset. (See `hyperparameters_validation`). In the end, the best dataset-hyperparameters combination couple is selected. Although a dataset-hyperparameters combination couple is selected, the main viewpoint is the datasets: it's a validation focused on the datasets. First, the hyperparameters tuning is performed on each dataset: in this way the best combination of values is selected and its score is associated with the dataset (i.e. it becomes the score of the dataset). Then, the best dataset is selected. It's a two-level selection. This selection is made using the validation score (i.e. the best dataset-hyperparameters combination couple is the one with the best validation score). The validation score is computed by splitting each dataset into training and test sets and then applying cross validation on the training set. Additionally, the training and test scores are also computed. Optionally, the validation scores of the datasets can be plotted, giving a graphical visualization of the dataset selection. Parameters ---------- dataset_list: list List of couples, where each couple is a dataset. - The first element is X, the two-dimensional np.array containing the explanatory features of the dataset. - The second element is y, the one-dimensional np.array containing the response feature of the dataset. model: sklearn.base.BaseEstimator Model which has the specified hyperparameters. param_grid: dict Dictionary which has as keys the names of the specified hyperparameters and as values the associated lists of values to test. scale: bool Indicates whether or not to scale the features in 'X' (for all the datasets). (The scaling is performed using the sklearn MinMaxScaler). test_size: float Decimal number between 0 and 1, which indicates the proportion of the test set (for each dataset). time_series: bool Indicates if the given datasets are time series datasets (i.e. datasets indexed by days). (This affects the computing of the validation score). random_state: int Used in the training-test splitting of the datasets. n_folds: int Indicates how many folds are made in order to compute the k-fold cross validation. (It's used only if `time_series` is False). regr: bool Indicates whether it's a regression or a classification problem. plot: bool Indicates whether or not to plot the validation score values of the datasets. plot_train: bool Indicates whether to also plot the training scores. (It's considered only if `plot` is True). xvalues: list (in general, iterable) Values that have to be put on the x axis of the plot. xlabel: str Label of the x axis of the plot. title: str Title of the plot. figsize: tuple Two dimensions of the plot. 
Returns ---------- datasets_train_val_score: np.array Two-dimensional np.array with two columns: the first contains the training scores, the second the validation scores. It has as many rows as the number of datasets to test, i.e. as the number of elements in `dataset_list`. datasets_best_params: list List which has as many elements as the number of datasets (i.e. as the number of elements in `dataset_list`). For each dataset, it contains the best combination of hyperparameters values on that dataset. Each combination is represented as a dictionary, with the hyperparameters names as keys and the associated values as values. best_index: int Index of `dataset_list` that indicates which is the best dataset. test_score: float Test score associated with the best dataset-hyperparameters combination couple. ax: matplotlib.axes.Axes The matplotlib Axes where the plot has been made. See also ---------- hyperparameters_validation: select the best combination of values for the specified hyperparameters of the specified model on the given dataset. Notes ---------- - If `regr` is True, the validation scores are errors (MSE, i.e. Mean Squared Errors): this means that the best dataset-hyperparameters combination couple is the one associated with the minimum validation score. Otherwise, the validation scores are accuracies: this means that the best couple is the one associated with the maximum validation score. - If `time_series` is False, the training-test splitting of each dataset is made randomly. In addition, the cross validation strategy performed is the classic k-fold cross validation: the number of folds is specified by `n_folds`. Otherwise, if `time_series` is True, the training and test sets are simply obtained by splitting each dataset into two contiguous parts. In addition, the cross validation strategy performed is the sklearn TimeSeriesSplit.
model_selection.py
datasets_hyperparameters_validation
EnricoPittini/model-selection
python
def datasets_hyperparameters_validation(dataset_list, model, param_grid, scale=False, test_size=0.2, time_series=False, random_state=123, n_folds=5, regr=True, plot=False, plot_train=False, xvalues=None, xlabel='Datasets', title='Datasets validation', figsize=(6, 6)): " " datasets_train_val_score = [] datasets_best_params = [] datasets_test_score = [] for (X, y) in dataset_list: (params, train_val_scores, best_index, test_score) = hyperparameters_validation(X, y, model, param_grid, scale=scale, test_size=test_size, time_series=time_series, random_state=random_state, n_folds=n_folds, regr=regr) datasets_train_val_score.append(tuple(train_val_scores[best_index, :])) datasets_best_params.append(params[best_index]) datasets_test_score.append(test_score) datasets_train_val_score = np.array(datasets_train_val_score) if regr: best_index = np.argmin(datasets_train_val_score, axis=0)[1] else: best_index = np.argmax(datasets_train_val_score, axis=0)[1] test_score = datasets_test_score[best_index] ax = None if plot: if (not xvalues): xvalues = range(len(dataset_list)) ax = _plot_TrainVal_values(xvalues, datasets_train_val_score, plot_train, xlabel, title, figsize, bar=True) return (datasets_train_val_score, datasets_best_params, best_index, test_score, ax)
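A companion sketch for `datasets_hyperparameters_validation`, under the same assumptions (importable module, scikit-learn available); `dataset_list` is the illustrative list built in the previous sketch and the grid values are arbitrary:

from sklearn.linear_model import Ridge
from model_selection import datasets_hyperparameters_validation  # assumed import path

# Exhaustive grid over two hyperparameters, tuned independently on each dataset.
param_grid = {'alpha': [0.1, 1.0, 10.0], 'fit_intercept': [True, False]}
scores, best_params, best_ds, test_score, ax = datasets_hyperparameters_validation(
    dataset_list, Ridge(), param_grid, regr=True)
print(best_ds, best_params[best_ds])  # e.g. 0 {'alpha': 0.1, 'fit_intercept': True}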
def datasets_models_validation(dataset_list, model_paramGrid_list, scale_list=None, test_size=0.2, time_series=False, random_state=123, n_folds=5, regr=True, plot=False, plot_train=False, xvalues=None, xlabel='Datasets', title='Datasets validation', figsize=(6, 6), verbose=False, figsize_verbose=(6, 6)): "\n Select the best dataset and the best model (i.e. select the best couple dataset-model).\n\n For each dataset in `dataset_list`, all the models in `model_paramGrid_list` are tested: each model is tested performing\n an exhaustive tuning of the specified hyperparameters. In fact, `model_paramGrid_list` also contains, for each model, the\n grid of the hyperparameters that have to be tested on that model (i.e. the grid which contains the values to test for\n each specified hyperparameter of the model).\n In other words, on each dataset the selection of the best model is performed: in fact, on each dataset, the function\n `models_validation` is applied. (See `models_validation`).\n In the end, the best couple dataset-model is selected.\n\n Despite the fact that a couple dataset-model is selected, the main viewpoint is focused with respect to the datasets.\n It's a validation focused on the datasets.\n In fact, first of all, for each dataset the model selection is performed: in this way the best model is selected\n and its relative score is associated with the dataset (i.e. it's the score of the dataset). (In other words, on each\n dataset the function `models_validation` is applied). Finally, after that, the best dataset is selected.\n It's a two-levels selection.\n\n This selection is made using the validation score (i.e. the best couple dataset-model is the one with best validation\n score).\n The validation score is computed by splitting each dataset into the training-test sets and then by applying the cross\n validation on the training set.\n Additionally, the training and test scores are also computed.\n\n Optionally, the validation scores of the datasets can be plotted, making a graphical visualization of the dataset\n selection. This is the 'main' plot.\n Moreover, still optionally, the 'secondary' plots can be done: for each dataset, the validation scores of the models are\n plotted, making a graphical visualization of the models selection on that dataset. (As the plot made by the\n `models_validation` function).\n\n Parameters\n ----------\n dataset_list: list\n List of couples, where each couple is a dataset.\n - The first element is X, the two-dimensional np.array containing the explanatory features of the dataset.\n - The second element is y, the mono dimensional np.array containing the response feature of the dataset.\n model_paramGrid_list: list\n List that specifies the models and the relative grid of hyperparameters to be tested.\n It's a list of triples (i.e. tuples), where each triple represents a model:\n - the first element is a string, which is a mnemonic name of that model;\n - the second element is the sklearn model;\n - the third element is the grid of hyperparameters to test for that model. It's a dictionary, with the same\n structure of parameter `param_grid` of the function `hyperparameters_validation`.\n scale_list: list or bool\n List of booleans, which has as many elements as the number of models to test (i.e. 
number of elements in the\n `model_paramGrid_list` list).\n This list indicates, for each different model, if the features in 'X' have to be scaled or not (for all the datasets).\n `scale_list` can be None or False: in this case the 'X' features aren't scaled for any model. `scale_list` can be\n True: in this case the 'X' features are scaled for all the models.\n test_size: float\n Decimal number between 0 and 1, which indicates the proportion of the test set (for each dataset).\n time_series: bool\n Indicates if the given datasets are time series dataset (i.e. datasets indexed by days).\n (This affects the computing of the validation score).\n random_state: int\n Used in the training-test splitting of the datasets.\n n_folds: int\n Indicates how many folds are made in order to compute the k-fold cross validation.\n (It's used only if `time_series` is False).\n regr: bool\n Indicates if it's either a regression or a classification problem.\n plot: bool\n Indicates whether to plot or not the validation score values of the datasets (i.e. this is the 'main' plot).\n plot_train: bool\n Indicates whether to plot also the training scores (both in the 'main' and 'secondary' plots).\n xvalues: list (in general, iterable)\n Values that have to be put in the x axis of the 'main' plot.\n xlabel: str\n Label of the x axis of the 'main' plot.\n title: str\n Title of the 'main' plot.\n figsize: tuple\n Two dimensions of the 'main' plot.\n verbose: bool\n If True, for each dataset the validation scores of the models are plotted (i.e. these are the 'secondary' plots).\n (See 'models_validation').\n figsize_verbose: tuple\n Two dimensions of the 'secondary' plots.\n\n Returns\n ----------\n datasets_train_val_score: np.array\n Two dimensional np.array, containing two columns: the first contains the training scores, the second the validation\n scores.\n It has as many rows as the number of datasets to test, i.e. as the number of elements in `dataset_list`.\n datasets_best_model: list\n List which has as many elements as the number of the datasets (i.e. number of elements in `dataset_list`). For\n each dataset, it contains the best model for that dataset.\n More precisely, it is a list of triple:\n - the first element is the index of `model_paramGrid_list` which indicates the best model;\n - the second element is the mnemonic name of the best model;\n - the third element is the best combination of hyperparameters values on that best model (i.e. it's a dictionary\n which has as keys the hyperparameters names and as values their associated values).\n best_index: int\n Index of `dataset_list` that indicates which is the best dataset.\n test_score: float\n Test score associated with the best couple dataset-model.\n axes: list\n List of the matplotlib Axes where the plots have been made.\n Firstly, the 'secondary' plots are put (if any). And, as last, the 'main' plot is put (if any).\n If no plot has been made, `axes` is an empty list.\n\n See also\n ----------\n models_validation: select the best model on the given dataset.\n\n Notes\n ----------\n - If `regr` is True, the validation scores are errors (MSE, i.e. Mean Squared Errors): this means that the best\n couple dataset-model is the one associated with the minimum validation score.\n Otherwise, the validation scores are accuracies: this means that the best couple is the one associated with the\n maximum validation score.\n - If `time_series` is False, the training-test splitting of each dataset is made randomly. 
In addition, the cross\n validation strategy performed is the classic k-fold cross validation: the number of folds is specified by `n_folds`.\n Otherwise, if `time_series` is True, the training-test sets are simply obtained by splitting each dataset into two\n contiguous parts. In addition, the cross validation strategy performed is the sklearn TimeSeriesSplit.\n " datasets_train_val_score = [] datasets_best_model = [] datasets_test_score = [] axes = [] for (i, dataset) in enumerate(dataset_list): (X, y) = dataset (models_train_val_score, models_best_params, best_index, test_score, ax) = models_validation(X, y, model_paramGrid_list, scale_list=scale_list, test_size=test_size, time_series=time_series, random_state=random_state, n_folds=n_folds, regr=regr, plot=verbose, plot_train=plot_train, xlabel='Models', title=(('Dataset ' + str(i)) + ' : models validation'), figsize=figsize_verbose) datasets_train_val_score.append(tuple(models_train_val_score[best_index, :])) datasets_best_model.append((best_index, model_paramGrid_list[best_index][0], models_best_params[best_index])) datasets_test_score.append(test_score) if ax: axes.append(ax) datasets_train_val_score = np.array(datasets_train_val_score) if regr: best_index = np.argmin(datasets_train_val_score, axis=0)[1] else: best_index = np.argmax(datasets_train_val_score, axis=0)[1] test_score = datasets_test_score[best_index] if plot: if (not xvalues): xvalues = range(len(dataset_list)) ax = _plot_TrainVal_values(xvalues, datasets_train_val_score, plot_train, xlabel, title, figsize, bar=True) axes.append(ax) return (datasets_train_val_score, datasets_best_model, best_index, test_score, axes)
2,050,898,115,793,827,300
Select the best dataset and the best model (i.e. select the best dataset-model couple). For each dataset in `dataset_list`, all the models in `model_paramGrid_list` are tested: each model is tested by performing an exhaustive tuning of the specified hyperparameters. In fact, `model_paramGrid_list` also contains, for each model, the grid of hyperparameters that have to be tested on that model (i.e. the grid which contains the values to test for each specified hyperparameter of the model). In other words, the selection of the best model is performed on each dataset: in fact, the function `models_validation` is applied on each dataset. (See `models_validation`). In the end, the best dataset-model couple is selected. Although a dataset-model couple is selected, the main viewpoint is the datasets: it's a validation focused on the datasets. First, the model selection is performed on each dataset: in this way the best model is selected and its score is associated with the dataset (i.e. it becomes the score of the dataset). Then, the best dataset is selected. It's a two-level selection. This selection is made using the validation score (i.e. the best dataset-model couple is the one with the best validation score). The validation score is computed by splitting each dataset into training and test sets and then applying cross validation on the training set. Additionally, the training and test scores are also computed. Optionally, the validation scores of the datasets can be plotted, giving a graphical visualization of the dataset selection. This is the 'main' plot. Moreover, still optionally, the 'secondary' plots can be made: for each dataset, the validation scores of the models are plotted, giving a graphical visualization of the model selection on that dataset. (Like the plot made by the `models_validation` function). Parameters ---------- dataset_list: list List of couples, where each couple is a dataset. - The first element is X, the two-dimensional np.array containing the explanatory features of the dataset. - The second element is y, the one-dimensional np.array containing the response feature of the dataset. model_paramGrid_list: list List that specifies the models and the relative grids of hyperparameters to be tested. It's a list of triples (i.e. tuples), where each triple represents a model: - the first element is a string, which is a mnemonic name of that model; - the second element is the sklearn model; - the third element is the grid of hyperparameters to test for that model. It's a dictionary, with the same structure as the parameter `param_grid` of the function `hyperparameters_validation`. scale_list: list or bool List of booleans, which has as many elements as the number of models to test (i.e. the number of elements in the `model_paramGrid_list` list). This list indicates, for each different model, if the features in 'X' have to be scaled or not (for all the datasets). `scale_list` can be None or False: in this case the 'X' features aren't scaled for any model. `scale_list` can be True: in this case the 'X' features are scaled for all the models. test_size: float Decimal number between 0 and 1, which indicates the proportion of the test set (for each dataset). time_series: bool Indicates if the given datasets are time series datasets (i.e. datasets indexed by days). (This affects the computing of the validation score). 
random_state: int Used in the training-test splitting of the datasets. n_folds: int Indicates how many folds are made in order to compute the k-fold cross validation. (It's used only if `time_series` is False). regr: bool Indicates whether it's a regression or a classification problem. plot: bool Indicates whether or not to plot the validation score values of the datasets (i.e. this is the 'main' plot). plot_train: bool Indicates whether to also plot the training scores (both in the 'main' and 'secondary' plots). xvalues: list (in general, iterable) Values that have to be put on the x axis of the 'main' plot. xlabel: str Label of the x axis of the 'main' plot. title: str Title of the 'main' plot. figsize: tuple Two dimensions of the 'main' plot. verbose: bool If True, the validation scores of the models are plotted for each dataset (i.e. these are the 'secondary' plots). (See 'models_validation'). figsize_verbose: tuple Two dimensions of the 'secondary' plots. Returns ---------- datasets_train_val_score: np.array Two-dimensional np.array with two columns: the first contains the training scores, the second the validation scores. It has as many rows as the number of datasets to test, i.e. as the number of elements in `dataset_list`. datasets_best_model: list List which has as many elements as the number of datasets (i.e. the number of elements in `dataset_list`). For each dataset, it contains the best model for that dataset. More precisely, it is a list of triples: - the first element is the index of `model_paramGrid_list` which indicates the best model; - the second element is the mnemonic name of the best model; - the third element is the best combination of hyperparameters values on that best model (i.e. it's a dictionary which has the hyperparameters names as keys and their associated values as values). best_index: int Index of `dataset_list` that indicates which is the best dataset. test_score: float Test score associated with the best dataset-model couple. axes: list List of the matplotlib Axes where the plots have been made. The 'secondary' plots come first (if any), followed by the 'main' plot (if any). If no plot has been made, `axes` is an empty list. See also ---------- models_validation: select the best model on the given dataset. Notes ---------- - If `regr` is True, the validation scores are errors (MSE, i.e. Mean Squared Errors): this means that the best dataset-model couple is the one associated with the minimum validation score. Otherwise, the validation scores are accuracies: this means that the best couple is the one associated with the maximum validation score. - If `time_series` is False, the training-test splitting of each dataset is made randomly. In addition, the cross validation strategy performed is the classic k-fold cross validation: the number of folds is specified by `n_folds`. Otherwise, if `time_series` is True, the training and test sets are simply obtained by splitting each dataset into two contiguous parts. In addition, the cross validation strategy performed is the sklearn TimeSeriesSplit.
model_selection.py
datasets_models_validation
EnricoPittini/model-selection
python
def datasets_models_validation(dataset_list, model_paramGrid_list, scale_list=None, test_size=0.2, time_series=False, random_state=123, n_folds=5, regr=True, plot=False, plot_train=False, xvalues=None, xlabel='Datasets', title='Datasets validation', figsize=(6, 6), verbose=False, figsize_verbose=(6, 6)): " " datasets_train_val_score = [] datasets_best_model = [] datasets_test_score = [] axes = [] for (i, dataset) in enumerate(dataset_list): (X, y) = dataset (models_train_val_score, models_best_params, best_index, test_score, ax) = models_validation(X, y, model_paramGrid_list, scale_list=scale_list, test_size=test_size, time_series=time_series, random_state=random_state, n_folds=n_folds, regr=regr, plot=verbose, plot_train=plot_train, xlabel='Models', title=(('Dataset ' + str(i)) + ' : models validation'), figsize=figsize_verbose) datasets_train_val_score.append(tuple(models_train_val_score[best_index, :])) datasets_best_model.append((best_index, model_paramGrid_list[best_index][0], models_best_params[best_index])) datasets_test_score.append(test_score) if ax: axes.append(ax) datasets_train_val_score = np.array(datasets_train_val_score) if regr: best_index = np.argmin(datasets_train_val_score, axis=0)[1] else: best_index = np.argmax(datasets_train_val_score, axis=0)[1] test_score = datasets_test_score[best_index] if plot: if (not xvalues): xvalues = range(len(dataset_list)) ax = _plot_TrainVal_values(xvalues, datasets_train_val_score, plot_train, xlabel, title, figsize, bar=True) axes.append(ax) return (datasets_train_val_score, datasets_best_model, best_index, test_score, axes)
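A sketch for `datasets_models_validation`, again with illustrative models and grids (not taken from the original record); note the (name, estimator, grid) triples and the per-model `scale_list`:

from sklearn.linear_model import Ridge
from sklearn.tree import DecisionTreeRegressor
from model_selection import datasets_models_validation  # assumed import path

# One entry per candidate model: mnemonic name, sklearn estimator, hyperparameter grid.
model_paramGrid_list = [
    ('ridge', Ridge(), {'alpha': [0.1, 1.0, 10.0]}),
    ('tree', DecisionTreeRegressor(random_state=0), {'max_depth': [2, 4, 8]}),
]
scores, best_models, best_ds, test_score, axes = datasets_models_validation(
    dataset_list, model_paramGrid_list, scale_list=[True, False], regr=True)
print(best_ds, best_models[best_ds])  # best dataset and its (index, name, params) triple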
def append(self, other): ' append the recarrays from one MfList to another\n Parameters\n ----------\n other: variable: an item that can be cast in to an MfList\n that corresponds with self\n Returns\n -------\n dict of {kper:recarray}\n ' if (not isinstance(other, MfList)): other = MfList(self.package, data=other, dtype=self.dtype, model=self._model, list_free_format=self.list_free_format) msg = ('MfList.append(): other arg must be ' + 'MfList or dict, not {0}'.format(type(other))) assert isinstance(other, MfList), msg other_kpers = list(other.data.keys()) other_kpers.sort() self_kpers = list(self.data.keys()) self_kpers.sort() new_dict = {} for kper in range(self._model.nper): other_data = other[kper].copy() self_data = self[kper].copy() other_len = other_data.shape[0] self_len = self_data.shape[0] if (((other_len == 0) and (self_len == 0)) or ((kper not in self_kpers) and (kper not in other_kpers))): continue elif (self_len == 0): new_dict[kper] = other_data elif (other_len == 0): new_dict[kper] = self_data else: new_len = (other_data.shape[0] + self_data.shape[0]) new_data = np.recarray(new_len, dtype=self.dtype) new_data[:self_len] = self_data new_data[self_len:(self_len + other_len)] = other_data new_dict[kper] = new_data return new_dict
3,458,584,039,420,723,000
append the recarrays from one MfList to another Parameters ---------- other: variable: an item that can be cast in to an MfList that corresponds with self Returns ------- dict of {kper:recarray}
flopy/utils/util_list.py
append
aleaf/flopy
python
def append(self, other): ' append the recarrays from one MfList to another\n Parameters\n ----------\n other: variable: an item that can be cast in to an MfList\n that corresponds with self\n Returns\n -------\n dict of {kper:recarray}\n ' if (not isinstance(other, MfList)): other = MfList(self.package, data=other, dtype=self.dtype, model=self._model, list_free_format=self.list_free_format) msg = ('MfList.append(): other arg must be ' + 'MfList or dict, not {0}'.format(type(other))) assert isinstance(other, MfList), msg other_kpers = list(other.data.keys()) other_kpers.sort() self_kpers = list(self.data.keys()) self_kpers.sort() new_dict = {} for kper in range(self._model.nper): other_data = other[kper].copy() self_data = self[kper].copy() other_len = other_data.shape[0] self_len = self_data.shape[0] if (((other_len == 0) and (self_len == 0)) or ((kper not in self_kpers) and (kper not in other_kpers))): continue elif (self_len == 0): new_dict[kper] = other_data elif (other_len == 0): new_dict[kper] = self_data else: new_len = (other_data.shape[0] + self_data.shape[0]) new_data = np.recarray(new_len, dtype=self.dtype) new_data[:self_len] = self_data new_data[self_len:(self_len + other_len)] = other_data new_dict[kper] = new_data return new_dict
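A hedged sketch of how MfList.append is typically reached through a flopy package follows; the MODFLOW model setup and the WEL cell data are assumptions made purely for illustration.

# Illustrative sketch only; the model and WEL data are assumed for demonstration.
import flopy

m = flopy.modflow.Modflow()
dis = flopy.modflow.ModflowDis(m, nlay=1, nrow=10, ncol=10, nper=1)
# WEL stress-period data: one [layer, row, col, flux] entry per cell
wel = flopy.modflow.ModflowWel(m, stress_period_data={0: [[0, 4, 4, -100.0]]})

# append() casts its argument to an MfList when needed and merges the two
# lists per stress period, returning a plain {kper: recarray} dict
merged = wel.stress_period_data.append({0: [[0, 5, 5, -50.0]]})
print(len(merged[0]))  # 2 rows in stress period 0

Note that, per the Returns section of the docstring, append yields a dict of {kper: recarray} rather than a new MfList, so the result is typically fed back into a package constructor.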
def drop(self, fields): 'drop fields from an MfList\n\n Parameters\n ----------\n fields : list or set of field names to drop\n\n Returns\n -------\n dropped : MfList without the dropped fields\n ' if (not isinstance(fields, list)): fields = [fields] names = [n for n in self.dtype.names if (n not in fields)] dtype = np.dtype([(k, d) for (k, d) in self.dtype.descr if (k not in fields)]) spd = {} for (k, v) in self.data.items(): newarr = np.array([self.data[k][n] for n in names]).transpose() newarr = np.array(list(map(tuple, newarr)), dtype=dtype).view(np.recarray) for n in dtype.names: newarr[n] = self.data[k][n] spd[k] = newarr return MfList(self.package, spd, dtype=dtype)
1,912,237,700,778,697,200
drop fields from an MfList Parameters ---------- fields : list or set of field names to drop Returns ------- dropped : MfList without the dropped fields
flopy/utils/util_list.py
drop
aleaf/flopy
python
def drop(self, fields): 'drop fields from an MfList\n\n Parameters\n ----------\n fields : list or set of field names to drop\n\n Returns\n -------\n dropped : MfList without the dropped fields\n ' if (not isinstance(fields, list)): fields = [fields] names = [n for n in self.dtype.names if (n not in fields)] dtype = np.dtype([(k, d) for (k, d) in self.dtype.descr if (k not in fields)]) spd = {} for (k, v) in self.data.items(): newarr = np.array([self.data[k][n] for n in names]).transpose() newarr = np.array(list(map(tuple, newarr)), dtype=dtype).view(np.recarray) for n in dtype.names: newarr[n] = self.data[k][n] spd[k] = newarr return MfList(self.package, spd, dtype=dtype)
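Continuing the same hedged sketch, drop removes named columns from the stress-period data; the field name 'flux' is the conventional WEL column name and is assumed here.

# Continuing the sketch above; 'flux' is the conventional WEL field name (assumed).
slim = wel.stress_period_data.drop('flux')  # a bare string is wrapped in a list
print(slim.dtype.names)  # ('k', 'i', 'j') -- the flux column is gone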
@property def fmt_string(self): 'Returns a C-style fmt string for numpy savetxt that corresponds to\n the dtype' if (self.list_free_format is not None): use_free = self.list_free_format else: use_free = True if self.package.parent.has_package('bas6'): use_free = self.package.parent.bas6.ifrefm if ('mt3d' in self.package.parent.version.lower()): use_free = False fmts = [] for field in self.dtype.descr: vtype = field[1][1].lower() if (vtype in ('i', 'b')): if use_free: fmts.append('%9d') else: fmts.append('%10d') elif (vtype == 'f'): if use_free: if numpy114: fmts.append('%15s') else: fmts.append('%15.7E') else: fmts.append('%10G') elif (vtype == 'o'): if use_free: fmts.append('%9s') else: fmts.append('%10s') elif (vtype == 's'): msg = "MfList.fmt_string error: 'str' type found in dtype. This gives unpredictable results when recarray to file - change to 'object' type" raise TypeError(msg) else: raise TypeError('MfList.fmt_string error: unknown vtype in field: {}'.format(field)) if use_free: fmt_string = (' ' + ' '.join(fmts)) else: fmt_string = ''.join(fmts) return fmt_string
8,351,358,389,882,369,000
Returns a C-style fmt string for numpy savetxt that corresponds to the dtype
flopy/utils/util_list.py
fmt_string
aleaf/flopy
python
@property def fmt_string(self): 'Returns a C-style fmt string for numpy savetxt that corresponds to\n the dtype' if (self.list_free_format is not None): use_free = self.list_free_format else: use_free = True if self.package.parent.has_package('bas6'): use_free = self.package.parent.bas6.ifrefm if ('mt3d' in self.package.parent.version.lower()): use_free = False fmts = [] for field in self.dtype.descr: vtype = field[1][1].lower() if (vtype in ('i', 'b')): if use_free: fmts.append('%9d') else: fmts.append('%10d') elif (vtype == 'f'): if use_free: if numpy114: fmts.append('%15s') else: fmts.append('%15.7E') else: fmts.append('%10G') elif (vtype == 'o'): if use_free: fmts.append('%9s') else: fmts.append('%10s') elif (vtype == 's'): msg = "MfList.fmt_string error: 'str' type found in dtype. This gives unpredictable results when recarray to file - change to 'object' type" raise TypeError(msg) else: raise TypeError('MfList.fmt_string error: unknown vtype in field: {}'.format(field)) if use_free: fmt_string = (' ' + ' '.join(fmts)) else: fmt_string = ''.join(fmts) return fmt_string
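The property emits one printf-style conversion per dtype field so that a whole recarray can be written with a single numpy savetxt call; below is a standalone, hedged illustration of the same idea outside flopy, with field names and widths chosen only for the example.

# Standalone illustration of the fmt-string idea; field names and widths assumed.
import sys
import numpy as np

rec = np.recarray(2, dtype=[('k', int), ('i', int), ('flux', float)])
rec['k'], rec['i'], rec['flux'] = [1, 1], [2, 3], [-100.0, -50.0]

# one conversion per field: integers get %9d, floats %15.7E (free format)
fmts = ['%9d' if d[1][1] in 'ib' else '%15.7E' for d in rec.dtype.descr]
np.savetxt(sys.stdout, rec, fmt=' ' + ' '.join(fmts))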