Dataset columns (name, type, min-max string length):

    code              string, 75 - 104k
    code_sememe       string, 47 - 309k
    token_type        string, 215 - 214k
    code_dependency   string, 75 - 155k
def sccs_bit_sync(y, Ns):
    """
    rx_symb_d,clk,track = sccs_bit_sync(y,Ns)

    //////////////////////////////////////////////////////
    Symbol synchronization algorithm using SCCS
    //////////////////////////////////////////////////////

    y = baseband NRZ data waveform
    Ns = nominal number of samples per symbol

    Reworked from ECE 5675 Project
    Translated from m-code version
    Mark Wickert April 2014
    """
    # decimated symbol sequence for SEP
    rx_symb_d = np.zeros(int(np.fix(len(y) / Ns)))
    track = np.zeros(int(np.fix(len(y) / Ns)))
    bit_count = -1
    y_abs = np.zeros(len(y))
    clk = np.zeros(len(y))
    k = Ns + 1  # initial 1-of-Ns symbol synch clock phase
    # Sample-by-sample processing required
    for i in range(len(y)):
        # y_abs(i) = abs(round(real(y(i))))
        if i >= Ns:  # do not process first Ns samples
            # Collect timing decision unit (TDU) samples
            y_abs[i] = np.abs(np.sum(y[i - Ns + 1:i + 1]))
            # Update sampling instant and take a sample.
            # For causality reasons the early sample is 'i',
            # the on-time or prompt sample is 'i-1', and
            # the late sample is 'i-2'.
            if k == 0:
                # Load the samples into the 3x1 TDU register w_hat:
                # w_hat[0] = late, w_hat[1] = on-time, w_hat[2] = early.
                w_hat = y_abs[i - 2:i + 1]
                bit_count += 1
                if w_hat[1] != 0:  # on-time sample is nonzero
                    if w_hat[0] < w_hat[2]:
                        k = Ns - 1  # next decision one sample earlier
                        clk[i - 2] = 1
                        rx_symb_d[bit_count] = y[i - 2 - int(np.round(Ns / 2)) - 1]
                    elif w_hat[0] > w_hat[2]:
                        k = Ns + 1  # next decision one sample later
                        clk[i] = 1
                        rx_symb_d[bit_count] = y[i - int(np.round(Ns / 2)) - 1]
                    else:
                        k = Ns  # hold the current clock phase
                        clk[i - 1] = 1
                        rx_symb_d[bit_count] = y[i - 1 - int(np.round(Ns / 2)) - 1]
                else:
                    k = Ns
                    clk[i - 1] = 1
                    rx_symb_d[bit_count] = y[i - 1 - int(np.round(Ns / 2))]
                track[bit_count] = np.mod(i, Ns)
        k -= 1
    # Trim the final output to bit_count
    rx_symb_d = rx_symb_d[:bit_count]
    return rx_symb_d, clk, track
def function[sccs_bit_sync, parameter[y, Ns]]: constant[ rx_symb_d,clk,track = sccs_bit_sync(y,Ns) ////////////////////////////////////////////////////// Symbol synchronization algorithm using SCCS ////////////////////////////////////////////////////// y = baseband NRZ data waveform Ns = nominal number of samples per symbol Reworked from ECE 5675 Project Translated from m-code version Mark Wickert April 2014 ] variable[rx_symb_d] assign[=] call[name[np].zeros, parameter[call[name[int], parameter[call[name[np].fix, parameter[binary_operation[call[name[len], parameter[name[y]]] / name[Ns]]]]]]]] variable[track] assign[=] call[name[np].zeros, parameter[call[name[int], parameter[call[name[np].fix, parameter[binary_operation[call[name[len], parameter[name[y]]] / name[Ns]]]]]]]] variable[bit_count] assign[=] <ast.UnaryOp object at 0x7da18f00d630> variable[y_abs] assign[=] call[name[np].zeros, parameter[call[name[len], parameter[name[y]]]]] variable[clk] assign[=] call[name[np].zeros, parameter[call[name[len], parameter[name[y]]]]] variable[k] assign[=] binary_operation[name[Ns] + constant[1]] for taget[name[i]] in starred[call[name[range], parameter[call[name[len], parameter[name[y]]]]]] begin[:] if compare[name[i] greater_or_equal[>=] name[Ns]] begin[:] call[name[y_abs]][name[i]] assign[=] call[name[np].abs, parameter[call[name[np].sum, parameter[call[name[y]][<ast.Slice object at 0x7da18f00c040>]]]]] if compare[name[k] equal[==] constant[0]] begin[:] variable[w_hat] assign[=] call[name[y_abs]][<ast.Slice object at 0x7da18f00e050>] <ast.AugAssign object at 0x7da18f00cd90> if compare[call[name[w_hat]][constant[1]] not_equal[!=] constant[0]] begin[:] if compare[call[name[w_hat]][constant[0]] less[<] call[name[w_hat]][constant[2]]] begin[:] variable[k] assign[=] binary_operation[name[Ns] - constant[1]] call[name[clk]][binary_operation[name[i] - constant[2]]] assign[=] constant[1] call[name[rx_symb_d]][name[bit_count]] assign[=] call[name[y]][binary_operation[binary_operation[binary_operation[name[i] - constant[2]] - call[name[int], parameter[call[name[np].round, parameter[binary_operation[name[Ns] / constant[2]]]]]]] - constant[1]]] call[name[track]][name[bit_count]] assign[=] call[name[np].mod, parameter[name[i], name[Ns]]] <ast.AugAssign object at 0x7da20c6e5c30> variable[rx_symb_d] assign[=] call[name[rx_symb_d]][<ast.Slice object at 0x7da20c6e5120>] return[tuple[[<ast.Name object at 0x7da20c6e7d90>, <ast.Name object at 0x7da20c6e4640>, <ast.Name object at 0x7da20c6e4b50>]]]
keyword[def] identifier[sccs_bit_sync] ( identifier[y] , identifier[Ns] ): literal[string] identifier[rx_symb_d] = identifier[np] . identifier[zeros] ( identifier[int] ( identifier[np] . identifier[fix] ( identifier[len] ( identifier[y] )/ identifier[Ns] ))) identifier[track] = identifier[np] . identifier[zeros] ( identifier[int] ( identifier[np] . identifier[fix] ( identifier[len] ( identifier[y] )/ identifier[Ns] ))) identifier[bit_count] =- literal[int] identifier[y_abs] = identifier[np] . identifier[zeros] ( identifier[len] ( identifier[y] )) identifier[clk] = identifier[np] . identifier[zeros] ( identifier[len] ( identifier[y] )) identifier[k] = identifier[Ns] + literal[int] keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[y] )): keyword[if] identifier[i] >= identifier[Ns] : identifier[y_abs] [ identifier[i] ]= identifier[np] . identifier[abs] ( identifier[np] . identifier[sum] ( identifier[y] [ identifier[i] - identifier[Ns] + literal[int] : identifier[i] + literal[int] ])) keyword[if] ( identifier[k] == literal[int] ): identifier[w_hat] = identifier[y_abs] [ identifier[i] - literal[int] : identifier[i] + literal[int] ] identifier[bit_count] += literal[int] keyword[if] identifier[w_hat] [ literal[int] ]!= literal[int] : keyword[if] identifier[w_hat] [ literal[int] ]< identifier[w_hat] [ literal[int] ]: identifier[k] = identifier[Ns] - literal[int] identifier[clk] [ identifier[i] - literal[int] ]= literal[int] identifier[rx_symb_d] [ identifier[bit_count] ]= identifier[y] [ identifier[i] - literal[int] - identifier[int] ( identifier[np] . identifier[round] ( identifier[Ns] / literal[int] ))- literal[int] ] keyword[elif] identifier[w_hat] [ literal[int] ]> identifier[w_hat] [ literal[int] ]: identifier[k] = identifier[Ns] + literal[int] identifier[clk] [ identifier[i] ]= literal[int] identifier[rx_symb_d] [ identifier[bit_count] ]= identifier[y] [ identifier[i] - identifier[int] ( identifier[np] . identifier[round] ( identifier[Ns] / literal[int] ))- literal[int] ] keyword[else] : identifier[k] = identifier[Ns] identifier[clk] [ identifier[i] - literal[int] ]= literal[int] identifier[rx_symb_d] [ identifier[bit_count] ]= identifier[y] [ identifier[i] - literal[int] - identifier[int] ( identifier[np] . identifier[round] ( identifier[Ns] / literal[int] ))- literal[int] ] keyword[else] : identifier[k] = identifier[Ns] identifier[clk] [ identifier[i] - literal[int] ]= literal[int] identifier[rx_symb_d] [ identifier[bit_count] ]= identifier[y] [ identifier[i] - literal[int] - identifier[int] ( identifier[np] . identifier[round] ( identifier[Ns] / literal[int] ))] identifier[track] [ identifier[bit_count] ]= identifier[np] . identifier[mod] ( identifier[i] , identifier[Ns] ) identifier[k] -= literal[int] identifier[rx_symb_d] = identifier[rx_symb_d] [: identifier[bit_count] ] keyword[return] identifier[rx_symb_d] , identifier[clk] , identifier[track]
def sccs_bit_sync(y, Ns): """ rx_symb_d,clk,track = sccs_bit_sync(y,Ns) ////////////////////////////////////////////////////// Symbol synchronization algorithm using SCCS ////////////////////////////////////////////////////// y = baseband NRZ data waveform Ns = nominal number of samples per symbol Reworked from ECE 5675 Project Translated from m-code version Mark Wickert April 2014 """ # decimated symbol sequence for SEP rx_symb_d = np.zeros(int(np.fix(len(y) / Ns))) track = np.zeros(int(np.fix(len(y) / Ns))) bit_count = -1 y_abs = np.zeros(len(y)) clk = np.zeros(len(y)) k = Ns + 1 #initial 1-of-Ns symbol synch clock phase # Sample-by-sample processing required for i in range(len(y)): #y_abs(i) = abs(round(real(y(i)))) if i >= Ns: # do not process first Ns samples # Collect timing decision unit (TDU) samples y_abs[i] = np.abs(np.sum(y[i - Ns + 1:i + 1])) # Update sampling instant and take a sample # For causality reason the early sample is 'i', # the on-time or prompt sample is 'i-1', and # the late sample is 'i-2'. if k == 0: # Load the samples into the 3x1 TDU register w_hat. # w_hat[1] = late, w_hat[2] = on-time; w_hat[3] = early. w_hat = y_abs[i - 2:i + 1] bit_count += 1 if w_hat[1] != 0: if w_hat[0] < w_hat[2]: k = Ns - 1 clk[i - 2] = 1 rx_symb_d[bit_count] = y[i - 2 - int(np.round(Ns / 2)) - 1] # depends on [control=['if'], data=[]] elif w_hat[0] > w_hat[2]: k = Ns + 1 clk[i] = 1 rx_symb_d[bit_count] = y[i - int(np.round(Ns / 2)) - 1] # depends on [control=['if'], data=[]] else: k = Ns clk[i - 1] = 1 rx_symb_d[bit_count] = y[i - 1 - int(np.round(Ns / 2)) - 1] # depends on [control=['if'], data=[]] else: k = Ns clk[i - 1] = 1 rx_symb_d[bit_count] = y[i - 1 - int(np.round(Ns / 2))] track[bit_count] = np.mod(i, Ns) # depends on [control=['if'], data=['k']] # depends on [control=['if'], data=['i', 'Ns']] k -= 1 # depends on [control=['for'], data=['i']] # Trim the final output to bit_count rx_symb_d = rx_symb_d[:bit_count] return (rx_symb_d, clk, track)
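As a quick illustration of the sccs_bit_sync sample above, here is a minimal usage sketch, not part of the dataset record: it assumes numpy is available as np and feeds the synchronizer a random +/-1 NRZ test waveform at Ns samples per symbol (all test values are placeholders).

import numpy as np

Ns = 10
bits = np.random.randint(0, 2, 1000)
y = np.repeat(2.0 * bits - 1, Ns)            # NRZ mapping: 0 -> -1, 1 -> +1
rx_symb_d, clk, track = sccs_bit_sync(y, Ns)
print(rx_symb_d[:10])                        # recovered symbol values
print(int(clk.sum()), "timing decisions")    # roughly one clock tick per symbol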
def _index_audio_ibm(self, basename=None, replace_already_indexed=False,
                     continuous=True, model="en-US_BroadbandModel",
                     word_confidence=True, word_alternatives_threshold=0.9,
                     profanity_filter_for_US_results=False):
    """
    Implements a search-suitable interface for the Watson speech API.

    Some explanation of the parameters here has been taken from [1]_

    Parameters
    ----------
    basename : str, optional
        A specific basename to be indexed and is placed in src_dir,
        e.g. `audio.wav`. If `None` is selected, all the valid audio
        files would be indexed. Default is `None`.
    replace_already_indexed : bool
        `True`, to reindex some audio file that's already in the
        timestamps. Default is `False`.
    continuous : bool
        Indicates whether multiple final results that represent
        consecutive phrases separated by long pauses are returned.
        If true, such phrases are returned; if false (the default),
        recognition ends after the first end-of-speech (EOS) incident
        is detected. Default is `True`.
    model : {
                'ar-AR_BroadbandModel',
                'en-UK_BroadbandModel',
                'en-UK_NarrowbandModel',
                'en-US_BroadbandModel', (the default)
                'en-US_NarrowbandModel',
                'es-ES_BroadbandModel',
                'es-ES_NarrowbandModel',
                'fr-FR_BroadbandModel',
                'ja-JP_BroadbandModel',
                'ja-JP_NarrowbandModel',
                'pt-BR_BroadbandModel',
                'pt-BR_NarrowbandModel',
                'zh-CN_BroadbandModel',
                'zh-CN_NarrowbandModel'
            }
        The identifier of the model to be used for the recognition.
        Default is 'en-US_BroadbandModel'.
    word_confidence : bool
        Indicates whether a confidence measure in the range of 0 to 1
        is returned for each word. The default is True. (It's False in
        the original.)
    word_alternatives_threshold : numeric
        A confidence value that is the lower bound for identifying a
        hypothesis as a possible word alternative (also known as
        "Confusion Networks"). An alternative word is considered if its
        confidence is greater than or equal to the threshold. Specify a
        probability between 0 and 1 inclusive. Default is `0.9`.
    profanity_filter_for_US_results : bool
        Indicates whether profanity filtering is performed on the
        transcript. If true, the service filters profanity from all
        output by replacing inappropriate words with a series of
        asterisks. If false, the service returns results with no
        censoring. Applies to US English transcription only.
        Default is `False`.

    References
    ----------
    .. [1] : https://ibm.com/watson/developercloud/speech-to-text/api/v1/
    """
    params = {'continuous': continuous,
              'model': model,
              'word_alternatives_threshold': word_alternatives_threshold,
              'word_confidence': word_confidence,
              'timestamps': True,
              'inactivity_timeout': str(-1),
              'profanity_filter': profanity_filter_for_US_results}
    self._prepare_audio(basename=basename,
                        replace_already_indexed=replace_already_indexed)
    for staging_audio_basename in self._list_audio_files(sub_dir="staging"):
        original_audio_name = ''.join(
            staging_audio_basename.split('.')[:-1])[:-3]
        with open("{}/staging/{}".format(
                self.src_dir, staging_audio_basename), "rb") as f:
            if self.get_verbosity():
                print("Uploading {}...".format(staging_audio_basename))
            response = requests.post(
                url=("https://stream.watsonplatform.net/"
                     "speech-to-text/api/v1/recognize"),
                auth=(self.get_username_ibm(), self.get_password_ibm()),
                headers={'content-type': 'audio/wav'},
                data=f.read(),
                params=params)
            if self.get_verbosity():
                print("Indexing {}...".format(staging_audio_basename))
            self.__timestamps_unregulated[
                original_audio_name + ".wav"].append(
                    self._timestamp_extractor_ibm(
                        staging_audio_basename, json.loads(response.text)))
            if self.get_verbosity():
                print("Done indexing {}".format(staging_audio_basename))
    self._timestamp_regulator()
    if self.get_verbosity():
        print("Indexing procedure finished")
def function[_index_audio_ibm, parameter[self, basename, replace_already_indexed, continuous, model, word_confidence, word_alternatives_threshold, profanity_filter_for_US_results]]: constant[ Implements a search-suitable interface for Watson speech API. Some explaination of the parameters here have been taken from [1]_ Parameters ---------- basename : str, optional A specific basename to be indexed and is placed in src_dir e.g `audio.wav`. If `None` is selected, all the valid audio files would be indexed. Default is `None`. replace_already_indexed : bool `True`, To reindex some audio file that's already in the timestamps. Default is `False`. continuous : bool Indicates whether multiple final results that represent consecutive phrases separated by long pauses are returned. If true, such phrases are returned; if false (the default), recognition ends after the first end-of-speech (EOS) incident is detected. Default is `True`. model : { 'ar-AR_BroadbandModel', 'en-UK_BroadbandModel' 'en-UK_NarrowbandModel', 'en-US_BroadbandModel', (the default) 'en-US_NarrowbandModel', 'es-ES_BroadbandModel', 'es-ES_NarrowbandModel', 'fr-FR_BroadbandModel', 'ja-JP_BroadbandModel', 'ja-JP_NarrowbandModel', 'pt-BR_BroadbandModel', 'pt-BR_NarrowbandModel', 'zh-CN_BroadbandModel', 'zh-CN_NarrowbandModel' } The identifier of the model to be used for the recognition Default is 'en-US_BroadbandModel' word_confidence : bool Indicates whether a confidence measure in the range of 0 to 1 is returned for each word. The default is True. (It's False in the original) word_alternatives_threshold : numeric A confidence value that is the lower bound for identifying a hypothesis as a possible word alternative (also known as "Confusion Networks"). An alternative word is considered if its confidence is greater than or equal to the threshold. Specify a probability between 0 and 1 inclusive. Default is `0.9`. profanity_filter_for_US_results : bool Indicates whether profanity filtering is performed on the transcript. If true, the service filters profanity from all output by replacing inappropriate words with a series of asterisks. If false, the service returns results with no censoring. Applies to US English transcription only. Default is `False`. References ---------- .. 
[1] : https://ibm.com/watson/developercloud/speech-to-text/api/v1/ ] variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da20c6e5210>, <ast.Constant object at 0x7da20c6e4a90>, <ast.Constant object at 0x7da20c6e4760>, <ast.Constant object at 0x7da20c6e7af0>, <ast.Constant object at 0x7da20c6e6020>, <ast.Constant object at 0x7da20c6e7580>, <ast.Constant object at 0x7da20c6e7a30>], [<ast.Name object at 0x7da20c6e56f0>, <ast.Name object at 0x7da20c6e7f10>, <ast.Name object at 0x7da20c6e7b20>, <ast.Name object at 0x7da20c6e55d0>, <ast.Constant object at 0x7da20c6e4c10>, <ast.Call object at 0x7da20c6e4430>, <ast.Name object at 0x7da20c6e6200>]] call[name[self]._prepare_audio, parameter[]] for taget[name[staging_audio_basename]] in starred[call[name[self]._list_audio_files, parameter[]]] begin[:] variable[original_audio_name] assign[=] call[call[constant[].join, parameter[call[call[name[staging_audio_basename].split, parameter[constant[.]]]][<ast.Slice object at 0x7da20c6e66b0>]]]][<ast.Slice object at 0x7da20c6e41c0>] with call[name[open], parameter[call[constant[{}/staging/{}].format, parameter[name[self].src_dir, name[staging_audio_basename]]], constant[rb]]] begin[:] if call[name[self].get_verbosity, parameter[]] begin[:] call[name[print], parameter[call[constant[Uploading {}...].format, parameter[name[staging_audio_basename]]]]] variable[response] assign[=] call[name[requests].post, parameter[]] if call[name[self].get_verbosity, parameter[]] begin[:] call[name[print], parameter[call[constant[Indexing {}...].format, parameter[name[staging_audio_basename]]]]] call[call[name[self].__timestamps_unregulated][binary_operation[name[original_audio_name] + constant[.wav]]].append, parameter[call[name[self]._timestamp_extractor_ibm, parameter[name[staging_audio_basename], call[name[json].loads, parameter[name[response].text]]]]]] if call[name[self].get_verbosity, parameter[]] begin[:] call[name[print], parameter[call[constant[Done indexing {}].format, parameter[name[staging_audio_basename]]]]] call[name[self]._timestamp_regulator, parameter[]] if call[name[self].get_verbosity, parameter[]] begin[:] call[name[print], parameter[constant[Indexing procedure finished]]]
keyword[def] identifier[_index_audio_ibm] ( identifier[self] , identifier[basename] = keyword[None] , identifier[replace_already_indexed] = keyword[False] , identifier[continuous] = keyword[True] , identifier[model] = literal[string] , identifier[word_confidence] = keyword[True] , identifier[word_alternatives_threshold] = literal[int] , identifier[profanity_filter_for_US_results] = keyword[False] ): literal[string] identifier[params] ={ literal[string] : identifier[continuous] , literal[string] : identifier[model] , literal[string] : identifier[word_alternatives_threshold] , literal[string] : identifier[word_confidence] , literal[string] : keyword[True] , literal[string] : identifier[str] (- literal[int] ), literal[string] : identifier[profanity_filter_for_US_results] } identifier[self] . identifier[_prepare_audio] ( identifier[basename] = identifier[basename] , identifier[replace_already_indexed] = identifier[replace_already_indexed] ) keyword[for] identifier[staging_audio_basename] keyword[in] identifier[self] . identifier[_list_audio_files] ( identifier[sub_dir] = literal[string] ): identifier[original_audio_name] = literal[string] . identifier[join] ( identifier[staging_audio_basename] . identifier[split] ( literal[string] )[:- literal[int] ])[:- literal[int] ] keyword[with] identifier[open] ( literal[string] . identifier[format] ( identifier[self] . identifier[src_dir] , identifier[staging_audio_basename] ), literal[string] ) keyword[as] identifier[f] : keyword[if] identifier[self] . identifier[get_verbosity] (): identifier[print] ( literal[string] . identifier[format] ( identifier[staging_audio_basename] )) identifier[response] = identifier[requests] . identifier[post] ( identifier[url] =( literal[string] literal[string] ), identifier[auth] =( identifier[self] . identifier[get_username_ibm] (), identifier[self] . identifier[get_password_ibm] ()), identifier[headers] ={ literal[string] : literal[string] }, identifier[data] = identifier[f] . identifier[read] (), identifier[params] = identifier[params] ) keyword[if] identifier[self] . identifier[get_verbosity] (): identifier[print] ( literal[string] . identifier[format] ( identifier[staging_audio_basename] )) identifier[self] . identifier[__timestamps_unregulated] [ identifier[original_audio_name] + literal[string] ]. identifier[append] ( identifier[self] . identifier[_timestamp_extractor_ibm] ( identifier[staging_audio_basename] , identifier[json] . identifier[loads] ( identifier[response] . identifier[text] ))) keyword[if] identifier[self] . identifier[get_verbosity] (): identifier[print] ( literal[string] . identifier[format] ( identifier[staging_audio_basename] )) identifier[self] . identifier[_timestamp_regulator] () keyword[if] identifier[self] . identifier[get_verbosity] (): identifier[print] ( literal[string] )
def _index_audio_ibm(self, basename=None, replace_already_indexed=False, continuous=True, model='en-US_BroadbandModel', word_confidence=True, word_alternatives_threshold=0.9, profanity_filter_for_US_results=False): """ Implements a search-suitable interface for Watson speech API. Some explaination of the parameters here have been taken from [1]_ Parameters ---------- basename : str, optional A specific basename to be indexed and is placed in src_dir e.g `audio.wav`. If `None` is selected, all the valid audio files would be indexed. Default is `None`. replace_already_indexed : bool `True`, To reindex some audio file that's already in the timestamps. Default is `False`. continuous : bool Indicates whether multiple final results that represent consecutive phrases separated by long pauses are returned. If true, such phrases are returned; if false (the default), recognition ends after the first end-of-speech (EOS) incident is detected. Default is `True`. model : { 'ar-AR_BroadbandModel', 'en-UK_BroadbandModel' 'en-UK_NarrowbandModel', 'en-US_BroadbandModel', (the default) 'en-US_NarrowbandModel', 'es-ES_BroadbandModel', 'es-ES_NarrowbandModel', 'fr-FR_BroadbandModel', 'ja-JP_BroadbandModel', 'ja-JP_NarrowbandModel', 'pt-BR_BroadbandModel', 'pt-BR_NarrowbandModel', 'zh-CN_BroadbandModel', 'zh-CN_NarrowbandModel' } The identifier of the model to be used for the recognition Default is 'en-US_BroadbandModel' word_confidence : bool Indicates whether a confidence measure in the range of 0 to 1 is returned for each word. The default is True. (It's False in the original) word_alternatives_threshold : numeric A confidence value that is the lower bound for identifying a hypothesis as a possible word alternative (also known as "Confusion Networks"). An alternative word is considered if its confidence is greater than or equal to the threshold. Specify a probability between 0 and 1 inclusive. Default is `0.9`. profanity_filter_for_US_results : bool Indicates whether profanity filtering is performed on the transcript. If true, the service filters profanity from all output by replacing inappropriate words with a series of asterisks. If false, the service returns results with no censoring. Applies to US English transcription only. Default is `False`. References ---------- .. 
[1] : https://ibm.com/watson/developercloud/speech-to-text/api/v1/ """ params = {'continuous': continuous, 'model': model, 'word_alternatives_threshold': word_alternatives_threshold, 'word_confidence': word_confidence, 'timestamps': True, 'inactivity_timeout': str(-1), 'profanity_filter': profanity_filter_for_US_results} self._prepare_audio(basename=basename, replace_already_indexed=replace_already_indexed) for staging_audio_basename in self._list_audio_files(sub_dir='staging'): original_audio_name = ''.join(staging_audio_basename.split('.')[:-1])[:-3] with open('{}/staging/{}'.format(self.src_dir, staging_audio_basename), 'rb') as f: if self.get_verbosity(): print('Uploading {}...'.format(staging_audio_basename)) # depends on [control=['if'], data=[]] response = requests.post(url='https://stream.watsonplatform.net/speech-to-text/api/v1/recognize', auth=(self.get_username_ibm(), self.get_password_ibm()), headers={'content-type': 'audio/wav'}, data=f.read(), params=params) if self.get_verbosity(): print('Indexing {}...'.format(staging_audio_basename)) # depends on [control=['if'], data=[]] self.__timestamps_unregulated[original_audio_name + '.wav'].append(self._timestamp_extractor_ibm(staging_audio_basename, json.loads(response.text))) if self.get_verbosity(): print('Done indexing {}'.format(staging_audio_basename)) # depends on [control=['if'], data=[]] # depends on [control=['with'], data=['f']] # depends on [control=['for'], data=['staging_audio_basename']] self._timestamp_regulator() if self.get_verbosity(): print('Indexing procedure finished') # depends on [control=['if'], data=[]]
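For orientation, the core of the _index_audio_ibm sample above is a single HTTP POST to the Watson endpoint. Here is a stand-alone sketch of that request, with placeholder credentials and file path, using the same params dict the method builds:

import json
import requests

params = {'continuous': True, 'model': 'en-US_BroadbandModel',
          'word_alternatives_threshold': 0.9, 'word_confidence': True,
          'timestamps': True, 'inactivity_timeout': str(-1),
          'profanity_filter': False}
with open('staging/audio.wav', 'rb') as f:    # placeholder path
    response = requests.post(
        'https://stream.watsonplatform.net/speech-to-text/api/v1/recognize',
        auth=('username', 'password'),        # placeholder credentials
        headers={'content-type': 'audio/wav'},
        data=f.read(),
        params=params)
print(json.loads(response.text))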
def addFixedEffect(self, F=None, A=None, index=None):
    """
    set sample and trait designs
    F: NxK sample design
    A: LxP sample design
    fast_computations: False deactivates the fast computations for any
                       and common effects (for debugging)
    """
    if F is None:
        F = np.ones((self.N, 1))
    else:
        assert F.shape[0] == self.N, "F dimension mismatch"
    if (A is None) or ((A.shape == (self.P, self.P)) and
                       (A == np.eye(self.P)).all()):
        # case any effect
        self.F_any = np.hstack((self.F_any, F))
    elif (index is not None) and (A == self.A[index]).all():
        # case common effect
        self.F[index] = np.hstack((self.F_index, F))
    else:
        # case general A
        assert A.shape[1] == self.P, "A dimension mismatch"
        self.F.append(F)
        self.A.append(A)
    self.clear_cache()
def function[addFixedEffect, parameter[self, F, A, index]]: constant[ set sample and trait designs F: NxK sample design A: LxP sample design fast_computations: False deactivates the fast computations for any and common effects (for debugging) ] if compare[name[F] is constant[None]] begin[:] variable[F] assign[=] call[name[np].ones, parameter[tuple[[<ast.Attribute object at 0x7da18f722f50>, <ast.Constant object at 0x7da18f721870>]]]] if <ast.BoolOp object at 0x7da2041db250> begin[:] name[self].F_any assign[=] call[name[np].hstack, parameter[tuple[[<ast.Attribute object at 0x7da2041db910>, <ast.Name object at 0x7da2041d98d0>]]]] call[name[self].clear_cache, parameter[]]
keyword[def] identifier[addFixedEffect] ( identifier[self] , identifier[F] = keyword[None] , identifier[A] = keyword[None] , identifier[index] = keyword[None] ): literal[string] keyword[if] identifier[F] keyword[is] keyword[None] : identifier[F] = identifier[np] . identifier[ones] (( identifier[self] . identifier[N] , literal[int] )) keyword[else] : keyword[assert] identifier[F] . identifier[shape] [ literal[int] ]== identifier[self] . identifier[N] , literal[string] keyword[if] (( identifier[A] keyword[is] keyword[None] ) keyword[or] (( identifier[A] . identifier[shape] ==( identifier[self] . identifier[P] , identifier[self] . identifier[P] )) keyword[and] ( identifier[A] == identifier[np] . identifier[eye] ( identifier[self] . identifier[P] )). identifier[all] ())): identifier[self] . identifier[F_any] = identifier[np] . identifier[hstack] (( identifier[self] . identifier[F_any] , identifier[F] )) keyword[elif] ( identifier[index] keyword[is] keyword[not] keyword[None] ) keyword[and] (( identifier[A] == identifier[self] . identifier[A] [ identifier[index] ]). identifier[all] ()): identifier[self] . identifier[F] [ identifier[index] ]= identifier[np] . identifier[hstack] (( identifier[self] . identifier[F_index] , identifier[F] )) keyword[else] : keyword[assert] identifier[A] . identifier[shape] [ literal[int] ]== identifier[self] . identifier[P] , literal[string] identifier[self] . identifier[F] . identifier[append] ( identifier[F] ) identifier[self] . identifier[A] . identifier[append] ( identifier[A] ) identifier[self] . identifier[clear_cache] ()
def addFixedEffect(self, F=None, A=None, index=None): """ set sample and trait designs F: NxK sample design A: LxP sample design fast_computations: False deactivates the fast computations for any and common effects (for debugging) """ if F is None: F = np.ones((self.N, 1)) # depends on [control=['if'], data=['F']] else: assert F.shape[0] == self.N, 'F dimension mismatch' if A is None or (A.shape == (self.P, self.P) and (A == np.eye(self.P)).all()): #case any effect self.F_any = np.hstack((self.F_any, F)) # depends on [control=['if'], data=[]] elif index is not None and (A == self.A[index]).all(): #case common effect self.F[index] = np.hstack((self.F_index, F)) # depends on [control=['if'], data=[]] else: #case general A assert A.shape[1] == self.P, 'A dimension mismatch' self.F.append(F) self.A.append(A) self.clear_cache()
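A hedged usage sketch for the addFixedEffect sample above; `gp` stands in for an instance of the unnamed class defining the method, assumed to expose N samples and P traits. Passing an identity A exercises the "any effect" branch:

import numpy as np

N, P, K = 100, 3, 2          # placeholder dimensions
F = np.random.randn(N, K)    # NxK sample design
A = np.eye(P)                # identity trait design, takes the "any effect" branch
gp.addFixedEffect(F=F, A=A)  # gp is a hypothetical instance of the host class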
def neighbors_from_pixelization(self, pixels, ridge_points):
    """Compute the neighbors of every Voronoi pixel, as an ndarray of the pixel indexes each pixel shares a \
    vertex with. The ridge points of the Voronoi grid are used to derive this.

    Parameters
    ----------
    ridge_points : scipy.spatial.Voronoi.ridge_points
        Each Voronoi-ridge (two indexes representing a pixel mapping_matrix).
    """
    return pixelization_util.voronoi_neighbors_from_pixels_and_ridge_points(
        pixels=pixels, ridge_points=np.asarray(ridge_points))
def function[neighbors_from_pixelization, parameter[self, pixels, ridge_points]]: constant[Compute the neighbors of every Voronoi pixel as an ndarray of the pixel index's each pixel shares a vertex with. The ridge points of the Voronoi grid are used to derive this. Parameters ---------- ridge_points : scipy.spatial.Voronoi.ridge_points Each Voronoi-ridge (two indexes representing a pixel mapping_matrix). ] return[call[name[pixelization_util].voronoi_neighbors_from_pixels_and_ridge_points, parameter[]]]
keyword[def] identifier[neighbors_from_pixelization] ( identifier[self] , identifier[pixels] , identifier[ridge_points] ): literal[string] keyword[return] identifier[pixelization_util] . identifier[voronoi_neighbors_from_pixels_and_ridge_points] ( identifier[pixels] = identifier[pixels] , identifier[ridge_points] = identifier[np] . identifier[asarray] ( identifier[ridge_points] ))
def neighbors_from_pixelization(self, pixels, ridge_points): """Compute the neighbors of every Voronoi pixel as an ndarray of the pixel index's each pixel shares a vertex with. The ridge points of the Voronoi grid are used to derive this. Parameters ---------- ridge_points : scipy.spatial.Voronoi.ridge_points Each Voronoi-ridge (two indexes representing a pixel mapping_matrix). """ return pixelization_util.voronoi_neighbors_from_pixels_and_ridge_points(pixels=pixels, ridge_points=np.asarray(ridge_points))
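To make the ridge_points input of the neighbors_from_pixelization sample concrete, here is a small scipy sketch (the pixel centres are placeholder data): scipy's Voronoi exposes ridge_points as an (M, 2) array of index pairs for pixels that share a Voronoi ridge, which is what the helper consumes.

import numpy as np
from scipy.spatial import Voronoi

centres = np.random.rand(9, 2)   # nine pixel centres (placeholder data)
voronoi = Voronoi(centres)
print(voronoi.ridge_points)      # (M, 2) pairs of neighboring pixel indexes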
def __constructMetricsModules(self, metricSpecs):
    """
    Creates the required metrics modules

    Parameters:
    -----------------------------------------------------------------------
    metricSpecs:
        A sequence of MetricSpec objects that specify which metric modules
        to instantiate
    """
    if not metricSpecs:
        return

    self.__metricSpecs = metricSpecs
    for spec in metricSpecs:
        if not InferenceElement.validate(spec.inferenceElement):
            raise ValueError("Invalid inference element for metric spec: %r"
                             % spec)
        self.__metrics.append(metrics.getModule(spec))
        self.__metricLabels.append(spec.getLabel())
def function[__constructMetricsModules, parameter[self, metricSpecs]]: constant[ Creates the required metrics modules Parameters: ----------------------------------------------------------------------- metricSpecs: A sequence of MetricSpec objects that specify which metric modules to instantiate ] if <ast.UnaryOp object at 0x7da20c993b80> begin[:] return[None] name[self].__metricSpecs assign[=] name[metricSpecs] for taget[name[spec]] in starred[name[metricSpecs]] begin[:] if <ast.UnaryOp object at 0x7da20c990b50> begin[:] <ast.Raise object at 0x7da1b0146ce0> call[name[self].__metrics.append, parameter[call[name[metrics].getModule, parameter[name[spec]]]]] call[name[self].__metricLabels.append, parameter[call[name[spec].getLabel, parameter[]]]]
keyword[def] identifier[__constructMetricsModules] ( identifier[self] , identifier[metricSpecs] ): literal[string] keyword[if] keyword[not] identifier[metricSpecs] : keyword[return] identifier[self] . identifier[__metricSpecs] = identifier[metricSpecs] keyword[for] identifier[spec] keyword[in] identifier[metricSpecs] : keyword[if] keyword[not] identifier[InferenceElement] . identifier[validate] ( identifier[spec] . identifier[inferenceElement] ): keyword[raise] identifier[ValueError] ( literal[string] % identifier[spec] ) identifier[self] . identifier[__metrics] . identifier[append] ( identifier[metrics] . identifier[getModule] ( identifier[spec] )) identifier[self] . identifier[__metricLabels] . identifier[append] ( identifier[spec] . identifier[getLabel] ())
def __constructMetricsModules(self, metricSpecs): """ Creates the required metrics modules Parameters: ----------------------------------------------------------------------- metricSpecs: A sequence of MetricSpec objects that specify which metric modules to instantiate """ if not metricSpecs: return # depends on [control=['if'], data=[]] self.__metricSpecs = metricSpecs for spec in metricSpecs: if not InferenceElement.validate(spec.inferenceElement): raise ValueError('Invalid inference element for metric spec: %r' % spec) # depends on [control=['if'], data=[]] self.__metrics.append(metrics.getModule(spec)) self.__metricLabels.append(spec.getLabel()) # depends on [control=['for'], data=['spec']]
def run_via_api(self, container_params=None):
    """
    Create a container using this image and run it in the background via
    the Docker-py API.
    https://docker-py.readthedocs.io/en/stable/api.html

    Note: if you are using healthchecks, be aware that support for some
    options was introduced only in version 1.29 of the Docker-py API.

    :param container_params: DockerContainerParameters
    :return: instance of DockerContainer
    """
    if not container_params:
        container_params = DockerContainerParameters()
    # Host-specific configuration
    host_config = self.d.create_host_config(
        auto_remove=container_params.remove,
        cap_add=container_params.cap_add,
        cap_drop=container_params.cap_drop,
        devices=container_params.devices,
        dns=container_params.dns,
        group_add=container_params.group_add,
        init=container_params.init,
        ipc_mode=container_params.ipc_mode,
        isolation=container_params.isolation,
        mem_limit=container_params.mem_limit,
        mounts=container_params.mounts,
        pids_limit=container_params.pids_limit,
        privileged=container_params.privileged,
        publish_all_ports=container_params.publish_all_ports,
        port_bindings=container_params.port_mappings,
        read_only=container_params.read_only)
    container = self.d.create_container(
        self.get_id(),
        command=container_params.command,
        detach=True,
        hostname=container_params.hostname,
        user=container_params.user,
        stdin_open=container_params.stdin_open,
        tty=container_params.tty,
        ports=container_params.exposed_ports,
        environment=container_params.env_variables,
        volumes=container_params.volumes,
        name=container_params.name,
        entrypoint=container_params.entrypoint,
        working_dir=container_params.working_dir,
        host_config=host_config,
        mac_address=container_params.mac_address,
        labels=container_params.labels,
        stop_signal=container_params.stop_signal,
        healthcheck=container_params.healthcheck,
        runtime=container_params.runtime)
    return DockerContainer(self, container['Id'], name=container_params.name)
def function[run_via_api, parameter[self, container_params]]: constant[ create a container using this image and run it in background via Docker-py API. https://docker-py.readthedocs.io/en/stable/api.html Note: If you are using Healthchecks, be aware that support of some options were introduced just with version of Docker-py API 1.29 :param container_params: DockerContainerParameters :return: instance of DockerContainer ] if <ast.UnaryOp object at 0x7da1b12f2020> begin[:] variable[container_params] assign[=] call[name[DockerContainerParameters], parameter[]] variable[host_config] assign[=] call[name[self].d.create_host_config, parameter[]] variable[container] assign[=] call[name[self].d.create_container, parameter[call[name[self].get_id, parameter[]]]] return[call[name[DockerContainer], parameter[name[self], call[name[container]][constant[Id]]]]]
keyword[def] identifier[run_via_api] ( identifier[self] , identifier[container_params] = keyword[None] ): literal[string] keyword[if] keyword[not] identifier[container_params] : identifier[container_params] = identifier[DockerContainerParameters] () identifier[host_config] = identifier[self] . identifier[d] . identifier[create_host_config] ( identifier[auto_remove] = identifier[container_params] . identifier[remove] , identifier[cap_add] = identifier[container_params] . identifier[cap_add] , identifier[cap_drop] = identifier[container_params] . identifier[cap_drop] , identifier[devices] = identifier[container_params] . identifier[devices] , identifier[dns] = identifier[container_params] . identifier[dns] , identifier[group_add] = identifier[container_params] . identifier[group_add] , identifier[init] = identifier[container_params] . identifier[init] , identifier[ipc_mode] = identifier[container_params] . identifier[ipc_mode] , identifier[isolation] = identifier[container_params] . identifier[isolation] , identifier[mem_limit] = identifier[container_params] . identifier[mem_limit] , identifier[mounts] = identifier[container_params] . identifier[mounts] , identifier[pids_limit] = identifier[container_params] . identifier[pids_limit] , identifier[privileged] = identifier[container_params] . identifier[privileged] , identifier[publish_all_ports] = identifier[container_params] . identifier[publish_all_ports] , identifier[port_bindings] = identifier[container_params] . identifier[port_mappings] , identifier[read_only] = identifier[container_params] . identifier[read_only] ) identifier[container] = identifier[self] . identifier[d] . identifier[create_container] ( identifier[self] . identifier[get_id] (), identifier[command] = identifier[container_params] . identifier[command] , identifier[detach] = keyword[True] , identifier[hostname] = identifier[container_params] . identifier[hostname] , identifier[user] = identifier[container_params] . identifier[user] , identifier[stdin_open] = identifier[container_params] . identifier[stdin_open] , identifier[tty] = identifier[container_params] . identifier[tty] , identifier[ports] = identifier[container_params] . identifier[exposed_ports] , identifier[environment] = identifier[container_params] . identifier[env_variables] , identifier[volumes] = identifier[container_params] . identifier[volumes] , identifier[name] = identifier[container_params] . identifier[name] , identifier[entrypoint] = identifier[container_params] . identifier[entrypoint] , identifier[working_dir] = identifier[container_params] . identifier[working_dir] , identifier[host_config] = identifier[host_config] , identifier[mac_address] = identifier[container_params] . identifier[mac_address] , identifier[labels] = identifier[container_params] . identifier[labels] , identifier[stop_signal] = identifier[container_params] . identifier[stop_signal] , identifier[healthcheck] = identifier[container_params] . identifier[healthcheck] , identifier[runtime] = identifier[container_params] . identifier[runtime] ) keyword[return] identifier[DockerContainer] ( identifier[self] , identifier[container] [ literal[string] ], identifier[name] = identifier[container_params] . identifier[name] )
def run_via_api(self, container_params=None): """ create a container using this image and run it in background via Docker-py API. https://docker-py.readthedocs.io/en/stable/api.html Note: If you are using Healthchecks, be aware that support of some options were introduced just with version of Docker-py API 1.29 :param container_params: DockerContainerParameters :return: instance of DockerContainer """ if not container_params: container_params = DockerContainerParameters() # depends on [control=['if'], data=[]] # Host-specific configuration host_config = self.d.create_host_config(auto_remove=container_params.remove, cap_add=container_params.cap_add, cap_drop=container_params.cap_drop, devices=container_params.devices, dns=container_params.dns, group_add=container_params.group_add, init=container_params.init, ipc_mode=container_params.ipc_mode, isolation=container_params.isolation, mem_limit=container_params.mem_limit, mounts=container_params.mounts, pids_limit=container_params.pids_limit, privileged=container_params.privileged, publish_all_ports=container_params.publish_all_ports, port_bindings=container_params.port_mappings, read_only=container_params.read_only) container = self.d.create_container(self.get_id(), command=container_params.command, detach=True, hostname=container_params.hostname, user=container_params.user, stdin_open=container_params.stdin_open, tty=container_params.tty, ports=container_params.exposed_ports, environment=container_params.env_variables, volumes=container_params.volumes, name=container_params.name, entrypoint=container_params.entrypoint, working_dir=container_params.working_dir, host_config=host_config, mac_address=container_params.mac_address, labels=container_params.labels, stop_signal=container_params.stop_signal, healthcheck=container_params.healthcheck, runtime=container_params.runtime) return DockerContainer(self, container['Id'], name=container_params.name)
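A hedged usage sketch for the run_via_api sample above; `image` stands in for an instance of the class defining the method. The no-argument DockerContainerParameters() construction is taken from the method body itself, while setting its attributes directly afterwards is an assumption:

params = DockerContainerParameters()    # no-arg construction mirrors the method body
params.command = ["sleep", "60"]        # attribute names taken from the method body
params.name = "demo-container"
container = image.run_via_api(container_params=params)   # image is hypothetical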
def p_assignment(self, p):
    'assignment : ASSIGN lvalue EQUALS rvalue SEMICOLON'
    p[0] = Assign(p[2], p[4], lineno=p.lineno(1))
    p.set_lineno(0, p.lineno(1))
def function[p_assignment, parameter[self, p]]: constant[assignment : ASSIGN lvalue EQUALS rvalue SEMICOLON] call[name[p]][constant[0]] assign[=] call[name[Assign], parameter[call[name[p]][constant[2]], call[name[p]][constant[4]]]] call[name[p].set_lineno, parameter[constant[0], call[name[p].lineno, parameter[constant[1]]]]]
keyword[def] identifier[p_assignment] ( identifier[self] , identifier[p] ): literal[string] identifier[p] [ literal[int] ]= identifier[Assign] ( identifier[p] [ literal[int] ], identifier[p] [ literal[int] ], identifier[lineno] = identifier[p] . identifier[lineno] ( literal[int] )) identifier[p] . identifier[set_lineno] ( literal[int] , identifier[p] . identifier[lineno] ( literal[int] ))
def p_assignment(self, p): """assignment : ASSIGN lvalue EQUALS rvalue SEMICOLON""" p[0] = Assign(p[2], p[4], lineno=p.lineno(1)) p.set_lineno(0, p.lineno(1))
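The p_assignment sample above is a standard PLY (yacc) production: the docstring is the grammar rule and p[i] indexes the values of the matched symbols. A short sketch of how the pieces line up; Assign comes from the host project, and the pass-through rule below is a hypothetical companion in the same style:

# For the production
#     assignment : ASSIGN lvalue EQUALS rvalue SEMICOLON
# PLY binds p[1]..p[5] to the values of ASSIGN, lvalue, EQUALS, rvalue and
# SEMICOLON in order; p[0] receives the rule's result, so
# Assign(p[2], p[4], ...) pairs the left-hand side with the right-hand side.
def p_lvalue(self, p):
    'lvalue : identifier'
    p[0] = p[1]   # hypothetical pass-through rule for illustration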
def init(self, force_deploy=False):
    """Reserves and deploys the vagrant boxes.

    Args:
        force_deploy (bool): True iff new machines should be started
    """
    machines = self.provider_conf.machines
    networks = self.provider_conf.networks
    _networks = []
    for network in networks:
        ipnet = IPNetwork(network.cidr)
        _networks.append({
            "netpool": list(ipnet)[10:-10],
            "cidr": network.cidr,
            "roles": network.roles,
            "gateway": ipnet.ip
        })

    vagrant_machines = []
    vagrant_roles = {}
    j = 0
    for machine in machines:
        for _ in range(machine.number):
            vagrant_machine = {
                "name": "enos-%s" % j,
                "cpu": machine.flavour_desc["core"],
                "mem": machine.flavour_desc["mem"],
                "ips": [n["netpool"].pop() for n in _networks],
            }
            vagrant_machines.append(vagrant_machine)
            # Assign the machines to the right roles
            for role in machine.roles:
                vagrant_roles.setdefault(role, []).append(vagrant_machine)
            j = j + 1

    logger.debug(vagrant_roles)
    loader = FileSystemLoader(searchpath=TEMPLATE_DIR)
    env = Environment(loader=loader, autoescape=True)
    template = env.get_template('Vagrantfile.j2')
    vagrantfile = template.render(machines=vagrant_machines,
                                  provider_conf=self.provider_conf)
    vagrantfile_path = os.path.join(os.getcwd(), "Vagrantfile")
    with open(vagrantfile_path, 'w') as f:
        f.write(vagrantfile)

    # Build env for Vagrant with a copy of env variables (needed by the
    # subprocess opened by vagrant)
    v_env = dict(os.environ)
    v_env['VAGRANT_DEFAULT_PROVIDER'] = self.provider_conf.backend
    v = vagrant.Vagrant(root=os.getcwd(),
                        quiet_stdout=False,
                        quiet_stderr=False,
                        env=v_env)
    if force_deploy:
        v.destroy()

    v.up()
    v.provision()
    roles = {}
    for role, machines in vagrant_roles.items():
        for machine in machines:
            keyfile = v.keyfile(vm_name=machine['name'])
            port = v.port(vm_name=machine['name'])
            address = v.hostname(vm_name=machine['name'])
            roles.setdefault(role, []).append(
                Host(address,
                     alias=machine['name'],
                     user=self.provider_conf.user,
                     port=port,
                     keyfile=keyfile))
    networks = [{
        'cidr': str(n["cidr"]),
        'start': str(n["netpool"][0]),
        'end': str(n["netpool"][-1]),
        'dns': '8.8.8.8',
        'gateway': str(n["gateway"]),
        'roles': n["roles"]
    } for n in _networks]
    logger.debug(roles)
    logger.debug(networks)
    return (roles, networks)
def function[init, parameter[self, force_deploy]]: constant[Reserve and deploys the vagrant boxes. Args: force_deploy (bool): True iff new machines should be started ] variable[machines] assign[=] name[self].provider_conf.machines variable[networks] assign[=] name[self].provider_conf.networks variable[_networks] assign[=] list[[]] for taget[name[network]] in starred[name[networks]] begin[:] variable[ipnet] assign[=] call[name[IPNetwork], parameter[name[network].cidr]] call[name[_networks].append, parameter[dictionary[[<ast.Constant object at 0x7da18fe93940>, <ast.Constant object at 0x7da18fe92470>, <ast.Constant object at 0x7da18fe92560>, <ast.Constant object at 0x7da18fe92920>], [<ast.Subscript object at 0x7da18fe92290>, <ast.Attribute object at 0x7da18fe92380>, <ast.Attribute object at 0x7da18fe93dc0>, <ast.Attribute object at 0x7da18fe90bb0>]]]] variable[vagrant_machines] assign[=] list[[]] variable[vagrant_roles] assign[=] dictionary[[], []] variable[j] assign[=] constant[0] for taget[name[machine]] in starred[name[machines]] begin[:] for taget[name[_]] in starred[call[name[range], parameter[name[machine].number]]] begin[:] variable[vagrant_machine] assign[=] dictionary[[<ast.Constant object at 0x7da18fe92bc0>, <ast.Constant object at 0x7da18fe90b80>, <ast.Constant object at 0x7da18fe92ec0>, <ast.Constant object at 0x7da18fe92d10>], [<ast.BinOp object at 0x7da18fe939d0>, <ast.Subscript object at 0x7da18fe91c00>, <ast.Subscript object at 0x7da18fe92170>, <ast.ListComp object at 0x7da18fe913c0>]] call[name[vagrant_machines].append, parameter[name[vagrant_machine]]] for taget[name[role]] in starred[name[machine].roles] begin[:] call[call[name[vagrant_roles].setdefault, parameter[name[role], list[[]]]].append, parameter[name[vagrant_machine]]] variable[j] assign[=] binary_operation[name[j] + constant[1]] call[name[logger].debug, parameter[name[vagrant_roles]]] variable[loader] assign[=] call[name[FileSystemLoader], parameter[]] variable[env] assign[=] call[name[Environment], parameter[]] variable[template] assign[=] call[name[env].get_template, parameter[constant[Vagrantfile.j2]]] variable[vagrantfile] assign[=] call[name[template].render, parameter[]] variable[vagrantfile_path] assign[=] call[name[os].path.join, parameter[call[name[os].getcwd, parameter[]], constant[Vagrantfile]]] with call[name[open], parameter[name[vagrantfile_path], constant[w]]] begin[:] call[name[f].write, parameter[name[vagrantfile]]] variable[v_env] assign[=] call[name[dict], parameter[name[os].environ]] call[name[v_env]][constant[VAGRANT_DEFAULT_PROVIDER]] assign[=] name[self].provider_conf.backend variable[v] assign[=] call[name[vagrant].Vagrant, parameter[]] if name[force_deploy] begin[:] call[name[v].destroy, parameter[]] call[name[v].up, parameter[]] call[name[v].provision, parameter[]] variable[roles] assign[=] dictionary[[], []] for taget[tuple[[<ast.Name object at 0x7da18f09cbe0>, <ast.Name object at 0x7da18f09e920>]]] in starred[call[name[vagrant_roles].items, parameter[]]] begin[:] for taget[name[machine]] in starred[name[machines]] begin[:] variable[keyfile] assign[=] call[name[v].keyfile, parameter[]] variable[port] assign[=] call[name[v].port, parameter[]] variable[address] assign[=] call[name[v].hostname, parameter[]] call[call[name[roles].setdefault, parameter[name[role], list[[]]]].append, parameter[call[name[Host], parameter[name[address]]]]] variable[networks] assign[=] <ast.ListComp object at 0x7da18f09ce80> call[name[logger].debug, parameter[name[roles]]] call[name[logger].debug, 
parameter[name[networks]]] return[tuple[[<ast.Name object at 0x7da18f09c250>, <ast.Name object at 0x7da18f09d4e0>]]]
keyword[def] identifier[init] ( identifier[self] , identifier[force_deploy] = keyword[False] ): literal[string] identifier[machines] = identifier[self] . identifier[provider_conf] . identifier[machines] identifier[networks] = identifier[self] . identifier[provider_conf] . identifier[networks] identifier[_networks] =[] keyword[for] identifier[network] keyword[in] identifier[networks] : identifier[ipnet] = identifier[IPNetwork] ( identifier[network] . identifier[cidr] ) identifier[_networks] . identifier[append] ({ literal[string] : identifier[list] ( identifier[ipnet] )[ literal[int] :- literal[int] ], literal[string] : identifier[network] . identifier[cidr] , literal[string] : identifier[network] . identifier[roles] , literal[string] : identifier[ipnet] . identifier[ip] }) identifier[vagrant_machines] =[] identifier[vagrant_roles] ={} identifier[j] = literal[int] keyword[for] identifier[machine] keyword[in] identifier[machines] : keyword[for] identifier[_] keyword[in] identifier[range] ( identifier[machine] . identifier[number] ): identifier[vagrant_machine] ={ literal[string] : literal[string] % identifier[j] , literal[string] : identifier[machine] . identifier[flavour_desc] [ literal[string] ], literal[string] : identifier[machine] . identifier[flavour_desc] [ literal[string] ], literal[string] :[ identifier[n] [ literal[string] ]. identifier[pop] () keyword[for] identifier[n] keyword[in] identifier[_networks] ], } identifier[vagrant_machines] . identifier[append] ( identifier[vagrant_machine] ) keyword[for] identifier[role] keyword[in] identifier[machine] . identifier[roles] : identifier[vagrant_roles] . identifier[setdefault] ( identifier[role] ,[]). identifier[append] ( identifier[vagrant_machine] ) identifier[j] = identifier[j] + literal[int] identifier[logger] . identifier[debug] ( identifier[vagrant_roles] ) identifier[loader] = identifier[FileSystemLoader] ( identifier[searchpath] = identifier[TEMPLATE_DIR] ) identifier[env] = identifier[Environment] ( identifier[loader] = identifier[loader] , identifier[autoescape] = keyword[True] ) identifier[template] = identifier[env] . identifier[get_template] ( literal[string] ) identifier[vagrantfile] = identifier[template] . identifier[render] ( identifier[machines] = identifier[vagrant_machines] , identifier[provider_conf] = identifier[self] . identifier[provider_conf] ) identifier[vagrantfile_path] = identifier[os] . identifier[path] . identifier[join] ( identifier[os] . identifier[getcwd] (), literal[string] ) keyword[with] identifier[open] ( identifier[vagrantfile_path] , literal[string] ) keyword[as] identifier[f] : identifier[f] . identifier[write] ( identifier[vagrantfile] ) identifier[v_env] = identifier[dict] ( identifier[os] . identifier[environ] ) identifier[v_env] [ literal[string] ]= identifier[self] . identifier[provider_conf] . identifier[backend] identifier[v] = identifier[vagrant] . identifier[Vagrant] ( identifier[root] = identifier[os] . identifier[getcwd] (), identifier[quiet_stdout] = keyword[False] , identifier[quiet_stderr] = keyword[False] , identifier[env] = identifier[v_env] ) keyword[if] identifier[force_deploy] : identifier[v] . identifier[destroy] () identifier[v] . identifier[up] () identifier[v] . identifier[provision] () identifier[roles] ={} keyword[for] identifier[role] , identifier[machines] keyword[in] identifier[vagrant_roles] . identifier[items] (): keyword[for] identifier[machine] keyword[in] identifier[machines] : identifier[keyfile] = identifier[v] . 
identifier[keyfile] ( identifier[vm_name] = identifier[machine] [ literal[string] ]) identifier[port] = identifier[v] . identifier[port] ( identifier[vm_name] = identifier[machine] [ literal[string] ]) identifier[address] = identifier[v] . identifier[hostname] ( identifier[vm_name] = identifier[machine] [ literal[string] ]) identifier[roles] . identifier[setdefault] ( identifier[role] ,[]). identifier[append] ( identifier[Host] ( identifier[address] , identifier[alias] = identifier[machine] [ literal[string] ], identifier[user] = identifier[self] . identifier[provider_conf] . identifier[user] , identifier[port] = identifier[port] , identifier[keyfile] = identifier[keyfile] )) identifier[networks] =[{ literal[string] : identifier[str] ( identifier[n] [ literal[string] ]), literal[string] : identifier[str] ( identifier[n] [ literal[string] ][ literal[int] ]), literal[string] : identifier[str] ( identifier[n] [ literal[string] ][- literal[int] ]), literal[string] : literal[string] , literal[string] : identifier[str] ( identifier[n] [ literal[string] ]), literal[string] : identifier[n] [ literal[string] ] } keyword[for] identifier[n] keyword[in] identifier[_networks] ] identifier[logger] . identifier[debug] ( identifier[roles] ) identifier[logger] . identifier[debug] ( identifier[networks] ) keyword[return] ( identifier[roles] , identifier[networks] )
def init(self, force_deploy=False): """Reserve and deploys the vagrant boxes. Args: force_deploy (bool): True iff new machines should be started """ machines = self.provider_conf.machines networks = self.provider_conf.networks _networks = [] for network in networks: ipnet = IPNetwork(network.cidr) _networks.append({'netpool': list(ipnet)[10:-10], 'cidr': network.cidr, 'roles': network.roles, 'gateway': ipnet.ip}) # depends on [control=['for'], data=['network']] vagrant_machines = [] vagrant_roles = {} j = 0 for machine in machines: for _ in range(machine.number): vagrant_machine = {'name': 'enos-%s' % j, 'cpu': machine.flavour_desc['core'], 'mem': machine.flavour_desc['mem'], 'ips': [n['netpool'].pop() for n in _networks]} vagrant_machines.append(vagrant_machine) # Assign the machines to the right roles for role in machine.roles: vagrant_roles.setdefault(role, []).append(vagrant_machine) # depends on [control=['for'], data=['role']] j = j + 1 # depends on [control=['for'], data=[]] # depends on [control=['for'], data=['machine']] logger.debug(vagrant_roles) loader = FileSystemLoader(searchpath=TEMPLATE_DIR) env = Environment(loader=loader, autoescape=True) template = env.get_template('Vagrantfile.j2') vagrantfile = template.render(machines=vagrant_machines, provider_conf=self.provider_conf) vagrantfile_path = os.path.join(os.getcwd(), 'Vagrantfile') with open(vagrantfile_path, 'w') as f: f.write(vagrantfile) # depends on [control=['with'], data=['f']] # Build env for Vagrant with a copy of env variables (needed by # subprocess opened by vagrant v_env = dict(os.environ) v_env['VAGRANT_DEFAULT_PROVIDER'] = self.provider_conf.backend v = vagrant.Vagrant(root=os.getcwd(), quiet_stdout=False, quiet_stderr=False, env=v_env) if force_deploy: v.destroy() # depends on [control=['if'], data=[]] v.up() v.provision() roles = {} for (role, machines) in vagrant_roles.items(): for machine in machines: keyfile = v.keyfile(vm_name=machine['name']) port = v.port(vm_name=machine['name']) address = v.hostname(vm_name=machine['name']) roles.setdefault(role, []).append(Host(address, alias=machine['name'], user=self.provider_conf.user, port=port, keyfile=keyfile)) # depends on [control=['for'], data=['machine']] # depends on [control=['for'], data=[]] networks = [{'cidr': str(n['cidr']), 'start': str(n['netpool'][0]), 'end': str(n['netpool'][-1]), 'dns': '8.8.8.8', 'gateway': str(n['gateway']), 'roles': n['roles']} for n in _networks] logger.debug(roles) logger.debug(networks) return (roles, networks)
def from_api_repr(cls, resource):
    """Factory: construct a table reference given its API representation

    Args:
        resource (Dict[str, object]):
            Table reference representation returned from the API

    Returns:
        google.cloud.bigquery.table.TableReference:
            Table reference parsed from ``resource``.
    """
    from google.cloud.bigquery.dataset import DatasetReference

    project = resource["projectId"]
    dataset_id = resource["datasetId"]
    table_id = resource["tableId"]
    return cls(DatasetReference(project, dataset_id), table_id)
def function[from_api_repr, parameter[cls, resource]]: constant[Factory: construct a table reference given its API representation Args: resource (Dict[str, object]): Table reference representation returned from the API Returns: google.cloud.bigquery.table.TableReference: Table reference parsed from ``resource``. ] from relative_module[google.cloud.bigquery.dataset] import module[DatasetReference] variable[project] assign[=] call[name[resource]][constant[projectId]] variable[dataset_id] assign[=] call[name[resource]][constant[datasetId]] variable[table_id] assign[=] call[name[resource]][constant[tableId]] return[call[name[cls], parameter[call[name[DatasetReference], parameter[name[project], name[dataset_id]]], name[table_id]]]]
keyword[def] identifier[from_api_repr] ( identifier[cls] , identifier[resource] ): literal[string] keyword[from] identifier[google] . identifier[cloud] . identifier[bigquery] . identifier[dataset] keyword[import] identifier[DatasetReference] identifier[project] = identifier[resource] [ literal[string] ] identifier[dataset_id] = identifier[resource] [ literal[string] ] identifier[table_id] = identifier[resource] [ literal[string] ] keyword[return] identifier[cls] ( identifier[DatasetReference] ( identifier[project] , identifier[dataset_id] ), identifier[table_id] )
def from_api_repr(cls, resource): """Factory: construct a table reference given its API representation Args: resource (Dict[str, object]): Table reference representation returned from the API Returns: google.cloud.bigquery.table.TableReference: Table reference parsed from ``resource``. """ from google.cloud.bigquery.dataset import DatasetReference project = resource['projectId'] dataset_id = resource['datasetId'] table_id = resource['tableId'] return cls(DatasetReference(project, dataset_id), table_id)
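The from_api_repr sample above round-trips cleanly with a plain dict; here is a short usage example with placeholder identifiers, assuming the google-cloud-bigquery package is installed:

from google.cloud.bigquery.table import TableReference

resource = {"projectId": "my-project",   # placeholder identifiers
            "datasetId": "my_dataset",
            "tableId": "my_table"}
table_ref = TableReference.from_api_repr(resource)
print(table_ref.project, table_ref.dataset_id, table_ref.table_id)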
def set_window_size_and_position(window, window_key): """Adjust GTK Window's size, position and maximized state according to the corresponding values in the runtime_config file. The maximize method is triggered last to restore also the last stored size and position of the window. If the runtime_config does not exist, or the corresponding values are missing in the file, default values for the window size are used, and the mouse position is used to adjust the window's position. :param window: The GTK Window to be adjusted :param window_key: The window's key stored in the runtime config file """ size = global_runtime_config.get_config_value(window_key + '_WINDOW_SIZE') position = global_runtime_config.get_config_value(window_key + '_WINDOW_POS') maximized = global_runtime_config.get_config_value(window_key + '_WINDOW_MAXIMIZED') # un-maximize here on purpose otherwise resize and reposition fails if not maximized: window.unmaximize() if not size: size = constants.WINDOW_SIZE[window_key + '_WINDOW'] window.resize(*size) if position: position = (max(0, position[0]), max(0, position[1])) screen_width = Gdk.Screen.width() screen_height = Gdk.Screen.height() if position[0] < screen_width and position[1] < screen_height: window.move(*position) else: window.set_position(Gtk.WindowPosition.MOUSE) if maximized: window.maximize() window.show()
def function[set_window_size_and_position, parameter[window, window_key]]: constant[Adjust GTK Window's size, position and maximized state according to the corresponding values in the runtime_config file. The maximize method is triggered last to restore also the last stored size and position of the window. If the runtime_config does not exist, or the corresponding values are missing in the file, default values for the window size are used, and the mouse position is used to adjust the window's position. :param window: The GTK Window to be adjusted :param window_key: The window's key stored in the runtime config file ] variable[size] assign[=] call[name[global_runtime_config].get_config_value, parameter[binary_operation[name[window_key] + constant[_WINDOW_SIZE]]]] variable[position] assign[=] call[name[global_runtime_config].get_config_value, parameter[binary_operation[name[window_key] + constant[_WINDOW_POS]]]] variable[maximized] assign[=] call[name[global_runtime_config].get_config_value, parameter[binary_operation[name[window_key] + constant[_WINDOW_MAXIMIZED]]]] if <ast.UnaryOp object at 0x7da1b1c7ffa0> begin[:] call[name[window].unmaximize, parameter[]] if <ast.UnaryOp object at 0x7da1b1c7ed40> begin[:] variable[size] assign[=] call[name[constants].WINDOW_SIZE][binary_operation[name[window_key] + constant[_WINDOW]]] call[name[window].resize, parameter[<ast.Starred object at 0x7da1b1c7f310>]] if name[position] begin[:] variable[position] assign[=] tuple[[<ast.Call object at 0x7da1b1c7d300>, <ast.Call object at 0x7da1b1c7c0a0>]] variable[screen_width] assign[=] call[name[Gdk].Screen.width, parameter[]] variable[screen_height] assign[=] call[name[Gdk].Screen.height, parameter[]] if <ast.BoolOp object at 0x7da1b1c7f4c0> begin[:] call[name[window].move, parameter[<ast.Starred object at 0x7da1b1a8c9a0>]] if name[maximized] begin[:] call[name[window].maximize, parameter[]] call[name[window].show, parameter[]]
keyword[def] identifier[set_window_size_and_position] ( identifier[window] , identifier[window_key] ): literal[string] identifier[size] = identifier[global_runtime_config] . identifier[get_config_value] ( identifier[window_key] + literal[string] ) identifier[position] = identifier[global_runtime_config] . identifier[get_config_value] ( identifier[window_key] + literal[string] ) identifier[maximized] = identifier[global_runtime_config] . identifier[get_config_value] ( identifier[window_key] + literal[string] ) keyword[if] keyword[not] identifier[maximized] : identifier[window] . identifier[unmaximize] () keyword[if] keyword[not] identifier[size] : identifier[size] = identifier[constants] . identifier[WINDOW_SIZE] [ identifier[window_key] + literal[string] ] identifier[window] . identifier[resize] (* identifier[size] ) keyword[if] identifier[position] : identifier[position] =( identifier[max] ( literal[int] , identifier[position] [ literal[int] ]), identifier[max] ( literal[int] , identifier[position] [ literal[int] ])) identifier[screen_width] = identifier[Gdk] . identifier[Screen] . identifier[width] () identifier[screen_height] = identifier[Gdk] . identifier[Screen] . identifier[height] () keyword[if] identifier[position] [ literal[int] ]< identifier[screen_width] keyword[and] identifier[position] [ literal[int] ]< identifier[screen_height] : identifier[window] . identifier[move] (* identifier[position] ) keyword[else] : identifier[window] . identifier[set_position] ( identifier[Gtk] . identifier[WindowPosition] . identifier[MOUSE] ) keyword[if] identifier[maximized] : identifier[window] . identifier[maximize] () identifier[window] . identifier[show] ()
def set_window_size_and_position(window, window_key): """Adjust GTK Window's size, position and maximized state according to the corresponding values in the runtime_config file. The maximize method is triggered last to restore also the last stored size and position of the window. If the runtime_config does not exist, or the corresponding values are missing in the file, default values for the window size are used, and the mouse position is used to adjust the window's position. :param window: The GTK Window to be adjusted :param window_key: The window's key stored in the runtime config file """ size = global_runtime_config.get_config_value(window_key + '_WINDOW_SIZE') position = global_runtime_config.get_config_value(window_key + '_WINDOW_POS') maximized = global_runtime_config.get_config_value(window_key + '_WINDOW_MAXIMIZED') # un-maximize here on purpose otherwise resize and reposition fails if not maximized: window.unmaximize() # depends on [control=['if'], data=[]] if not size: size = constants.WINDOW_SIZE[window_key + '_WINDOW'] # depends on [control=['if'], data=[]] window.resize(*size) if position: position = (max(0, position[0]), max(0, position[1])) screen_width = Gdk.Screen.width() screen_height = Gdk.Screen.height() if position[0] < screen_width and position[1] < screen_height: window.move(*position) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] else: window.set_position(Gtk.WindowPosition.MOUSE) if maximized: window.maximize() # depends on [control=['if'], data=[]] window.show()
def no_missing_terms(formula_name, term_set):
    """
    Returns true if the set is not missing terms corresponding to the
    entries in Appendix D, False otherwise.  The set of terms should be exactly
    equal, and not contain more or less terms than expected.
    """
    reqd_terms = dimless_vertical_coordinates[formula_name]

    def has_all_terms(reqd_termset):
        return len(reqd_termset ^ term_set) == 0

    if isinstance(reqd_terms, set):
        return has_all_terms(reqd_terms)
    # if it's not a set, it's likely some other form of iterable with multiple
    # possible definitions i.e. a/ap are interchangeable
    else:
        return any(has_all_terms(req) for req in reqd_terms)
def function[no_missing_terms, parameter[formula_name, term_set]]: constant[ Returns true if the set is not missing terms corresponding to the entries in Appendix D, False otherwise. The set of terms should be exactly equal, and not contain more or less terms than expected. ] variable[reqd_terms] assign[=] call[name[dimless_vertical_coordinates]][name[formula_name]] def function[has_all_terms, parameter[reqd_termset]]: return[compare[call[name[len], parameter[binary_operation[name[reqd_termset] <ast.BitXor object at 0x7da2590d6b00> name[term_set]]]] equal[==] constant[0]]] if call[name[isinstance], parameter[name[reqd_terms], name[set]]] begin[:] return[call[name[has_all_terms], parameter[name[reqd_terms]]]]
keyword[def] identifier[no_missing_terms] ( identifier[formula_name] , identifier[term_set] ): literal[string] identifier[reqd_terms] = identifier[dimless_vertical_coordinates] [ identifier[formula_name] ] keyword[def] identifier[has_all_terms] ( identifier[reqd_termset] ): keyword[return] identifier[len] ( identifier[reqd_termset] ^ identifier[term_set] )== literal[int] keyword[if] identifier[isinstance] ( identifier[reqd_terms] , identifier[set] ): keyword[return] identifier[has_all_terms] ( identifier[reqd_terms] ) keyword[else] : keyword[return] identifier[any] ( identifier[has_all_terms] ( identifier[req] ) keyword[for] identifier[req] keyword[in] identifier[reqd_terms] )
def no_missing_terms(formula_name, term_set):
    """
    Returns true if the set is not missing terms corresponding to the entries in Appendix D, False otherwise. The set of terms should be exactly equal, and not contain more or less terms than expected. """
    reqd_terms = dimless_vertical_coordinates[formula_name]

    def has_all_terms(reqd_termset):
        return len(reqd_termset ^ term_set) == 0
    if isinstance(reqd_terms, set):
        return has_all_terms(reqd_terms) # depends on [control=['if'], data=[]]
    else:
        # if it's not a set, it's likely some other form of iterable with multiple
        # possible definitions i.e. a/ap are interchangeable
        return any((has_all_terms(req) for req in reqd_terms))
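The symmetric difference (^) makes this an exact-set match: extra terms fail just like missing ones. A self-contained sketch, with a hypothetical one-entry stand-in for the real Appendix D table:

# Hypothetical stand-in for the real dimless_vertical_coordinates table
dimless_vertical_coordinates = {"ocean_sigma": {"sigma", "eta", "depth"}}

def no_missing_terms(formula_name, term_set):
    reqd = dimless_vertical_coordinates[formula_name]
    return len(reqd ^ term_set) == 0

print(no_missing_terms("ocean_sigma", {"sigma", "eta", "depth"}))          # True
print(no_missing_terms("ocean_sigma", {"sigma", "eta"}))                   # False: missing a term
print(no_missing_terms("ocean_sigma", {"sigma", "eta", "depth", "zeta"}))  # False: extra term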
def sed(match, replacement, path, modifiers=""): """ Perform sed text substitution. """ cmd = "sed -r -i 's/%s/%s/%s' %s" % (match, replacement, modifiers, path) process = Subprocess(cmd, shell=True) ret, out, err = process.run(timeout=60) if ret: raise SubprocessError("Sed command failed!")
def function[sed, parameter[match, replacement, path, modifiers]]: constant[ Perform sed text substitution. ] variable[cmd] assign[=] binary_operation[constant[sed -r -i 's/%s/%s/%s' %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b09b97b0>, <ast.Name object at 0x7da1b09bad70>, <ast.Name object at 0x7da1b09b8040>, <ast.Name object at 0x7da1b09ba440>]]] variable[process] assign[=] call[name[Subprocess], parameter[name[cmd]]] <ast.Tuple object at 0x7da1b09b9840> assign[=] call[name[process].run, parameter[]] if name[ret] begin[:] <ast.Raise object at 0x7da1b09b8400>
keyword[def] identifier[sed] ( identifier[match] , identifier[replacement] , identifier[path] , identifier[modifiers] = literal[string] ): literal[string] identifier[cmd] = literal[string] %( identifier[match] , identifier[replacement] , identifier[modifiers] , identifier[path] ) identifier[process] = identifier[Subprocess] ( identifier[cmd] , identifier[shell] = keyword[True] ) identifier[ret] , identifier[out] , identifier[err] = identifier[process] . identifier[run] ( identifier[timeout] = literal[int] ) keyword[if] identifier[ret] : keyword[raise] identifier[SubprocessError] ( literal[string] )
def sed(match, replacement, path, modifiers=''): """ Perform sed text substitution. """ cmd = "sed -r -i 's/%s/%s/%s' %s" % (match, replacement, modifiers, path) process = Subprocess(cmd, shell=True) (ret, out, err) = process.run(timeout=60) if ret: raise SubprocessError('Sed command failed!') # depends on [control=['if'], data=[]]
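The helper above interpolates match/replacement strings straight into a shell command for GNU sed. For simple substitutions, a pure standard-library alternative avoids the shell round-trip entirely; this is a sketch of that swap (re.sub semantics, not a drop-in for every sed feature):

import re
from pathlib import Path

def sed_py(match, replacement, path, flags=0):
    """In-place regex substitution with no shell involved."""
    p = Path(path)
    p.write_text(re.sub(match, replacement, p.read_text(), flags=flags))

# sed_py(r"foo", "bar", "/tmp/example.txt")   # hypothetical file path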
def build_route_timetable( feed: "Feed", route_id: str, dates: List[str] ) -> DataFrame: """ Return a timetable for the given route and dates. Parameters ---------- feed : Feed route_id : string ID of a route in ``feed.routes`` dates : string or list A YYYYMMDD date string or list thereof Returns ------- DataFrame The columns are all those in ``feed.trips`` plus those in ``feed.stop_times`` plus ``'date'``, and the trip IDs are restricted to the given route ID. The result is sorted first by date and then by grouping by trip ID and sorting the groups by their first departure time. Skip dates outside of the Feed's dates. If there is no route activity on the given dates, then return an empty DataFrame. Notes ----- Assume the following feed attributes are not ``None``: - ``feed.stop_times`` - Those used in :func:`.trips.get_trips` """ dates = feed.restrict_dates(dates) if not dates: return pd.DataFrame() t = pd.merge(feed.trips, feed.stop_times) t = t[t["route_id"] == route_id].copy() a = feed.compute_trip_activity(dates) frames = [] for date in dates: # Slice to trips active on date ids = a.loc[a[date] == 1, "trip_id"] f = t[t["trip_id"].isin(ids)].copy() f["date"] = date # Groupby trip ID and sort groups by their minimum departure time. # For some reason NaN departure times mess up the transform below. # So temporarily fill NaN departure times as a workaround. f["dt"] = f["departure_time"].fillna(method="ffill") f["min_dt"] = f.groupby("trip_id")["dt"].transform(min) frames.append(f) f = pd.concat(frames) return f.sort_values(["date", "min_dt", "stop_sequence"]).drop( ["min_dt", "dt"], axis=1 )
def function[build_route_timetable, parameter[feed, route_id, dates]]: constant[ Return a timetable for the given route and dates. Parameters ---------- feed : Feed route_id : string ID of a route in ``feed.routes`` dates : string or list A YYYYMMDD date string or list thereof Returns ------- DataFrame The columns are all those in ``feed.trips`` plus those in ``feed.stop_times`` plus ``'date'``, and the trip IDs are restricted to the given route ID. The result is sorted first by date and then by grouping by trip ID and sorting the groups by their first departure time. Skip dates outside of the Feed's dates. If there is no route activity on the given dates, then return an empty DataFrame. Notes ----- Assume the following feed attributes are not ``None``: - ``feed.stop_times`` - Those used in :func:`.trips.get_trips` ] variable[dates] assign[=] call[name[feed].restrict_dates, parameter[name[dates]]] if <ast.UnaryOp object at 0x7da1b0c59ed0> begin[:] return[call[name[pd].DataFrame, parameter[]]] variable[t] assign[=] call[name[pd].merge, parameter[name[feed].trips, name[feed].stop_times]] variable[t] assign[=] call[call[name[t]][compare[call[name[t]][constant[route_id]] equal[==] name[route_id]]].copy, parameter[]] variable[a] assign[=] call[name[feed].compute_trip_activity, parameter[name[dates]]] variable[frames] assign[=] list[[]] for taget[name[date]] in starred[name[dates]] begin[:] variable[ids] assign[=] call[name[a].loc][tuple[[<ast.Compare object at 0x7da1b0baded0>, <ast.Constant object at 0x7da1b0bad900>]]] variable[f] assign[=] call[call[name[t]][call[call[name[t]][constant[trip_id]].isin, parameter[name[ids]]]].copy, parameter[]] call[name[f]][constant[date]] assign[=] name[date] call[name[f]][constant[dt]] assign[=] call[call[name[f]][constant[departure_time]].fillna, parameter[]] call[name[f]][constant[min_dt]] assign[=] call[call[call[name[f].groupby, parameter[constant[trip_id]]]][constant[dt]].transform, parameter[name[min]]] call[name[frames].append, parameter[name[f]]] variable[f] assign[=] call[name[pd].concat, parameter[name[frames]]] return[call[call[name[f].sort_values, parameter[list[[<ast.Constant object at 0x7da1b0ebfca0>, <ast.Constant object at 0x7da1b0ebc2b0>, <ast.Constant object at 0x7da1b0ebfaf0>]]]].drop, parameter[list[[<ast.Constant object at 0x7da1b0ebe9e0>, <ast.Constant object at 0x7da1b0ebf130>]]]]]
keyword[def] identifier[build_route_timetable] ( identifier[feed] : literal[string] , identifier[route_id] : identifier[str] , identifier[dates] : identifier[List] [ identifier[str] ] )-> identifier[DataFrame] : literal[string] identifier[dates] = identifier[feed] . identifier[restrict_dates] ( identifier[dates] ) keyword[if] keyword[not] identifier[dates] : keyword[return] identifier[pd] . identifier[DataFrame] () identifier[t] = identifier[pd] . identifier[merge] ( identifier[feed] . identifier[trips] , identifier[feed] . identifier[stop_times] ) identifier[t] = identifier[t] [ identifier[t] [ literal[string] ]== identifier[route_id] ]. identifier[copy] () identifier[a] = identifier[feed] . identifier[compute_trip_activity] ( identifier[dates] ) identifier[frames] =[] keyword[for] identifier[date] keyword[in] identifier[dates] : identifier[ids] = identifier[a] . identifier[loc] [ identifier[a] [ identifier[date] ]== literal[int] , literal[string] ] identifier[f] = identifier[t] [ identifier[t] [ literal[string] ]. identifier[isin] ( identifier[ids] )]. identifier[copy] () identifier[f] [ literal[string] ]= identifier[date] identifier[f] [ literal[string] ]= identifier[f] [ literal[string] ]. identifier[fillna] ( identifier[method] = literal[string] ) identifier[f] [ literal[string] ]= identifier[f] . identifier[groupby] ( literal[string] )[ literal[string] ]. identifier[transform] ( identifier[min] ) identifier[frames] . identifier[append] ( identifier[f] ) identifier[f] = identifier[pd] . identifier[concat] ( identifier[frames] ) keyword[return] identifier[f] . identifier[sort_values] ([ literal[string] , literal[string] , literal[string] ]). identifier[drop] ( [ literal[string] , literal[string] ], identifier[axis] = literal[int] )
def build_route_timetable(feed: 'Feed', route_id: str, dates: List[str]) -> DataFrame: """ Return a timetable for the given route and dates. Parameters ---------- feed : Feed route_id : string ID of a route in ``feed.routes`` dates : string or list A YYYYMMDD date string or list thereof Returns ------- DataFrame The columns are all those in ``feed.trips`` plus those in ``feed.stop_times`` plus ``'date'``, and the trip IDs are restricted to the given route ID. The result is sorted first by date and then by grouping by trip ID and sorting the groups by their first departure time. Skip dates outside of the Feed's dates. If there is no route activity on the given dates, then return an empty DataFrame. Notes ----- Assume the following feed attributes are not ``None``: - ``feed.stop_times`` - Those used in :func:`.trips.get_trips` """ dates = feed.restrict_dates(dates) if not dates: return pd.DataFrame() # depends on [control=['if'], data=[]] t = pd.merge(feed.trips, feed.stop_times) t = t[t['route_id'] == route_id].copy() a = feed.compute_trip_activity(dates) frames = [] for date in dates: # Slice to trips active on date ids = a.loc[a[date] == 1, 'trip_id'] f = t[t['trip_id'].isin(ids)].copy() f['date'] = date # Groupby trip ID and sort groups by their minimum departure time. # For some reason NaN departure times mess up the transform below. # So temporarily fill NaN departure times as a workaround. f['dt'] = f['departure_time'].fillna(method='ffill') f['min_dt'] = f.groupby('trip_id')['dt'].transform(min) frames.append(f) # depends on [control=['for'], data=['date']] f = pd.concat(frames) return f.sort_values(['date', 'min_dt', 'stop_sequence']).drop(['min_dt', 'dt'], axis=1)
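The min_dt column is the trick that keeps each trip's rows contiguous while ordering whole trips by their first departure: groupby-transform broadcasts the group minimum back onto every row, so a plain sort_values can use it. An isolated sketch with toy data:

import pandas as pd

f = pd.DataFrame({
    "trip_id": ["t2", "t2", "t1", "t1"],
    "departure_time": ["08:30", "08:45", "07:00", "07:15"],
})
# transform(min) broadcasts each trip's earliest departure onto all of its rows
f["min_dt"] = f.groupby("trip_id")["departure_time"].transform(min)
print(f.sort_values(["min_dt", "departure_time"]))
# t1's rows (first departure 07:00) now come before t2's (08:30)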
def handle_versions(repo, **kwargs): """:return: repo.versions()""" log.info('versions: %s %s' %(repo, kwargs)) if not hasattr(repo, 'versions'): return [] return [v.serialize() for v in repo.versions(**kwargs)]
def function[handle_versions, parameter[repo]]: constant[:return: repo.versions()] call[name[log].info, parameter[binary_operation[constant[versions: %s %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da207f98df0>, <ast.Name object at 0x7da207f98580>]]]]] if <ast.UnaryOp object at 0x7da207f98520> begin[:] return[list[[]]] return[<ast.ListComp object at 0x7da207f9a440>]
keyword[def] identifier[handle_versions] ( identifier[repo] ,** identifier[kwargs] ): literal[string] identifier[log] . identifier[info] ( literal[string] %( identifier[repo] , identifier[kwargs] )) keyword[if] keyword[not] identifier[hasattr] ( identifier[repo] , literal[string] ): keyword[return] [] keyword[return] [ identifier[v] . identifier[serialize] () keyword[for] identifier[v] keyword[in] identifier[repo] . identifier[versions] (** identifier[kwargs] )]
def handle_versions(repo, **kwargs): """:return: repo.versions()""" log.info('versions: %s %s' % (repo, kwargs)) if not hasattr(repo, 'versions'): return [] # depends on [control=['if'], data=[]] return [v.serialize() for v in repo.versions(**kwargs)]
def sheets(self): """return the sheets of data.""" data = Dict() for src in [src for src in self.zipfile.namelist() if 'xl/worksheets/' in src]: name = os.path.splitext(os.path.basename(src))[0] xml = self.xml(src) data[name] = xml return data
def function[sheets, parameter[self]]: constant[return the sheets of data.] variable[data] assign[=] call[name[Dict], parameter[]] for taget[name[src]] in starred[<ast.ListComp object at 0x7da20c6c47c0>] begin[:] variable[name] assign[=] call[call[name[os].path.splitext, parameter[call[name[os].path.basename, parameter[name[src]]]]]][constant[0]] variable[xml] assign[=] call[name[self].xml, parameter[name[src]]] call[name[data]][name[name]] assign[=] name[xml] return[name[data]]
keyword[def] identifier[sheets] ( identifier[self] ): literal[string] identifier[data] = identifier[Dict] () keyword[for] identifier[src] keyword[in] [ identifier[src] keyword[for] identifier[src] keyword[in] identifier[self] . identifier[zipfile] . identifier[namelist] () keyword[if] literal[string] keyword[in] identifier[src] ]: identifier[name] = identifier[os] . identifier[path] . identifier[splitext] ( identifier[os] . identifier[path] . identifier[basename] ( identifier[src] ))[ literal[int] ] identifier[xml] = identifier[self] . identifier[xml] ( identifier[src] ) identifier[data] [ identifier[name] ]= identifier[xml] keyword[return] identifier[data]
def sheets(self): """return the sheets of data.""" data = Dict() for src in [src for src in self.zipfile.namelist() if 'xl/worksheets/' in src]: name = os.path.splitext(os.path.basename(src))[0] xml = self.xml(src) data[name] = xml # depends on [control=['for'], data=['src']] return data
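The sheet discovery above relies on an .xlsx file being a zip archive whose worksheets live under xl/worksheets/. A standard-library-only sketch of that listing, assuming "book.xlsx" is a hypothetical workbook path:

import os
import zipfile

with zipfile.ZipFile("book.xlsx") as zf:
    for src in zf.namelist():
        if "xl/worksheets/" in src:
            name = os.path.splitext(os.path.basename(src))[0]
            print(name, len(zf.read(src)), "bytes of sheet XML")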
def delete_collection_persistent_volume(self, **kwargs): # noqa: E501 """delete_collection_persistent_volume # noqa: E501 delete collection of PersistentVolume # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.delete_collection_persistent_volume(async_req=True) >>> result = thread.get() :param async_req bool :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str pretty: If 'true', then the output is pretty printed. :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. 
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1Status If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.delete_collection_persistent_volume_with_http_info(**kwargs) # noqa: E501 else: (data) = self.delete_collection_persistent_volume_with_http_info(**kwargs) # noqa: E501 return data
def function[delete_collection_persistent_volume, parameter[self]]: constant[delete_collection_persistent_volume # noqa: E501 delete collection of PersistentVolume # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.delete_collection_persistent_volume(async_req=True) >>> result = thread.get() :param async_req bool :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str pretty: If 'true', then the output is pretty printed. :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. 
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1Status If the method is called asynchronously, returns the request thread. ] call[name[kwargs]][constant[_return_http_data_only]] assign[=] constant[True] if call[name[kwargs].get, parameter[constant[async_req]]] begin[:] return[call[name[self].delete_collection_persistent_volume_with_http_info, parameter[]]]
keyword[def] identifier[delete_collection_persistent_volume] ( identifier[self] ,** identifier[kwargs] ): literal[string] identifier[kwargs] [ literal[string] ]= keyword[True] keyword[if] identifier[kwargs] . identifier[get] ( literal[string] ): keyword[return] identifier[self] . identifier[delete_collection_persistent_volume_with_http_info] (** identifier[kwargs] ) keyword[else] : ( identifier[data] )= identifier[self] . identifier[delete_collection_persistent_volume_with_http_info] (** identifier[kwargs] ) keyword[return] identifier[data]
def delete_collection_persistent_volume(self, **kwargs): # noqa: E501 'delete_collection_persistent_volume # noqa: E501\n\n delete collection of PersistentVolume # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.delete_collection_persistent_volume(async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param bool include_uninitialized: If true, partially initialized resources are included in the response.\n :param str pretty: If \'true\', then the output is pretty printed.\n :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.\n :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.\n :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.\n :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. 
If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.\n :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it\'s 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.\n :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.\n :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.\n :return: V1Status\n If the method is called asynchronously,\n returns the request thread.\n ' kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.delete_collection_persistent_volume_with_http_info(**kwargs) # noqa: E501 # depends on [control=['if'], data=[]] else: data = self.delete_collection_persistent_volume_with_http_info(**kwargs) # noqa: E501 return data
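The async_req branch follows the generated-client convention of returning a handle whose .get() blocks for the result. A minimal sketch of that dispatch pattern using a thread pool; MiniClient and its methods are hypothetical stand-ins, not the real kubernetes client, which wires this up inside its *_with_http_info helpers:

from multiprocessing.pool import ThreadPool

class MiniClient:
    """Toy stand-in for a generated API client."""

    def __init__(self):
        self._pool = ThreadPool(processes=1)

    def _call(self, **kwargs):
        # placeholder for the real HTTP round-trip
        return {"status": "Success", "kwargs": kwargs}

    def delete_collection(self, **kwargs):
        if kwargs.pop("async_req", False):
            # async path: hand back a handle whose .get() blocks for the result
            return self._pool.apply_async(self._call, kwds=kwargs)
        return self._call(**kwargs)

client = MiniClient()
thread = client.delete_collection(async_req=True, label_selector="app=demo")
print(thread.get())   # blocks until the worker returns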
def _matrix_integration(q, h, t): ''' Returns the dp metric for a single horsetail curve at a given value of the epistemic uncertainties''' N = len(q) # correction if CDF has gone out of trapezium range if h[-1] < 0.9: h[-1] = 1.0 W = np.zeros([N, N]) for i in range(N): W[i, i] = 0.5*(h[min(i+1, N-1)] - h[max(i-1, 0)]) dp = (q - t).T.dot(W).dot(q - t) return dp
def function[_matrix_integration, parameter[q, h, t]]: constant[ Returns the dp metric for a single horsetail curve at a given value of the epistemic uncertainties] variable[N] assign[=] call[name[len], parameter[name[q]]] if compare[call[name[h]][<ast.UnaryOp object at 0x7da1b1594310>] less[<] constant[0.9]] begin[:] call[name[h]][<ast.UnaryOp object at 0x7da1b1595cf0>] assign[=] constant[1.0] variable[W] assign[=] call[name[np].zeros, parameter[list[[<ast.Name object at 0x7da1b1597ac0>, <ast.Name object at 0x7da1b1597b50>]]]] for taget[name[i]] in starred[call[name[range], parameter[name[N]]]] begin[:] call[name[W]][tuple[[<ast.Name object at 0x7da1b1594460>, <ast.Name object at 0x7da1b15952d0>]]] assign[=] binary_operation[constant[0.5] * binary_operation[call[name[h]][call[name[min], parameter[binary_operation[name[i] + constant[1]], binary_operation[name[N] - constant[1]]]]] - call[name[h]][call[name[max], parameter[binary_operation[name[i] - constant[1]], constant[0]]]]]] variable[dp] assign[=] call[call[binary_operation[name[q] - name[t]].T.dot, parameter[name[W]]].dot, parameter[binary_operation[name[q] - name[t]]]] return[name[dp]]
keyword[def] identifier[_matrix_integration] ( identifier[q] , identifier[h] , identifier[t] ): literal[string] identifier[N] = identifier[len] ( identifier[q] ) keyword[if] identifier[h] [- literal[int] ]< literal[int] : identifier[h] [- literal[int] ]= literal[int] identifier[W] = identifier[np] . identifier[zeros] ([ identifier[N] , identifier[N] ]) keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[N] ): identifier[W] [ identifier[i] , identifier[i] ]= literal[int] *( identifier[h] [ identifier[min] ( identifier[i] + literal[int] , identifier[N] - literal[int] )]- identifier[h] [ identifier[max] ( identifier[i] - literal[int] , literal[int] )]) identifier[dp] =( identifier[q] - identifier[t] ). identifier[T] . identifier[dot] ( identifier[W] ). identifier[dot] ( identifier[q] - identifier[t] ) keyword[return] identifier[dp]
def _matrix_integration(q, h, t): """ Returns the dp metric for a single horsetail curve at a given value of the epistemic uncertainties""" N = len(q) # correction if CDF has gone out of trapezium range if h[-1] < 0.9: h[-1] = 1.0 # depends on [control=['if'], data=[]] W = np.zeros([N, N]) for i in range(N): W[i, i] = 0.5 * (h[min(i + 1, N - 1)] - h[max(i - 1, 0)]) # depends on [control=['for'], data=['i']] dp = (q - t).T.dot(W).dot(q - t) return dp
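Since W is diagonal with trapezoid-style weights 0.5*(h[i+1]-h[i-1]), the quadratic form is just a weighted sum of squared gaps between the curve q and the target t. A self-contained numeric check of that equivalence, with made-up inputs:

import numpy as np

q = np.array([0.0, 1.0, 2.0, 3.0])
t = np.zeros(4)
h = np.array([0.0, 0.25, 0.75, 1.0])

N = len(q)
W = np.zeros((N, N))
for i in range(N):
    W[i, i] = 0.5 * (h[min(i + 1, N - 1)] - h[max(i - 1, 0)])

dp = (q - t).T.dot(W).dot(q - t)
direct = sum(W[i, i] * (q[i] - t[i]) ** 2 for i in range(N))
print(dp, direct)   # identical: the matrix form is a diagonal weighting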
def create_explicit(bounds): """Creates a new instance of distribution with explicit buckets. bounds is an iterable of ordered floats that define the explicit buckets Args: bounds (iterable[float]): initializes the bounds Return: :class:`endpoints_management.gen.servicecontrol_v1_messages.Distribution` Raises: ValueError: if the args are invalid for creating an instance """ safe_bounds = sorted(float(x) for x in bounds) if len(safe_bounds) != len(set(safe_bounds)): raise ValueError(u'Detected two elements of bounds that are the same') return sc_messages.Distribution( bucketCounts=[0] * (len(safe_bounds) + 1), explicitBuckets=sc_messages.ExplicitBuckets(bounds=safe_bounds))
def function[create_explicit, parameter[bounds]]: constant[Creates a new instance of distribution with explicit buckets. bounds is an iterable of ordered floats that define the explicit buckets Args: bounds (iterable[float]): initializes the bounds Return: :class:`endpoints_management.gen.servicecontrol_v1_messages.Distribution` Raises: ValueError: if the args are invalid for creating an instance ] variable[safe_bounds] assign[=] call[name[sorted], parameter[<ast.GeneratorExp object at 0x7da1b0400310>]] if compare[call[name[len], parameter[name[safe_bounds]]] not_equal[!=] call[name[len], parameter[call[name[set], parameter[name[safe_bounds]]]]]] begin[:] <ast.Raise object at 0x7da1b0401870> return[call[name[sc_messages].Distribution, parameter[]]]
keyword[def] identifier[create_explicit] ( identifier[bounds] ): literal[string] identifier[safe_bounds] = identifier[sorted] ( identifier[float] ( identifier[x] ) keyword[for] identifier[x] keyword[in] identifier[bounds] ) keyword[if] identifier[len] ( identifier[safe_bounds] )!= identifier[len] ( identifier[set] ( identifier[safe_bounds] )): keyword[raise] identifier[ValueError] ( literal[string] ) keyword[return] identifier[sc_messages] . identifier[Distribution] ( identifier[bucketCounts] =[ literal[int] ]*( identifier[len] ( identifier[safe_bounds] )+ literal[int] ), identifier[explicitBuckets] = identifier[sc_messages] . identifier[ExplicitBuckets] ( identifier[bounds] = identifier[safe_bounds] ))
def create_explicit(bounds): """Creates a new instance of distribution with explicit buckets. bounds is an iterable of ordered floats that define the explicit buckets Args: bounds (iterable[float]): initializes the bounds Return: :class:`endpoints_management.gen.servicecontrol_v1_messages.Distribution` Raises: ValueError: if the args are invalid for creating an instance """ safe_bounds = sorted((float(x) for x in bounds)) if len(safe_bounds) != len(set(safe_bounds)): raise ValueError(u'Detected two elements of bounds that are the same') # depends on [control=['if'], data=[]] return sc_messages.Distribution(bucketCounts=[0] * (len(safe_bounds) + 1), explicitBuckets=sc_messages.ExplicitBuckets(bounds=safe_bounds))
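With n sorted bounds, an explicit distribution needs n+1 counters: one underflow bucket below the first bound, one per interval, and one overflow bucket at the top, which is why bucketCounts is sized len(bounds)+1 above. A sketch of one common bucketing convention using bisect, independent of the servicecontrol message classes:

import bisect

bounds = [1.0, 5.0, 10.0]             # must be sorted and distinct
bucket_counts = [0] * (len(bounds) + 1)

for sample in [0.3, 2.0, 7.5, 42.0]:
    bucket_counts[bisect.bisect(bounds, sample)] += 1

print(bucket_counts)  # [1, 1, 1, 1]: buckets (-inf,1), [1,5), [5,10), [10,inf)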
def _handle_fundamental_types(self, typ): """ Handles POD types nodes. see init_fundamental_types for the registration. """ ctypesname = self.get_ctypes_name(typ.kind) if typ.kind == TypeKind.VOID: size = align = 1 else: size = typ.get_size() align = typ.get_align() return typedesc.FundamentalType(ctypesname, size, align)
def function[_handle_fundamental_types, parameter[self, typ]]: constant[ Handles POD types nodes. see init_fundamental_types for the registration. ] variable[ctypesname] assign[=] call[name[self].get_ctypes_name, parameter[name[typ].kind]] if compare[name[typ].kind equal[==] name[TypeKind].VOID] begin[:] variable[size] assign[=] constant[1] return[call[name[typedesc].FundamentalType, parameter[name[ctypesname], name[size], name[align]]]]
keyword[def] identifier[_handle_fundamental_types] ( identifier[self] , identifier[typ] ): literal[string] identifier[ctypesname] = identifier[self] . identifier[get_ctypes_name] ( identifier[typ] . identifier[kind] ) keyword[if] identifier[typ] . identifier[kind] == identifier[TypeKind] . identifier[VOID] : identifier[size] = identifier[align] = literal[int] keyword[else] : identifier[size] = identifier[typ] . identifier[get_size] () identifier[align] = identifier[typ] . identifier[get_align] () keyword[return] identifier[typedesc] . identifier[FundamentalType] ( identifier[ctypesname] , identifier[size] , identifier[align] )
def _handle_fundamental_types(self, typ): """ Handles POD types nodes. see init_fundamental_types for the registration. """ ctypesname = self.get_ctypes_name(typ.kind) if typ.kind == TypeKind.VOID: size = align = 1 # depends on [control=['if'], data=[]] else: size = typ.get_size() align = typ.get_align() return typedesc.FundamentalType(ctypesname, size, align)
def inc(self, key, key_length=0):
        """Increment the value stored for a key by 1

        Params:
            <str> key
            <int> key_length
        Return:
            <int> key_value
        """
        if key_length < 1:
            key_length = len(key)
        val = self.add_method(self, key, key_length, 1)
        if self.k:
            self._update(key, val)
        return val
def function[inc, parameter[self, key, key_length]]:
    constant[Increment the value stored for a key by 1

        Params:
            <str> key
            <int> key_length
        Return:
            <int> key_value
        ]
    if compare[name[key_length] less[<] constant[1]] begin[:]
        variable[key_length] assign[=] call[name[len], parameter[name[key]]]
    variable[val] assign[=] call[name[self].add_method, parameter[name[self], name[key], name[key_length], constant[1]]]
    if name[self].k begin[:]
        call[name[self]._update, parameter[name[key], name[val]]]
    return[name[val]]
keyword[def] identifier[inc] ( identifier[self] , identifier[key] , identifier[key_length] = literal[int] ): literal[string] keyword[if] identifier[key_length] < literal[int] : identifier[key_length] = identifier[len] ( identifier[key] ) identifier[val] = identifier[self] . identifier[add_method] ( identifier[self] , identifier[key] , identifier[key_length] , literal[int] ) keyword[if] identifier[self] . identifier[k] : identifier[self] . identifier[_update] ( identifier[key] , identifier[val] ) keyword[return] identifier[val]
def inc(self, key, key_length=0):
    """Increment the value stored for a key by 1

        Params:
            <str> key
            <int> key_length
        Return:
            <int> key_value
        """
    if key_length < 1:
        key_length = len(key) # depends on [control=['if'], data=['key_length']]
    val = self.add_method(self, key, key_length, 1)
    if self.k:
        self._update(key, val) # depends on [control=['if'], data=[]]
    return val
def get_includes(self, path): """ Get all includes from a config in a given path """ config = self.file_index.unfold_yaml(path) return self.get_includes_from_dict(config, extract=True)
def function[get_includes, parameter[self, path]]: constant[ Get all includes from a config in a given path ] variable[config] assign[=] call[name[self].file_index.unfold_yaml, parameter[name[path]]] return[call[name[self].get_includes_from_dict, parameter[name[config]]]]
keyword[def] identifier[get_includes] ( identifier[self] , identifier[path] ): literal[string] identifier[config] = identifier[self] . identifier[file_index] . identifier[unfold_yaml] ( identifier[path] ) keyword[return] identifier[self] . identifier[get_includes_from_dict] ( identifier[config] , identifier[extract] = keyword[True] )
def get_includes(self, path): """ Get all includes from a config in a given path """ config = self.file_index.unfold_yaml(path) return self.get_includes_from_dict(config, extract=True)
def find_mismatch(self, other, indent=''): """ Highlights where two nodes differ in a human-readable form Parameters ---------- other : TreeNode The node to compare indent : str The white-space with which to indent output string Returns ------- mismatch : str The human-readable mismatch string """ if self != other: mismatch = "\n{}{}".format(indent, type(self).__name__) else: mismatch = '' sub_indent = indent + ' ' if len(list(self.filesets)) != len(list(other.filesets)): mismatch += ('\n{indent}mismatching summary fileset lengths ' '(self={} vs other={}): ' '\n{indent} self={}\n{indent} other={}' .format(len(list(self.filesets)), len(list(other.filesets)), list(self.filesets), list(other.filesets), indent=sub_indent)) else: for s, o in zip(self.filesets, other.filesets): mismatch += s.find_mismatch(o, indent=sub_indent) if len(list(self.fields)) != len(list(other.fields)): mismatch += ('\n{indent}mismatching summary field lengths ' '(self={} vs other={}): ' '\n{indent} self={}\n{indent} other={}' .format(len(list(self.fields)), len(list(other.fields)), list(self.fields), list(other.fields), indent=sub_indent)) else: for s, o in zip(self.fields, other.fields): mismatch += s.find_mismatch(o, indent=sub_indent) return mismatch
def function[find_mismatch, parameter[self, other, indent]]: constant[ Highlights where two nodes differ in a human-readable form Parameters ---------- other : TreeNode The node to compare indent : str The white-space with which to indent output string Returns ------- mismatch : str The human-readable mismatch string ] if compare[name[self] not_equal[!=] name[other]] begin[:] variable[mismatch] assign[=] call[constant[ {}{}].format, parameter[name[indent], call[name[type], parameter[name[self]]].__name__]] variable[sub_indent] assign[=] binary_operation[name[indent] + constant[ ]] if compare[call[name[len], parameter[call[name[list], parameter[name[self].filesets]]]] not_equal[!=] call[name[len], parameter[call[name[list], parameter[name[other].filesets]]]]] begin[:] <ast.AugAssign object at 0x7da18dc07d30> if compare[call[name[len], parameter[call[name[list], parameter[name[self].fields]]]] not_equal[!=] call[name[len], parameter[call[name[list], parameter[name[other].fields]]]]] begin[:] <ast.AugAssign object at 0x7da18dc071c0> return[name[mismatch]]
keyword[def] identifier[find_mismatch] ( identifier[self] , identifier[other] , identifier[indent] = literal[string] ): literal[string] keyword[if] identifier[self] != identifier[other] : identifier[mismatch] = literal[string] . identifier[format] ( identifier[indent] , identifier[type] ( identifier[self] ). identifier[__name__] ) keyword[else] : identifier[mismatch] = literal[string] identifier[sub_indent] = identifier[indent] + literal[string] keyword[if] identifier[len] ( identifier[list] ( identifier[self] . identifier[filesets] ))!= identifier[len] ( identifier[list] ( identifier[other] . identifier[filesets] )): identifier[mismatch] +=( literal[string] literal[string] literal[string] . identifier[format] ( identifier[len] ( identifier[list] ( identifier[self] . identifier[filesets] )), identifier[len] ( identifier[list] ( identifier[other] . identifier[filesets] )), identifier[list] ( identifier[self] . identifier[filesets] ), identifier[list] ( identifier[other] . identifier[filesets] ), identifier[indent] = identifier[sub_indent] )) keyword[else] : keyword[for] identifier[s] , identifier[o] keyword[in] identifier[zip] ( identifier[self] . identifier[filesets] , identifier[other] . identifier[filesets] ): identifier[mismatch] += identifier[s] . identifier[find_mismatch] ( identifier[o] , identifier[indent] = identifier[sub_indent] ) keyword[if] identifier[len] ( identifier[list] ( identifier[self] . identifier[fields] ))!= identifier[len] ( identifier[list] ( identifier[other] . identifier[fields] )): identifier[mismatch] +=( literal[string] literal[string] literal[string] . identifier[format] ( identifier[len] ( identifier[list] ( identifier[self] . identifier[fields] )), identifier[len] ( identifier[list] ( identifier[other] . identifier[fields] )), identifier[list] ( identifier[self] . identifier[fields] ), identifier[list] ( identifier[other] . identifier[fields] ), identifier[indent] = identifier[sub_indent] )) keyword[else] : keyword[for] identifier[s] , identifier[o] keyword[in] identifier[zip] ( identifier[self] . identifier[fields] , identifier[other] . identifier[fields] ): identifier[mismatch] += identifier[s] . identifier[find_mismatch] ( identifier[o] , identifier[indent] = identifier[sub_indent] ) keyword[return] identifier[mismatch]
def find_mismatch(self, other, indent=''): """ Highlights where two nodes differ in a human-readable form Parameters ---------- other : TreeNode The node to compare indent : str The white-space with which to indent output string Returns ------- mismatch : str The human-readable mismatch string """ if self != other: mismatch = '\n{}{}'.format(indent, type(self).__name__) # depends on [control=['if'], data=['self']] else: mismatch = '' sub_indent = indent + ' ' if len(list(self.filesets)) != len(list(other.filesets)): mismatch += '\n{indent}mismatching summary fileset lengths (self={} vs other={}): \n{indent} self={}\n{indent} other={}'.format(len(list(self.filesets)), len(list(other.filesets)), list(self.filesets), list(other.filesets), indent=sub_indent) # depends on [control=['if'], data=[]] else: for (s, o) in zip(self.filesets, other.filesets): mismatch += s.find_mismatch(o, indent=sub_indent) # depends on [control=['for'], data=[]] if len(list(self.fields)) != len(list(other.fields)): mismatch += '\n{indent}mismatching summary field lengths (self={} vs other={}): \n{indent} self={}\n{indent} other={}'.format(len(list(self.fields)), len(list(other.fields)), list(self.fields), list(other.fields), indent=sub_indent) # depends on [control=['if'], data=[]] else: for (s, o) in zip(self.fields, other.fields): mismatch += s.find_mismatch(o, indent=sub_indent) # depends on [control=['for'], data=[]] return mismatch
def before_render(self):
        """Before template render hook
        """
        # Render the Add button if the user has the AddMethod permission
        if check_permission(AddMethod, self.context):
            self.context_actions[_("Add")] = {
                "url": "createObject?type_name=Method",
                "icon": "++resource++bika.lims.images/add.png"
            }

        # Don't allow any context actions on the Methods folder
        self.request.set("disable_border", 1)
def function[before_render, parameter[self]]: constant[Before template render hook ] if call[name[check_permission], parameter[name[AddMethod], name[self].context]] begin[:] call[name[self].context_actions][call[name[_], parameter[constant[Add]]]] assign[=] dictionary[[<ast.Constant object at 0x7da1b1d67430>, <ast.Constant object at 0x7da1b1d66260>], [<ast.Constant object at 0x7da1b1d66c20>, <ast.Constant object at 0x7da1b1d64340>]] call[name[self].request.set, parameter[constant[disable_border], constant[1]]]
keyword[def] identifier[before_render] ( identifier[self] ): literal[string] keyword[if] identifier[check_permission] ( identifier[AddMethod] , identifier[self] . identifier[context] ): identifier[self] . identifier[context_actions] [ identifier[_] ( literal[string] )]={ literal[string] : literal[string] , literal[string] : literal[string] } identifier[self] . identifier[request] . identifier[set] ( literal[string] , literal[int] )
def before_render(self):
    """Before template render hook """
    # Render the Add button if the user has the AddMethod permission
    if check_permission(AddMethod, self.context):
        self.context_actions[_('Add')] = {'url': 'createObject?type_name=Method', 'icon': '++resource++bika.lims.images/add.png'} # depends on [control=['if'], data=[]]
    # Don't allow any context actions on the Methods folder
    self.request.set('disable_border', 1)
def read_pid_constants(self): """Reads back the PID constants stored on the Grizzly.""" p = self._read_as_int(Addr.PConstant, 4) i = self._read_as_int(Addr.IConstant, 4) d = self._read_as_int(Addr.DConstant, 4) return map(lambda x: x / (2 ** 16), (p, i, d))
def function[read_pid_constants, parameter[self]]: constant[Reads back the PID constants stored on the Grizzly.] variable[p] assign[=] call[name[self]._read_as_int, parameter[name[Addr].PConstant, constant[4]]] variable[i] assign[=] call[name[self]._read_as_int, parameter[name[Addr].IConstant, constant[4]]] variable[d] assign[=] call[name[self]._read_as_int, parameter[name[Addr].DConstant, constant[4]]] return[call[name[map], parameter[<ast.Lambda object at 0x7da18eb55ed0>, tuple[[<ast.Name object at 0x7da18eb56860>, <ast.Name object at 0x7da18eb54f70>, <ast.Name object at 0x7da18eb566e0>]]]]]
keyword[def] identifier[read_pid_constants] ( identifier[self] ): literal[string] identifier[p] = identifier[self] . identifier[_read_as_int] ( identifier[Addr] . identifier[PConstant] , literal[int] ) identifier[i] = identifier[self] . identifier[_read_as_int] ( identifier[Addr] . identifier[IConstant] , literal[int] ) identifier[d] = identifier[self] . identifier[_read_as_int] ( identifier[Addr] . identifier[DConstant] , literal[int] ) keyword[return] identifier[map] ( keyword[lambda] identifier[x] : identifier[x] /( literal[int] ** literal[int] ),( identifier[p] , identifier[i] , identifier[d] ))
def read_pid_constants(self): """Reads back the PID constants stored on the Grizzly.""" p = self._read_as_int(Addr.PConstant, 4) i = self._read_as_int(Addr.IConstant, 4) d = self._read_as_int(Addr.DConstant, 4) return map(lambda x: x / 2 ** 16, (p, i, d))
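The 2**16 divisor suggests the Grizzly stores PID gains as Q16.16 fixed point: the high 16 bits hold the integer part and the low 16 bits the fraction. A two-line check of that conversion with a made-up raw register value:

raw = 0x00018000            # integer part 1 in the high word, 0x8000/65536 = 0.5 below
print(raw / (2 ** 16))      # 1.5 under true division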
def constructFMIndex(self, logger):
        '''
        This function iterates through the BWT and counts the letters as it goes to create the FM index.
        For example, the string 'ACC$' would have BWT 'C$CA'.  The FM index would iterate over this and count the
        occurrence of the letter it found so you'd end up with this:
        BWT    FM-index
        C    0    0    0
        $    0    0    1
        C    1    0    1
        A    1    0    2
             1    1    2
        This is necessary for finding the occurrence of a letter using the getOccurrenceOfCharAtIndex(...) function.
        In reality, this function creates a sampled FM-index more complicated than the uncompressed counter-part.
        This is because the 2048 size bins don't fall evenly all the time.  A second data structure is used to tell
        you where to start a particular FM-index count.  The two files necessary are '<DIR>/comp_fmIndex.npy' and
        '<DIR>/comp_refIndex.npy'
        '''
        #sampling method
        self.searchCache = {}
        self.bitPower = 11
        self.binSize = 2**self.bitPower

        self.fmIndexFN = self.dirName+'/comp_fmIndex.npy'
        self.fmRefFN = self.dirName+'/comp_refIndex.npy'

        if os.path.exists(self.fmIndexFN) and os.path.exists(self.fmRefFN):
            #both exist, just memmap them
            self.partialFM = np.load(self.fmIndexFN, 'r')
            self.refFM = np.load(self.fmRefFN, 'r')
        else:
            if logger != None:
                logger.info('First time calculation of \'%s\'' % self.fmIndexFN)

            #pre-allocate space
            samplingSize = int(math.ceil(float(self.totalSize)/self.binSize))
            self.partialFM = np.lib.format.open_memmap(self.fmIndexFN, 'w+', '<u8', (samplingSize, self.vcLen))
            self.refFM = np.lib.format.open_memmap(self.fmRefFN, 'w+', '<u8', (samplingSize,))

            countsSoFar = np.cumsum(self.totalCounts)-self.totalCounts
            totalCounts = 0

            prevStart = 0
            bwtIndex = 0
            chunkSize = 10000
            samplingID = 0

            #iterate through the whole file creating dynamically sized bins
            while bwtIndex < self.bwt.shape[0] and samplingID < samplingSize:
                #extract letters and counts so we can do sums
                letters = np.bitwise_and(self.bwt[bwtIndex:bwtIndex+chunkSize], self.mask)
                counts = np.right_shift(self.bwt[bwtIndex:bwtIndex+chunkSize], self.letterBits, dtype='<u8')

                #numpy methods for finding the powers
                i = 1
                same = (letters[0:-1] == letters[1:])
                while np.count_nonzero(same) > 0:
                    (counts[i:])[same] *= self.numPower
                    i += 1
                    same = np.bitwise_and(same[0:-1], same[1:])
                offsets = np.cumsum(counts)

                #this is basically looking for a clean breakpoint for our bin to end
                moreToUpdate = True
                while moreToUpdate:
                    prevStart = np.searchsorted(offsets, samplingID*self.binSize-totalCounts, 'right')
                    if prevStart == letters.shape[0]:
                        prevStart -= 1
                        while prevStart > 0 and letters[prevStart] == letters[prevStart-1]:
                            prevStart -= 1
                        moreToUpdate = False
                    else:
                        while prevStart > 0 and letters[prevStart] == letters[prevStart-1]:
                            prevStart -= 1

                        self.refFM[samplingID] = bwtIndex+prevStart
                        if prevStart > 0:
                            self.partialFM[samplingID][:] = np.add(countsSoFar, np.bincount(letters[0:prevStart], counts[0:prevStart], self.vcLen))
                        else:
                            self.partialFM[samplingID][:] = countsSoFar
                        samplingID += 1

                bwtIndex += prevStart
                if prevStart > 0:
                    countsSoFar += np.bincount(letters[0:prevStart], counts[0:prevStart], self.vcLen)
                    totalCounts += np.sum(np.bincount(letters[0:prevStart], counts[0:prevStart], self.vcLen))

        #we'll use this later when we do lookups
        self.offsetSum = np.sum(self.partialFM[0])
def function[constructFMIndex, parameter[self, logger]]: constant[ This function iterates through the BWT and counts the letters as it goes to create the FM index. For example, the string 'ACC$' would have BWT 'C$CA'. The FM index would iterate over this and count the occurence of the letter it found so you'd end up with this: BWT FM-index C 0 0 0 $ 0 0 1 C 1 0 1 A 1 0 2 1 1 2 This is necessary for finding the occurrence of a letter using the getOccurrenceOfCharAtIndex(...) function. In reality, this function creates a sampled FM-index more complicated than the uncompressed counter-part. This is because the 2048 size bins don't fall evenly all the time. A second data structure is used to tell you where to start a particular FM-index count. The two files necessary are '<DIR>/comp_fmIndex.npy' and '<DIR>/comp_refIndex.npy' ] name[self].searchCache assign[=] dictionary[[], []] name[self].bitPower assign[=] constant[11] name[self].binSize assign[=] binary_operation[constant[2] ** name[self].bitPower] name[self].fmIndexFN assign[=] binary_operation[name[self].dirName + constant[/comp_fmIndex.npy]] name[self].fmRefFN assign[=] binary_operation[name[self].dirName + constant[/comp_refIndex.npy]] if <ast.BoolOp object at 0x7da20e9b33d0> begin[:] name[self].partialFM assign[=] call[name[np].load, parameter[name[self].fmIndexFN, constant[r]]] name[self].refFM assign[=] call[name[np].load, parameter[name[self].fmRefFN, constant[r]]] name[self].offsetSum assign[=] call[name[np].sum, parameter[call[name[self].partialFM][constant[0]]]]
keyword[def] identifier[constructFMIndex] ( identifier[self] , identifier[logger] ): literal[string] identifier[self] . identifier[searchCache] ={} identifier[self] . identifier[bitPower] = literal[int] identifier[self] . identifier[binSize] = literal[int] ** identifier[self] . identifier[bitPower] identifier[self] . identifier[fmIndexFN] = identifier[self] . identifier[dirName] + literal[string] identifier[self] . identifier[fmRefFN] = identifier[self] . identifier[dirName] + literal[string] keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[self] . identifier[fmIndexFN] ) keyword[and] identifier[os] . identifier[path] . identifier[exists] ( identifier[self] . identifier[fmRefFN] ): identifier[self] . identifier[partialFM] = identifier[np] . identifier[load] ( identifier[self] . identifier[fmIndexFN] , literal[string] ) identifier[self] . identifier[refFM] = identifier[np] . identifier[load] ( identifier[self] . identifier[fmRefFN] , literal[string] ) keyword[else] : keyword[if] identifier[logger] != keyword[None] : identifier[logger] . identifier[info] ( literal[string] % identifier[self] . identifier[fmIndexFN] ) identifier[samplingSize] = identifier[int] ( identifier[math] . identifier[ceil] ( identifier[float] ( identifier[self] . identifier[totalSize] )/ identifier[self] . identifier[binSize] )) identifier[self] . identifier[partialFM] = identifier[np] . identifier[lib] . identifier[format] . identifier[open_memmap] ( identifier[self] . identifier[fmIndexFN] , literal[string] , literal[string] ,( identifier[samplingSize] , identifier[self] . identifier[vcLen] )) identifier[self] . identifier[refFM] = identifier[np] . identifier[lib] . identifier[format] . identifier[open_memmap] ( identifier[self] . identifier[fmRefFN] , literal[string] , literal[string] ,( identifier[samplingSize] ,)) identifier[countsSoFar] = identifier[np] . identifier[cumsum] ( identifier[self] . identifier[totalCounts] )- identifier[self] . identifier[totalCounts] identifier[totalCounts] = literal[int] identifier[prevStart] = literal[int] identifier[bwtIndex] = literal[int] identifier[chunkSize] = literal[int] identifier[samplingID] = literal[int] keyword[while] identifier[bwtIndex] < identifier[self] . identifier[bwt] . identifier[shape] [ literal[int] ] keyword[and] identifier[samplingID] < identifier[samplingSize] : identifier[letters] = identifier[np] . identifier[bitwise_and] ( identifier[self] . identifier[bwt] [ identifier[bwtIndex] : identifier[bwtIndex] + identifier[chunkSize] ], identifier[self] . identifier[mask] ) identifier[counts] = identifier[np] . identifier[right_shift] ( identifier[self] . identifier[bwt] [ identifier[bwtIndex] : identifier[bwtIndex] + identifier[chunkSize] ], identifier[self] . identifier[letterBits] , identifier[dtype] = literal[string] ) identifier[i] = literal[int] identifier[same] =( identifier[letters] [ literal[int] :- literal[int] ]== identifier[letters] [ literal[int] :]) keyword[while] identifier[np] . identifier[count_nonzero] ( identifier[same] )> literal[int] : ( identifier[counts] [ identifier[i] :])[ identifier[same] ]*= identifier[self] . identifier[numPower] identifier[i] += literal[int] identifier[same] = identifier[np] . identifier[bitwise_and] ( identifier[same] [ literal[int] :- literal[int] ], identifier[same] [ literal[int] :]) identifier[offsets] = identifier[np] . 
identifier[cumsum] ( identifier[counts] ) identifier[moreToUpdate] = keyword[True] keyword[while] identifier[moreToUpdate] : identifier[prevStart] = identifier[np] . identifier[searchsorted] ( identifier[offsets] , identifier[samplingID] * identifier[self] . identifier[binSize] - identifier[totalCounts] , literal[string] ) keyword[if] identifier[prevStart] == identifier[letters] . identifier[shape] [ literal[int] ]: identifier[prevStart] -= literal[int] keyword[while] identifier[prevStart] > literal[int] keyword[and] identifier[letters] [ identifier[prevStart] ]== identifier[letters] [ identifier[prevStart] - literal[int] ]: identifier[prevStart] -= literal[int] identifier[moreToUpdate] = keyword[False] keyword[else] : keyword[while] identifier[prevStart] > literal[int] keyword[and] identifier[letters] [ identifier[prevStart] ]== identifier[letters] [ identifier[prevStart] - literal[int] ]: identifier[prevStart] -= literal[int] identifier[self] . identifier[refFM] [ identifier[samplingID] ]= identifier[bwtIndex] + identifier[prevStart] keyword[if] identifier[prevStart] > literal[int] : identifier[self] . identifier[partialFM] [ identifier[samplingID] ][:]= identifier[np] . identifier[add] ( identifier[countsSoFar] , identifier[np] . identifier[bincount] ( identifier[letters] [ literal[int] : identifier[prevStart] ], identifier[counts] [ literal[int] : identifier[prevStart] ], identifier[self] . identifier[vcLen] )) keyword[else] : identifier[self] . identifier[partialFM] [ identifier[samplingID] ][:]= identifier[countsSoFar] identifier[samplingID] += literal[int] identifier[bwtIndex] += identifier[prevStart] keyword[if] identifier[prevStart] > literal[int] : identifier[countsSoFar] += identifier[np] . identifier[bincount] ( identifier[letters] [ literal[int] : identifier[prevStart] ], identifier[counts] [ literal[int] : identifier[prevStart] ], identifier[self] . identifier[vcLen] ) identifier[totalCounts] += identifier[np] . identifier[sum] ( identifier[np] . identifier[bincount] ( identifier[letters] [ literal[int] : identifier[prevStart] ], identifier[counts] [ literal[int] : identifier[prevStart] ], identifier[self] . identifier[vcLen] )) identifier[self] . identifier[offsetSum] = identifier[np] . identifier[sum] ( identifier[self] . identifier[partialFM] [ literal[int] ])
def constructFMIndex(self, logger): """ This function iterates through the BWT and counts the letters as it goes to create the FM index. For example, the string 'ACC$' would have BWT 'C$CA'. The FM index would iterate over this and count the occurence of the letter it found so you'd end up with this: BWT FM-index C 0 0 0 $ 0 0 1 C 1 0 1 A 1 0 2 1 1 2 This is necessary for finding the occurrence of a letter using the getOccurrenceOfCharAtIndex(...) function. In reality, this function creates a sampled FM-index more complicated than the uncompressed counter-part. This is because the 2048 size bins don't fall evenly all the time. A second data structure is used to tell you where to start a particular FM-index count. The two files necessary are '<DIR>/comp_fmIndex.npy' and '<DIR>/comp_refIndex.npy' """ #sampling method self.searchCache = {} self.bitPower = 11 self.binSize = 2 ** self.bitPower self.fmIndexFN = self.dirName + '/comp_fmIndex.npy' self.fmRefFN = self.dirName + '/comp_refIndex.npy' if os.path.exists(self.fmIndexFN) and os.path.exists(self.fmRefFN): #both exist, just memmap them self.partialFM = np.load(self.fmIndexFN, 'r') self.refFM = np.load(self.fmRefFN, 'r') # depends on [control=['if'], data=[]] else: if logger != None: logger.info("First time calculation of '%s'" % self.fmIndexFN) # depends on [control=['if'], data=['logger']] #pre-allocate space samplingSize = int(math.ceil(float(self.totalSize) / self.binSize)) self.partialFM = np.lib.format.open_memmap(self.fmIndexFN, 'w+', '<u8', (samplingSize, self.vcLen)) self.refFM = np.lib.format.open_memmap(self.fmRefFN, 'w+', '<u8', (samplingSize,)) countsSoFar = np.cumsum(self.totalCounts) - self.totalCounts totalCounts = 0 prevStart = 0 bwtIndex = 0 chunkSize = 10000 samplingID = 0 #iterate through the whole file creating dynamically sized bins while bwtIndex < self.bwt.shape[0] and samplingID < samplingSize: #extract letters and counts so we can do sums letters = np.bitwise_and(self.bwt[bwtIndex:bwtIndex + chunkSize], self.mask) counts = np.right_shift(self.bwt[bwtIndex:bwtIndex + chunkSize], self.letterBits, dtype='<u8') #numpy methods for find the powers i = 1 same = letters[0:-1] == letters[1:] while np.count_nonzero(same) > 0: counts[i:][same] *= self.numPower i += 1 same = np.bitwise_and(same[0:-1], same[1:]) # depends on [control=['while'], data=[]] offsets = np.cumsum(counts) #this is basically looking for a clean breakpoint for our bin to end moreToUpdate = True while moreToUpdate: prevStart = np.searchsorted(offsets, samplingID * self.binSize - totalCounts, 'right') if prevStart == letters.shape[0]: prevStart -= 1 while prevStart > 0 and letters[prevStart] == letters[prevStart - 1]: prevStart -= 1 # depends on [control=['while'], data=[]] moreToUpdate = False # depends on [control=['if'], data=['prevStart']] else: while prevStart > 0 and letters[prevStart] == letters[prevStart - 1]: prevStart -= 1 # depends on [control=['while'], data=[]] self.refFM[samplingID] = bwtIndex + prevStart if prevStart > 0: self.partialFM[samplingID][:] = np.add(countsSoFar, np.bincount(letters[0:prevStart], counts[0:prevStart], self.vcLen)) # depends on [control=['if'], data=['prevStart']] else: self.partialFM[samplingID][:] = countsSoFar samplingID += 1 # depends on [control=['while'], data=[]] bwtIndex += prevStart if prevStart > 0: countsSoFar += np.bincount(letters[0:prevStart], counts[0:prevStart], self.vcLen) totalCounts += np.sum(np.bincount(letters[0:prevStart], counts[0:prevStart], self.vcLen)) # depends on [control=['if'], 
data=['prevStart']] # depends on [control=['while'], data=[]] #we'll use this later when we do lookups self.offsetSum = np.sum(self.partialFM[0])
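The docstring's worked example is easy to reproduce. Below is a toy, uncompressed FM-index for BWT 'C$CA'; the real method builds a sampled version in 2**11-sized bins, but row k here is exactly the "counts of each symbol in bwt[:k]" table it relies on. The alphabet order ('$', 'A', 'C') is assumed to match the three columns shown in the docstring.

# Toy uncompressed FM-index for the docstring's example.
import numpy as np

bwt = 'C$CA'
alphabet = ('$', 'A', 'C')
fm = np.zeros((len(bwt) + 1, len(alphabet)), dtype='<u8')
for k, ch in enumerate(bwt):
    fm[k + 1] = fm[k]                       # carry counts forward
    fm[k + 1, alphabet.index(ch)] += 1      # count the symbol just passed
print(fm)
# [[0 0 0]
#  [0 0 1]
#  [1 0 1]
#  [1 0 2]
#  [1 1 2]]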
def update_build_configuration_set(id, **kwargs): """ Update a BuildConfigurationSet """ data = update_build_configuration_set_raw(id, **kwargs) if data: return utils.format_json(data)
def function[update_build_configuration_set, parameter[id]]: constant[ Update a BuildConfigurationSet ] variable[data] assign[=] call[name[update_build_configuration_set_raw], parameter[name[id]]] if name[data] begin[:] return[call[name[utils].format_json, parameter[name[data]]]]
keyword[def] identifier[update_build_configuration_set] ( identifier[id] ,** identifier[kwargs] ): literal[string] identifier[data] = identifier[update_build_configuration_set_raw] ( identifier[id] ,** identifier[kwargs] ) keyword[if] identifier[data] : keyword[return] identifier[utils] . identifier[format_json] ( identifier[data] )
def update_build_configuration_set(id, **kwargs): """ Update a BuildConfigurationSet """ data = update_build_configuration_set_raw(id, **kwargs) if data: return utils.format_json(data) # depends on [control=['if'], data=[]]
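The wrapper above follows the call-then-format pattern used throughout these commands: perform the raw call, and only pretty-print when it returned data. A self-contained sketch of that pattern; both helpers are faked here (they stand in for update_build_configuration_set_raw and utils.format_json, and the 'name' field is invented) so the snippet runs on its own.

import json

def fake_update_raw(id, **kwargs):
    # Stand-in for the real REST call; returns None when nothing changed.
    return {"id": id, **kwargs} if kwargs else None

def fake_format_json(data):
    return json.dumps(data, indent=2)

data = fake_update_raw(7, name="nightly-set")
if data:
    print(fake_format_json(data))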
def get_token():
    """
    Get the encrypted GitHub token in Travis.

    Make sure the contents of this variable do not leak. The ``run()``
    function will remove this from the output, so always use it.
    """
    token = os.environ.get("GH_TOKEN", None)
    if not token:
        token = "GH_TOKEN environment variable not set"
    token = token.encode('utf-8')
    return token
def function[get_token, parameter[]]: constant[ Get the encrypted GitHub token in Travis. Make sure the contents this variable do not leak. The ``run()`` function will remove this from the output, so always use it. ] variable[token] assign[=] call[name[os].environ.get, parameter[constant[GH_TOKEN], constant[None]]] if <ast.UnaryOp object at 0x7da1b1071510> begin[:] variable[token] assign[=] constant[GH_TOKEN environment variable not set] variable[token] assign[=] call[name[token].encode, parameter[constant[utf-8]]] return[name[token]]
keyword[def] identifier[get_token] (): literal[string] identifier[token] = identifier[os] . identifier[environ] . identifier[get] ( literal[string] , keyword[None] ) keyword[if] keyword[not] identifier[token] : identifier[token] = literal[string] identifier[token] = identifier[token] . identifier[encode] ( literal[string] ) keyword[return] identifier[token]
def get_token():
    """
    Get the encrypted GitHub token in Travis.

    Make sure the contents of this variable do not leak. The ``run()``
    function will remove this from the output, so always use it.
    """
    token = os.environ.get('GH_TOKEN', None)
    if not token:
        token = 'GH_TOKEN environment variable not set' # depends on [control=['if'], data=[]]
    token = token.encode('utf-8')
    return token
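The docstring promises that ``run()`` scrubs the token from command output before it is echoed. A minimal, self-contained sketch of that scrubbing idea; ``scrub`` is hypothetical, not the project's actual helper.

import os

os.environ.setdefault("GH_TOKEN", "s3cret-token")  # so the sketch runs anywhere

def scrub(output):
    # Replace the token bytes before anything reaches a public log.
    token = os.environ["GH_TOKEN"].encode('utf-8')
    return output.replace(token, b"~" * len(token))

print(scrub(b"git push https://s3cret-token@github.com/org/repo.git"))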
def add_properties(self, names, methods): """Returns a view of self with the given methods added as properties. From: <http://stackoverflow.com/a/2954373/1366472>. """ cls = type(self) cls = type(cls.__name__, (cls,), dict(cls.__dict__)) if isinstance(names, string_types): names = [names] methods = [methods] for name,method in zip(names, methods): setattr(cls, name, property(method)) return self.view(type=cls)
def function[add_properties, parameter[self, names, methods]]: constant[Returns a view of self with the given methods added as properties. From: <http://stackoverflow.com/a/2954373/1366472>. ] variable[cls] assign[=] call[name[type], parameter[name[self]]] variable[cls] assign[=] call[name[type], parameter[name[cls].__name__, tuple[[<ast.Name object at 0x7da18bcc93c0>]], call[name[dict], parameter[name[cls].__dict__]]]] if call[name[isinstance], parameter[name[names], name[string_types]]] begin[:] variable[names] assign[=] list[[<ast.Name object at 0x7da18bccb430>]] variable[methods] assign[=] list[[<ast.Name object at 0x7da18bcc8fd0>]] for taget[tuple[[<ast.Name object at 0x7da18bccabc0>, <ast.Name object at 0x7da18bcc9e10>]]] in starred[call[name[zip], parameter[name[names], name[methods]]]] begin[:] call[name[setattr], parameter[name[cls], name[name], call[name[property], parameter[name[method]]]]] return[call[name[self].view, parameter[]]]
keyword[def] identifier[add_properties] ( identifier[self] , identifier[names] , identifier[methods] ): literal[string] identifier[cls] = identifier[type] ( identifier[self] ) identifier[cls] = identifier[type] ( identifier[cls] . identifier[__name__] ,( identifier[cls] ,), identifier[dict] ( identifier[cls] . identifier[__dict__] )) keyword[if] identifier[isinstance] ( identifier[names] , identifier[string_types] ): identifier[names] =[ identifier[names] ] identifier[methods] =[ identifier[methods] ] keyword[for] identifier[name] , identifier[method] keyword[in] identifier[zip] ( identifier[names] , identifier[methods] ): identifier[setattr] ( identifier[cls] , identifier[name] , identifier[property] ( identifier[method] )) keyword[return] identifier[self] . identifier[view] ( identifier[type] = identifier[cls] )
def add_properties(self, names, methods): """Returns a view of self with the given methods added as properties. From: <http://stackoverflow.com/a/2954373/1366472>. """ cls = type(self) cls = type(cls.__name__, (cls,), dict(cls.__dict__)) if isinstance(names, string_types): names = [names] methods = [methods] # depends on [control=['if'], data=[]] for (name, method) in zip(names, methods): setattr(cls, name, property(method)) # depends on [control=['for'], data=[]] return self.view(type=cls)
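A runnable sketch of the method above in use on a small ndarray subclass. ``Quantity`` and the ``doubled`` property are invented for illustration, and the class-dict copy is simplified to an empty namespace, since subclassing already inherits the parent's attributes.

import numpy as np

class Quantity(np.ndarray):
    def add_properties(self, names, methods):
        # Same recipe as above: clone the class, attach properties, re-view.
        cls = type(type(self).__name__, (type(self),), {})
        if isinstance(names, str):  # six.string_types in the original
            names, methods = [names], [methods]
        for name, method in zip(names, methods):
            setattr(cls, name, property(method))
        return self.view(type=cls)

q = np.arange(3).view(Quantity)
q = q.add_properties('doubled', lambda self: self * 2)
print(q.doubled)  # [0 2 4]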
def print_stats(self, header=True, file=sys.stdout):
    """Pretty print stats table."""
    if header:
        print("CACHE {:*^18} {:*^18} {:*^18} {:*^18} {:*^18}".format(
            "HIT", "MISS", "LOAD", "STORE", "EVICT"), file=file)
    for s in self.stats():
        print("{name:>5} {HIT_count:>6} ({HIT_byte:>8}B) {MISS_count:>6} ({MISS_byte:>8}B) "
              "{LOAD_count:>6} ({LOAD_byte:>8}B) {STORE_count:>6} "
              "({STORE_byte:>8}B) {EVICT_count:>6} ({EVICT_byte:>8}B)".format(**s),
              file=file)
def function[print_stats, parameter[self, header, file]]: constant[Pretty print stats table.] if name[header] begin[:] call[name[print], parameter[call[constant[CACHE {:*^18} {:*^18} {:*^18} {:*^18} {:*^18}].format, parameter[constant[HIT], constant[MISS], constant[LOAD], constant[STORE], constant[EVICT]]]]] for taget[name[s]] in starred[call[name[self].stats, parameter[]]] begin[:] call[name[print], parameter[call[constant[{name:>5} {HIT_count:>6} ({HIT_byte:>8}B) {MISS_count:>6} ({MISS_byte:>8}B) {LOAD_count:>6} ({LOAD_byte:>8}B) {STORE_count:>6} ({STORE_byte:>8}B) {EVICT_count:>6} ({EVICT_byte:>8}B)].format, parameter[]]]]
keyword[def] identifier[print_stats] ( identifier[self] , identifier[header] = keyword[True] , identifier[file] = identifier[sys] . identifier[stdout] ): literal[string] keyword[if] identifier[header] : identifier[print] ( literal[string] . identifier[format] ( literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ), identifier[file] = identifier[file] ) keyword[for] identifier[s] keyword[in] identifier[self] . identifier[stats] (): identifier[print] ( literal[string] literal[string] literal[string] . identifier[format] ( identifier[HIT_bytes] = literal[int] ,** identifier[s] ), identifier[file] = identifier[file] )
def print_stats(self, header=True, file=sys.stdout):
    """Pretty print stats table."""
    if header:
        print('CACHE {:*^18} {:*^18} {:*^18} {:*^18} {:*^18}'.format('HIT', 'MISS', 'LOAD', 'STORE', 'EVICT'), file=file) # depends on [control=['if'], data=[]]
    for s in self.stats():
        print('{name:>5} {HIT_count:>6} ({HIT_byte:>8}B) {MISS_count:>6} ({MISS_byte:>8}B) {LOAD_count:>6} ({LOAD_byte:>8}B) {STORE_count:>6} ({STORE_byte:>8}B) {EVICT_count:>6} ({EVICT_byte:>8}B)'.format(**s), file=file) # depends on [control=['for'], data=['s']]
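Each record yielded by ``stats()`` evidently needs a ``name`` key plus ``<EVENT>_count`` / ``<EVENT>_byte`` pairs for the five events. A hedged sketch with a made-up L1 record, exercising part of the same format string:

# Made-up cache record matching the keys the format string consumes.
record = {"name": "L1"}
for ev in ("HIT", "MISS", "LOAD", "STORE", "EVICT"):
    record[ev + "_count"] = 0
    record[ev + "_byte"] = 0
record.update(HIT_count=812, HIT_byte=812 * 8)

print("{name:>5} {HIT_count:>6} ({HIT_byte:>8}B) {MISS_count:>6} "
      "({MISS_byte:>8}B)".format(**record))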
def parameterized_expectations(model, verbose=False, initial_dr=None,
                               pert_order=1, with_complementarities=True,
                               grid={}, distribution={}, maxit=100, tol=1e-8,
                               inner_maxit=10, direct=False):
    '''
    Find global solution for ``model`` via parameterized expectations.
    Controls must be expressed as a direct function of equilibrium objects.
    Algorithm iterates over the expectations function in the arbitrage
    equation.

    Parameters:
    ----------
    model : NumericModel
        ``dtcscc`` model to be solved
    verbose : boolean
        if True, display iterations
    initial_dr : decision rule
        initial guess for the decision rule
    pert_order : {1}
        if no initial guess is supplied, the perturbation solution at order
        ``pert_order`` is used as initial guess
    grid : grid options
    distribution : distribution options
    maxit : maximum number of iterations
    tol : tolerance criterion for successive approximations
    inner_maxit : maximum number of iterations for inner solver
    direct : if True, solve with direct method. If False, solve indirectly

    Returns
    -------
    decision rule :
        approximated solution
    '''
    t1 = time.time()
    g = model.functions['transition']
    h = model.functions['expectation']
    d = model.functions['direct_response']
    f = model.functions['arbitrage_exp']   # f(s, x, z, p, out)
    parms = model.calibration['parameters']

    if initial_dr is None:
        if pert_order == 1:
            initial_dr = approximate_controls(model)
        if pert_order > 1:
            raise Exception("Perturbation order > 1 not supported (yet).")

    approx = model.get_grid(**grid)
    grid = approx.grid
    interp_type = approx.interpolation
    dr = create_interpolator(approx, interp_type)
    expect = create_interpolator(approx, interp_type)

    distrib = model.get_distribution(**distribution)
    nodes, weights = distrib.discretize()

    N = grid.shape[0]
    z = np.zeros((N, len(model.symbols['expectations'])))

    x_0 = initial_dr(grid)
    x_0 = x_0.real  # just in case ...
    h_0 = h(grid, x_0, parms)

    it = 0
    err = 10
    err_0 = 10

    verbit = True if verbose == 'full' else False

    if with_complementarities is True:
        lbfun = model.functions['controls_lb']
        ubfun = model.functions['controls_ub']
        lb = lbfun(grid, parms)
        ub = ubfun(grid, parms)
    else:
        lb = None
        ub = None

    if verbose:
        headline = '|{0:^4} | {1:10} | {2:8} | {3:8} |'
        headline = headline.format('N', ' Error', 'Gain', 'Time')
        stars = '-'*len(headline)
        print(stars)
        print(headline)
        print(stars)

        # format string for within loop
        fmt_str = '|{0:4} | {1:10.3e} | {2:8.3f} | {3:8.3f} |'

    while err > tol and it <= maxit:
        it += 1
        t_start = time.time()

        # dr.set_values(x_0)
        expect.set_values(h_0)

        # evaluate expectation over the future state
        z[...] = 0
        for i in range(weights.shape[0]):
            e = nodes[i, :]
            S = g(grid, x_0, e, parms)
            z += weights[i]*expect(S)

        if direct is True:
            # Use control as direct function of arbitrage equation
            new_x = d(grid, z, parms)
            if with_complementarities is True:
                new_x = np.minimum(new_x, ub)
                new_x = np.maximum(new_x, lb)
        else:
            # Find control by solving arbitrage equation
            def fun(x):
                return f(grid, x, z, parms)
            sdfun = SerialDifferentiableFunction(fun)

            if with_complementarities is True:
                [new_x, nit] = ncpsolve(sdfun, lb, ub, x_0, verbose=verbit,
                                        maxit=inner_maxit)
            else:
                [new_x, nit] = serial_newton(sdfun, x_0, verbose=verbit)

        new_h = h(grid, new_x, parms)

        # update error
        err = (abs(new_h - h_0).max())

        # Update guess for decision rule and expectations function
        x_0 = new_x
        h_0 = new_h

        # print error information if `verbose`
        err_SA = err/err_0
        err_0 = err
        t_finish = time.time()
        elapsed = t_finish - t_start
        if verbose:
            print(fmt_str.format(it, err, err_SA, elapsed))

    if it == maxit:
        import warnings
        warnings.warn(UserWarning("Maximum number of iterations reached"))

    # compute final time and do final printout if `verbose`
    t2 = time.time()
    if verbose:
        print(stars)
        print('Elapsed: {} seconds.'.format(t2 - t1))
        print(stars)

    # Interpolation for the decision rule
    dr.set_values(x_0)

    return dr
def function[parameterized_expectations, parameter[model, verbose, initial_dr, pert_order, with_complementarities, grid, distribution, maxit, tol, inner_maxit, direct]]: constant[ Find global solution for ``model`` via parameterized expectations. Controls must be expressed as a direct function of equilibrium objects. Algorithm iterates over the expectations function in the arbitrage equation. Parameters: ---------- model : NumericModel ``dtcscc`` model to be solved verbose : boolean if True, display iterations initial_dr : decision rule initial guess for the decision rule pert_order : {1} if no initial guess is supplied, the perturbation solution at order ``pert_order`` is used as initial guess grid : grid options distribution : distribution options maxit : maximum number of iterations tol : tolerance criterium for successive approximations inner_maxit : maximum number of iteration for inner solver direct : if True, solve with direct method. If false, solve indirectly Returns ------- decision rule : approximated solution ] variable[t1] assign[=] call[name[time].time, parameter[]] variable[g] assign[=] call[name[model].functions][constant[transition]] variable[h] assign[=] call[name[model].functions][constant[expectation]] variable[d] assign[=] call[name[model].functions][constant[direct_response]] variable[f] assign[=] call[name[model].functions][constant[arbitrage_exp]] variable[parms] assign[=] call[name[model].calibration][constant[parameters]] if compare[name[initial_dr] is constant[None]] begin[:] if compare[name[pert_order] equal[==] constant[1]] begin[:] variable[initial_dr] assign[=] call[name[approximate_controls], parameter[name[model]]] if compare[name[pert_order] greater[>] constant[1]] begin[:] <ast.Raise object at 0x7da18fe912a0> variable[approx] assign[=] call[name[model].get_grid, parameter[]] variable[grid] assign[=] name[approx].grid variable[interp_type] assign[=] name[approx].interpolation variable[dr] assign[=] call[name[create_interpolator], parameter[name[approx], name[interp_type]]] variable[expect] assign[=] call[name[create_interpolator], parameter[name[approx], name[interp_type]]] variable[distrib] assign[=] call[name[model].get_distribution, parameter[]] <ast.Tuple object at 0x7da18fe92a10> assign[=] call[name[distrib].discretize, parameter[]] variable[N] assign[=] call[name[grid].shape][constant[0]] variable[z] assign[=] call[name[np].zeros, parameter[tuple[[<ast.Name object at 0x7da18fe90220>, <ast.Call object at 0x7da18fe93190>]]]] variable[x_0] assign[=] call[name[initial_dr], parameter[name[grid]]] variable[x_0] assign[=] name[x_0].real variable[h_0] assign[=] call[name[h], parameter[name[grid], name[x_0], name[parms]]] variable[it] assign[=] constant[0] variable[err] assign[=] constant[10] variable[err_0] assign[=] constant[10] variable[verbit] assign[=] <ast.IfExp object at 0x7da18fe90f70> if compare[name[with_complementarities] is constant[True]] begin[:] variable[lbfun] assign[=] call[name[model].functions][constant[controls_lb]] variable[ubfun] assign[=] call[name[model].functions][constant[controls_ub]] variable[lb] assign[=] call[name[lbfun], parameter[name[grid], name[parms]]] variable[ub] assign[=] call[name[ubfun], parameter[name[grid], name[parms]]] if name[verbose] begin[:] variable[headline] assign[=] constant[|{0:^4} | {1:10} | {2:8} | {3:8} |] variable[headline] assign[=] call[name[headline].format, parameter[constant[N], constant[ Error], constant[Gain], constant[Time]]] variable[stars] assign[=] binary_operation[constant[-] * 
call[name[len], parameter[name[headline]]]] call[name[print], parameter[name[stars]]] call[name[print], parameter[name[headline]]] call[name[print], parameter[name[stars]]] variable[fmt_str] assign[=] constant[|{0:4} | {1:10.3e} | {2:8.3f} | {3:8.3f} |] while <ast.BoolOp object at 0x7da18fe91180> begin[:] <ast.AugAssign object at 0x7da18fe93310> variable[t_start] assign[=] call[name[time].time, parameter[]] call[name[expect].set_values, parameter[name[h_0]]] call[name[z]][constant[Ellipsis]] assign[=] constant[0] for taget[name[i]] in starred[call[name[range], parameter[call[name[weights].shape][constant[0]]]]] begin[:] variable[e] assign[=] call[name[nodes]][tuple[[<ast.Name object at 0x7da207f9a110>, <ast.Slice object at 0x7da207f9b9a0>]]] variable[S] assign[=] call[name[g], parameter[name[grid], name[x_0], name[e], name[parms]]] <ast.AugAssign object at 0x7da207f99390> if compare[name[direct] is constant[True]] begin[:] variable[new_x] assign[=] call[name[d], parameter[name[grid], name[z], name[parms]]] if compare[name[with_complementarities] is constant[True]] begin[:] variable[new_x] assign[=] call[name[np].minimum, parameter[name[new_x], name[ub]]] variable[new_x] assign[=] call[name[np].maximum, parameter[name[new_x], name[lb]]] variable[new_h] assign[=] call[name[h], parameter[name[grid], name[new_x], name[parms]]] variable[err] assign[=] call[call[name[abs], parameter[binary_operation[name[new_h] - name[h_0]]]].max, parameter[]] variable[x_0] assign[=] name[new_x] variable[h_0] assign[=] name[new_h] variable[err_SA] assign[=] binary_operation[name[err] / name[err_0]] variable[err_0] assign[=] name[err] variable[t_finish] assign[=] call[name[time].time, parameter[]] variable[elapsed] assign[=] binary_operation[name[t_finish] - name[t_start]] if name[verbose] begin[:] call[name[print], parameter[call[name[fmt_str].format, parameter[name[it], name[err], name[err_SA], name[elapsed]]]]] if compare[name[it] equal[==] name[maxit]] begin[:] import module[warnings] call[name[warnings].warn, parameter[call[name[UserWarning], parameter[constant[Maximum number of iterations reached]]]]] variable[t2] assign[=] call[name[time].time, parameter[]] if name[verbose] begin[:] call[name[print], parameter[name[stars]]] call[name[print], parameter[call[constant[Elapsed: {} seconds.].format, parameter[binary_operation[name[t2] - name[t1]]]]]] call[name[print], parameter[name[stars]]] call[name[dr].set_values, parameter[name[x_0]]] return[name[dr]]
keyword[def] identifier[parameterized_expectations] ( identifier[model] , identifier[verbose] = keyword[False] , identifier[initial_dr] = keyword[None] , identifier[pert_order] = literal[int] , identifier[with_complementarities] = keyword[True] , identifier[grid] ={}, identifier[distribution] ={}, identifier[maxit] = literal[int] , identifier[tol] = literal[int] , identifier[inner_maxit] = literal[int] , identifier[direct] = keyword[False] ): literal[string] identifier[t1] = identifier[time] . identifier[time] () identifier[g] = identifier[model] . identifier[functions] [ literal[string] ] identifier[h] = identifier[model] . identifier[functions] [ literal[string] ] identifier[d] = identifier[model] . identifier[functions] [ literal[string] ] identifier[f] = identifier[model] . identifier[functions] [ literal[string] ] identifier[parms] = identifier[model] . identifier[calibration] [ literal[string] ] keyword[if] identifier[initial_dr] keyword[is] keyword[None] : keyword[if] identifier[pert_order] == literal[int] : identifier[initial_dr] = identifier[approximate_controls] ( identifier[model] ) keyword[if] identifier[pert_order] > literal[int] : keyword[raise] identifier[Exception] ( literal[string] ) identifier[approx] = identifier[model] . identifier[get_grid] (** identifier[grid] ) identifier[grid] = identifier[approx] . identifier[grid] identifier[interp_type] = identifier[approx] . identifier[interpolation] identifier[dr] = identifier[create_interpolator] ( identifier[approx] , identifier[interp_type] ) identifier[expect] = identifier[create_interpolator] ( identifier[approx] , identifier[interp_type] ) identifier[distrib] = identifier[model] . identifier[get_distribution] (** identifier[distribution] ) identifier[nodes] , identifier[weights] = identifier[distrib] . identifier[discretize] () identifier[N] = identifier[grid] . identifier[shape] [ literal[int] ] identifier[z] = identifier[np] . identifier[zeros] (( identifier[N] , identifier[len] ( identifier[model] . identifier[symbols] [ literal[string] ]))) identifier[x_0] = identifier[initial_dr] ( identifier[grid] ) identifier[x_0] = identifier[x_0] . identifier[real] identifier[h_0] = identifier[h] ( identifier[grid] , identifier[x_0] , identifier[parms] ) identifier[it] = literal[int] identifier[err] = literal[int] identifier[err_0] = literal[int] identifier[verbit] = keyword[True] keyword[if] identifier[verbose] == literal[string] keyword[else] keyword[False] keyword[if] identifier[with_complementarities] keyword[is] keyword[True] : identifier[lbfun] = identifier[model] . identifier[functions] [ literal[string] ] identifier[ubfun] = identifier[model] . identifier[functions] [ literal[string] ] identifier[lb] = identifier[lbfun] ( identifier[grid] , identifier[parms] ) identifier[ub] = identifier[ubfun] ( identifier[grid] , identifier[parms] ) keyword[else] : identifier[lb] = keyword[None] identifier[ub] = keyword[None] keyword[if] identifier[verbose] : identifier[headline] = literal[string] identifier[headline] = identifier[headline] . 
identifier[format] ( literal[string] , literal[string] , literal[string] , literal[string] ) identifier[stars] = literal[string] * identifier[len] ( identifier[headline] ) identifier[print] ( identifier[stars] ) identifier[print] ( identifier[headline] ) identifier[print] ( identifier[stars] ) identifier[fmt_str] = literal[string] keyword[while] identifier[err] > identifier[tol] keyword[and] identifier[it] <= identifier[maxit] : identifier[it] += literal[int] identifier[t_start] = identifier[time] . identifier[time] () identifier[expect] . identifier[set_values] ( identifier[h_0] ) identifier[z] [...]= literal[int] keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[weights] . identifier[shape] [ literal[int] ]): identifier[e] = identifier[nodes] [ identifier[i] ,:] identifier[S] = identifier[g] ( identifier[grid] , identifier[x_0] , identifier[e] , identifier[parms] ) identifier[z] += identifier[weights] [ identifier[i] ]* identifier[expect] ( identifier[S] ) keyword[if] identifier[direct] keyword[is] keyword[True] : identifier[new_x] = identifier[d] ( identifier[grid] , identifier[z] , identifier[parms] ) keyword[if] identifier[with_complementarities] keyword[is] keyword[True] : identifier[new_x] = identifier[np] . identifier[minimum] ( identifier[new_x] , identifier[ub] ) identifier[new_x] = identifier[np] . identifier[maximum] ( identifier[new_x] , identifier[lb] ) keyword[else] : keyword[def] identifier[fun] ( identifier[x] ): keyword[return] identifier[f] ( identifier[grid] , identifier[x] , identifier[z] , identifier[parms] ) identifier[sdfun] = identifier[SerialDifferentiableFunction] ( identifier[fun] ) keyword[if] identifier[with_complementarities] keyword[is] keyword[True] : [ identifier[new_x] , identifier[nit] ]= identifier[ncpsolve] ( identifier[sdfun] , identifier[lb] , identifier[ub] , identifier[x_0] , identifier[verbose] = identifier[verbit] , identifier[maxit] = identifier[inner_maxit] ) keyword[else] : [ identifier[new_x] , identifier[nit] ]= identifier[serial_newton] ( identifier[sdfun] , identifier[x_0] , identifier[verbose] = identifier[verbit] ) identifier[new_h] = identifier[h] ( identifier[grid] , identifier[new_x] , identifier[parms] ) identifier[err] =( identifier[abs] ( identifier[new_h] - identifier[h_0] ). identifier[max] ()) identifier[x_0] = identifier[new_x] identifier[h_0] = identifier[new_h] identifier[err_SA] = identifier[err] / identifier[err_0] identifier[err_0] = identifier[err] identifier[t_finish] = identifier[time] . identifier[time] () identifier[elapsed] = identifier[t_finish] - identifier[t_start] keyword[if] identifier[verbose] : identifier[print] ( identifier[fmt_str] . identifier[format] ( identifier[it] , identifier[err] , identifier[err_SA] , identifier[elapsed] )) keyword[if] identifier[it] == identifier[maxit] : keyword[import] identifier[warnings] identifier[warnings] . identifier[warn] ( identifier[UserWarning] ( literal[string] )) identifier[t2] = identifier[time] . identifier[time] () keyword[if] identifier[verbose] : identifier[print] ( identifier[stars] ) identifier[print] ( literal[string] . identifier[format] ( identifier[t2] - identifier[t1] )) identifier[print] ( identifier[stars] ) identifier[dr] . identifier[set_values] ( identifier[x_0] ) keyword[return] identifier[dr]
def parameterized_expectations(model, verbose=False, initial_dr=None, pert_order=1, with_complementarities=True, grid={}, distribution={}, maxit=100, tol=1e-08, inner_maxit=10, direct=False): """ Find global solution for ``model`` via parameterized expectations. Controls must be expressed as a direct function of equilibrium objects. Algorithm iterates over the expectations function in the arbitrage equation. Parameters: ---------- model : NumericModel ``dtcscc`` model to be solved verbose : boolean if True, display iterations initial_dr : decision rule initial guess for the decision rule pert_order : {1} if no initial guess is supplied, the perturbation solution at order ``pert_order`` is used as initial guess grid : grid options distribution : distribution options maxit : maximum number of iterations tol : tolerance criterium for successive approximations inner_maxit : maximum number of iteration for inner solver direct : if True, solve with direct method. If false, solve indirectly Returns ------- decision rule : approximated solution """ t1 = time.time() g = model.functions['transition'] h = model.functions['expectation'] d = model.functions['direct_response'] f = model.functions['arbitrage_exp'] # f(s, x, z, p, out) parms = model.calibration['parameters'] if initial_dr is None: if pert_order == 1: initial_dr = approximate_controls(model) # depends on [control=['if'], data=[]] if pert_order > 1: raise Exception('Perturbation order > 1 not supported (yet).') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['initial_dr']] approx = model.get_grid(**grid) grid = approx.grid interp_type = approx.interpolation dr = create_interpolator(approx, interp_type) expect = create_interpolator(approx, interp_type) distrib = model.get_distribution(**distribution) (nodes, weights) = distrib.discretize() N = grid.shape[0] z = np.zeros((N, len(model.symbols['expectations']))) x_0 = initial_dr(grid) x_0 = x_0.real # just in case ... h_0 = h(grid, x_0, parms) it = 0 err = 10 err_0 = 10 verbit = True if verbose == 'full' else False if with_complementarities is True: lbfun = model.functions['controls_lb'] ubfun = model.functions['controls_ub'] lb = lbfun(grid, parms) ub = ubfun(grid, parms) # depends on [control=['if'], data=[]] else: lb = None ub = None if verbose: headline = '|{0:^4} | {1:10} | {2:8} | {3:8} |' headline = headline.format('N', ' Error', 'Gain', 'Time') stars = '-' * len(headline) print(stars) print(headline) print(stars) # format string for within loop fmt_str = '|{0:4} | {1:10.3e} | {2:8.3f} | {3:8.3f} |' # depends on [control=['if'], data=[]] while err > tol and it <= maxit: it += 1 t_start = time.time() # dr.set_values(x_0) expect.set_values(h_0) # evaluate expectation over the future state z[...] 
= 0 for i in range(weights.shape[0]): e = nodes[i, :] S = g(grid, x_0, e, parms) z += weights[i] * expect(S) # depends on [control=['for'], data=['i']] if direct is True: # Use control as direct function of arbitrage equation new_x = d(grid, z, parms) if with_complementarities is True: new_x = np.minimum(new_x, ub) new_x = np.maximum(new_x, lb) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] else: # Find control by solving arbitrage equation def fun(x): return f(grid, x, z, parms) sdfun = SerialDifferentiableFunction(fun) if with_complementarities is True: [new_x, nit] = ncpsolve(sdfun, lb, ub, x_0, verbose=verbit, maxit=inner_maxit) # depends on [control=['if'], data=[]] else: [new_x, nit] = serial_newton(sdfun, x_0, verbose=verbit) new_h = h(grid, new_x, parms) # update error err = abs(new_h - h_0).max() # Update guess for decision rule and expectations function x_0 = new_x h_0 = new_h # print error infomation if `verbose` err_SA = err / err_0 err_0 = err t_finish = time.time() elapsed = t_finish - t_start if verbose: print(fmt_str.format(it, err, err_SA, elapsed)) # depends on [control=['if'], data=[]] # depends on [control=['while'], data=[]] if it == maxit: import warnings warnings.warn(UserWarning('Maximum number of iterations reached')) # depends on [control=['if'], data=[]] # compute final fime and do final printout if `verbose` t2 = time.time() if verbose: print(stars) print('Elapsed: {} seconds.'.format(t2 - t1)) print(stars) # depends on [control=['if'], data=[]] # Interpolation for the decision rule dr.set_values(x_0) return dr
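Stripped of the model machinery, the loop above is a successive-approximation (fixed-point) iteration on the expectation function h. A scalar toy with the same skeleton and stopping rule; the two linear maps are invented stand-ins for the arbitrage step and for h(grid, x, parms).

tol, maxit = 1e-8, 100
h_0, it, err = 1.0, 0, 10.0
while err > tol and it <= maxit:
    it += 1
    new_x = 0.5 * h_0 + 0.2    # stand-in for the arbitrage/decision step
    new_h = 0.8 * new_x + 0.1  # stand-in for h(grid, new_x, parms)
    err = abs(new_h - h_0)
    h_0 = new_h
print(it, h_0)  # converges to the fixed point h* = 0.26 / 0.6 = 0.4333...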
def findzc(x, thresh, t_max=None):
    '''
    Find cues to each zero-crossing in vector x.

    To be accepted as a zero-crossing, the signal must pass from below
    -thresh to above thresh, or vice versa, in no more than t_max samples.

    Args
    ----
    x: (ndarray)
        signal vector to search for zero-crossings.
    thresh: (float)
        magnitude threshold for detecting a zero-crossing.
    t_max: (int)
        maximum duration in samples between threshold crossings.

    Returns
    -------
    zc: ndarray
        Array containing the start **zc_s**, finish **zc_f** and direction
        **S** of zero crossings

        where:

        * zc_s: the cue of the first threshold-crossing in samples
        * zc_f: the cue of the second threshold-crossing in samples
        * S: the sign of each zero-crossing (1 = positive-going,
          -1 = negative-going).

    Notes
    -----
    This routine is a reimplementation of Mark Johnson's Dtag toolbox method
    and tested against the Matlab version to be sure it has the same result.
    '''
    import numpy

    # positive threshold: p (over) n (under)
    pt_p = x > thresh
    pt_n = ~pt_p

    # negative threshold: p (over) n (under)
    nt_n = x < -thresh
    nt_p = ~nt_n

    # Over positive threshold +thresh
    # neg to pos
    pt_np = (pt_p[:-1] & pt_n[1:]).nonzero()[0]
    # pos to neg
    pt_pn = (pt_n[:-1] & pt_p[1:]).nonzero()[0] + 1

    # Under negative threshold -thresh
    # neg to pos
    nt_np = (nt_p[:-1] & nt_n[1:]).nonzero()[0] + 1
    # pos to neg
    nt_pn = (nt_n[:-1] & nt_p[1:]).nonzero()[0]

    # Concat indices, order sequentially
    ind_all = numpy.hstack((pt_np, nt_np, pt_pn, nt_pn))
    ind_all.sort()

    # Omit rows where just touching but not crossing
    crossing_mask = ~(numpy.diff(numpy.sign(x[ind_all])) == 0)
    # Append a False to make the same length as ind_all
    crossing_mask = numpy.hstack((crossing_mask, False))

    # Get 1st and 2nd crossings
    ind_1stx = ind_all[crossing_mask]
    ind_2ndx = ind_all[numpy.where(crossing_mask)[0]+1]

    # Drop crossings that do not have a second threshold-crossing within
    # the signal (a single vectorized delete avoids shifting indices while
    # looping, and the second delete now correctly targets ind_2ndx)
    del_ind = numpy.where(ind_2ndx > len(x)-1)[0]
    ind_1stx = numpy.delete(ind_1stx, del_ind)
    ind_2ndx = numpy.delete(ind_2ndx, del_ind)

    # Get direction/sign of crossing
    signs = numpy.sign(x[ind_1stx])*-1

    # Add column of direction and transpose
    zc = numpy.vstack((ind_1stx, ind_2ndx, signs)).T

    if t_max:
        zc = zc[zc[:, 1] - zc[:, 0] <= t_max, :]

    return zc.astype(int)
def function[findzc, parameter[x, thresh, t_max]]: constant[ Find cues to each zero-crossing in vector x. To be accepted as a zero-crossing, the signal must pass from below -thresh to above thresh, or vice versa, in no more than t_max samples. Args ---- thresh: (float) magnitude threshold for detecting a zero-crossing. t_max: (int) maximum duration in samples between threshold crossings. Returns ------- zc: ndarray Array containing the start **zc_s**, finish **zc_f** and direction **S** of zero crossings where: * zc_s: the cue of the first threshold-crossing in samples * zc_f: the cue of the second threshold-crossing in samples * S: the sign of each zero-crossing (1 = positive-going, -1 = negative-going). Notes ----- This routine is a reimplementation of Mark Johnson's Dtag toolbox method and tested against the Matlab version to be sure it has the same result. ] import module[numpy] variable[pt_p] assign[=] compare[name[x] greater[>] name[thresh]] variable[pt_n] assign[=] <ast.UnaryOp object at 0x7da18f7216f0> variable[nt_n] assign[=] compare[name[x] less[<] <ast.UnaryOp object at 0x7da18f722fe0>] variable[nt_p] assign[=] <ast.UnaryOp object at 0x7da18f720610> variable[pt_np] assign[=] call[call[binary_operation[call[name[pt_p]][<ast.Slice object at 0x7da18f720640>] <ast.BitAnd object at 0x7da2590d6b60> call[name[pt_n]][<ast.Slice object at 0x7da18eb54340>]].nonzero, parameter[]]][constant[0]] variable[pt_pn] assign[=] binary_operation[call[call[binary_operation[call[name[pt_n]][<ast.Slice object at 0x7da18eb56230>] <ast.BitAnd object at 0x7da2590d6b60> call[name[pt_p]][<ast.Slice object at 0x7da18eb543d0>]].nonzero, parameter[]]][constant[0]] + constant[1]] variable[nt_np] assign[=] binary_operation[call[call[binary_operation[call[name[nt_p]][<ast.Slice object at 0x7da18eb55f00>] <ast.BitAnd object at 0x7da2590d6b60> call[name[nt_n]][<ast.Slice object at 0x7da18eb54580>]].nonzero, parameter[]]][constant[0]] + constant[1]] variable[nt_pn] assign[=] call[call[binary_operation[call[name[nt_n]][<ast.Slice object at 0x7da18eb55f90>] <ast.BitAnd object at 0x7da2590d6b60> call[name[nt_p]][<ast.Slice object at 0x7da18eb57e50>]].nonzero, parameter[]]][constant[0]] variable[ind_all] assign[=] call[name[numpy].hstack, parameter[tuple[[<ast.Name object at 0x7da18eb54520>, <ast.Name object at 0x7da18eb55090>, <ast.Name object at 0x7da18f723c70>, <ast.Name object at 0x7da18f7230a0>]]]] call[name[ind_all].sort, parameter[]] variable[crossing_mask] assign[=] <ast.UnaryOp object at 0x7da18f721f90> variable[crossing_mask] assign[=] call[name[numpy].hstack, parameter[tuple[[<ast.Name object at 0x7da18f8124a0>, <ast.Constant object at 0x7da18f813820>]]]] variable[ind_1stx] assign[=] call[name[ind_all]][name[crossing_mask]] variable[ind_2ndx] assign[=] call[name[ind_all]][binary_operation[call[call[name[numpy].where, parameter[name[crossing_mask]]]][constant[0]] + constant[1]]] variable[del_ind] assign[=] call[call[name[numpy].where, parameter[compare[name[ind_2ndx] greater[>] binary_operation[call[name[len], parameter[name[x]]] - constant[1]]]]]][constant[0]] for taget[name[i]] in starred[name[del_ind]] begin[:] variable[ind_1stx] assign[=] call[name[numpy].delete, parameter[name[ind_1stx], name[i]]] variable[ind_2ndx] assign[=] call[name[numpy].delete, parameter[name[ind_1stx], name[i]]] variable[signs] assign[=] binary_operation[call[name[numpy].sign, parameter[call[name[x]][name[ind_1stx]]]] * <ast.UnaryOp object at 0x7da18c4cfb50>] variable[zc] assign[=] call[name[numpy].vstack, 
parameter[tuple[[<ast.Name object at 0x7da18c4ce530>, <ast.Name object at 0x7da18c4cfb80>, <ast.Name object at 0x7da18c4cc070>]]]].T if name[t_max] begin[:] variable[zc] assign[=] call[name[zc]][tuple[[<ast.Compare object at 0x7da1b143e800>, <ast.Slice object at 0x7da1b143f3a0>]]] return[call[name[zc].astype, parameter[name[int]]]]
keyword[def] identifier[findzc] ( identifier[x] , identifier[thresh] , identifier[t_max] = keyword[None] ): literal[string] keyword[import] identifier[numpy] identifier[pt_p] = identifier[x] > identifier[thresh] identifier[pt_n] =~ identifier[pt_p] identifier[nt_n] = identifier[x] <- identifier[thresh] identifier[nt_p] =~ identifier[nt_n] identifier[pt_np] =( identifier[pt_p] [:- literal[int] ]& identifier[pt_n] [ literal[int] :]). identifier[nonzero] ()[ literal[int] ] identifier[pt_pn] =( identifier[pt_n] [:- literal[int] ]& identifier[pt_p] [ literal[int] :]). identifier[nonzero] ()[ literal[int] ]+ literal[int] identifier[nt_np] =( identifier[nt_p] [:- literal[int] ]& identifier[nt_n] [ literal[int] :]). identifier[nonzero] ()[ literal[int] ]+ literal[int] identifier[nt_pn] =( identifier[nt_n] [:- literal[int] ]& identifier[nt_p] [ literal[int] :]). identifier[nonzero] ()[ literal[int] ] identifier[ind_all] = identifier[numpy] . identifier[hstack] (( identifier[pt_np] , identifier[nt_np] , identifier[pt_pn] , identifier[nt_pn] )) identifier[ind_all] . identifier[sort] () identifier[crossing_mask] =~( identifier[numpy] . identifier[diff] ( identifier[numpy] . identifier[sign] ( identifier[x] [ identifier[ind_all] ]))== literal[int] ) identifier[crossing_mask] = identifier[numpy] . identifier[hstack] (( identifier[crossing_mask] , keyword[False] )) identifier[ind_1stx] = identifier[ind_all] [ identifier[crossing_mask] ] identifier[ind_2ndx] = identifier[ind_all] [ identifier[numpy] . identifier[where] ( identifier[crossing_mask] )[ literal[int] ]+ literal[int] ] identifier[del_ind] = identifier[numpy] . identifier[where] ( identifier[ind_2ndx] > identifier[len] ( identifier[x] )- literal[int] )[ literal[int] ] keyword[for] identifier[i] keyword[in] identifier[del_ind] : identifier[ind_1stx] = identifier[numpy] . identifier[delete] ( identifier[ind_1stx] , identifier[i] ) identifier[ind_2ndx] = identifier[numpy] . identifier[delete] ( identifier[ind_1stx] , identifier[i] ) identifier[signs] = identifier[numpy] . identifier[sign] ( identifier[x] [ identifier[ind_1stx] ])*- literal[int] identifier[zc] = identifier[numpy] . identifier[vstack] (( identifier[ind_1stx] , identifier[ind_2ndx] , identifier[signs] )). identifier[T] keyword[if] identifier[t_max] : identifier[zc] = identifier[zc] [ identifier[zc] [:, literal[int] ]- identifier[zc] [:, literal[int] ]<= identifier[t_max] ,:] keyword[return] identifier[zc] . identifier[astype] ( identifier[int] )
def findzc(x, thresh, t_max=None): """ Find cues to each zero-crossing in vector x. To be accepted as a zero-crossing, the signal must pass from below -thresh to above thresh, or vice versa, in no more than t_max samples. Args ---- thresh: (float) magnitude threshold for detecting a zero-crossing. t_max: (int) maximum duration in samples between threshold crossings. Returns ------- zc: ndarray Array containing the start **zc_s**, finish **zc_f** and direction **S** of zero crossings where: * zc_s: the cue of the first threshold-crossing in samples * zc_f: the cue of the second threshold-crossing in samples * S: the sign of each zero-crossing (1 = positive-going, -1 = negative-going). Notes ----- This routine is a reimplementation of Mark Johnson's Dtag toolbox method and tested against the Matlab version to be sure it has the same result. """ import numpy # positive threshold: p (over) n (under) pt_p = x > thresh pt_n = ~pt_p # negative threshold: p (over) n (under) nt_n = x < -thresh nt_p = ~nt_n # Over positive threshold +thresh # neg to pos pt_np = (pt_p[:-1] & pt_n[1:]).nonzero()[0] # pos to neg pt_pn = (pt_n[:-1] & pt_p[1:]).nonzero()[0] + 1 # Over positive threshold +thresh # neg to pos nt_np = (nt_p[:-1] & nt_n[1:]).nonzero()[0] + 1 # pos to neg nt_pn = (nt_n[:-1] & nt_p[1:]).nonzero()[0] # Concat indices, order sequentially ind_all = numpy.hstack((pt_np, nt_np, pt_pn, nt_pn)) ind_all.sort() # Omit rows where just touching but not crossing crossing_mask = ~(numpy.diff(numpy.sign(x[ind_all])) == 0) # Append a False to make the same length as ind_all crossing_mask = numpy.hstack((crossing_mask, False)) # Get 1st and 2nd crossings ind_1stx = ind_all[crossing_mask] ind_2ndx = ind_all[numpy.where(crossing_mask)[0] + 1] # TODO odd option to replace with NaNs rather than delete? # Delete indices that do not have a second crossing del_ind = numpy.where(ind_2ndx > len(x) - 1)[0] for i in del_ind: ind_1stx = numpy.delete(ind_1stx, i) ind_2ndx = numpy.delete(ind_1stx, i) # depends on [control=['for'], data=['i']] # Get direction/sign of crossing signs = numpy.sign(x[ind_1stx]) * -1 # Add column of direction and transpose zc = numpy.vstack((ind_1stx, ind_2ndx, signs)).T # TODO not mentioned in docstring, remove? #x_norm? = ((x[:, 1] * zc[:, 0]) - (x[:, 0] * zc[:, 1])) / x[:, 1] - x[:, 0] if t_max: zc = zc[zc[:, 1] - zc[:, 0] <= t_max, :] # depends on [control=['if'], data=[]] return zc.astype(int)
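A quick demo of findzc (as fixed above) on two periods of a clean sine wave; each returned row is (first-threshold cue, second-threshold cue, sign), with crossings expected near pi (down), 2*pi (up), and 3*pi (down).

import numpy as np

t = np.linspace(0, 4 * np.pi, 400)
zc = findzc(np.sin(t), thresh=0.5)
print(zc)  # three rows; signs -1, 1, -1 for down/up/down crossings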
def make_export_strategy(
        args,
        keep_target,
        assets_extra,
        features,
        schema,
        stats):
  """Makes prediction graph that takes json input.

  Args:
    args: command line args
    keep_target: If true, target column is returned in prediction graph.
        Target column must also exist in input data
    assets_extra: other files to copy to the output folder
    job_dir: root job folder
    features: features dict
    schema: schema list
    stats: stats dict
  """
  target_name = feature_transforms.get_target_name(features)
  csv_header = [col['name'] for col in schema]
  if not keep_target:
    csv_header.remove(target_name)

  def export_fn(estimator, export_dir_base, checkpoint_path=None, eval_result=None):
    with ops.Graph().as_default() as g:
      contrib_variables.create_global_step(g)

      input_ops = feature_transforms.build_csv_serving_tensors_for_training_step(
          args.analysis, features, schema, stats, keep_target)
      model_fn_ops = estimator._call_model_fn(input_ops.features,
                                              None,
                                              model_fn_lib.ModeKeys.INFER)
      output_fetch_tensors = make_prediction_output_tensors(
          args=args,
          features=features,
          input_ops=input_ops,
          model_fn_ops=model_fn_ops,
          keep_target=keep_target)

      # Don't use signature_def_utils.predict_signature_def as that renames
      # tensor names if there is only 1 input/output tensor!
      signature_inputs = {key: tf.saved_model.utils.build_tensor_info(tensor)
                          for key, tensor in six.iteritems(input_ops.default_inputs)}
      signature_outputs = {key: tf.saved_model.utils.build_tensor_info(tensor)
                           for key, tensor in six.iteritems(output_fetch_tensors)}
      signature_def_map = {
          'serving_default':
              signature_def_utils.build_signature_def(
                  signature_inputs,
                  signature_outputs,
                  tf.saved_model.signature_constants.PREDICT_METHOD_NAME)}

      if not checkpoint_path:
        # Locate the latest checkpoint
        checkpoint_path = saver.latest_checkpoint(estimator._model_dir)
      if not checkpoint_path:
        raise ValueError("Couldn't find trained model at %s."
                         % estimator._model_dir)

      export_dir = saved_model_export_utils.get_timestamped_export_dir(
          export_dir_base)

      if (model_fn_ops.scaffold is not None and
          model_fn_ops.scaffold.saver is not None):
        saver_for_restore = model_fn_ops.scaffold.saver
      else:
        saver_for_restore = saver.Saver(sharded=True)

      with tf_session.Session('') as session:
        saver_for_restore.restore(session, checkpoint_path)
        init_op = control_flow_ops.group(
            variables.local_variables_initializer(),
            resources.initialize_resources(resources.shared_resources()),
            tf.tables_initializer())

        # Perform the export
        builder = saved_model_builder.SavedModelBuilder(export_dir)
        builder.add_meta_graph_and_variables(
            session, [tag_constants.SERVING],
            signature_def_map=signature_def_map,
            assets_collection=ops.get_collection(
                ops.GraphKeys.ASSET_FILEPATHS),
            legacy_init_op=init_op)
        builder.save(False)

      # Add the extra assets
      if assets_extra:
        assets_extra_path = os.path.join(compat.as_bytes(export_dir),
                                         compat.as_bytes('assets.extra'))
        for dest_relative, source in assets_extra.items():
          dest_absolute = os.path.join(compat.as_bytes(assets_extra_path),
                                       compat.as_bytes(dest_relative))
          dest_path = os.path.dirname(dest_absolute)
          file_io.recursive_create_dir(dest_path)
          file_io.copy(source, dest_absolute)

    # only keep the last 3 models
    saved_model_export_utils.garbage_collect_exports(
        export_dir_base,
        exports_to_keep=3)

    # save the last model to the model folder.
    # export_dir_base = A/B/intermediate_models/
    if keep_target:
      final_dir = os.path.join(args.job_dir, 'evaluation_model')
    else:
      final_dir = os.path.join(args.job_dir, 'model')
    if file_io.is_directory(final_dir):
      file_io.delete_recursively(final_dir)
    file_io.recursive_create_dir(final_dir)
    recursive_copy(export_dir, final_dir)

    return export_dir

  if keep_target:
    intermediate_dir = 'intermediate_evaluation_models'
  else:
    intermediate_dir = 'intermediate_prediction_models'

  return export_strategy.ExportStrategy(intermediate_dir, export_fn)
def function[make_export_strategy, parameter[args, keep_target, assets_extra, features, schema, stats]]: constant[Makes prediction graph that takes json input. Args: args: command line args keep_target: If ture, target column is returned in prediction graph. Target column must also exist in input data assets_extra: other fiels to copy to the output folder job_dir: root job folder features: features dict schema: schema list stats: stats dict ] variable[target_name] assign[=] call[name[feature_transforms].get_target_name, parameter[name[features]]] variable[csv_header] assign[=] <ast.ListComp object at 0x7da18fe90e20> if <ast.UnaryOp object at 0x7da18fe90b80> begin[:] call[name[csv_header].remove, parameter[name[target_name]]] def function[export_fn, parameter[estimator, export_dir_base, checkpoint_path, eval_result]]: with call[call[name[ops].Graph, parameter[]].as_default, parameter[]] begin[:] call[name[contrib_variables].create_global_step, parameter[name[g]]] variable[input_ops] assign[=] call[name[feature_transforms].build_csv_serving_tensors_for_training_step, parameter[name[args].analysis, name[features], name[schema], name[stats], name[keep_target]]] variable[model_fn_ops] assign[=] call[name[estimator]._call_model_fn, parameter[name[input_ops].features, constant[None], name[model_fn_lib].ModeKeys.INFER]] variable[output_fetch_tensors] assign[=] call[name[make_prediction_output_tensors], parameter[]] variable[signature_inputs] assign[=] <ast.DictComp object at 0x7da18bc72b60> variable[signature_outputs] assign[=] <ast.DictComp object at 0x7da18bc72fe0> variable[signature_def_map] assign[=] dictionary[[<ast.Constant object at 0x7da18bc70be0>], [<ast.Call object at 0x7da18bc72d70>]] if <ast.UnaryOp object at 0x7da18bc701c0> begin[:] variable[checkpoint_path] assign[=] call[name[saver].latest_checkpoint, parameter[name[estimator]._model_dir]] if <ast.UnaryOp object at 0x7da18bc734c0> begin[:] <ast.Raise object at 0x7da18bc70c10> variable[export_dir] assign[=] call[name[saved_model_export_utils].get_timestamped_export_dir, parameter[name[export_dir_base]]] if <ast.BoolOp object at 0x7da18bc73df0> begin[:] variable[saver_for_restore] assign[=] name[model_fn_ops].scaffold.saver with call[name[tf_session].Session, parameter[constant[]]] begin[:] call[name[saver_for_restore].restore, parameter[name[session], name[checkpoint_path]]] variable[init_op] assign[=] call[name[control_flow_ops].group, parameter[call[name[variables].local_variables_initializer, parameter[]], call[name[resources].initialize_resources, parameter[call[name[resources].shared_resources, parameter[]]]], call[name[tf].tables_initializer, parameter[]]]] variable[builder] assign[=] call[name[saved_model_builder].SavedModelBuilder, parameter[name[export_dir]]] call[name[builder].add_meta_graph_and_variables, parameter[name[session], list[[<ast.Attribute object at 0x7da18bc736a0>]]]] call[name[builder].save, parameter[constant[False]]] if name[assets_extra] begin[:] variable[assets_extra_path] assign[=] call[name[os].path.join, parameter[call[name[compat].as_bytes, parameter[name[export_dir]]], call[name[compat].as_bytes, parameter[constant[assets.extra]]]]] for taget[tuple[[<ast.Name object at 0x7da18bc738e0>, <ast.Name object at 0x7da18bc728f0>]]] in starred[call[name[assets_extra].items, parameter[]]] begin[:] variable[dest_absolute] assign[=] call[name[os].path.join, parameter[call[name[compat].as_bytes, parameter[name[assets_extra_path]]], call[name[compat].as_bytes, parameter[name[dest_relative]]]]] variable[dest_path] 
assign[=] call[name[os].path.dirname, parameter[name[dest_absolute]]] call[name[file_io].recursive_create_dir, parameter[name[dest_path]]] call[name[file_io].copy, parameter[name[source], name[dest_absolute]]] call[name[saved_model_export_utils].garbage_collect_exports, parameter[name[export_dir_base]]] if name[keep_target] begin[:] variable[final_dir] assign[=] call[name[os].path.join, parameter[name[args].job_dir, constant[evaluation_model]]] if call[name[file_io].is_directory, parameter[name[final_dir]]] begin[:] call[name[file_io].delete_recursively, parameter[name[final_dir]]] call[name[file_io].recursive_create_dir, parameter[name[final_dir]]] call[name[recursive_copy], parameter[name[export_dir], name[final_dir]]] return[name[export_dir]] if name[keep_target] begin[:] variable[intermediate_dir] assign[=] constant[intermediate_evaluation_models] return[call[name[export_strategy].ExportStrategy, parameter[name[intermediate_dir], name[export_fn]]]]
keyword[def] identifier[make_export_strategy] ( identifier[args] , identifier[keep_target] , identifier[assets_extra] , identifier[features] , identifier[schema] , identifier[stats] ): literal[string] identifier[target_name] = identifier[feature_transforms] . identifier[get_target_name] ( identifier[features] ) identifier[csv_header] =[ identifier[col] [ literal[string] ] keyword[for] identifier[col] keyword[in] identifier[schema] ] keyword[if] keyword[not] identifier[keep_target] : identifier[csv_header] . identifier[remove] ( identifier[target_name] ) keyword[def] identifier[export_fn] ( identifier[estimator] , identifier[export_dir_base] , identifier[checkpoint_path] = keyword[None] , identifier[eval_result] = keyword[None] ): keyword[with] identifier[ops] . identifier[Graph] (). identifier[as_default] () keyword[as] identifier[g] : identifier[contrib_variables] . identifier[create_global_step] ( identifier[g] ) identifier[input_ops] = identifier[feature_transforms] . identifier[build_csv_serving_tensors_for_training_step] ( identifier[args] . identifier[analysis] , identifier[features] , identifier[schema] , identifier[stats] , identifier[keep_target] ) identifier[model_fn_ops] = identifier[estimator] . identifier[_call_model_fn] ( identifier[input_ops] . identifier[features] , keyword[None] , identifier[model_fn_lib] . identifier[ModeKeys] . identifier[INFER] ) identifier[output_fetch_tensors] = identifier[make_prediction_output_tensors] ( identifier[args] = identifier[args] , identifier[features] = identifier[features] , identifier[input_ops] = identifier[input_ops] , identifier[model_fn_ops] = identifier[model_fn_ops] , identifier[keep_target] = identifier[keep_target] ) identifier[signature_inputs] ={ identifier[key] : identifier[tf] . identifier[saved_model] . identifier[utils] . identifier[build_tensor_info] ( identifier[tensor] ) keyword[for] identifier[key] , identifier[tensor] keyword[in] identifier[six] . identifier[iteritems] ( identifier[input_ops] . identifier[default_inputs] )} identifier[signature_outputs] ={ identifier[key] : identifier[tf] . identifier[saved_model] . identifier[utils] . identifier[build_tensor_info] ( identifier[tensor] ) keyword[for] identifier[key] , identifier[tensor] keyword[in] identifier[six] . identifier[iteritems] ( identifier[output_fetch_tensors] )} identifier[signature_def_map] ={ literal[string] : identifier[signature_def_utils] . identifier[build_signature_def] ( identifier[signature_inputs] , identifier[signature_outputs] , identifier[tf] . identifier[saved_model] . identifier[signature_constants] . identifier[PREDICT_METHOD_NAME] )} keyword[if] keyword[not] identifier[checkpoint_path] : identifier[checkpoint_path] = identifier[saver] . identifier[latest_checkpoint] ( identifier[estimator] . identifier[_model_dir] ) keyword[if] keyword[not] identifier[checkpoint_path] : keyword[raise] identifier[ValueError] ( literal[string] % identifier[estimator] . identifier[_model_dir] ) identifier[export_dir] = identifier[saved_model_export_utils] . identifier[get_timestamped_export_dir] ( identifier[export_dir_base] ) keyword[if] ( identifier[model_fn_ops] . identifier[scaffold] keyword[is] keyword[not] keyword[None] keyword[and] identifier[model_fn_ops] . identifier[scaffold] . identifier[saver] keyword[is] keyword[not] keyword[None] ): identifier[saver_for_restore] = identifier[model_fn_ops] . identifier[scaffold] . identifier[saver] keyword[else] : identifier[saver_for_restore] = identifier[saver] . 
identifier[Saver] ( identifier[sharded] = keyword[True] ) keyword[with] identifier[tf_session] . identifier[Session] ( literal[string] ) keyword[as] identifier[session] : identifier[saver_for_restore] . identifier[restore] ( identifier[session] , identifier[checkpoint_path] ) identifier[init_op] = identifier[control_flow_ops] . identifier[group] ( identifier[variables] . identifier[local_variables_initializer] (), identifier[resources] . identifier[initialize_resources] ( identifier[resources] . identifier[shared_resources] ()), identifier[tf] . identifier[tables_initializer] ()) identifier[builder] = identifier[saved_model_builder] . identifier[SavedModelBuilder] ( identifier[export_dir] ) identifier[builder] . identifier[add_meta_graph_and_variables] ( identifier[session] ,[ identifier[tag_constants] . identifier[SERVING] ], identifier[signature_def_map] = identifier[signature_def_map] , identifier[assets_collection] = identifier[ops] . identifier[get_collection] ( identifier[ops] . identifier[GraphKeys] . identifier[ASSET_FILEPATHS] ), identifier[legacy_init_op] = identifier[init_op] ) identifier[builder] . identifier[save] ( keyword[False] ) keyword[if] identifier[assets_extra] : identifier[assets_extra_path] = identifier[os] . identifier[path] . identifier[join] ( identifier[compat] . identifier[as_bytes] ( identifier[export_dir] ), identifier[compat] . identifier[as_bytes] ( literal[string] )) keyword[for] identifier[dest_relative] , identifier[source] keyword[in] identifier[assets_extra] . identifier[items] (): identifier[dest_absolute] = identifier[os] . identifier[path] . identifier[join] ( identifier[compat] . identifier[as_bytes] ( identifier[assets_extra_path] ), identifier[compat] . identifier[as_bytes] ( identifier[dest_relative] )) identifier[dest_path] = identifier[os] . identifier[path] . identifier[dirname] ( identifier[dest_absolute] ) identifier[file_io] . identifier[recursive_create_dir] ( identifier[dest_path] ) identifier[file_io] . identifier[copy] ( identifier[source] , identifier[dest_absolute] ) identifier[saved_model_export_utils] . identifier[garbage_collect_exports] ( identifier[export_dir_base] , identifier[exports_to_keep] = literal[int] ) keyword[if] identifier[keep_target] : identifier[final_dir] = identifier[os] . identifier[path] . identifier[join] ( identifier[args] . identifier[job_dir] , literal[string] ) keyword[else] : identifier[final_dir] = identifier[os] . identifier[path] . identifier[join] ( identifier[args] . identifier[job_dir] , literal[string] ) keyword[if] identifier[file_io] . identifier[is_directory] ( identifier[final_dir] ): identifier[file_io] . identifier[delete_recursively] ( identifier[final_dir] ) identifier[file_io] . identifier[recursive_create_dir] ( identifier[final_dir] ) identifier[recursive_copy] ( identifier[export_dir] , identifier[final_dir] ) keyword[return] identifier[export_dir] keyword[if] identifier[keep_target] : identifier[intermediate_dir] = literal[string] keyword[else] : identifier[intermediate_dir] = literal[string] keyword[return] identifier[export_strategy] . identifier[ExportStrategy] ( identifier[intermediate_dir] , identifier[export_fn] )
def make_export_strategy(args, keep_target, assets_extra, features, schema, stats): """Makes prediction graph that takes json input. Args: args: command line args keep_target: If ture, target column is returned in prediction graph. Target column must also exist in input data assets_extra: other fiels to copy to the output folder job_dir: root job folder features: features dict schema: schema list stats: stats dict """ target_name = feature_transforms.get_target_name(features) csv_header = [col['name'] for col in schema] if not keep_target: csv_header.remove(target_name) # depends on [control=['if'], data=[]] def export_fn(estimator, export_dir_base, checkpoint_path=None, eval_result=None): with ops.Graph().as_default() as g: contrib_variables.create_global_step(g) input_ops = feature_transforms.build_csv_serving_tensors_for_training_step(args.analysis, features, schema, stats, keep_target) model_fn_ops = estimator._call_model_fn(input_ops.features, None, model_fn_lib.ModeKeys.INFER) output_fetch_tensors = make_prediction_output_tensors(args=args, features=features, input_ops=input_ops, model_fn_ops=model_fn_ops, keep_target=keep_target) # Don't use signature_def_utils.predict_signature_def as that renames # tensor names if there is only 1 input/output tensor! signature_inputs = {key: tf.saved_model.utils.build_tensor_info(tensor) for (key, tensor) in six.iteritems(input_ops.default_inputs)} signature_outputs = {key: tf.saved_model.utils.build_tensor_info(tensor) for (key, tensor) in six.iteritems(output_fetch_tensors)} signature_def_map = {'serving_default': signature_def_utils.build_signature_def(signature_inputs, signature_outputs, tf.saved_model.signature_constants.PREDICT_METHOD_NAME)} if not checkpoint_path: # Locate the latest checkpoint checkpoint_path = saver.latest_checkpoint(estimator._model_dir) # depends on [control=['if'], data=[]] if not checkpoint_path: raise ValueError("Couldn't find trained model at %s." 
% estimator._model_dir) # depends on [control=['if'], data=[]] export_dir = saved_model_export_utils.get_timestamped_export_dir(export_dir_base) if model_fn_ops.scaffold is not None and model_fn_ops.scaffold.saver is not None: saver_for_restore = model_fn_ops.scaffold.saver # depends on [control=['if'], data=[]] else: saver_for_restore = saver.Saver(sharded=True) with tf_session.Session('') as session: saver_for_restore.restore(session, checkpoint_path) init_op = control_flow_ops.group(variables.local_variables_initializer(), resources.initialize_resources(resources.shared_resources()), tf.tables_initializer()) # Perform the export builder = saved_model_builder.SavedModelBuilder(export_dir) builder.add_meta_graph_and_variables(session, [tag_constants.SERVING], signature_def_map=signature_def_map, assets_collection=ops.get_collection(ops.GraphKeys.ASSET_FILEPATHS), legacy_init_op=init_op) builder.save(False) # depends on [control=['with'], data=['session']] # Add the extra assets if assets_extra: assets_extra_path = os.path.join(compat.as_bytes(export_dir), compat.as_bytes('assets.extra')) for (dest_relative, source) in assets_extra.items(): dest_absolute = os.path.join(compat.as_bytes(assets_extra_path), compat.as_bytes(dest_relative)) dest_path = os.path.dirname(dest_absolute) file_io.recursive_create_dir(dest_path) file_io.copy(source, dest_absolute) # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['with'], data=['g']] # only keep the last 3 models saved_model_export_utils.garbage_collect_exports(export_dir_base, exports_to_keep=3) # save the last model to the model folder. # export_dir_base = A/B/intermediate_models/ if keep_target: final_dir = os.path.join(args.job_dir, 'evaluation_model') # depends on [control=['if'], data=[]] else: final_dir = os.path.join(args.job_dir, 'model') if file_io.is_directory(final_dir): file_io.delete_recursively(final_dir) # depends on [control=['if'], data=[]] file_io.recursive_create_dir(final_dir) recursive_copy(export_dir, final_dir) return export_dir if keep_target: intermediate_dir = 'intermediate_evaluation_models' # depends on [control=['if'], data=[]] else: intermediate_dir = 'intermediate_prediction_models' return export_strategy.ExportStrategy(intermediate_dir, export_fn)
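A minimal usage sketch for the export strategy above; `args`, `features`, `schema`, and `stats` are hypothetical stand-ins for objects the surrounding training pipeline builds, and the Experiment wiring noted in the comment is an assumption about how the returned strategy is consumed.

# Hypothetical usage -- every input below is produced by the wider pipeline.
strategy = make_export_strategy(args, keep_target=False, assets_extra=None,
                                features=features, schema=schema, stats=stats)
# A tf.contrib.learn Experiment would later invoke it roughly as:
#   strategy.export(estimator, export_path)
# where export_path plays the role of export_dir_base in export_fn above.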
def fit(self, X): """Compute the Robust Shared Response Model Parameters ---------- X : list of 2D arrays, element i has shape=[voxels_i, timepoints] Each element in the list contains the fMRI data of one subject. """ logger.info('Starting RSRM') # Check that the regularizer value is positive if 0.0 >= self.lam: raise ValueError("Gamma parameter should be positive.") # Check the number of subjects if len(X) <= 1: raise ValueError("There are not enough subjects in the input " "data to train the model.") # Check for input data sizes if X[0].shape[1] < self.features: raise ValueError( "There are not enough timepoints to train the model with " "{0:d} features.".format(self.features)) # Check if all subjects have same number of TRs for alignment number_trs = X[0].shape[1] number_subjects = len(X) for subject in range(number_subjects): assert_all_finite(X[subject]) if X[subject].shape[1] != number_trs: raise ValueError("Different number of alignment timepoints " "between subjects.") # Create a new random state self.random_state_ = np.random.RandomState(self.rand_seed) # Run RSRM self.w_, self.r_, self.s_ = self._rsrm(X) return self
def function[fit, parameter[self, X]]: constant[Compute the Robust Shared Response Model Parameters ---------- X : list of 2D arrays, element i has shape=[voxels_i, timepoints] Each element in the list contains the fMRI data of one subject. ] call[name[logger].info, parameter[constant[Starting RSRM]]] if compare[constant[0.0] greater_or_equal[>=] name[self].lam] begin[:] <ast.Raise object at 0x7da1b0787100> if compare[call[name[len], parameter[name[X]]] less_or_equal[<=] constant[1]] begin[:] <ast.Raise object at 0x7da1b07872e0> if compare[call[call[name[X]][constant[0]].shape][constant[1]] less[<] name[self].features] begin[:] <ast.Raise object at 0x7da1b0787d90> variable[number_trs] assign[=] call[call[name[X]][constant[0]].shape][constant[1]] variable[number_subjects] assign[=] call[name[len], parameter[name[X]]] for taget[name[subject]] in starred[call[name[range], parameter[name[number_subjects]]]] begin[:] call[name[assert_all_finite], parameter[call[name[X]][name[subject]]]] if compare[call[call[name[X]][name[subject]].shape][constant[1]] not_equal[!=] name[number_trs]] begin[:] <ast.Raise object at 0x7da1b0747160> name[self].random_state_ assign[=] call[name[np].random.RandomState, parameter[name[self].rand_seed]] <ast.Tuple object at 0x7da1b0747070> assign[=] call[name[self]._rsrm, parameter[name[X]]] return[name[self]]
keyword[def] identifier[fit] ( identifier[self] , identifier[X] ): literal[string] identifier[logger] . identifier[info] ( literal[string] ) keyword[if] literal[int] >= identifier[self] . identifier[lam] : keyword[raise] identifier[ValueError] ( literal[string] ) keyword[if] identifier[len] ( identifier[X] )<= literal[int] : keyword[raise] identifier[ValueError] ( literal[string] literal[string] ) keyword[if] identifier[X] [ literal[int] ]. identifier[shape] [ literal[int] ]< identifier[self] . identifier[features] : keyword[raise] identifier[ValueError] ( literal[string] literal[string] . identifier[format] ( identifier[self] . identifier[features] )) identifier[number_trs] = identifier[X] [ literal[int] ]. identifier[shape] [ literal[int] ] identifier[number_subjects] = identifier[len] ( identifier[X] ) keyword[for] identifier[subject] keyword[in] identifier[range] ( identifier[number_subjects] ): identifier[assert_all_finite] ( identifier[X] [ identifier[subject] ]) keyword[if] identifier[X] [ identifier[subject] ]. identifier[shape] [ literal[int] ]!= identifier[number_trs] : keyword[raise] identifier[ValueError] ( literal[string] literal[string] ) identifier[self] . identifier[random_state_] = identifier[np] . identifier[random] . identifier[RandomState] ( identifier[self] . identifier[rand_seed] ) identifier[self] . identifier[w_] , identifier[self] . identifier[r_] , identifier[self] . identifier[s_] = identifier[self] . identifier[_rsrm] ( identifier[X] ) keyword[return] identifier[self]
def fit(self, X): """Compute the Robust Shared Response Model Parameters ---------- X : list of 2D arrays, element i has shape=[voxels_i, timepoints] Each element in the list contains the fMRI data of one subject. """ logger.info('Starting RSRM') # Check that the regularizer value is positive if 0.0 >= self.lam: raise ValueError('Gamma parameter should be positive.') # depends on [control=['if'], data=[]] # Check the number of subjects if len(X) <= 1: raise ValueError('There are not enough subjects in the input data to train the model.') # depends on [control=['if'], data=[]] # Check for input data sizes if X[0].shape[1] < self.features: raise ValueError('There are not enough timepoints to train the model with {0:d} features.'.format(self.features)) # depends on [control=['if'], data=[]] # Check if all subjects have same number of TRs for alignment number_trs = X[0].shape[1] number_subjects = len(X) for subject in range(number_subjects): assert_all_finite(X[subject]) if X[subject].shape[1] != number_trs: raise ValueError('Different number of alignment timepoints between subjects.') # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['subject']] # Create a new random state self.random_state_ = np.random.RandomState(self.rand_seed) # Run RSRM (self.w_, self.r_, self.s_) = self._rsrm(X) return self
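A minimal sketch of calling `fit` above, assuming an `RSRM` class exposing this method; the constructor signature is an assumption, inferred from the attributes the method body actually reads.

import numpy as np

# Hypothetical constructor args; fit() only needs .lam, .features, .rand_seed
# (and the private _rsrm solver) to exist on the instance.
model = RSRM(features=10, lam=1.0, rand_seed=0)
X = [np.random.randn(50, 30) for _ in range(3)]  # 3 subjects, 50 voxels each, 30 shared TRs
model.fit(X)
# After fitting, model.w_, model.r_ and model.s_ hold the per-subject bases,
# shared response and individual terms returned by _rsrm().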
def aggregate(self, dimensions=None, function=None, spreadfn=None, **kwargs): """Applies an aggregate function to all ViewableElements. See :py:meth:`Dimensioned.opts` and :py:meth:`Apply.__call__` for more information. """ kwargs['_method_args'] = (dimensions, function, spreadfn) return self.__call__('aggregate', **kwargs)
def function[aggregate, parameter[self, dimensions, function, spreadfn]]: constant[Applies an aggregate function to all ViewableElements. See :py:meth:`Dimensioned.opts` and :py:meth:`Apply.__call__` for more information. ] call[name[kwargs]][constant[_method_args]] assign[=] tuple[[<ast.Name object at 0x7da1b2347f10>, <ast.Name object at 0x7da1b2345b10>, <ast.Name object at 0x7da1b2347430>]] return[call[name[self].__call__, parameter[constant[aggregate]]]]
keyword[def] identifier[aggregate] ( identifier[self] , identifier[dimensions] = keyword[None] , identifier[function] = keyword[None] , identifier[spreadfn] = keyword[None] ,** identifier[kwargs] ): literal[string] identifier[kwargs] [ literal[string] ]=( identifier[dimensions] , identifier[function] , identifier[spreadfn] ) keyword[return] identifier[self] . identifier[__call__] ( literal[string] ,** identifier[kwargs] )
def aggregate(self, dimensions=None, function=None, spreadfn=None, **kwargs): """Applies an aggregate function to all ViewableElements. See :py:meth:`Dimensioned.opts` and :py:meth:`Apply.__call__` for more information. """ kwargs['_method_args'] = (dimensions, function, spreadfn) return self.__call__('aggregate', **kwargs)
def __modify(self, withdrawal_id, **kwargs): """Call documentation: `/withdrawal/modify <https://www.wepay.com/developer/reference/withdrawal#modify>`_, plus extra keyword parameters: :keyword str access_token: will be used instead of instance's ``access_token``, with ``batch_mode=True`` will set `authorization` param to its value. :keyword bool batch_mode: turn on/off the batch_mode, see :class:`wepay.api.WePay` :keyword str batch_reference_id: `reference_id` param for batch call, see :class:`wepay.api.WePay` :keyword str api_version: WePay API version, see :class:`wepay.api.WePay` """ params = { 'withdrawal_id': withdrawal_id } return self.make_call(self.__modify, params, kwargs)
def function[__modify, parameter[self, withdrawal_id]]: constant[Call documentation: `/withdrawal/modify <https://www.wepay.com/developer/reference/withdrawal#modify>`_, plus extra keyword parameters: :keyword str access_token: will be used instead of instance's ``access_token``, with ``batch_mode=True`` will set `authorization` param to its value. :keyword bool batch_mode: turn on/off the batch_mode, see :class:`wepay.api.WePay` :keyword str batch_reference_id: `reference_id` param for batch call, see :class:`wepay.api.WePay` :keyword str api_version: WePay API version, see :class:`wepay.api.WePay` ] variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da1b1365d20>], [<ast.Name object at 0x7da1b1366ad0>]] return[call[name[self].make_call, parameter[name[self].__modify, name[params], name[kwargs]]]]
keyword[def] identifier[__modify] ( identifier[self] , identifier[withdrawal_id] ,** identifier[kwargs] ): literal[string] identifier[params] ={ literal[string] : identifier[withdrawal_id] } keyword[return] identifier[self] . identifier[make_call] ( identifier[self] . identifier[__modify] , identifier[params] , identifier[kwargs] )
def __modify(self, withdrawal_id, **kwargs): """Call documentation: `/withdrawal/modify <https://www.wepay.com/developer/reference/withdrawal#modify>`_, plus extra keyword parameters: :keyword str access_token: will be used instead of instance's ``access_token``, with ``batch_mode=True`` will set `authorization` param to its value. :keyword bool batch_mode: turn on/off the batch_mode, see :class:`wepay.api.WePay` :keyword str batch_reference_id: `reference_id` param for batch call, see :class:`wepay.api.WePay` :keyword str api_version: WePay API version, see :class:`wepay.api.WePay` """ params = {'withdrawal_id': withdrawal_id} return self.make_call(self.__modify, params, kwargs)
def _put_table(D, name, table): """ Use the dataset and name to place the new table data into the dataset. :param dict D: Dataset :param str name: Table name / path to store new table :param dict table: Newly created table data :return dict D: Dataset """ try: # print("Placing table: {}".format(name)) table["tableName"] = name m = re.match(re_table_name, name) if m: _pc = m.group(1) + "Data" _section = m.group(1) + m.group(2) # place a measurement table if m.group(3) == "measurement": # This shouldn't happen. User chose one of our options. That should be an empty location. if name in D[_pc][_section]["measurementTable"]: print("Oops. This shouldn't happen. That table path is occupied in the dataset") # Place the data else: D[_pc][_section]["measurementTable"][name] = table # place a model table type else: _model = _section + m.group(3) + m.group(4) _tt = m.group(5) + "Table" if name in D[_pc][_model][_tt]: print("Oops. This shouldn't happen. That table path is occupied in the dataset") else: D[_pc][_model][_tt][name] = table else: print("Oops. This shouldn't happen. That table name doesn't look right. Please report this error") return except Exception as e: print("addTable: Unable to put the table data into the dataset, {}".format(e)) return D
def function[_put_table, parameter[D, name, table]]: constant[ Use the dataset and name to place the new table data into the dataset. :param dict D: Dataset :param str name: Table name / path to store new table :param dict table: Newly created table data :return dict D: Dataset ] <ast.Try object at 0x7da18f7221a0> return[name[D]]
keyword[def] identifier[_put_table] ( identifier[D] , identifier[name] , identifier[table] ): literal[string] keyword[try] : identifier[table] [ literal[string] ]= identifier[name] identifier[m] = identifier[re] . identifier[match] ( identifier[re_table_name] , identifier[name] ) keyword[if] identifier[m] : identifier[_pc] = identifier[m] . identifier[group] ( literal[int] )+ literal[string] identifier[_section] = identifier[m] . identifier[group] ( literal[int] )+ identifier[m] . identifier[group] ( literal[int] ) keyword[if] identifier[m] . identifier[group] ( literal[int] )== literal[string] : keyword[if] identifier[name] keyword[in] identifier[D] [ identifier[_pc] ][ identifier[_section] ][ literal[string] ]: identifier[print] ( literal[string] ) keyword[else] : identifier[D] [ identifier[_pc] ][ identifier[_section] ][ literal[string] ][ identifier[name] ]= identifier[table] keyword[else] : identifier[_model] = identifier[_section] + identifier[m] . identifier[group] ( literal[int] )+ identifier[m] . identifier[group] ( literal[int] ) identifier[_tt] = identifier[m] . identifier[group] ( literal[int] )+ literal[string] keyword[if] identifier[name] keyword[in] identifier[D] [ identifier[_pc] ][ identifier[_model] ][ identifier[_tt] ]: identifier[print] ( literal[string] ) keyword[else] : identifier[D] [ identifier[_pc] ][ identifier[_model] ][ identifier[_tt] ][ identifier[name] ]= identifier[table] keyword[else] : identifier[print] ( literal[string] ) keyword[return] keyword[except] identifier[Exception] keyword[as] identifier[e] : identifier[print] ( literal[string] . identifier[format] ( identifier[e] )) keyword[return] identifier[D]
def _put_table(D, name, table): """ Use the dataset and name to place the new table data into the dataset. :param dict D: Dataset :param str name: Table name / path to store new table :param dict table: Newly created table data :return dict D: Dataset """ try: # print("Placing table: {}".format(name)) table['tableName'] = name m = re.match(re_table_name, name) if m: _pc = m.group(1) + 'Data' _section = m.group(1) + m.group(2) # place a measurement table if m.group(3) == 'measurement': # This shouldn't happen. User chose one of our options. That should be an empty location. if name in D[_pc][_section]['measurementTable']: print("Oops. This shouldn't happen. That table path is occupied in the dataset") # depends on [control=['if'], data=[]] else: # Place the data D[_pc][_section]['measurementTable'][name] = table # depends on [control=['if'], data=[]] else: # place a model table type _model = _section + m.group(3) + m.group(4) _tt = m.group(5) + 'Table' if name in D[_pc][_model][_tt]: print("Oops. This shouldn't happen. That table path is occupied in the dataset") # depends on [control=['if'], data=[]] else: D[_pc][_model][_tt][name] = table # depends on [control=['if'], data=[]] else: print("Oops. This shouldn't happen. That table name doesn't look right. Please report this error") return # depends on [control=['try'], data=[]] except Exception as e: print('addTable: Unable to put the table data into the dataset, {}'.format(e)) # depends on [control=['except'], data=['e']] return D
def _to_java_object_rdd(rdd): """ Return a JavaRDD of Object by unpickling. It will convert each Python object into a Java object by Pyrolite, whenever the RDD is serialized in batch or not. """ rdd = rdd._reserialize(AutoBatchedSerializer(PickleSerializer())) return rdd.ctx._jvm.org.apache.spark.mllib.api.python.SerDe.pythonToJava(rdd._jrdd, True)
def function[_to_java_object_rdd, parameter[rdd]]: constant[ Return a JavaRDD of Object by unpickling. It will convert each Python object into a Java object by Pyrolite, whenever the RDD is serialized in batch or not. ] variable[rdd] assign[=] call[name[rdd]._reserialize, parameter[call[name[AutoBatchedSerializer], parameter[call[name[PickleSerializer], parameter[]]]]]] return[call[name[rdd].ctx._jvm.org.apache.spark.mllib.api.python.SerDe.pythonToJava, parameter[name[rdd]._jrdd, constant[True]]]]
keyword[def] identifier[_to_java_object_rdd] ( identifier[rdd] ): literal[string] identifier[rdd] = identifier[rdd] . identifier[_reserialize] ( identifier[AutoBatchedSerializer] ( identifier[PickleSerializer] ())) keyword[return] identifier[rdd] . identifier[ctx] . identifier[_jvm] . identifier[org] . identifier[apache] . identifier[spark] . identifier[mllib] . identifier[api] . identifier[python] . identifier[SerDe] . identifier[pythonToJava] ( identifier[rdd] . identifier[_jrdd] , keyword[True] )
def _to_java_object_rdd(rdd): """ Return a JavaRDD of Object by unpickling. It will convert each Python object into a Java object by Pyrolite, whenever the RDD is serialized in batch or not. """ rdd = rdd._reserialize(AutoBatchedSerializer(PickleSerializer())) return rdd.ctx._jvm.org.apache.spark.mllib.api.python.SerDe.pythonToJava(rdd._jrdd, True)
def pip(self, cmd): """Execute some pip function using the virtual environment pip.""" pip_bin = self.cmd_path('pip') cmd = '{0} {1}'.format(pip_bin, cmd) return self._execute(cmd)
def function[pip, parameter[self, cmd]]: constant[Execute some pip function using the virtual environment pip.] variable[pip_bin] assign[=] call[name[self].cmd_path, parameter[constant[pip]]] variable[cmd] assign[=] call[constant[{0} {1}].format, parameter[name[pip_bin], name[cmd]]] return[call[name[self]._execute, parameter[name[cmd]]]]
keyword[def] identifier[pip] ( identifier[self] , identifier[cmd] ): literal[string] identifier[pip_bin] = identifier[self] . identifier[cmd_path] ( literal[string] ) identifier[cmd] = literal[string] . identifier[format] ( identifier[pip_bin] , identifier[cmd] ) keyword[return] identifier[self] . identifier[_execute] ( identifier[cmd] )
def pip(self, cmd): """Execute some pip function using the virtual environment pip.""" pip_bin = self.cmd_path('pip') cmd = '{0} {1}'.format(pip_bin, cmd) return self._execute(cmd)
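A hedged usage sketch for the `pip` wrapper above; `VirtualEnvironment` is a hypothetical owner class providing the `cmd_path` and `_execute` helpers the method relies on.

env = VirtualEnvironment('/path/to/venv')   # hypothetical wrapper exposing cmd_path/_execute
env.pip('install requests')                 # runs: /path/to/venv/bin/pip install requests
env.pip('freeze')                           # runs: /path/to/venv/bin/pip freeze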
def get_attribute_type(o_attr): ''' Get the base data type (S_DT) associated with a BridgePoint attribute. ''' ref_o_attr = one(o_attr).O_RATTR[106].O_BATTR[113].O_ATTR[106]() if ref_o_attr: return get_attribute_type(ref_o_attr) else: return one(o_attr).S_DT[114]()
def function[get_attribute_type, parameter[o_attr]]: constant[ Get the base data type (S_DT) associated with a BridgePoint attribute. ] variable[ref_o_attr] assign[=] call[call[call[call[call[name[one], parameter[name[o_attr]]].O_RATTR][constant[106]].O_BATTR][constant[113]].O_ATTR][constant[106]], parameter[]] if name[ref_o_attr] begin[:] return[call[name[get_attribute_type], parameter[name[ref_o_attr]]]]
keyword[def] identifier[get_attribute_type] ( identifier[o_attr] ): literal[string] identifier[ref_o_attr] = identifier[one] ( identifier[o_attr] ). identifier[O_RATTR] [ literal[int] ]. identifier[O_BATTR] [ literal[int] ]. identifier[O_ATTR] [ literal[int] ]() keyword[if] identifier[ref_o_attr] : keyword[return] identifier[get_attribute_type] ( identifier[ref_o_attr] ) keyword[else] : keyword[return] identifier[one] ( identifier[o_attr] ). identifier[S_DT] [ literal[int] ]()
def get_attribute_type(o_attr): """ Get the base data type (S_DT) associated with a BridgePoint attribute. """ ref_o_attr = one(o_attr).O_RATTR[106].O_BATTR[113].O_ATTR[106]() if ref_o_attr: return get_attribute_type(ref_o_attr) # depends on [control=['if'], data=[]] else: return one(o_attr).S_DT[114]()
def get_assessment_part(self, assessment_part_id): """Gets the ``AssessmentPart`` specified by its ``Id``. arg: assessment_part_id (osid.id.Id): the ``Id`` of the ``AssessmentPart`` to retrieve return: (osid.assessment.authoring.AssessmentPart) - the returned ``AssessmentPart`` raise: NotFound - no ``AssessmentPart`` found with the given ``Id`` raise: NullArgument - ``assessment_part_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for # osid.resource.ResourceLookupSession.get_resource # NOTE: This implementation currently ignores plenary view collection = JSONClientValidated('assessment_authoring', collection='AssessmentPart', runtime=self._runtime) result = collection.find_one( dict({'_id': ObjectId(self._get_id(assessment_part_id, 'assessment_authoring').get_identifier())}, **self._view_filter())) return objects.AssessmentPart(osid_object_map=result, runtime=self._runtime, proxy=self._proxy)
def function[get_assessment_part, parameter[self, assessment_part_id]]: constant[Gets the ``AssessmentPart`` specified by its ``Id``. arg: assessment_part_id (osid.id.Id): the ``Id`` of the ``AssessmentPart`` to retrieve return: (osid.assessment.authoring.AssessmentPart) - the returned ``AssessmentPart`` raise: NotFound - no ``AssessmentPart`` found with the given ``Id`` raise: NullArgument - ``assessment_part_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.* ] variable[collection] assign[=] call[name[JSONClientValidated], parameter[constant[assessment_authoring]]] variable[result] assign[=] call[name[collection].find_one, parameter[call[name[dict], parameter[dictionary[[<ast.Constant object at 0x7da207f012d0>], [<ast.Call object at 0x7da207f01300>]]]]]] return[call[name[objects].AssessmentPart, parameter[]]]
keyword[def] identifier[get_assessment_part] ( identifier[self] , identifier[assessment_part_id] ): literal[string] identifier[collection] = identifier[JSONClientValidated] ( literal[string] , identifier[collection] = literal[string] , identifier[runtime] = identifier[self] . identifier[_runtime] ) identifier[result] = identifier[collection] . identifier[find_one] ( identifier[dict] ({ literal[string] : identifier[ObjectId] ( identifier[self] . identifier[_get_id] ( identifier[assessment_part_id] , literal[string] ). identifier[get_identifier] ())}, ** identifier[self] . identifier[_view_filter] ())) keyword[return] identifier[objects] . identifier[AssessmentPart] ( identifier[osid_object_map] = identifier[result] , identifier[runtime] = identifier[self] . identifier[_runtime] , identifier[proxy] = identifier[self] . identifier[_proxy] )
def get_assessment_part(self, assessment_part_id): """Gets the ``AssessmentPart`` specified by its ``Id``. arg: assessment_part_id (osid.id.Id): the ``Id`` of the ``AssessmentPart`` to retrieve return: (osid.assessment.authoring.AssessmentPart) - the returned ``AssessmentPart`` raise: NotFound - no ``AssessmentPart`` found with the given ``Id`` raise: NullArgument - ``assessment_part_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for # osid.resource.ResourceLookupSession.get_resource # NOTE: This implementation currently ignores plenary view collection = JSONClientValidated('assessment_authoring', collection='AssessmentPart', runtime=self._runtime) result = collection.find_one(dict({'_id': ObjectId(self._get_id(assessment_part_id, 'assessment_authoring').get_identifier())}, **self._view_filter())) return objects.AssessmentPart(osid_object_map=result, runtime=self._runtime, proxy=self._proxy)
def Filter(self, function=None): """Construct a TextTable from the rows for which the function returns true. Args: function: A function applied to each row which returns a bool. If function is None, all rows with empty column values are removed. Returns: A new TextTable() Raises: TableError: When an invalid row entry is Append()'d """ flat = lambda x: x if isinstance(x, str) else ''.join([flat(y) for y in x]) if function is None: function = lambda row: bool(flat(row.values)) new_table = self.__class__() # pylint: disable=protected-access new_table._table = [self.header] for row in self: if function(row) is True: new_table.Append(row) return new_table
def function[Filter, parameter[self, function]]: constant[Construct a TextTable from the rows for which the function returns true. Args: function: A function applied to each row which returns a bool. If function is None, all rows with empty column values are removed. Returns: A new TextTable() Raises: TableError: When an invalid row entry is Append()'d ] variable[flat] assign[=] <ast.Lambda object at 0x7da1b1798b80> if compare[name[function] is constant[None]] begin[:] variable[function] assign[=] <ast.Lambda object at 0x7da1b179bf40> variable[new_table] assign[=] call[name[self].__class__, parameter[]] name[new_table]._table assign[=] list[[<ast.Attribute object at 0x7da1b179b970>]] for taget[name[row]] in starred[name[self]] begin[:] if compare[call[name[function], parameter[name[row]]] is constant[True]] begin[:] call[name[new_table].Append, parameter[name[row]]] return[name[new_table]]
keyword[def] identifier[Filter] ( identifier[self] , identifier[function] = keyword[None] ): literal[string] identifier[flat] = keyword[lambda] identifier[x] : identifier[x] keyword[if] identifier[isinstance] ( identifier[x] , identifier[str] ) keyword[else] literal[string] . identifier[join] ([ identifier[flat] ( identifier[y] ) keyword[for] identifier[y] keyword[in] identifier[x] ]) keyword[if] identifier[function] keyword[is] keyword[None] : identifier[function] = keyword[lambda] identifier[row] : identifier[bool] ( identifier[flat] ( identifier[row] . identifier[values] )) identifier[new_table] = identifier[self] . identifier[__class__] () identifier[new_table] . identifier[_table] =[ identifier[self] . identifier[header] ] keyword[for] identifier[row] keyword[in] identifier[self] : keyword[if] identifier[function] ( identifier[row] ) keyword[is] keyword[True] : identifier[new_table] . identifier[Append] ( identifier[row] ) keyword[return] identifier[new_table]
def Filter(self, function=None): """Construct a TextTable from the rows for which the function returns true. Args: function: A function applied to each row which returns a bool. If function is None, all rows with empty column values are removed. Returns: A new TextTable() Raises: TableError: When an invalid row entry is Append()'d """ flat = lambda x: x if isinstance(x, str) else ''.join([flat(y) for y in x]) if function is None: function = lambda row: bool(flat(row.values)) # depends on [control=['if'], data=['function']] new_table = self.__class__() # pylint: disable=protected-access new_table._table = [self.header] for row in self: if function(row) is True: new_table.Append(row) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['row']] return new_table
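A short sketch of `Filter` in use, assuming a populated `TextTable` whose rows support lookup by column name (as in textfsm-style tables); the column name is illustrative.

table = TextTable()            # assumed to be built and populated elsewhere
# Keep rows whose (illustrative) 'status' column equals 'up'; the header row is preserved.
up_only = table.Filter(lambda row: row['status'] == 'up')
# With no argument, rows whose column values are all empty are dropped.
non_empty = table.Filter()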
def find_inspectable_lines(lines, pos): """Find lines in home that are inspectable. Walk back from the err line up to 3 lines, but don't walk back over changes in indent level. Walk forward up to 3 lines, counting \ separated lines as 1. Don't walk over changes in indent level (unless part of an extended line) """ cnt = re.compile(r'\\[\s\n]*$') df = re.compile(r':[\s\n]*$') ind = re.compile(r'^(\s*)') toinspect = [] home = lines[pos] home_indent = ind.match(home).groups()[0] before = lines[max(pos-3, 0):pos] before.reverse() after = lines[pos+1:min(pos+4, len(lines))] for line in before: if ind.match(line).groups()[0] == home_indent: toinspect.append(line) else: break toinspect.reverse() toinspect.append(home) home_pos = len(toinspect)-1 continued = cnt.search(home) for line in after: if ((continued or ind.match(line).groups()[0] == home_indent) and not df.search(line)): toinspect.append(line) continued = cnt.search(line) else: break log.debug("Inspecting lines '''%s''' around %s", toinspect, home_pos) return toinspect, home_pos
def function[find_inspectable_lines, parameter[lines, pos]]: constant[Find lines in home that are inspectable. Walk back from the err line up to 3 lines, but don't walk back over changes in indent level. Walk forward up to 3 lines, counting \ separated lines as 1. Don't walk over changes in indent level (unless part of an extended line) ] variable[cnt] assign[=] call[name[re].compile, parameter[constant[\\[\s\n]*$]]] variable[df] assign[=] call[name[re].compile, parameter[constant[:[\s\n]*$]]] variable[ind] assign[=] call[name[re].compile, parameter[constant[^(\s*)]]] variable[toinspect] assign[=] list[[]] variable[home] assign[=] call[name[lines]][name[pos]] variable[home_indent] assign[=] call[call[call[name[ind].match, parameter[name[home]]].groups, parameter[]]][constant[0]] variable[before] assign[=] call[name[lines]][<ast.Slice object at 0x7da18bc71870>] call[name[before].reverse, parameter[]] variable[after] assign[=] call[name[lines]][<ast.Slice object at 0x7da18bc72350>] for taget[name[line]] in starred[name[before]] begin[:] if compare[call[call[call[name[ind].match, parameter[name[line]]].groups, parameter[]]][constant[0]] equal[==] name[home_indent]] begin[:] call[name[toinspect].append, parameter[name[line]]] call[name[toinspect].reverse, parameter[]] call[name[toinspect].append, parameter[name[home]]] variable[home_pos] assign[=] binary_operation[call[name[len], parameter[name[toinspect]]] - constant[1]] variable[continued] assign[=] call[name[cnt].search, parameter[name[home]]] for taget[name[line]] in starred[name[after]] begin[:] if <ast.BoolOp object at 0x7da18f09c280> begin[:] call[name[toinspect].append, parameter[name[line]]] variable[continued] assign[=] call[name[cnt].search, parameter[name[line]]] call[name[log].debug, parameter[constant[Inspecting lines '''%s''' around %s], name[toinspect], name[home_pos]]] return[tuple[[<ast.Name object at 0x7da18f09d180>, <ast.Name object at 0x7da18f09df60>]]]
keyword[def] identifier[find_inspectable_lines] ( identifier[lines] , identifier[pos] ): literal[string] identifier[cnt] = identifier[re] . identifier[compile] ( literal[string] ) identifier[df] = identifier[re] . identifier[compile] ( literal[string] ) identifier[ind] = identifier[re] . identifier[compile] ( literal[string] ) identifier[toinspect] =[] identifier[home] = identifier[lines] [ identifier[pos] ] identifier[home_indent] = identifier[ind] . identifier[match] ( identifier[home] ). identifier[groups] ()[ literal[int] ] identifier[before] = identifier[lines] [ identifier[max] ( identifier[pos] - literal[int] , literal[int] ): identifier[pos] ] identifier[before] . identifier[reverse] () identifier[after] = identifier[lines] [ identifier[pos] + literal[int] : identifier[min] ( identifier[pos] + literal[int] , identifier[len] ( identifier[lines] ))] keyword[for] identifier[line] keyword[in] identifier[before] : keyword[if] identifier[ind] . identifier[match] ( identifier[line] ). identifier[groups] ()[ literal[int] ]== identifier[home_indent] : identifier[toinspect] . identifier[append] ( identifier[line] ) keyword[else] : keyword[break] identifier[toinspect] . identifier[reverse] () identifier[toinspect] . identifier[append] ( identifier[home] ) identifier[home_pos] = identifier[len] ( identifier[toinspect] )- literal[int] identifier[continued] = identifier[cnt] . identifier[search] ( identifier[home] ) keyword[for] identifier[line] keyword[in] identifier[after] : keyword[if] (( identifier[continued] keyword[or] identifier[ind] . identifier[match] ( identifier[line] ). identifier[groups] ()[ literal[int] ]== identifier[home_indent] ) keyword[and] keyword[not] identifier[df] . identifier[search] ( identifier[line] )): identifier[toinspect] . identifier[append] ( identifier[line] ) identifier[continued] = identifier[cnt] . identifier[search] ( identifier[line] ) keyword[else] : keyword[break] identifier[log] . identifier[debug] ( literal[string] , identifier[toinspect] , identifier[home_pos] ) keyword[return] identifier[toinspect] , identifier[home_pos]
def find_inspectable_lines(lines, pos): """Find lines in home that are inspectable. Walk back from the err line up to 3 lines, but don't walk back over changes in indent level. Walk forward up to 3 lines, counting \\ separated lines as 1. Don't walk over changes in indent level (unless part of an extended line) """ cnt = re.compile('\\\\[\\s\\n]*$') df = re.compile(':[\\s\\n]*$') ind = re.compile('^(\\s*)') toinspect = [] home = lines[pos] home_indent = ind.match(home).groups()[0] before = lines[max(pos - 3, 0):pos] before.reverse() after = lines[pos + 1:min(pos + 4, len(lines))] for line in before: if ind.match(line).groups()[0] == home_indent: toinspect.append(line) # depends on [control=['if'], data=[]] else: break # depends on [control=['for'], data=['line']] toinspect.reverse() toinspect.append(home) home_pos = len(toinspect) - 1 continued = cnt.search(home) for line in after: if (continued or ind.match(line).groups()[0] == home_indent) and (not df.search(line)): toinspect.append(line) continued = cnt.search(line) # depends on [control=['if'], data=[]] else: break # depends on [control=['for'], data=['line']] log.debug("Inspecting lines '''%s''' around %s", toinspect, home_pos) return (toinspect, home_pos)
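A quick illustration of the context-window behaviour described in the docstring; the line list is illustrative.

src = ['def f():\n',
       '    a = 1\n',
       '    b = (2 +\n',   # suppose the error is reported here (pos=2)
       '         3)\n',
       '    return a + b\n']
context, home = find_inspectable_lines(src, 2)
# context == ['    a = 1\n', '    b = (2 +\n']; home == 1.
# The def line is excluded (indent change), and the continuation line is
# excluded because the home line ends with '+' rather than a backslash.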
def put(func: Callable, allowed_exceptions: List = None, title: str = None, req_obj_type: Callable = None) -> HTTPMethod: """Returns an HTTPMethod instance to create a PUT route. :see: :class:`~doctor.routing.HTTPMethod` """ return HTTPMethod('put', func, allowed_exceptions=allowed_exceptions, title=title, req_obj_type=req_obj_type)
def function[put, parameter[func, allowed_exceptions, title, req_obj_type]]: constant[Returns an HTTPMethod instance to create a PUT route. :see: :class:`~doctor.routing.HTTPMethod` ] return[call[name[HTTPMethod], parameter[constant[put], name[func]]]]
keyword[def] identifier[put] ( identifier[func] : identifier[Callable] , identifier[allowed_exceptions] : identifier[List] = keyword[None] , identifier[title] : identifier[str] = keyword[None] , identifier[req_obj_type] : identifier[Callable] = keyword[None] )-> identifier[HTTPMethod] : literal[string] keyword[return] identifier[HTTPMethod] ( literal[string] , identifier[func] , identifier[allowed_exceptions] = identifier[allowed_exceptions] , identifier[title] = identifier[title] , identifier[req_obj_type] = identifier[req_obj_type] )
def put(func: Callable, allowed_exceptions: List=None, title: str=None, req_obj_type: Callable=None) -> HTTPMethod: """Returns an HTTPMethod instance to create a PUT route. :see: :class:`~doctor.routing.HTTPMethod` """ return HTTPMethod('put', func, allowed_exceptions=allowed_exceptions, title=title, req_obj_type=req_obj_type)
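A hedged sketch of the helper above in use; the handler and its annotations are illustrative, and how the resulting object gets mounted on a router is left to doctor's routing layer.

def update_note(note_id: int, body: str) -> dict:
    """Illustrative logic handler for a PUT request."""
    return {'note_id': note_id, 'body': body}

method = put(update_note, title='Update a note')
# `method` is HTTPMethod('put', update_note, ...), ready for route registration.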
def plot(self, figsize=(12, 6), xscale='auto-gps', **kwargs): """Plot the data for this `Spectrogram` Parameters ---------- **kwargs all keyword arguments are passed along to underlying functions, see below for references Returns ------- plot : `~gwpy.plot.Plot` the `Plot` containing the data See Also -------- matplotlib.pyplot.figure for documentation of keyword arguments used to create the figure matplotlib.figure.Figure.add_subplot for documentation of keyword arguments used to create the axes gwpy.plot.Axes.imshow or gwpy.plot.Axes.pcolormesh for documentation of keyword arguments used in rendering the `Spectrogram` data """ if 'imshow' in kwargs: warnings.warn('the imshow keyword for Spectrogram.plot was ' 'removed, please pass method=\'imshow\' instead', DeprecationWarning) kwargs.setdefault('method', 'imshow' if kwargs.pop('imshow') else 'pcolormesh') kwargs.update(figsize=figsize, xscale=xscale) return super(Spectrogram, self).plot(**kwargs)
def function[plot, parameter[self, figsize, xscale]]: constant[Plot the data for this `Spectrogram` Parameters ---------- **kwargs all keyword arguments are passed along to underlying functions, see below for references Returns ------- plot : `~gwpy.plot.Plot` the `Plot` containing the data See Also -------- matplotlib.pyplot.figure for documentation of keyword arguments used to create the figure matplotlib.figure.Figure.add_subplot for documentation of keyword arguments used to create the axes gwpy.plot.Axes.imshow or gwpy.plot.Axes.pcolormesh for documentation of keyword arguments used in rendering the `Spectrogram` data ] if compare[constant[imshow] in name[kwargs]] begin[:] call[name[warnings].warn, parameter[constant[the imshow keyword for Spectrogram.plot was removed, please pass method='imshow' instead], name[DeprecationWarning]]] call[name[kwargs].setdefault, parameter[constant[method], <ast.IfExp object at 0x7da2043461d0>]] call[name[kwargs].update, parameter[]] return[call[call[name[super], parameter[name[Spectrogram], name[self]]].plot, parameter[]]]
keyword[def] identifier[plot] ( identifier[self] , identifier[figsize] =( literal[int] , literal[int] ), identifier[xscale] = literal[string] ,** identifier[kwargs] ): literal[string] keyword[if] literal[string] keyword[in] identifier[kwargs] : identifier[warnings] . identifier[warn] ( literal[string] literal[string] , identifier[DeprecationWarning] ) identifier[kwargs] . identifier[setdefault] ( literal[string] , literal[string] keyword[if] identifier[kwargs] . identifier[pop] ( literal[string] ) keyword[else] literal[string] ) identifier[kwargs] . identifier[update] ( identifier[figsize] = identifier[figsize] , identifier[xscale] = identifier[xscale] ) keyword[return] identifier[super] ( identifier[Spectrogram] , identifier[self] ). identifier[plot] (** identifier[kwargs] )
def plot(self, figsize=(12, 6), xscale='auto-gps', **kwargs): """Plot the data for this `Spectrogram` Parameters ---------- **kwargs all keyword arguments are passed along to underlying functions, see below for references Returns ------- plot : `~gwpy.plot.Plot` the `Plot` containing the data See Also -------- matplotlib.pyplot.figure for documentation of keyword arguments used to create the figure matplotlib.figure.Figure.add_subplot for documentation of keyword arguments used to create the axes gwpy.plot.Axes.imshow or gwpy.plot.Axes.pcolormesh for documentation of keyword arguments used in rendering the `Spectrogram` data """ if 'imshow' in kwargs: warnings.warn("the imshow keyword for Spectrogram.plot was removed, please pass method='imshow' instead", DeprecationWarning) kwargs.setdefault('method', 'imshow' if kwargs.pop('imshow') else 'pcolormesh') # depends on [control=['if'], data=['kwargs']] kwargs.update(figsize=figsize, xscale=xscale) return super(Spectrogram, self).plot(**kwargs)
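A brief usage sketch, assuming `spec` is a gwpy `Spectrogram` produced elsewhere; extra keywords flow through to imshow/pcolormesh as the docstring notes, and `method='imshow'` is the explicit replacement for the removed `imshow=` flag.

plot = spec.plot(figsize=(10, 5), method='imshow')
ax = plot.gca()
ax.set_yscale('log')   # standard matplotlib Axes call on the returned figure
plot.show()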
def get_revisions(page): """Extract the revisions of a page. Args: page: a string Returns: a list of strings """ start_string = " <revision>\n" end_string = " </revision>\n" ret = [] current_pos = 0 while True: start_pos = page.find(start_string, current_pos) if start_pos == -1: break end_pos = page.find(end_string, start_pos) assert end_pos != -1 ret.append(page[start_pos + len(start_string):end_pos]) current_pos = end_pos + len(end_string) return ret
def function[get_revisions, parameter[page]]: constant[Extract the revisions of a page. Args: page: a string Returns: a list of strings ] variable[start_string] assign[=] constant[ <revision> ] variable[end_string] assign[=] constant[ </revision> ] variable[ret] assign[=] list[[]] variable[current_pos] assign[=] constant[0] while constant[True] begin[:] variable[start_pos] assign[=] call[name[page].find, parameter[name[start_string], name[current_pos]]] if compare[name[start_pos] equal[==] <ast.UnaryOp object at 0x7da204564790>] begin[:] break variable[end_pos] assign[=] call[name[page].find, parameter[name[end_string], name[start_pos]]] assert[compare[name[end_pos] not_equal[!=] <ast.UnaryOp object at 0x7da2045646a0>]] call[name[ret].append, parameter[call[name[page]][<ast.Slice object at 0x7da2045640a0>]]] variable[current_pos] assign[=] binary_operation[name[end_pos] + call[name[len], parameter[name[end_string]]]] return[name[ret]]
keyword[def] identifier[get_revisions] ( identifier[page] ): literal[string] identifier[start_string] = literal[string] identifier[end_string] = literal[string] identifier[ret] =[] identifier[current_pos] = literal[int] keyword[while] keyword[True] : identifier[start_pos] = identifier[page] . identifier[find] ( identifier[start_string] , identifier[current_pos] ) keyword[if] identifier[start_pos] ==- literal[int] : keyword[break] identifier[end_pos] = identifier[page] . identifier[find] ( identifier[end_string] , identifier[start_pos] ) keyword[assert] identifier[end_pos] !=- literal[int] identifier[ret] . identifier[append] ( identifier[page] [ identifier[start_pos] + identifier[len] ( identifier[start_string] ): identifier[end_pos] ]) identifier[current_pos] = identifier[end_pos] + identifier[len] ( identifier[end_string] ) keyword[return] identifier[ret]
def get_revisions(page): """Extract the revisions of a page. Args: page: a string Returns: a list of strings """ start_string = ' <revision>\n' end_string = ' </revision>\n' ret = [] current_pos = 0 while True: start_pos = page.find(start_string, current_pos) if start_pos == -1: break # depends on [control=['if'], data=[]] end_pos = page.find(end_string, start_pos) assert end_pos != -1 ret.append(page[start_pos + len(start_string):end_pos]) current_pos = end_pos + len(end_string) # depends on [control=['while'], data=[]] return ret
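A tiny check of the extractor above; note the markers must match `start_string`/`end_string` byte-for-byte, including the leading spaces.

page = (" <revision>\nfirst\n </revision>\n"
        " <revision>\nsecond\n </revision>\n")
print(get_revisions(page))  # ['first\n', 'second\n']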
def enable_plugin(name, runas=None): ''' Enable a RabbitMQ plugin via the rabbitmq-plugins command. CLI Example: .. code-block:: bash salt '*' rabbitmq.enable_plugin foo ''' if runas is None and not salt.utils.platform.is_windows(): runas = salt.utils.user.get_user() cmd = [_get_rabbitmq_plugin(), 'enable', name] ret = __salt__['cmd.run_all'](cmd, reset_system_locale=False, runas=runas, python_shell=False) return _format_response(ret, 'Enabled')
def function[enable_plugin, parameter[name, runas]]: constant[ Enable a RabbitMQ plugin via the rabbitmq-plugins command. CLI Example: .. code-block:: bash salt '*' rabbitmq.enable_plugin foo ] if <ast.BoolOp object at 0x7da18f813af0> begin[:] variable[runas] assign[=] call[name[salt].utils.user.get_user, parameter[]] variable[cmd] assign[=] list[[<ast.Call object at 0x7da20c6e5780>, <ast.Constant object at 0x7da20c6e4a00>, <ast.Name object at 0x7da20c6e64a0>]] variable[ret] assign[=] call[call[name[__salt__]][constant[cmd.run_all]], parameter[name[cmd]]] return[call[name[_format_response], parameter[name[ret], constant[Enabled]]]]
keyword[def] identifier[enable_plugin] ( identifier[name] , identifier[runas] = keyword[None] ): literal[string] keyword[if] identifier[runas] keyword[is] keyword[None] keyword[and] keyword[not] identifier[salt] . identifier[utils] . identifier[platform] . identifier[is_windows] (): identifier[runas] = identifier[salt] . identifier[utils] . identifier[user] . identifier[get_user] () identifier[cmd] =[ identifier[_get_rabbitmq_plugin] (), literal[string] , identifier[name] ] identifier[ret] = identifier[__salt__] [ literal[string] ]( identifier[cmd] , identifier[reset_system_locale] = keyword[False] , identifier[runas] = identifier[runas] , identifier[python_shell] = keyword[False] ) keyword[return] identifier[_format_response] ( identifier[ret] , literal[string] )
def enable_plugin(name, runas=None): """ Enable a RabbitMQ plugin via the rabbitmq-plugins command. CLI Example: .. code-block:: bash salt '*' rabbitmq.enable_plugin foo """ if runas is None and (not salt.utils.platform.is_windows()): runas = salt.utils.user.get_user() # depends on [control=['if'], data=[]] cmd = [_get_rabbitmq_plugin(), 'enable', name] ret = __salt__['cmd.run_all'](cmd, reset_system_locale=False, runas=runas, python_shell=False) return _format_response(ret, 'Enabled')
def predict(self, p, x): """ Parameters ---------- p: ndarray (Ns, d) array of predictor variables (Ns samples, d dimensions) for regression x: ndarray ndarray of (x, y) points. Needs to be a (Ns, 2) array corresponding to the lon/lat, for example. array of Points, (x, y, z) pairs of shape (N, 3) for 3d kriging Returns ------- pred: ndarray The expected value of ys for the query inputs, of shape (Ns,). """ return self.krige_residual(x) + self.regression_model.predict(p)
def function[predict, parameter[self, p, x]]: constant[ Parameters ---------- p: ndarray (Ns, d) array of predictor variables (Ns samples, d dimensions) for regression x: ndarray ndarray of (x, y) points. Needs to be a (Ns, 2) array corresponding to the lon/lat, for example. array of Points, (x, y, z) pairs of shape (N, 3) for 3d kriging Returns ------- pred: ndarray The expected value of ys for the query inputs, of shape (Ns,). ] return[binary_operation[call[name[self].krige_residual, parameter[name[x]]] + call[name[self].regression_model.predict, parameter[name[p]]]]]
keyword[def] identifier[predict] ( identifier[self] , identifier[p] , identifier[x] ): literal[string] keyword[return] identifier[self] . identifier[krige_residual] ( identifier[x] )+ identifier[self] . identifier[regression_model] . identifier[predict] ( identifier[p] )
def predict(self, p, x): """ Parameters ---------- p: ndarray (Ns, d) array of predictor variables (Ns samples, d dimensions) for regression x: ndarray ndarray of (x, y) points. Needs to be a (Ns, 2) array corresponding to the lon/lat, for example. array of Points, (x, y, z) pairs of shape (N, 3) for 3d kriging Returns ------- pred: ndarray The expected value of ys for the query inputs, of shape (Ns,). """ return self.krige_residual(x) + self.regression_model.predict(p)
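A hedged usage sketch, assuming a regression-kriging object `rk` that has already been fitted, so that `krige_residual` and `regression_model` are available; shapes follow the docstring.

import numpy as np

p = np.random.rand(100, 5)   # Ns=100 samples, d=5 regression predictors
x = np.random.rand(100, 2)   # matching (lon, lat) query points
pred = rk.predict(p, x)      # regression trend + kriged residual, shape (100,)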
def _pi_id(self): """Try to detect id of a Raspberry Pi.""" # Check for Pi boards: pi_rev_code = self._pi_rev_code() if pi_rev_code: for model, codes in _PI_REV_CODES.items(): if pi_rev_code in codes: return model return None
def function[_pi_id, parameter[self]]: constant[Try to detect id of a Raspberry Pi.] variable[pi_rev_code] assign[=] call[name[self]._pi_rev_code, parameter[]] if name[pi_rev_code] begin[:] for taget[tuple[[<ast.Name object at 0x7da18bccaec0>, <ast.Name object at 0x7da18bccb8e0>]]] in starred[call[name[_PI_REV_CODES].items, parameter[]]] begin[:] if compare[name[pi_rev_code] in name[codes]] begin[:] return[name[model]] return[constant[None]]
keyword[def] identifier[_pi_id] ( identifier[self] ): literal[string] identifier[pi_rev_code] = identifier[self] . identifier[_pi_rev_code] () keyword[if] identifier[pi_rev_code] : keyword[for] identifier[model] , identifier[codes] keyword[in] identifier[_PI_REV_CODES] . identifier[items] (): keyword[if] identifier[pi_rev_code] keyword[in] identifier[codes] : keyword[return] identifier[model] keyword[return] keyword[None]
def _pi_id(self): """Try to detect id of a Raspberry Pi.""" # Check for Pi boards: pi_rev_code = self._pi_rev_code() if pi_rev_code: for (model, codes) in _PI_REV_CODES.items(): if pi_rev_code in codes: return model # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]] return None
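The lookup in _pi_id() is a reverse search over a model-to-codes table. A self-contained sketch of that search with illustrative table entries (the real _PI_REV_CODES lives in the detecting library):

# Hypothetical excerpt of a revision-code table; entries are illustrative.
_PI_REV_CODES = {
    "RASPBERRY_PI_3B": ("a02082", "a22082"),
    "RASPBERRY_PI_ZERO_W": ("9000c1",),
}

def pi_model_for(rev_code):
    # Find which model's tuple of known codes contains the detected code.
    for model, codes in _PI_REV_CODES.items():
        if rev_code in codes:
            return model
    return None

print(pi_model_for("a02082"))  # RASPBERRY_PI_3B
print(pi_model_for("ffffff"))  # None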
def connect_cloudwatch(aws_access_key_id=None, aws_secret_access_key=None, **kwargs): """ :type aws_access_key_id: string :param aws_access_key_id: Your AWS Access Key ID :type aws_secret_access_key: string :param aws_secret_access_key: Your AWS Secret Access Key :rtype: :class:`boto.ec2.cloudwatch.CloudWatchConnection` :return: A connection to Amazon's EC2 Monitoring service """ from boto.ec2.cloudwatch import CloudWatchConnection return CloudWatchConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
def function[connect_cloudwatch, parameter[aws_access_key_id, aws_secret_access_key]]: constant[ :type aws_access_key_id: string :param aws_access_key_id: Your AWS Access Key ID :type aws_secret_access_key: string :param aws_secret_access_key: Your AWS Secret Access Key :rtype: :class:`boto.ec2.cloudwatch.CloudWatchConnection` :return: A connection to Amazon's EC2 Monitoring service ] from relative_module[boto.ec2.cloudwatch] import module[CloudWatchConnection] return[call[name[CloudWatchConnection], parameter[name[aws_access_key_id], name[aws_secret_access_key]]]]
keyword[def] identifier[connect_cloudwatch] ( identifier[aws_access_key_id] = keyword[None] , identifier[aws_secret_access_key] = keyword[None] ,** identifier[kwargs] ): literal[string] keyword[from] identifier[boto] . identifier[ec2] . identifier[cloudwatch] keyword[import] identifier[CloudWatchConnection] keyword[return] identifier[CloudWatchConnection] ( identifier[aws_access_key_id] , identifier[aws_secret_access_key] ,** identifier[kwargs] )
def connect_cloudwatch(aws_access_key_id=None, aws_secret_access_key=None, **kwargs): """ :type aws_access_key_id: string :param aws_access_key_id: Your AWS Access Key ID :type aws_secret_access_key: string :param aws_secret_access_key: Your AWS Secret Access Key :rtype: :class:`boto.ec2.cloudwatch.CloudWatchConnection` :return: A connection to Amazon's EC2 Monitoring service """ from boto.ec2.cloudwatch import CloudWatchConnection return CloudWatchConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
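A hedged usage sketch for this legacy boto2 helper, assuming the function is exposed at package level as in boto2; the credential strings are placeholders:

import boto

conn = boto.connect_cloudwatch(
    aws_access_key_id="AKIA...",        # placeholder
    aws_secret_access_key="<secret>")   # placeholder
# conn is a CloudWatchConnection; e.g. conn.list_metrics() queries metrics.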
def fullversion(): ''' Return all server information from catalina.sh version CLI Example: .. code-block:: bash salt '*' tomcat.fullversion ''' cmd = __catalina_home() + '/bin/catalina.sh version' ret = {} out = __salt__['cmd.run'](cmd).splitlines() for line in out: if not line: continue if ': ' in line: comps = line.split(': ') ret[comps[0]] = comps[1].lstrip() return ret
def function[fullversion, parameter[]]: constant[ Return all server information from catalina.sh version CLI Example: .. code-block:: bash salt '*' tomcat.fullversion ] variable[cmd] assign[=] binary_operation[call[name[__catalina_home], parameter[]] + constant[/bin/catalina.sh version]] variable[ret] assign[=] dictionary[[], []] variable[out] assign[=] call[call[call[name[__salt__]][constant[cmd.run]], parameter[name[cmd]]].splitlines, parameter[]] for taget[name[line]] in starred[name[out]] begin[:] if <ast.UnaryOp object at 0x7da204345330> begin[:] continue if compare[constant[: ] in name[line]] begin[:] variable[comps] assign[=] call[name[line].split, parameter[constant[: ]]] call[name[ret]][call[name[comps]][constant[0]]] assign[=] call[call[name[comps]][constant[1]].lstrip, parameter[]] return[name[ret]]
keyword[def] identifier[fullversion] (): literal[string] identifier[cmd] = identifier[__catalina_home] ()+ literal[string] identifier[ret] ={} identifier[out] = identifier[__salt__] [ literal[string] ]( identifier[cmd] ). identifier[splitlines] () keyword[for] identifier[line] keyword[in] identifier[out] : keyword[if] keyword[not] identifier[line] : keyword[continue] keyword[if] literal[string] keyword[in] identifier[line] : identifier[comps] = identifier[line] . identifier[split] ( literal[string] ) identifier[ret] [ identifier[comps] [ literal[int] ]]= identifier[comps] [ literal[int] ]. identifier[lstrip] () keyword[return] identifier[ret]
def fullversion(): """ Return all server information from catalina.sh version CLI Example: .. code-block:: bash salt '*' tomcat.fullversion """ cmd = __catalina_home() + '/bin/catalina.sh version' ret = {} out = __salt__['cmd.run'](cmd).splitlines() for line in out: if not line: continue # depends on [control=['if'], data=[]] if ': ' in line: comps = line.split(': ') ret[comps[0]] = comps[1].lstrip() # depends on [control=['if'], data=['line']] # depends on [control=['for'], data=['line']] return ret
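The parsing loop in fullversion() splits each "Key: Value" line on ': ' and strips the value. A standalone run of the same logic against typical, abbreviated catalina.sh output:

# Sample output shape only; real `catalina.sh version` prints more fields.
sample = """Server version: Apache Tomcat/9.0.1
Server built:   Oct 20 2017
OS Name:        Linux"""

ret = {}
for line in sample.splitlines():
    if not line:
        continue
    if ': ' in line:
        comps = line.split(': ')
        ret[comps[0]] = comps[1].lstrip()   # drop padding after the colon

print(ret['Server version'])  # Apache Tomcat/9.0.1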
def _fake_deleteclass(self, namespace, **params):
    """
    Implements a mock server responder for
    :meth:`~pywbem.WBEMConnection.DeleteClass`

    Delete a class in the class repository if it exists.

    Emulates WBEMConnection.DeleteClass(...)

    This is simplistic in that it ignores issues like existing subclasses
    and existence of instances.

    Nothing is returned.

    Raises:
        CIMError: CIM_ERR_NOT_FOUND if ClassName defines a class not in
          the repository
    """
    # Validate namespace
    class_repo = self._get_class_repo(namespace)

    cname = params['ClassName'].classname
    try:
        class_repo[cname]
    except KeyError:
        raise CIMError(
            CIM_ERR_NOT_FOUND,
            _format("Class {0!A} in namespace {1!A} not in repository. "
                    "Nothing deleted.", cname, namespace))

    classnames = self._get_subclass_names(cname, namespace, True)
    classnames.append(cname)

    # delete all instances in this class and subclasses and delete
    # this class and subclasses
    for clname in classnames:
        if self.instances:
            inst_names = self.EnumerateInstanceNames(clname, namespace)
            for iname in inst_names:
                self.DeleteInstance(iname)
        del class_repo[clname]
def function[_fake_deleteclass, parameter[self, namespace]]:
    constant[
        Implements a mock server responder for
        :meth:`~pywbem.WBEMConnection.DeleteClass`

        Delete a class in the class repository if it exists.

        Emulates WBEMConnection.DeleteClass(...)

        This is simplistic in that it ignores issues like existing subclasses
        and existence of instances.

        Nothing is returned.

        Raises:
            CIMError: CIM_ERR_NOT_FOUND if ClassName defines a class not in
              the repository
    ]
    variable[class_repo] assign[=] call[name[self]._get_class_repo, parameter[name[namespace]]]
    variable[cname] assign[=] call[name[params]][constant[ClassName]].classname
    <ast.Try object at 0x7da20c6c4760>
    variable[classnames] assign[=] call[name[self]._get_subclass_names, parameter[name[cname], name[namespace], constant[True]]]
    call[name[classnames].append, parameter[name[cname]]]
    for taget[name[clname]] in starred[name[classnames]] begin[:]
        if name[self].instances begin[:]
            variable[inst_names] assign[=] call[name[self].EnumerateInstanceNames, parameter[name[clname], name[namespace]]]
            for taget[name[iname]] in starred[name[inst_names]] begin[:]
                call[name[self].DeleteInstance, parameter[name[iname]]]
        <ast.Delete object at 0x7da20c6c74f0>
keyword[def] identifier[_fake_deleteclass] ( identifier[self] , identifier[namespace] ,** identifier[params] ): literal[string] identifier[class_repo] = identifier[self] . identifier[_get_class_repo] ( identifier[namespace] ) identifier[cname] = identifier[params] [ literal[string] ]. identifier[classname] keyword[try] : identifier[class_repo] [ identifier[cname] ] keyword[except] identifier[KeyError] : keyword[raise] identifier[CIMError] ( identifier[CIM_ERR_NOT_FOUND] , identifier[_format] ( literal[string] literal[string] , identifier[cname] , identifier[namespace] )) identifier[classnames] = identifier[self] . identifier[_get_subclass_names] ( identifier[cname] , identifier[namespace] , keyword[True] ) identifier[classnames] . identifier[append] ( identifier[cname] ) keyword[for] identifier[clname] keyword[in] identifier[classnames] : keyword[if] identifier[self] . identifier[instances] : identifier[inst_names] = identifier[self] . identifier[EnumerateInstanceNames] ( identifier[clname] , identifier[namespace] ) keyword[for] identifier[iname] keyword[in] identifier[inst_names] : identifier[self] . identifier[DeleteInstance] ( identifier[iname] ) keyword[del] identifier[class_repo] [ identifier[clname] ]
def _fake_deleteclass(self, namespace, **params):
    """
    Implements a mock server responder for
    :meth:`~pywbem.WBEMConnection.DeleteClass`

    Delete a class in the class repository if it exists.

    Emulates WBEMConnection.DeleteClass(...)

    This is simplistic in that it ignores issues like existing subclasses
    and existence of instances.

    Nothing is returned.

    Raises:
        CIMError: CIM_ERR_NOT_FOUND if ClassName defines a class not in
          the repository
    """
    # Validate namespace
    class_repo = self._get_class_repo(namespace)
    cname = params['ClassName'].classname
    try:
        class_repo[cname] # depends on [control=['try'], data=[]]
    except KeyError:
        raise CIMError(CIM_ERR_NOT_FOUND, _format('Class {0!A} in namespace {1!A} not in repository. Nothing deleted.', cname, namespace)) # depends on [control=['except'], data=[]]
    classnames = self._get_subclass_names(cname, namespace, True)
    classnames.append(cname)
    # delete all instances in this class and subclasses and delete
    # this class and subclasses
    for clname in classnames:
        if self.instances:
            inst_names = self.EnumerateInstanceNames(clname, namespace)
            for iname in inst_names:
                self.DeleteInstance(iname) # depends on [control=['for'], data=['iname']] # depends on [control=['if'], data=[]]
        del class_repo[clname] # depends on [control=['for'], data=['clname']]
def tplot_options(option, value):
    """
    This function allows the user to set several global options for the generated plots.

    Parameters:
        option : str
            The name of the option.  See section below
        value : str/int/float/list
            The value of the option.  See section below.

    Options:
        ============  ==========   =====
        Options       Value type   Notes
        ============  ==========   =====
        title         str          Title of the entire output
        title_size    int          Font size of the output
        wsize         [int, int]   [height, width], pixel size of the plot window
        title_align   int          Offset position in pixels of the title
        var_label     str          Name of the tplot variable to be used as another x axis
        alt_range     [flt, flt]   The min and max altitude to be plotted on all alt plots
        map_x_range   [int, int]   The min and max longitude to be plotted on all map plots
        map_y_range   [int, int]   The min and max latitude to be plotted on all map plots
        x_range       [flt, flt]   The min and max x_range (usually time) to be plotted on all Spec/1D plots
        data_gap      int          Number of seconds with consecutive nan values allowed before no interp should occur
        crosshair     bool         Option allowing crosshairs and crosshair legend
        roi           [str, str]   Times between which there's a region of interest for a user
        ============  ==========   =====

    Returns:
        None

    Examples:
        >>> # Set the plot title
        >>> import pytplot
        >>> pytplot.tplot_options('title', 'SWEA Data for Orbit 1563')

        >>> # Set the window size
        >>> pytplot.tplot_options('wsize', [1000,500])

    """

    option = option.lower()

    temp = tplot_utilities.set_tplot_options(option, value, pytplot.tplot_opt_glob)

    pytplot.tplot_opt_glob = temp

    return
def function[tplot_options, parameter[option, value]]:
    constant[
    This function allows the user to set several global options for the generated plots.

    Parameters:
        option : str
            The name of the option.  See section below
        value : str/int/float/list
            The value of the option.  See section below.

    Options:
        ============  ==========   =====
        Options       Value type   Notes
        ============  ==========   =====
        title         str          Title of the entire output
        title_size    int          Font size of the output
        wsize         [int, int]   [height, width], pixel size of the plot window
        title_align   int          Offset position in pixels of the title
        var_label     str          Name of the tplot variable to be used as another x axis
        alt_range     [flt, flt]   The min and max altitude to be plotted on all alt plots
        map_x_range   [int, int]   The min and max longitude to be plotted on all map plots
        map_y_range   [int, int]   The min and max latitude to be plotted on all map plots
        x_range       [flt, flt]   The min and max x_range (usually time) to be plotted on all Spec/1D plots
        data_gap      int          Number of seconds with consecutive nan values allowed before no interp should occur
        crosshair     bool         Option allowing crosshairs and crosshair legend
        roi           [str, str]   Times between which there's a region of interest for a user
        ============  ==========   =====

    Returns:
        None

    Examples:
        >>> # Set the plot title
        >>> import pytplot
        >>> pytplot.tplot_options('title', 'SWEA Data for Orbit 1563')

        >>> # Set the window size
        >>> pytplot.tplot_options('wsize', [1000,500])
    ]
    variable[option] assign[=] call[name[option].lower, parameter[]]
    variable[temp] assign[=] call[name[tplot_utilities].set_tplot_options, parameter[name[option], name[value], name[pytplot].tplot_opt_glob]]
    name[pytplot].tplot_opt_glob assign[=] name[temp]
    return[None]
keyword[def] identifier[tplot_options] ( identifier[option] , identifier[value] ): literal[string] identifier[option] = identifier[option] . identifier[lower] () identifier[temp] = identifier[tplot_utilities] . identifier[set_tplot_options] ( identifier[option] , identifier[value] , identifier[pytplot] . identifier[tplot_opt_glob] ) identifier[pytplot] . identifier[tplot_opt_glob] = identifier[temp] keyword[return]
def tplot_options(option, value):
    """
    This function allows the user to set several global options for the generated plots.

    Parameters:
        option : str
            The name of the option.  See section below
        value : str/int/float/list
            The value of the option.  See section below.

    Options:
        ============  ==========   =====
        Options       Value type   Notes
        ============  ==========   =====
        title         str          Title of the entire output
        title_size    int          Font size of the output
        wsize         [int, int]   [height, width], pixel size of the plot window
        title_align   int          Offset position in pixels of the title
        var_label     str          Name of the tplot variable to be used as another x axis
        alt_range     [flt, flt]   The min and max altitude to be plotted on all alt plots
        map_x_range   [int, int]   The min and max longitude to be plotted on all map plots
        map_y_range   [int, int]   The min and max latitude to be plotted on all map plots
        x_range       [flt, flt]   The min and max x_range (usually time) to be plotted on all Spec/1D plots
        data_gap      int          Number of seconds with consecutive nan values allowed before no interp should occur
        crosshair     bool         Option allowing crosshairs and crosshair legend
        roi           [str, str]   Times between which there's a region of interest for a user
        ============  ==========   =====

    Returns:
        None

    Examples:
        >>> # Set the plot title
        >>> import pytplot
        >>> pytplot.tplot_options('title', 'SWEA Data for Orbit 1563')

        >>> # Set the window size
        >>> pytplot.tplot_options('wsize', [1000,500])
    """
    option = option.lower()
    temp = tplot_utilities.set_tplot_options(option, value, pytplot.tplot_opt_glob)
    pytplot.tplot_opt_glob = temp
    return
def config_program_reqs(cls, programs): """Run the program tester and determine if we can do anything.""" cls._set_program_defaults(programs) do_png = cls.optipng or cls.pngout or cls.advpng do_jpeg = cls.mozjpeg or cls.jpegrescan or cls.jpegtran do_comics = cls.comics if not do_png and not do_jpeg and not do_comics: print("All optimizers are not available or disabled.") exit(1)
def function[config_program_reqs, parameter[cls, programs]]: constant[Run the program tester and determine if we can do anything.] call[name[cls]._set_program_defaults, parameter[name[programs]]] variable[do_png] assign[=] <ast.BoolOp object at 0x7da207f01720> variable[do_jpeg] assign[=] <ast.BoolOp object at 0x7da207f01570> variable[do_comics] assign[=] name[cls].comics if <ast.BoolOp object at 0x7da207f02bc0> begin[:] call[name[print], parameter[constant[All optimizers are not available or disabled.]]] call[name[exit], parameter[constant[1]]]
keyword[def] identifier[config_program_reqs] ( identifier[cls] , identifier[programs] ): literal[string] identifier[cls] . identifier[_set_program_defaults] ( identifier[programs] ) identifier[do_png] = identifier[cls] . identifier[optipng] keyword[or] identifier[cls] . identifier[pngout] keyword[or] identifier[cls] . identifier[advpng] identifier[do_jpeg] = identifier[cls] . identifier[mozjpeg] keyword[or] identifier[cls] . identifier[jpegrescan] keyword[or] identifier[cls] . identifier[jpegtran] identifier[do_comics] = identifier[cls] . identifier[comics] keyword[if] keyword[not] identifier[do_png] keyword[and] keyword[not] identifier[do_jpeg] keyword[and] keyword[not] identifier[do_comics] : identifier[print] ( literal[string] ) identifier[exit] ( literal[int] )
def config_program_reqs(cls, programs): """Run the program tester and determine if we can do anything.""" cls._set_program_defaults(programs) do_png = cls.optipng or cls.pngout or cls.advpng do_jpeg = cls.mozjpeg or cls.jpegrescan or cls.jpegtran do_comics = cls.comics if not do_png and (not do_jpeg) and (not do_comics): print('All optimizers are not available or disabled.') exit(1) # depends on [control=['if'], data=[]]
def unbind(self, binding):
    """ Unbind the instance

    Args:
        binding (AtlasServiceBinding.Binding): Existing or New binding
    """
    username = self.backend.config.generate_binding_username(binding)

    try:
        self.backend.atlas.DatabaseUsers.delete_a_database_user(username)
    except ErrAtlasNotFound:
        # The user does not exist. This is not an issue because it is possible that we
        # removed it in a previous call that failed later on the broker.
        # This covers the manually deleted user case too.
        pass

    self.backend.storage.remove(binding)
def function[unbind, parameter[self, binding]]: constant[ Unbind the instance Args: binding (AtlasServiceBinding.Binding): Existing or New binding ] variable[username] assign[=] call[name[self].backend.config.generate_binding_username, parameter[name[binding]]] <ast.Try object at 0x7da1b2884df0> call[name[self].backend.storage.remove, parameter[name[binding]]]
keyword[def] identifier[unbind] ( identifier[self] , identifier[binding] ): literal[string] identifier[username] = identifier[self] . identifier[backend] . identifier[config] . identifier[generate_binding_username] ( identifier[binding] ) keyword[try] : identifier[self] . identifier[backend] . identifier[atlas] . identifier[DatabaseUsers] . identifier[delete_a_database_user] ( identifier[username] ) keyword[except] identifier[ErrAtlasNotFound] : keyword[pass] identifier[self] . identifier[backend] . identifier[storage] . identifier[remove] ( identifier[binding] )
def unbind(self, binding):
    """ Unbind the instance

    Args:
        binding (AtlasServiceBinding.Binding): Existing or New binding
    """
    username = self.backend.config.generate_binding_username(binding)
    try:
        self.backend.atlas.DatabaseUsers.delete_a_database_user(username) # depends on [control=['try'], data=[]]
    except ErrAtlasNotFound:
        # The user does not exist. This is not an issue because it is possible that we
        # removed it in a previous call that failed later on the broker.
        # This covers the manually deleted user case too.
        pass # depends on [control=['except'], data=[]]
    self.backend.storage.remove(binding)
def create_file_in_fs(file_data, file_name, file_system, static_dir):
    """
    Writes file in specific file system.

    Arguments:
        file_data (str): Data to store into the file.
        file_name (str): File name of the file to be created.
        file_system (OSFS): Import file system.
        static_dir (str): The directory in which to store the transcript file.
    """
    with file_system.open(combine(static_dir, file_name), 'wb') as f:
        f.write(file_data.encode('utf-8'))
def function[create_file_in_fs, parameter[file_data, file_name, file_system, static_dir]]:
    constant[
    Writes file in specific file system.

    Arguments:
        file_data (str): Data to store into the file.
        file_name (str): File name of the file to be created.
        file_system (OSFS): Import file system.
        static_dir (str): The directory in which to store the transcript file.
    ]
    with call[name[file_system].open, parameter[call[name[combine], parameter[name[static_dir], name[file_name]]], constant[wb]]] begin[:]
        call[name[f].write, parameter[call[name[file_data].encode, parameter[constant[utf-8]]]]]
keyword[def] identifier[create_file_in_fs] ( identifier[file_data] , identifier[file_name] , identifier[file_system] , identifier[static_dir] ): literal[string] keyword[with] identifier[file_system] . identifier[open] ( identifier[combine] ( identifier[static_dir] , identifier[file_name] ), literal[string] ) keyword[as] identifier[f] : identifier[f] . identifier[write] ( identifier[file_data] . identifier[encode] ( literal[string] ))
def create_file_in_fs(file_data, file_name, file_system, static_dir):
    """
    Writes file in specific file system.

    Arguments:
        file_data (str): Data to store into the file.
        file_name (str): File name of the file to be created.
        file_system (OSFS): Import file system.
        static_dir (str): The directory in which to store the transcript file.
    """
    with file_system.open(combine(static_dir, file_name), 'wb') as f:
        f.write(file_data.encode('utf-8')) # depends on [control=['with'], data=['f']]
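A hedged usage sketch, assuming the `fs` (PyFilesystem2) package supplies `combine` via fs.path; a MemoryFS stands in for the import file system so the example is self-contained:

from fs.memoryfs import MemoryFS
from fs.path import combine

file_system = MemoryFS()
file_system.makedirs("static", recreate=True)

# Write a small transcript the same way create_file_in_fs() does.
with file_system.open(combine("static", "transcript.srt"), "wb") as f:
    f.write("1\n00:00:00,000 --> 00:00:01,000\nhello\n".encode("utf-8"))

print(file_system.readtext("static/transcript.srt").splitlines()[0])  # 1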
def set_group_anonymous(self, *, group_id, enable=True):
    """
    Set group anonymity
    ------------

    :param int group_id: group number
    :param bool enable: whether to allow anonymous chat
    :return: None
    :rtype: None
    """
    return super().__getattr__('set_group_anonymous') \
        (group_id=group_id, enable=enable)
def function[set_group_anonymous, parameter[self]]:
    constant[
    Set group anonymity
    ------------

    :param int group_id: group number
    :param bool enable: whether to allow anonymous chat
    :return: None
    :rtype: None
    ]
    return[call[call[call[name[super], parameter[]].__getattr__, parameter[constant[set_group_anonymous]]], parameter[]]]
keyword[def] identifier[set_group_anonymous] ( identifier[self] ,*, identifier[group_id] , identifier[enable] = keyword[True] ): literal[string] keyword[return] identifier[super] (). identifier[__getattr__] ( literal[string] )( identifier[group_id] = identifier[group_id] , identifier[enable] = identifier[enable] )
def set_group_anonymous(self, *, group_id, enable=True):
    """
    Set group anonymity
    ------------

    :param int group_id: group number
    :param bool enable: whether to allow anonymous chat
    :return: None
    :rtype: None
    """
    return super().__getattr__('set_group_anonymous')(group_id=group_id, enable=enable)
def urn(self, value: Union[URN, str]):
    """ Set the urn

    :param value: URN to be saved
    :raises: *TypeError* when the value is not URN compatible
    """
    if isinstance(value, str):
        value = URN(value)
    elif not isinstance(value, URN):
        # Report the expected class itself, not type(URN) (which is just `type`).
        raise TypeError("New urn must be string or {} instead of {}".format(URN, type(value)))

    self._urn = value
def function[urn, parameter[self, value]]: constant[ Set the urn :param value: URN to be saved :raises: *TypeError* when the value is not URN compatible ] if call[name[isinstance], parameter[name[value], name[str]]] begin[:] variable[value] assign[=] call[name[URN], parameter[name[value]]] name[self]._urn assign[=] name[value]
keyword[def] identifier[urn] ( identifier[self] , identifier[value] : identifier[Union] [ identifier[URN] , identifier[str] ]): literal[string] keyword[if] identifier[isinstance] ( identifier[value] , identifier[str] ): identifier[value] = identifier[URN] ( identifier[value] ) keyword[elif] keyword[not] identifier[isinstance] ( identifier[value] , identifier[URN] ): keyword[raise] identifier[TypeError] ( literal[string] . identifier[format] ( identifier[URN] , identifier[type] ( identifier[value] ))) identifier[self] . identifier[_urn] = identifier[value]
def urn(self, value: Union[URN, str]):
    """ Set the urn

    :param value: URN to be saved
    :raises: *TypeError* when the value is not URN compatible
    """
    if isinstance(value, str):
        value = URN(value) # depends on [control=['if'], data=[]]
    elif not isinstance(value, URN):
        # Report the expected class itself, not type(URN) (which is just `type`).
        raise TypeError('New urn must be string or {} instead of {}'.format(URN, type(value))) # depends on [control=['if'], data=[]]
    self._urn = value
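The setter above coerces strings and rejects anything that is neither str nor URN. A reduced standalone sketch of the same coerce-or-reject pattern, with a stand-in Urn class rather than the real URN:

class Urn:
    # Hypothetical stand-in for the real URN class.
    def __init__(self, value: str):
        self.value = value

def coerce_urn(value):
    if isinstance(value, str):
        return Urn(value)           # coerce plain strings
    if not isinstance(value, Urn):
        raise TypeError("New urn must be string or {} instead of {}".format(Urn, type(value)))
    return value                    # already a Urn, accept as-is

print(type(coerce_urn("urn:cts:latinLit:phi0959")))  # <class '__main__.Urn'>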
def request(self, method: str, url: StrOrURL, **kwargs: Any) -> '_RequestContextManager': """Perform HTTP request.""" return _RequestContextManager(self._request(method, url, **kwargs))
def function[request, parameter[self, method, url]]: constant[Perform HTTP request.] return[call[name[_RequestContextManager], parameter[call[name[self]._request, parameter[name[method], name[url]]]]]]
keyword[def] identifier[request] ( identifier[self] , identifier[method] : identifier[str] , identifier[url] : identifier[StrOrURL] , ** identifier[kwargs] : identifier[Any] )-> literal[string] : literal[string] keyword[return] identifier[_RequestContextManager] ( identifier[self] . identifier[_request] ( identifier[method] , identifier[url] ,** identifier[kwargs] ))
def request(self, method: str, url: StrOrURL, **kwargs: Any) -> '_RequestContextManager': """Perform HTTP request.""" return _RequestContextManager(self._request(method, url, **kwargs))
def _run_markdownlint(matched_filenames, show_lint_files):
    """Run markdownlint on matched_filenames."""
    from prospector.message import Message, Location

    for filename in matched_filenames:
        _debug_linter_status("mdl", filename, show_lint_files)

    try:
        proc = subprocess.Popen(["mdl"] + matched_filenames,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        lines = proc.communicate()[0].decode().splitlines()
    except OSError as error:
        if error.errno == errno.ENOENT:
            return []
        raise  # propagate other errors instead of continuing without output

    matches = [
        re.match(r"([\w\-.\/\\ ]+)\:([0-9]+)\: (\w+) (.+)", l)
        for l in lines
    ]
    # mdl also emits informational lines; keep only the lines that parsed
    # as "<file>:<lineno>: <code> <message>".
    lines = [m.groups() for m in matches if m]
    return_dict = dict()

    for filename, lineno, code, msg in lines:
        key = _Key(filename, int(lineno), code)
        loc = Location(filename, None, None, int(lineno), 0)
        return_dict[key] = Message("markdownlint", code, loc, msg)

    return return_dict
def function[_run_markdownlint, parameter[matched_filenames, show_lint_files]]: constant[Run markdownlint on matched_filenames.] from relative_module[prospector.message] import module[Message], module[Location] for taget[name[filename]] in starred[name[matched_filenames]] begin[:] call[name[_debug_linter_status], parameter[constant[mdl], name[filename], name[show_lint_files]]] <ast.Try object at 0x7da20e955f00> variable[lines] assign[=] <ast.ListComp object at 0x7da20e9569e0> variable[return_dict] assign[=] call[name[dict], parameter[]] for taget[tuple[[<ast.Name object at 0x7da18f720400>, <ast.Name object at 0x7da18f721b10>, <ast.Name object at 0x7da18f722050>, <ast.Name object at 0x7da18f720340>]]] in starred[name[lines]] begin[:] variable[key] assign[=] call[name[_Key], parameter[name[filename], call[name[int], parameter[name[lineno]]], name[code]]] variable[loc] assign[=] call[name[Location], parameter[name[filename], constant[None], constant[None], call[name[int], parameter[name[lineno]]], constant[0]]] call[name[return_dict]][name[key]] assign[=] call[name[Message], parameter[constant[markdownlint], name[code], name[loc], name[msg]]] return[name[return_dict]]
keyword[def] identifier[_run_markdownlint] ( identifier[matched_filenames] , identifier[show_lint_files] ): literal[string] keyword[from] identifier[prospector] . identifier[message] keyword[import] identifier[Message] , identifier[Location] keyword[for] identifier[filename] keyword[in] identifier[matched_filenames] : identifier[_debug_linter_status] ( literal[string] , identifier[filename] , identifier[show_lint_files] ) keyword[try] : identifier[proc] = identifier[subprocess] . identifier[Popen] ([ literal[string] ]+ identifier[matched_filenames] , identifier[stdout] = identifier[subprocess] . identifier[PIPE] , identifier[stderr] = identifier[subprocess] . identifier[PIPE] ) identifier[lines] = identifier[proc] . identifier[communicate] ()[ literal[int] ]. identifier[decode] (). identifier[splitlines] () keyword[except] identifier[OSError] keyword[as] identifier[error] : keyword[if] identifier[error] . identifier[errno] == identifier[errno] . identifier[ENOENT] : keyword[return] [] identifier[lines] =[ identifier[re] . identifier[match] ( literal[string] , identifier[l] ). identifier[groups] ( literal[int] ) keyword[for] identifier[l] keyword[in] identifier[lines] ] identifier[return_dict] = identifier[dict] () keyword[for] identifier[filename] , identifier[lineno] , identifier[code] , identifier[msg] keyword[in] identifier[lines] : identifier[key] = identifier[_Key] ( identifier[filename] , identifier[int] ( identifier[lineno] ), identifier[code] ) identifier[loc] = identifier[Location] ( identifier[filename] , keyword[None] , keyword[None] , identifier[int] ( identifier[lineno] ), literal[int] ) identifier[return_dict] [ identifier[key] ]= identifier[Message] ( literal[string] , identifier[code] , identifier[loc] , identifier[msg] ) keyword[return] identifier[return_dict]
def _run_markdownlint(matched_filenames, show_lint_files):
    """Run markdownlint on matched_filenames.""" 
    from prospector.message import Message, Location
    for filename in matched_filenames:
        _debug_linter_status('mdl', filename, show_lint_files) # depends on [control=['for'], data=['filename']]
    try:
        proc = subprocess.Popen(['mdl'] + matched_filenames, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        lines = proc.communicate()[0].decode().splitlines() # depends on [control=['try'], data=[]]
    except OSError as error:
        if error.errno == errno.ENOENT:
            return [] # depends on [control=['if'], data=[]]
        raise # propagate other errors instead of continuing without output # depends on [control=['except'], data=['error']]
    matches = [re.match('([\\w\\-.\\/\\\\ ]+)\\:([0-9]+)\\: (\\w+) (.+)', l) for l in lines]
    # mdl also emits informational lines; keep only the lines that parsed
    lines = [m.groups() for m in matches if m]
    return_dict = dict()
    for (filename, lineno, code, msg) in lines:
        key = _Key(filename, int(lineno), code)
        loc = Location(filename, None, None, int(lineno), 0)
        return_dict[key] = Message('markdownlint', code, loc, msg) # depends on [control=['for'], data=[]]
    return return_dict
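A standalone check of the mdl-output regex used above, applied to one representative line; the file path and rule code are made up:

import re

line = "docs/readme.md:12: MD013 Line length"
m = re.match(r"([\w\-.\/\\ ]+)\:([0-9]+)\: (\w+) (.+)", line)
filename, lineno, code, msg = m.groups()
print(filename, int(lineno), code, msg)  # docs/readme.md 12 MD013 Line length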
def bisect_right(a, x, lo=0, hi=None): """Return the index where to insert item x in list a, assuming a is sorted. The return value i is such that all e in a[:i] have e <= x, and all e in a[i:] have e > x. So if x already appears in the list, a.insert(x) will insert just after the rightmost x already there. Optional args lo (default 0) and hi (default len(a)) bound the slice of a to be searched. """ if lo < 0: raise ValueError('lo must be non-negative') if hi is None: hi = len(a) while lo < hi: mid = (lo+hi)//2 if x < a[mid]: hi = mid else: lo = mid+1 return lo
def function[bisect_right, parameter[a, x, lo, hi]]: constant[Return the index where to insert item x in list a, assuming a is sorted. The return value i is such that all e in a[:i] have e <= x, and all e in a[i:] have e > x. So if x already appears in the list, a.insert(x) will insert just after the rightmost x already there. Optional args lo (default 0) and hi (default len(a)) bound the slice of a to be searched. ] if compare[name[lo] less[<] constant[0]] begin[:] <ast.Raise object at 0x7da20c796920> if compare[name[hi] is constant[None]] begin[:] variable[hi] assign[=] call[name[len], parameter[name[a]]] while compare[name[lo] less[<] name[hi]] begin[:] variable[mid] assign[=] binary_operation[binary_operation[name[lo] + name[hi]] <ast.FloorDiv object at 0x7da2590d6bc0> constant[2]] if compare[name[x] less[<] call[name[a]][name[mid]]] begin[:] variable[hi] assign[=] name[mid] return[name[lo]]
keyword[def] identifier[bisect_right] ( identifier[a] , identifier[x] , identifier[lo] = literal[int] , identifier[hi] = keyword[None] ): literal[string] keyword[if] identifier[lo] < literal[int] : keyword[raise] identifier[ValueError] ( literal[string] ) keyword[if] identifier[hi] keyword[is] keyword[None] : identifier[hi] = identifier[len] ( identifier[a] ) keyword[while] identifier[lo] < identifier[hi] : identifier[mid] =( identifier[lo] + identifier[hi] )// literal[int] keyword[if] identifier[x] < identifier[a] [ identifier[mid] ]: identifier[hi] = identifier[mid] keyword[else] : identifier[lo] = identifier[mid] + literal[int] keyword[return] identifier[lo]
def bisect_right(a, x, lo=0, hi=None): """Return the index where to insert item x in list a, assuming a is sorted. The return value i is such that all e in a[:i] have e <= x, and all e in a[i:] have e > x. So if x already appears in the list, a.insert(x) will insert just after the rightmost x already there. Optional args lo (default 0) and hi (default len(a)) bound the slice of a to be searched. """ if lo < 0: raise ValueError('lo must be non-negative') # depends on [control=['if'], data=[]] if hi is None: hi = len(a) # depends on [control=['if'], data=['hi']] while lo < hi: mid = (lo + hi) // 2 if x < a[mid]: hi = mid # depends on [control=['if'], data=[]] else: lo = mid + 1 # depends on [control=['while'], data=['lo', 'hi']] return lo
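A quick demonstration of the right bias, using the bisect_right defined in the row above: with duplicates present it returns the slot just past the last equal element.

a = [1, 2, 2, 2, 5]
print(bisect_right(a, 2))              # 4 -> insert after the rightmost 2
print(bisect_right(a, 0))              # 0 -> before everything
print(bisect_right(a, 2, lo=0, hi=3))  # 3 -> search restricted to a[0:3]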
def data_from_query(self, cmd): """ Callback for .execute_command() for DELETE/GET/HEAD requests """ res = None ckey = "%s /%s" % (self.command, cmd) if not isinstance(self._query_params, dict): self._query_params = {} if ckey in _NCMD: self._cmd = _NCMD[ckey] else: for key in sorted(_RCMD, key=len, reverse=True): if not key.startswith("%s " % self.command): continue m = _RCMD[key].name.match(cmd) if m: self._cmd = _RCMD[key] self._query_params.update(m.groupdict()) break try: if not self._cmd: raise self.req_error(404) if not self._cmd.to_log: self._to_log = False if self._cmd.to_auth: self.authenticate(self._cmd.auth_users) if self._cmd.static: if self._cmd.handler: res = self._cmd.handler(self) return self.static_file(cmd, res) res = self._cmd.handler(self) if not isinstance(res, HttpResponse): return self.response_dumps(res) return res finally: self._query_params = {}
def function[data_from_query, parameter[self, cmd]]: constant[ Callback for .execute_command() for DELETE/GET/HEAD requests ] variable[res] assign[=] constant[None] variable[ckey] assign[=] binary_operation[constant[%s /%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da18f00fa90>, <ast.Name object at 0x7da18f00fe20>]]] if <ast.UnaryOp object at 0x7da18f00ddb0> begin[:] name[self]._query_params assign[=] dictionary[[], []] if compare[name[ckey] in name[_NCMD]] begin[:] name[self]._cmd assign[=] call[name[_NCMD]][name[ckey]] <ast.Try object at 0x7da18dc07e50>
keyword[def] identifier[data_from_query] ( identifier[self] , identifier[cmd] ): literal[string] identifier[res] = keyword[None] identifier[ckey] = literal[string] %( identifier[self] . identifier[command] , identifier[cmd] ) keyword[if] keyword[not] identifier[isinstance] ( identifier[self] . identifier[_query_params] , identifier[dict] ): identifier[self] . identifier[_query_params] ={} keyword[if] identifier[ckey] keyword[in] identifier[_NCMD] : identifier[self] . identifier[_cmd] = identifier[_NCMD] [ identifier[ckey] ] keyword[else] : keyword[for] identifier[key] keyword[in] identifier[sorted] ( identifier[_RCMD] , identifier[key] = identifier[len] , identifier[reverse] = keyword[True] ): keyword[if] keyword[not] identifier[key] . identifier[startswith] ( literal[string] % identifier[self] . identifier[command] ): keyword[continue] identifier[m] = identifier[_RCMD] [ identifier[key] ]. identifier[name] . identifier[match] ( identifier[cmd] ) keyword[if] identifier[m] : identifier[self] . identifier[_cmd] = identifier[_RCMD] [ identifier[key] ] identifier[self] . identifier[_query_params] . identifier[update] ( identifier[m] . identifier[groupdict] ()) keyword[break] keyword[try] : keyword[if] keyword[not] identifier[self] . identifier[_cmd] : keyword[raise] identifier[self] . identifier[req_error] ( literal[int] ) keyword[if] keyword[not] identifier[self] . identifier[_cmd] . identifier[to_log] : identifier[self] . identifier[_to_log] = keyword[False] keyword[if] identifier[self] . identifier[_cmd] . identifier[to_auth] : identifier[self] . identifier[authenticate] ( identifier[self] . identifier[_cmd] . identifier[auth_users] ) keyword[if] identifier[self] . identifier[_cmd] . identifier[static] : keyword[if] identifier[self] . identifier[_cmd] . identifier[handler] : identifier[res] = identifier[self] . identifier[_cmd] . identifier[handler] ( identifier[self] ) keyword[return] identifier[self] . identifier[static_file] ( identifier[cmd] , identifier[res] ) identifier[res] = identifier[self] . identifier[_cmd] . identifier[handler] ( identifier[self] ) keyword[if] keyword[not] identifier[isinstance] ( identifier[res] , identifier[HttpResponse] ): keyword[return] identifier[self] . identifier[response_dumps] ( identifier[res] ) keyword[return] identifier[res] keyword[finally] : identifier[self] . identifier[_query_params] ={}
def data_from_query(self, cmd): """ Callback for .execute_command() for DELETE/GET/HEAD requests """ res = None ckey = '%s /%s' % (self.command, cmd) if not isinstance(self._query_params, dict): self._query_params = {} # depends on [control=['if'], data=[]] if ckey in _NCMD: self._cmd = _NCMD[ckey] # depends on [control=['if'], data=['ckey', '_NCMD']] else: for key in sorted(_RCMD, key=len, reverse=True): if not key.startswith('%s ' % self.command): continue # depends on [control=['if'], data=[]] m = _RCMD[key].name.match(cmd) if m: self._cmd = _RCMD[key] self._query_params.update(m.groupdict()) break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['key']] try: if not self._cmd: raise self.req_error(404) # depends on [control=['if'], data=[]] if not self._cmd.to_log: self._to_log = False # depends on [control=['if'], data=[]] if self._cmd.to_auth: self.authenticate(self._cmd.auth_users) # depends on [control=['if'], data=[]] if self._cmd.static: if self._cmd.handler: res = self._cmd.handler(self) # depends on [control=['if'], data=[]] return self.static_file(cmd, res) # depends on [control=['if'], data=[]] res = self._cmd.handler(self) if not isinstance(res, HttpResponse): return self.response_dumps(res) # depends on [control=['if'], data=[]] return res # depends on [control=['try'], data=[]] finally: self._query_params = {}
def add(self, leaf):
    """
    Add the leaf (transaction) to the log and the merkle tree.

    Note: Currently data is serialised same way for inserting it in the
    log as well as the merkle tree, only difference is the tree needs
    binary data, so the textual (utf-8) representation is converted
    to bytes.
    """
    # Serializing here to avoid serialisation in `_addToStore` and
    # `_addToTree`
    serz_leaf = self.serialize_for_txn_log(leaf)
    self._addToStore(serz_leaf, serialized=True)
    serz_leaf_for_tree = self.serialize_for_tree(leaf)
    merkle_info = self._addToTree(serz_leaf_for_tree, serialized=True)
    return merkle_info
def function[add, parameter[self, leaf]]:
    constant[
    Add the leaf (transaction) to the log and the merkle tree.

    Note: Currently data is serialised same way for inserting it in the
    log as well as the merkle tree, only difference is the tree needs
    binary data, so the textual (utf-8) representation is converted
    to bytes.
    ]
    variable[serz_leaf] assign[=] call[name[self].serialize_for_txn_log, parameter[name[leaf]]]
    call[name[self]._addToStore, parameter[name[serz_leaf]]]
    variable[serz_leaf_for_tree] assign[=] call[name[self].serialize_for_tree, parameter[name[leaf]]]
    variable[merkle_info] assign[=] call[name[self]._addToTree, parameter[name[serz_leaf_for_tree]]]
    return[name[merkle_info]]
keyword[def] identifier[add] ( identifier[self] , identifier[leaf] ): literal[string] identifier[serz_leaf] = identifier[self] . identifier[serialize_for_txn_log] ( identifier[leaf] ) identifier[self] . identifier[_addToStore] ( identifier[serz_leaf] , identifier[serialized] = keyword[True] ) identifier[serz_leaf_for_tree] = identifier[self] . identifier[serialize_for_tree] ( identifier[leaf] ) identifier[merkle_info] = identifier[self] . identifier[_addToTree] ( identifier[serz_leaf_for_tree] , identifier[serialized] = keyword[True] ) keyword[return] identifier[merkle_info]
def add(self, leaf): """ Add the leaf (transaction) to the log and the merkle tree.

    Note: Currently data is serialised same way for inserting it in the
    log as well as the merkle tree, only difference is the tree needs
    binary data, so the textual (utf-8) representation is converted
    to bytes.
    """ # Serializing here to avoid serialisation in `_addToStore` and # `_addToTree` serz_leaf = self.serialize_for_txn_log(leaf) self._addToStore(serz_leaf, serialized=True) serz_leaf_for_tree = self.serialize_for_tree(leaf) merkle_info = self._addToTree(serz_leaf_for_tree, serialized=True) return merkle_info
def update(self, iterable={}, **kwargs):
    """
    Recursively updates self with a given iterable.

    TODO: rewrite this ugly stuff
    """
    def _merge(a, *args):
        for key, value in itertools.chain(*args):
            if key in a and isinstance(value, (dict, Conf)):
                value = _merge(a[key], value.items())
            a[key] = value
        return a

    # adapt the iterable sequence to a unified interface: (key, value)
    if isinstance(iterable, (dict, Conf)):
        iterable = iterable.items()

    # iterate and update values
    _merge(self._data, iterable, kwargs.items())
def function[update, parameter[self, iterable]]:
    constant[
    Recursively updates self with a given iterable.

    TODO: rewrite this ugly stuff
    ]
    def function[_merge, parameter[a]]:
        for taget[tuple[[<ast.Name object at 0x7da1b1320160>, <ast.Name object at 0x7da1b1320580>]]] in starred[call[name[itertools].chain, parameter[<ast.Starred object at 0x7da20c6e7f40>]]] begin[:]
            if <ast.BoolOp object at 0x7da1b26ac190> begin[:]
                variable[value] assign[=] call[name[_merge], parameter[call[name[a]][name[key]], call[name[value].items, parameter[]]]]
            call[name[a]][name[key]] assign[=] name[value]
        return[name[a]]
    if call[name[isinstance], parameter[name[iterable], tuple[[<ast.Name object at 0x7da1b131b3d0>, <ast.Name object at 0x7da1b131b3a0>]]]] begin[:]
        variable[iterable] assign[=] call[name[iterable].items, parameter[]]
    call[name[_merge], parameter[name[self]._data, name[iterable], call[name[kwargs].items, parameter[]]]]
keyword[def] identifier[update] ( identifier[self] , identifier[iterable] ={},** identifier[kwargs] ): literal[string] keyword[def] identifier[_merge] ( identifier[a] ,* identifier[args] ): keyword[for] identifier[key] , identifier[value] keyword[in] identifier[itertools] . identifier[chain] (* identifier[args] ): keyword[if] identifier[key] keyword[in] identifier[a] keyword[and] identifier[isinstance] ( identifier[value] ,( identifier[dict] , identifier[Conf] )): identifier[value] = identifier[_merge] ( identifier[a] [ identifier[key] ], identifier[value] . identifier[items] ()) identifier[a] [ identifier[key] ]= identifier[value] keyword[return] identifier[a] keyword[if] identifier[isinstance] ( identifier[iterable] ,( identifier[dict] , identifier[Conf] )): identifier[iterable] = identifier[iterable] . identifier[items] () identifier[_merge] ( identifier[self] . identifier[_data] , identifier[iterable] , identifier[kwargs] . identifier[items] ())
def update(self, iterable={}, **kwargs):
    """
    Recursively updates self with a given iterable.

    TODO: rewrite this ugly stuff
    """

    def _merge(a, *args):
        for (key, value) in itertools.chain(*args):
            if key in a and isinstance(value, (dict, Conf)):
                value = _merge(a[key], value.items()) # depends on [control=['if'], data=[]]
            a[key] = value # depends on [control=['for'], data=[]]
        return a
    # adapt the iterable sequence to a unified interface: (key, value)
    if isinstance(iterable, (dict, Conf)):
        iterable = iterable.items() # depends on [control=['if'], data=[]]
    # iterate and update values
    _merge(self._data, iterable, kwargs.items())
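The nested merge in update() also works on plain dicts, since _merge only needs `in`, indexing and .items(). Restated standalone (without the Conf type) to show the behaviour:

import itertools

def _merge(a, *args):
    # Merge one or more (key, value) sequences into dict a, recursing
    # into values that are themselves dicts.
    for key, value in itertools.chain(*args):
        if key in a and isinstance(value, dict):
            value = _merge(a[key], value.items())
        a[key] = value
    return a

data = {'db': {'host': 'localhost', 'port': 5432}, 'debug': False}
_merge(data, {'db': {'port': 6432}}.items(), {'debug': True}.items())
print(data)  # {'db': {'host': 'localhost', 'port': 6432}, 'debug': True}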
def _apply_dvportgroup_out_shaping(pg_name, out_shaping, out_shaping_conf): ''' Applies the values in out_shaping_conf to an out_shaping object pg_name The name of the portgroup out_shaping The vim.DVSTrafficShapingPolicy to apply the config to out_shaping_conf The out shaping config ''' log.trace('Building portgroup\'s \'%s\' out shaping policy', pg_name) if out_shaping_conf.get('average_bandwidth'): out_shaping.averageBandwidth = vim.LongPolicy() out_shaping.averageBandwidth.value = \ out_shaping_conf['average_bandwidth'] if out_shaping_conf.get('burst_size'): out_shaping.burstSize = vim.LongPolicy() out_shaping.burstSize.value = out_shaping_conf['burst_size'] if 'enabled' in out_shaping_conf: out_shaping.enabled = vim.BoolPolicy() out_shaping.enabled.value = out_shaping_conf['enabled'] if out_shaping_conf.get('peak_bandwidth'): out_shaping.peakBandwidth = vim.LongPolicy() out_shaping.peakBandwidth.value = out_shaping_conf['peak_bandwidth']
def function[_apply_dvportgroup_out_shaping, parameter[pg_name, out_shaping, out_shaping_conf]]: constant[ Applies the values in out_shaping_conf to an out_shaping object pg_name The name of the portgroup out_shaping The vim.DVSTrafficShapingPolicy to apply the config to out_shaping_conf The out shaping config ] call[name[log].trace, parameter[constant[Building portgroup's '%s' out shaping policy], name[pg_name]]] if call[name[out_shaping_conf].get, parameter[constant[average_bandwidth]]] begin[:] name[out_shaping].averageBandwidth assign[=] call[name[vim].LongPolicy, parameter[]] name[out_shaping].averageBandwidth.value assign[=] call[name[out_shaping_conf]][constant[average_bandwidth]] if call[name[out_shaping_conf].get, parameter[constant[burst_size]]] begin[:] name[out_shaping].burstSize assign[=] call[name[vim].LongPolicy, parameter[]] name[out_shaping].burstSize.value assign[=] call[name[out_shaping_conf]][constant[burst_size]] if compare[constant[enabled] in name[out_shaping_conf]] begin[:] name[out_shaping].enabled assign[=] call[name[vim].BoolPolicy, parameter[]] name[out_shaping].enabled.value assign[=] call[name[out_shaping_conf]][constant[enabled]] if call[name[out_shaping_conf].get, parameter[constant[peak_bandwidth]]] begin[:] name[out_shaping].peakBandwidth assign[=] call[name[vim].LongPolicy, parameter[]] name[out_shaping].peakBandwidth.value assign[=] call[name[out_shaping_conf]][constant[peak_bandwidth]]
keyword[def] identifier[_apply_dvportgroup_out_shaping] ( identifier[pg_name] , identifier[out_shaping] , identifier[out_shaping_conf] ): literal[string] identifier[log] . identifier[trace] ( literal[string] , identifier[pg_name] ) keyword[if] identifier[out_shaping_conf] . identifier[get] ( literal[string] ): identifier[out_shaping] . identifier[averageBandwidth] = identifier[vim] . identifier[LongPolicy] () identifier[out_shaping] . identifier[averageBandwidth] . identifier[value] = identifier[out_shaping_conf] [ literal[string] ] keyword[if] identifier[out_shaping_conf] . identifier[get] ( literal[string] ): identifier[out_shaping] . identifier[burstSize] = identifier[vim] . identifier[LongPolicy] () identifier[out_shaping] . identifier[burstSize] . identifier[value] = identifier[out_shaping_conf] [ literal[string] ] keyword[if] literal[string] keyword[in] identifier[out_shaping_conf] : identifier[out_shaping] . identifier[enabled] = identifier[vim] . identifier[BoolPolicy] () identifier[out_shaping] . identifier[enabled] . identifier[value] = identifier[out_shaping_conf] [ literal[string] ] keyword[if] identifier[out_shaping_conf] . identifier[get] ( literal[string] ): identifier[out_shaping] . identifier[peakBandwidth] = identifier[vim] . identifier[LongPolicy] () identifier[out_shaping] . identifier[peakBandwidth] . identifier[value] = identifier[out_shaping_conf] [ literal[string] ]
def _apply_dvportgroup_out_shaping(pg_name, out_shaping, out_shaping_conf): """ Applies the values in out_shaping_conf to an out_shaping object pg_name The name of the portgroup out_shaping The vim.DVSTrafficShapingPolicy to apply the config to out_shaping_conf The out shaping config """ log.trace("Building portgroup's '%s' out shaping policy", pg_name) if out_shaping_conf.get('average_bandwidth'): out_shaping.averageBandwidth = vim.LongPolicy() out_shaping.averageBandwidth.value = out_shaping_conf['average_bandwidth'] # depends on [control=['if'], data=[]] if out_shaping_conf.get('burst_size'): out_shaping.burstSize = vim.LongPolicy() out_shaping.burstSize.value = out_shaping_conf['burst_size'] # depends on [control=['if'], data=[]] if 'enabled' in out_shaping_conf: out_shaping.enabled = vim.BoolPolicy() out_shaping.enabled.value = out_shaping_conf['enabled'] # depends on [control=['if'], data=['out_shaping_conf']] if out_shaping_conf.get('peak_bandwidth'): out_shaping.peakBandwidth = vim.LongPolicy() out_shaping.peakBandwidth.value = out_shaping_conf['peak_bandwidth'] # depends on [control=['if'], data=[]]
def in_hours(self, when): """ Find if the given :class:`~datetime.datetime` is in business hours for this office. :param datetime.datetime when: The time to check :returns: True if the given time is in business hours for the office, False otherwise. :rtype: bool """ # convert to local timezone when = when.astimezone(self.tz) # is it a work day? if when.weekday() not in self.hours: # not a work day return False # work out if it is one of the ranges for start, end in self.hours[when.weekday()]: if start <= when.time() <= end: # it's in that range # is it a public holiday? check if it is any range for hstart, hend in self.holidays: if when >= hstart and when <= hend: # it's inside a holiday area. return False # not in a holiday zone, which means it's business time. return True # not in any range of hours, and was on a work day return False
def function[in_hours, parameter[self, when]]: constant[ Find if the given :class:`~datetime.datetime` is in business hours for this office. :param datetime.datetime when: The time to check :returns: True if the given time is in business hours for the office, False otherwise. :rtype: bool ] variable[when] assign[=] call[name[when].astimezone, parameter[name[self].tz]] if compare[call[name[when].weekday, parameter[]] <ast.NotIn object at 0x7da2590d7190> name[self].hours] begin[:] return[constant[False]] for taget[tuple[[<ast.Name object at 0x7da1b28de830>, <ast.Name object at 0x7da1b28de9b0>]]] in starred[call[name[self].hours][call[name[when].weekday, parameter[]]]] begin[:] if compare[name[start] less_or_equal[<=] call[name[when].time, parameter[]]] begin[:] for taget[tuple[[<ast.Name object at 0x7da1b2814af0>, <ast.Name object at 0x7da1b2814a90>]]] in starred[name[self].holidays] begin[:] if <ast.BoolOp object at 0x7da1b28149d0> begin[:] return[constant[False]] return[constant[True]] return[constant[False]]
keyword[def] identifier[in_hours] ( identifier[self] , identifier[when] ): literal[string] identifier[when] = identifier[when] . identifier[astimezone] ( identifier[self] . identifier[tz] ) keyword[if] identifier[when] . identifier[weekday] () keyword[not] keyword[in] identifier[self] . identifier[hours] : keyword[return] keyword[False] keyword[for] identifier[start] , identifier[end] keyword[in] identifier[self] . identifier[hours] [ identifier[when] . identifier[weekday] ()]: keyword[if] identifier[start] <= identifier[when] . identifier[time] ()<= identifier[end] : keyword[for] identifier[hstart] , identifier[hend] keyword[in] identifier[self] . identifier[holidays] : keyword[if] identifier[when] >= identifier[hstart] keyword[and] identifier[when] <= identifier[hend] : keyword[return] keyword[False] keyword[return] keyword[True] keyword[return] keyword[False]
def in_hours(self, when): """ Find if the given :class:`~datetime.datetime` is in business hours for this office. :param datetime.datetime when: The time to check :returns: True if the given time is in business hours for the office, False otherwise. :rtype: bool """ # convert to local timezone when = when.astimezone(self.tz) # is it a work day? if when.weekday() not in self.hours: # not a work day return False # depends on [control=['if'], data=[]] # work out if it is one of the ranges for (start, end) in self.hours[when.weekday()]: if start <= when.time() <= end: # it's in that range # is it a public holiday? check if it is any range for (hstart, hend) in self.holidays: if when >= hstart and when <= hend: # it's inside a holiday area. return False # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # not in a holiday zone, which means it's business time. return True # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # not in any range of hours, and was on a work day return False
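A reduced stand-in for the office object that in_hours() expects, enough to exercise the weekday/range/holiday branches; everything is pinned to UTC and the hours/holiday values are illustrative. This assumes the in_hours function above is available at module level:

from datetime import datetime, time, timezone

class Office:
    tz = timezone.utc
    # Monday (0) and Tuesday (1) only, 09:00-17:00
    hours = {0: [(time(9), time(17))], 1: [(time(9), time(17))]}
    holidays = [(datetime(2024, 1, 1, tzinfo=timezone.utc),
                 datetime(2024, 1, 1, 23, 59, tzinfo=timezone.utc))]

Office.in_hours = in_hours  # borrow the method defined in the row above

office = Office()
print(office.in_hours(datetime(2024, 1, 2, 10, 0, tzinfo=timezone.utc)))  # True: Tuesday, in hours
print(office.in_hours(datetime(2024, 1, 1, 10, 0, tzinfo=timezone.utc)))  # False: New Year holiday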
def before_app_websocket(self, func: Callable) -> Callable: """Add a before request websocket to the App. This is designed to be used as a decorator, and has the same arguments as :meth:`~quart.Quart.before_websocket`. It applies to all requests to the app this blueprint is registered on. An example usage, .. code-block:: python blueprint = Blueprint(__name__) @blueprint.before_app_websocket def before(): ... """ self.record_once(lambda state: state.app.before_websocket(func)) return func
def function[before_app_websocket, parameter[self, func]]: constant[Add a before request websocket to the App. This is designed to be used as a decorator, and has the same arguments as :meth:`~quart.Quart.before_websocket`. It applies to all requests to the app this blueprint is registered on. An example usage, .. code-block:: python blueprint = Blueprint(__name__) @blueprint.before_app_websocket def before(): ... ] call[name[self].record_once, parameter[<ast.Lambda object at 0x7da204346e00>]] return[name[func]]
keyword[def] identifier[before_app_websocket] ( identifier[self] , identifier[func] : identifier[Callable] )-> identifier[Callable] : literal[string] identifier[self] . identifier[record_once] ( keyword[lambda] identifier[state] : identifier[state] . identifier[app] . identifier[before_websocket] ( identifier[func] )) keyword[return] identifier[func]
def before_app_websocket(self, func: Callable) -> Callable: """Add a before request websocket to the App. This is designed to be used as a decorator, and has the same arguments as :meth:`~quart.Quart.before_websocket`. It applies to all requests to the app this blueprint is registered on. An example usage, .. code-block:: python blueprint = Blueprint(__name__) @blueprint.before_app_websocket def before(): ... """ self.record_once(lambda state: state.app.before_websocket(func)) return func
def get_elasticsearch_info(): """Check Elasticsearch connection.""" from elasticsearch import ( Elasticsearch, ConnectionError as ESConnectionError ) if hasattr(settings, 'ELASTICSEARCH_URL'): url = settings.ELASTICSEARCH_URL else: return {"status": NO_CONFIG} start = datetime.now() try: search = Elasticsearch(url, request_timeout=TIMEOUT_SECONDS) search.info() except ESConnectionError: return {"status": DOWN} del search # The elasticsearch library has no "close" or "disconnect." micro = (datetime.now() - start).microseconds return { "status": UP, "response_microseconds": micro, }
def function[get_elasticsearch_info, parameter[]]: constant[Check Elasticsearch connection.] from relative_module[elasticsearch] import module[Elasticsearch], module[ConnectionError] if call[name[hasattr], parameter[name[settings], constant[ELASTICSEARCH_URL]]] begin[:] variable[url] assign[=] name[settings].ELASTICSEARCH_URL variable[start] assign[=] call[name[datetime].now, parameter[]] <ast.Try object at 0x7da18c4cc160> <ast.Delete object at 0x7da18eb568c0> variable[micro] assign[=] binary_operation[call[name[datetime].now, parameter[]] - name[start]].microseconds return[dictionary[[<ast.Constant object at 0x7da18eb57940>, <ast.Constant object at 0x7da18eb56440>], [<ast.Name object at 0x7da18eb54220>, <ast.Name object at 0x7da18eb57010>]]]
keyword[def] identifier[get_elasticsearch_info] (): literal[string] keyword[from] identifier[elasticsearch] keyword[import] ( identifier[Elasticsearch] , identifier[ConnectionError] keyword[as] identifier[ESConnectionError] ) keyword[if] identifier[hasattr] ( identifier[settings] , literal[string] ): identifier[url] = identifier[settings] . identifier[ELASTICSEARCH_URL] keyword[else] : keyword[return] { literal[string] : identifier[NO_CONFIG] } identifier[start] = identifier[datetime] . identifier[now] () keyword[try] : identifier[search] = identifier[Elasticsearch] ( identifier[url] , identifier[request_timeout] = identifier[TIMEOUT_SECONDS] ) identifier[search] . identifier[info] () keyword[except] identifier[ESConnectionError] : keyword[return] { literal[string] : identifier[DOWN] } keyword[del] identifier[search] identifier[micro] =( identifier[datetime] . identifier[now] ()- identifier[start] ). identifier[microseconds] keyword[return] { literal[string] : identifier[UP] , literal[string] : identifier[micro] , }
def get_elasticsearch_info(): """Check Elasticsearch connection.""" from elasticsearch import Elasticsearch, ConnectionError as ESConnectionError if hasattr(settings, 'ELASTICSEARCH_URL'): url = settings.ELASTICSEARCH_URL # depends on [control=['if'], data=[]] else: return {'status': NO_CONFIG} start = datetime.now() try: search = Elasticsearch(url, request_timeout=TIMEOUT_SECONDS) search.info() # depends on [control=['try'], data=[]] except ESConnectionError: return {'status': DOWN} # depends on [control=['except'], data=[]] del search # The elasticsearch library has no "close" or "disconnect." micro = (datetime.now() - start).microseconds return {'status': UP, 'response_microseconds': micro}
def find_particles_in_tile(positions, tile): """ Finds the particles in a tile, as numpy.ndarray of ints. Parameters ---------- positions : `numpy.ndarray` [N,3] array of the particle positions to check in the tile tile : :class:`peri.util.Tile` instance Tile of the region inside which to check for particles. Returns ------- numpy.ndarray, int The indices of the particles in the tile. """ bools = tile.contains(positions) return np.arange(bools.size)[bools]
def function[find_particles_in_tile, parameter[positions, tile]]: constant[ Finds the particles in a tile, as numpy.ndarray of ints. Parameters ---------- positions : `numpy.ndarray` [N,3] array of the particle positions to check in the tile tile : :class:`peri.util.Tile` instance Tile of the region inside which to check for particles. Returns ------- numpy.ndarray, int The indices of the particles in the tile. ] variable[bools] assign[=] call[name[tile].contains, parameter[name[positions]]] return[call[call[name[np].arange, parameter[name[bools].size]]][name[bools]]]
keyword[def] identifier[find_particles_in_tile] ( identifier[positions] , identifier[tile] ): literal[string] identifier[bools] = identifier[tile] . identifier[contains] ( identifier[positions] ) keyword[return] identifier[np] . identifier[arange] ( identifier[bools] . identifier[size] )[ identifier[bools] ]
def find_particles_in_tile(positions, tile): """ Finds the particles in a tile, as numpy.ndarray of ints. Parameters ---------- positions : `numpy.ndarray` [N,3] array of the particle positions to check in the tile tile : :class:`peri.util.Tile` instance Tile of the region inside which to check for particles. Returns ------- numpy.ndarray, int The indices of the particles in the tile. """ bools = tile.contains(positions) return np.arange(bools.size)[bools]
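The mask-to-indices step in find_particles_in_tile is equivalent to numpy's flatnonzero. A minimal check with a stand-in tile object (peri's Tile class is not available here, so `contains` is faked with a toy predicate):

import numpy as np

class FakeTile:
    """Stand-in for peri.util.Tile; membership is a toy predicate."""
    def contains(self, positions):
        return positions[:, 0] > 0.5

positions = np.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [2.0, 0.0, 0.0]])
idx = find_particles_in_tile(positions, FakeTile())
assert idx.tolist() == [1, 2]
assert (idx == np.flatnonzero(FakeTile().contains(positions))).all()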
def expand_brackets(s): """Remove whitespace and expand all brackets.""" s = ''.join(s.split()) while True: start = s.find('(') if start == -1: break count = 1 # Number of hanging open brackets p = start + 1 while p < len(s): if s[p] == '(': count += 1 if s[p] == ')': count -= 1 if not count: break p += 1 if count: raise ValueError("Unbalanced parenthesis in '{0}'.".format(s)) if start == 0 or s[start - 1] != '*': s = s[0:start] + s[start + 1:p] + s[p + 1:] else: m = BRACKET_RE.search(s) if m: factor = int(m.group('factor')) matchstart = m.start('factor') s = s[0:matchstart] + (factor - 1) * (s[start + 1:p] + ',') + s[start + 1:p] + s[p + 1:] else: raise ValueError("Failed to parse '{0}'.".format(s)) return s
def function[expand_brackets, parameter[s]]: constant[Remove whitespace and expand all brackets.] variable[s] assign[=] call[constant[].join, parameter[call[name[s].split, parameter[]]]] while constant[True] begin[:] variable[start] assign[=] call[name[s].find, parameter[constant[(]]] if compare[name[start] equal[==] <ast.UnaryOp object at 0x7da1b106e710>] begin[:] break variable[count] assign[=] constant[1] variable[p] assign[=] binary_operation[name[start] + constant[1]] while compare[name[p] less[<] call[name[len], parameter[name[s]]]] begin[:] if compare[call[name[s]][name[p]] equal[==] constant[(]] begin[:] <ast.AugAssign object at 0x7da1b106fc10> if compare[call[name[s]][name[p]] equal[==] constant[)]] begin[:] <ast.AugAssign object at 0x7da1b106d450> if <ast.UnaryOp object at 0x7da1b106e620> begin[:] break <ast.AugAssign object at 0x7da1b106c100> if name[count] begin[:] <ast.Raise object at 0x7da1b106dc60> if <ast.BoolOp object at 0x7da1b106d630> begin[:] variable[s] assign[=] binary_operation[binary_operation[call[name[s]][<ast.Slice object at 0x7da1b106fbb0>] + call[name[s]][<ast.Slice object at 0x7da1b106f040>]] + call[name[s]][<ast.Slice object at 0x7da1b106f4c0>]] return[name[s]]
keyword[def] identifier[expand_brackets] ( identifier[s] ): literal[string] identifier[s] = literal[string] . identifier[join] ( identifier[s] . identifier[split] ()) keyword[while] keyword[True] : identifier[start] = identifier[s] . identifier[find] ( literal[string] ) keyword[if] identifier[start] ==- literal[int] : keyword[break] identifier[count] = literal[int] identifier[p] = identifier[start] + literal[int] keyword[while] identifier[p] < identifier[len] ( identifier[s] ): keyword[if] identifier[s] [ identifier[p] ]== literal[string] : identifier[count] += literal[int] keyword[if] identifier[s] [ identifier[p] ]== literal[string] : identifier[count] -= literal[int] keyword[if] keyword[not] identifier[count] : keyword[break] identifier[p] += literal[int] keyword[if] identifier[count] : keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[s] )) keyword[if] identifier[start] == literal[int] keyword[or] identifier[s] [ identifier[start] - literal[int] ]!= literal[string] : identifier[s] = identifier[s] [ literal[int] : identifier[start] ]+ identifier[s] [ identifier[start] + literal[int] : identifier[p] ]+ identifier[s] [ identifier[p] + literal[int] :] keyword[else] : identifier[m] = identifier[BRACKET_RE] . identifier[search] ( identifier[s] ) keyword[if] identifier[m] : identifier[factor] = identifier[int] ( identifier[m] . identifier[group] ( literal[string] )) identifier[matchstart] = identifier[m] . identifier[start] ( literal[string] ) identifier[s] = identifier[s] [ literal[int] : identifier[matchstart] ]+( identifier[factor] - literal[int] )*( identifier[s] [ identifier[start] + literal[int] : identifier[p] ]+ literal[string] )+ identifier[s] [ identifier[start] + literal[int] : identifier[p] ]+ identifier[s] [ identifier[p] + literal[int] :] keyword[else] : keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[s] )) keyword[return] identifier[s]
def expand_brackets(s): """Remove whitespace and expand all brackets.""" s = ''.join(s.split()) while True: start = s.find('(') if start == -1: break # depends on [control=['if'], data=[]] count = 1 # Number of hanging open brackets p = start + 1 while p < len(s): if s[p] == '(': count += 1 # depends on [control=['if'], data=[]] if s[p] == ')': count -= 1 # depends on [control=['if'], data=[]] if not count: break # depends on [control=['if'], data=[]] p += 1 # depends on [control=['while'], data=['p']] if count: raise ValueError("Unbalanced parenthesis in '{0}'.".format(s)) # depends on [control=['if'], data=[]] if start == 0 or s[start - 1] != '*': s = s[0:start] + s[start + 1:p] + s[p + 1:] # depends on [control=['if'], data=[]] else: m = BRACKET_RE.search(s) if m: factor = int(m.group('factor')) matchstart = m.start('factor') s = s[0:matchstart] + (factor - 1) * (s[start + 1:p] + ',') + s[start + 1:p] + s[p + 1:] # depends on [control=['if'], data=[]] else: raise ValueError("Failed to parse '{0}'.".format(s)) # depends on [control=['while'], data=[]] return s
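expand_brackets references a module-level BRACKET_RE that the snippet does not define. Below is a sketch of a plausible pattern plus two worked expansions; the regex is an illustrative assumption, not the original definition:

import re

# assumed shape of the missing pattern: digits immediately before '*('
BRACKET_RE = re.compile(r'(?P<factor>\d+)\*\(')

assert expand_brackets('(a,b)') == 'a,b'        # bare brackets are unwrapped
assert expand_brackets('2*(a,b)') == 'a,b,a,b'  # the factor repeats the contents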
def numdiff2(f, x0, dv=1e-8):
    '''Returns the derivative of f w.r.t. the multidimensional vector x0

    If x0 is of dimension R1 x ... x Rd, the dimension of f is assumed to be
    of the form S1 x ... x Sf x Rn.

    The last dimension corresponds to various observations.

    The value returned is of dimension: S1 x ... x Sf x R1 x ... x Rd x Rn
    '''

    dd = x0.shape
    f0 = f(x0)
    nobs = f0.shape[-1]
    f_shape = f0.shape[:-1]

    out_shape = f_shape + dd + (nobs,)
    ret = np.zeros(out_shape)
    for ind in product( *[range(i) for i in dd] ):
        x = x0.copy()
        x[ind] += dv
        x2 = x0.copy()
        x2[ind] -= dv
        df = (f(x) - f(x2))/dv/2.0
        # NumPy requires a tuple, not a list, for multidimensional indexing
        obj = tuple([Ellipsis] + list(ind) + [slice(None, None, None)])
        ret[obj] = df

    return ret
def function[numdiff2, parameter[f, x0, dv]]: constant[Returns the derivative of f w.r.t. to multidimensional vector x0 If x0 is of dimension R1 x ... x Rd dimension of f is assumed to be in the form S1 x ... x Sf x Rn. The last dimension corresponds to various observations. The value returned is of dimension : S1 x ... x Sf x R1 x ... x Rd x Rn ] variable[dd] assign[=] name[x0].shape variable[f0] assign[=] call[name[f], parameter[name[x0]]] variable[nobs] assign[=] call[name[f0].shape][<ast.UnaryOp object at 0x7da18bcc9a50>] variable[f_shape] assign[=] call[name[f0].shape][<ast.Slice object at 0x7da18bcc8af0>] variable[out_shape] assign[=] binary_operation[binary_operation[name[f_shape] + name[dd]] + tuple[[<ast.Name object at 0x7da18bcc8520>]]] variable[ret] assign[=] call[name[np].zeros, parameter[name[out_shape]]] for taget[name[ind]] in starred[call[name[product], parameter[<ast.Starred object at 0x7da18bcc9e40>]]] begin[:] variable[x] assign[=] call[name[x0].copy, parameter[]] <ast.AugAssign object at 0x7da20e957e80> variable[x2] assign[=] call[name[x0].copy, parameter[]] <ast.AugAssign object at 0x7da20e955b40> variable[df] assign[=] binary_operation[binary_operation[binary_operation[call[name[f], parameter[name[x]]] - call[name[f], parameter[name[x2]]]] / name[dv]] / constant[2.0]] variable[obj] assign[=] binary_operation[binary_operation[list[[<ast.Name object at 0x7da20e956b60>]] + call[name[list], parameter[name[ind]]]] + list[[<ast.Call object at 0x7da20e954400>]]] call[name[ret]][name[obj]] assign[=] name[df] return[name[ret]]
keyword[def] identifier[numdiff2] ( identifier[f] , identifier[x0] , identifier[dv] = literal[int] ): literal[string] identifier[dd] = identifier[x0] . identifier[shape] identifier[f0] = identifier[f] ( identifier[x0] ) identifier[nobs] = identifier[f0] . identifier[shape] [- literal[int] ] identifier[f_shape] = identifier[f0] . identifier[shape] [:- literal[int] ] identifier[out_shape] = identifier[f_shape] + identifier[dd] +( identifier[nobs] ,) identifier[ret] = identifier[np] . identifier[zeros] ( identifier[out_shape] ) keyword[for] identifier[ind] keyword[in] identifier[product] (*[ identifier[range] ( identifier[i] ) keyword[for] identifier[i] keyword[in] identifier[dd] ]): identifier[x] = identifier[x0] . identifier[copy] () identifier[x] [ identifier[ind] ]+= identifier[dv] identifier[x2] = identifier[x0] . identifier[copy] () identifier[x2] [ identifier[ind] ]-= identifier[dv] identifier[df] =( identifier[f] ( identifier[x] )- identifier[f] ( identifier[x2] ))/ identifier[dv] / literal[int] identifier[obj] =[ identifier[Ellipsis] ]+ identifier[list] ( identifier[ind] )+[ identifier[slice] ( keyword[None] , keyword[None] , keyword[None] )] identifier[ret] [ identifier[obj] ]= identifier[df] keyword[return] identifier[ret]
def numdiff2(f, x0, dv=1e-08): """Returns the derivative of f w.r.t. to multidimensional vector x0 If x0 is of dimension R1 x ... x Rd dimension of f is assumed to be in the form S1 x ... x Sf x Rn. The last dimension corresponds to various observations. The value returned is of dimension : S1 x ... x Sf x R1 x ... x Rd x Rn """ dd = x0.shape f0 = f(x0) nobs = f0.shape[-1] f_shape = f0.shape[:-1] out_shape = f_shape + dd + (nobs,) ret = np.zeros(out_shape) for ind in product(*[range(i) for i in dd]): x = x0.copy() x[ind] += dv x2 = x0.copy() x2[ind] -= dv df = (f(x) - f(x2)) / dv / 2.0 obj = [Ellipsis] + list(ind) + [slice(None, None, None)] #obj = tuple(obj) ret[obj] = df # depends on [control=['for'], data=['ind']] return ret
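A quick sanity check of numdiff2's central differences against an analytic gradient, with the tuple-indexing fix applied; numpy as np and itertools.product are assumed in scope, as the function body requires:

import numpy as np
from itertools import product

f = lambda x: np.array([np.sum(x ** 2)])  # one observation; the gradient is 2*x
x0 = np.array([1.0, 2.0])
J = numdiff2(f, x0)                       # shape (2, 1): R1 x Rn
assert J.shape == (2, 1)
assert np.allclose(J[:, 0], 2 * x0, atol=1e-5)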
def _run_program(self, bin, fastafile, params=None): """ Run MotifSampler and predict motifs from a FASTA file. Parameters ---------- bin : str Command used to run the tool. fastafile : str Name of the FASTA input file. params : dict, optional Optional parameters. For some of the tools required parameters are passed using this dictionary. Returns ------- motifs : list of Motif instances The predicted motifs. stdout : str Standard out of the tool. stderr : str Standard error of the tool. """ params = self._parse_params(params) # TODO: test organism #cmd = "%s -f %s -b %s -m %s -w %s -n %s -o %s -s %s > /dev/null 2>&1" % ( cmd = "%s -f %s -b %s -m %s -w %s -n %s -o %s -s %s" % ( bin, fastafile, params["background_model"], params["pwmfile"], params["width"], params["number"], params["outfile"], params["strand"], ) #print cmd p = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE) stdout, stderr = p.communicate() #stdout,stderr = "","" #p = Popen(cmd, shell=True) #p.wait() motifs = [] if os.path.exists(params["outfile"]): with open(params["outfile"]) as f: motifs = self.parse_out(f) for motif in motifs: motif.id = "%s_%s" % (self.name, motif.id) return motifs, stdout, stderr
def function[_run_program, parameter[self, bin, fastafile, params]]: constant[ Run MotifSampler and predict motifs from a FASTA file. Parameters ---------- bin : str Command used to run the tool. fastafile : str Name of the FASTA input file. params : dict, optional Optional parameters. For some of the tools required parameters are passed using this dictionary. Returns ------- motifs : list of Motif instances The predicted motifs. stdout : str Standard out of the tool. stderr : str Standard error of the tool. ] variable[params] assign[=] call[name[self]._parse_params, parameter[name[params]]] variable[cmd] assign[=] binary_operation[constant[%s -f %s -b %s -m %s -w %s -n %s -o %s -s %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da2044c01c0>, <ast.Name object at 0x7da2044c3b80>, <ast.Subscript object at 0x7da2044c3280>, <ast.Subscript object at 0x7da2044c1ba0>, <ast.Subscript object at 0x7da2044c2cb0>, <ast.Subscript object at 0x7da2044c1450>, <ast.Subscript object at 0x7da2044c1390>, <ast.Subscript object at 0x7da2044c1ea0>]]] variable[p] assign[=] call[name[Popen], parameter[name[cmd]]] <ast.Tuple object at 0x7da2044c0b80> assign[=] call[name[p].communicate, parameter[]] variable[motifs] assign[=] list[[]] if call[name[os].path.exists, parameter[call[name[params]][constant[outfile]]]] begin[:] with call[name[open], parameter[call[name[params]][constant[outfile]]]] begin[:] variable[motifs] assign[=] call[name[self].parse_out, parameter[name[f]]] for taget[name[motif]] in starred[name[motifs]] begin[:] name[motif].id assign[=] binary_operation[constant[%s_%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da2044c02e0>, <ast.Attribute object at 0x7da2044c1180>]]] return[tuple[[<ast.Name object at 0x7da2044c3e20>, <ast.Name object at 0x7da2044c0a30>, <ast.Name object at 0x7da2044c2230>]]]
keyword[def] identifier[_run_program] ( identifier[self] , identifier[bin] , identifier[fastafile] , identifier[params] = keyword[None] ): literal[string] identifier[params] = identifier[self] . identifier[_parse_params] ( identifier[params] ) identifier[cmd] = literal[string] %( identifier[bin] , identifier[fastafile] , identifier[params] [ literal[string] ], identifier[params] [ literal[string] ], identifier[params] [ literal[string] ], identifier[params] [ literal[string] ], identifier[params] [ literal[string] ], identifier[params] [ literal[string] ], ) identifier[p] = identifier[Popen] ( identifier[cmd] , identifier[shell] = keyword[True] , identifier[stdout] = identifier[PIPE] , identifier[stderr] = identifier[PIPE] ) identifier[stdout] , identifier[stderr] = identifier[p] . identifier[communicate] () identifier[motifs] =[] keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[params] [ literal[string] ]): keyword[with] identifier[open] ( identifier[params] [ literal[string] ]) keyword[as] identifier[f] : identifier[motifs] = identifier[self] . identifier[parse_out] ( identifier[f] ) keyword[for] identifier[motif] keyword[in] identifier[motifs] : identifier[motif] . identifier[id] = literal[string] %( identifier[self] . identifier[name] , identifier[motif] . identifier[id] ) keyword[return] identifier[motifs] , identifier[stdout] , identifier[stderr]
def _run_program(self, bin, fastafile, params=None): """ Run MotifSampler and predict motifs from a FASTA file. Parameters ---------- bin : str Command used to run the tool. fastafile : str Name of the FASTA input file. params : dict, optional Optional parameters. For some of the tools required parameters are passed using this dictionary. Returns ------- motifs : list of Motif instances The predicted motifs. stdout : str Standard out of the tool. stderr : str Standard error of the tool. """ params = self._parse_params(params) # TODO: test organism #cmd = "%s -f %s -b %s -m %s -w %s -n %s -o %s -s %s > /dev/null 2>&1" % ( cmd = '%s -f %s -b %s -m %s -w %s -n %s -o %s -s %s' % (bin, fastafile, params['background_model'], params['pwmfile'], params['width'], params['number'], params['outfile'], params['strand']) #print cmd p = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE) (stdout, stderr) = p.communicate() #stdout,stderr = "","" #p = Popen(cmd, shell=True) #p.wait() motifs = [] if os.path.exists(params['outfile']): with open(params['outfile']) as f: motifs = self.parse_out(f) # depends on [control=['with'], data=['f']] # depends on [control=['if'], data=[]] for motif in motifs: motif.id = '%s_%s' % (self.name, motif.id) # depends on [control=['for'], data=['motif']] return (motifs, stdout, stderr)
def uclus(a, b, distance_function):
    """
    Given two collections ``a`` and ``b``, this will return the *median* of
    all distances. ``distance_function`` is used to determine the distance
    between two elements.

    Example::

        >>> uclus([1, 2], [3, 100], lambda x, y: abs(x-y))
        50.0
    """
    distances = sorted([distance_function(x, y) for x in a for y in b])
    midpoint, rest = len(distances) // 2, len(distances) % 2
    if not rest:
        return sum(distances[midpoint-1:midpoint+1]) / 2
    else:
        return distances[midpoint]
def function[uclus, parameter[a, b, distance_function]]: constant[ Given two collections ``a`` and ``b``, this will return the *median* of all distances. ``distance_function`` is used to determine the distance between two elements. Example:: >>> single([1, 2], [3, 100], lambda x, y: abs(x-y)) 2.5 ] variable[distances] assign[=] call[name[sorted], parameter[<ast.ListComp object at 0x7da18f09cbb0>]] <ast.Tuple object at 0x7da18f09ed40> assign[=] tuple[[<ast.BinOp object at 0x7da18f09fa30>, <ast.BinOp object at 0x7da18f09ee00>]] if <ast.UnaryOp object at 0x7da18f09eb60> begin[:] return[binary_operation[call[name[sum], parameter[call[name[distances]][<ast.Slice object at 0x7da18f09f790>]]] / constant[2]]]
keyword[def] identifier[uclus] ( identifier[a] , identifier[b] , identifier[distance_function] ): literal[string] identifier[distances] = identifier[sorted] ([ identifier[distance_function] ( identifier[x] , identifier[y] ) keyword[for] identifier[x] keyword[in] identifier[a] keyword[for] identifier[y] keyword[in] identifier[b] ]) identifier[midpoint] , identifier[rest] = identifier[len] ( identifier[distances] )// literal[int] , identifier[len] ( identifier[distances] )% literal[int] keyword[if] keyword[not] identifier[rest] : keyword[return] identifier[sum] ( identifier[distances] [ identifier[midpoint] - literal[int] : identifier[midpoint] + literal[int] ])/ literal[int] keyword[else] : keyword[return] identifier[distances] [ identifier[midpoint] ]
def uclus(a, b, distance_function): """ Given two collections ``a`` and ``b``, this will return the *median* of all distances. ``distance_function`` is used to determine the distance between two elements. Example:: >>> single([1, 2], [3, 100], lambda x, y: abs(x-y)) 2.5 """ distances = sorted([distance_function(x, y) for x in a for y in b]) (midpoint, rest) = (len(distances) // 2, len(distances) % 2) if not rest: return sum(distances[midpoint - 1:midpoint + 1]) / 2 # depends on [control=['if'], data=[]] else: return distances[midpoint]
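The numbers behind the corrected docstring example: the four pairwise distances between [1, 2] and [3, 100] sort to [1, 2, 98, 99], and since the count is even the two middle values are averaged:

distances = sorted(abs(x - y) for x in [1, 2] for y in [3, 100])
assert distances == [1, 2, 98, 99]
assert uclus([1, 2], [3, 100], lambda x, y: abs(x - y)) == (2 + 98) / 2  # 50.0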
def optimal_distribution(data, distr_to_check=('norm', 'expon', 'uniform')):
    '''Calculate the parameters of a fit of different distributions to a data
    set and return the distribution with the minimal ks-distance.

    Parameters:
        data: array of data points to be fitted

    Options:
        distr_to_check: tuple of distributions to be checked

    Returns:
        FitResults object with fitted parameters, errors and distribution
        type of the fit with the smallest fit distance

    Note:
        Uses Kolmogorov-Smirnov test to estimate distance and p-value.
    '''
    fit_results = [fit(data, d) for d in distr_to_check]
    return min(fit_results, key=lambda fit: fit.errs[0])
def function[optimal_distribution, parameter[data, distr_to_check]]: constant[Calculate the parameters of a fit of different distributions to a data set and returns the distribution of the minimal ks-distance. Parameters: data: array of data points to be fitted Options: distr_to_check: tuple of distributions to be checked Returns: FitResults object with fitted parameters, errors and distribution type of the fit with the smallest fit distance Note: Uses Kolmogorov-Smirnov test to estimate distance and p-value. ] variable[fit_results] assign[=] <ast.ListComp object at 0x7da18bc73880> return[call[name[min], parameter[name[fit_results]]]]
keyword[def] identifier[optimal_distribution] ( identifier[data] , identifier[distr_to_check] =( literal[string] , literal[string] , literal[string] )): literal[string] identifier[fit_results] =[ identifier[fit] ( identifier[data] , identifier[d] ) keyword[for] identifier[d] keyword[in] identifier[distr_to_check] ] keyword[return] identifier[min] ( identifier[fit_results] , identifier[key] = keyword[lambda] identifier[fit] : identifier[fit] . identifier[errs] [ literal[int] ])
def optimal_distribution(data, distr_to_check=('norm', 'expon', 'uniform')): """Calculate the parameters of a fit of different distributions to a data set and returns the distribution of the minimal ks-distance. Parameters: data: array of data points to be fitted Options: distr_to_check: tuple of distributions to be checked Returns: FitResults object with fitted parameters, errors and distribution type of the fit with the smallest fit distance Note: Uses Kolmogorov-Smirnov test to estimate distance and p-value. """ fit_results = [fit(data, d) for d in distr_to_check] return min(fit_results, key=lambda fit: fit.errs[0])
def memory_read32(self, addr, num_words, zone=None): """Reads memory from the target system in units of 32-bits. Args: self (JLink): the ``JLink`` instance addr (int): start address to read from num_words (int): number of words to read zone (str): memory zone to read from Returns: List of words read from the target system. Raises: JLinkException: if memory could not be read """ return self.memory_read(addr, num_words, zone=zone, nbits=32)
def function[memory_read32, parameter[self, addr, num_words, zone]]: constant[Reads memory from the target system in units of 32-bits. Args: self (JLink): the ``JLink`` instance addr (int): start address to read from num_words (int): number of words to read zone (str): memory zone to read from Returns: List of words read from the target system. Raises: JLinkException: if memory could not be read ] return[call[name[self].memory_read, parameter[name[addr], name[num_words]]]]
keyword[def] identifier[memory_read32] ( identifier[self] , identifier[addr] , identifier[num_words] , identifier[zone] = keyword[None] ): literal[string] keyword[return] identifier[self] . identifier[memory_read] ( identifier[addr] , identifier[num_words] , identifier[zone] = identifier[zone] , identifier[nbits] = literal[int] )
def memory_read32(self, addr, num_words, zone=None): """Reads memory from the target system in units of 32-bits. Args: self (JLink): the ``JLink`` instance addr (int): start address to read from num_words (int): number of words to read zone (str): memory zone to read from Returns: List of words read from the target system. Raises: JLinkException: if memory could not be read """ return self.memory_read(addr, num_words, zone=zone, nbits=32)
def make_timestamp(el_time): """ Generate an hour-minutes-seconds timestamp from an interval in seconds. Assumes numeric input of a time interval in seconds. Converts this interval to a string of the format "#h #m #s", indicating the number of hours, minutes, and seconds in the interval. Intervals greater than 24h are unproblematic. Parameters ---------- el_time |int| or |float| -- Time interval in seconds to be converted to h/m/s format Returns ------- stamp |str| -- String timestamp in #h #m #s format """ # Calc hours hrs = el_time // 3600.0 # Calc minutes mins = (el_time % 3600.0) // 60.0 # Calc seconds secs = el_time % 60.0 # Construct timestamp string stamp = "{0}h {1}m {2}s".format(int(hrs), int(mins), int(secs)) # Return return stamp
def function[make_timestamp, parameter[el_time]]: constant[ Generate an hour-minutes-seconds timestamp from an interval in seconds. Assumes numeric input of a time interval in seconds. Converts this interval to a string of the format "#h #m #s", indicating the number of hours, minutes, and seconds in the interval. Intervals greater than 24h are unproblematic. Parameters ---------- el_time |int| or |float| -- Time interval in seconds to be converted to h/m/s format Returns ------- stamp |str| -- String timestamp in #h #m #s format ] variable[hrs] assign[=] binary_operation[name[el_time] <ast.FloorDiv object at 0x7da2590d6bc0> constant[3600.0]] variable[mins] assign[=] binary_operation[binary_operation[name[el_time] <ast.Mod object at 0x7da2590d6920> constant[3600.0]] <ast.FloorDiv object at 0x7da2590d6bc0> constant[60.0]] variable[secs] assign[=] binary_operation[name[el_time] <ast.Mod object at 0x7da2590d6920> constant[60.0]] variable[stamp] assign[=] call[constant[{0}h {1}m {2}s].format, parameter[call[name[int], parameter[name[hrs]]], call[name[int], parameter[name[mins]]], call[name[int], parameter[name[secs]]]]] return[name[stamp]]
keyword[def] identifier[make_timestamp] ( identifier[el_time] ): literal[string] identifier[hrs] = identifier[el_time] // literal[int] identifier[mins] =( identifier[el_time] % literal[int] )// literal[int] identifier[secs] = identifier[el_time] % literal[int] identifier[stamp] = literal[string] . identifier[format] ( identifier[int] ( identifier[hrs] ), identifier[int] ( identifier[mins] ), identifier[int] ( identifier[secs] )) keyword[return] identifier[stamp]
def make_timestamp(el_time): """ Generate an hour-minutes-seconds timestamp from an interval in seconds. Assumes numeric input of a time interval in seconds. Converts this interval to a string of the format "#h #m #s", indicating the number of hours, minutes, and seconds in the interval. Intervals greater than 24h are unproblematic. Parameters ---------- el_time |int| or |float| -- Time interval in seconds to be converted to h/m/s format Returns ------- stamp |str| -- String timestamp in #h #m #s format """ # Calc hours hrs = el_time // 3600.0 # Calc minutes mins = el_time % 3600.0 // 60.0 # Calc seconds secs = el_time % 60.0 # Construct timestamp string stamp = '{0}h {1}m {2}s'.format(int(hrs), int(mins), int(secs)) # Return return stamp
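Two quick examples of make_timestamp; note that intervals longer than a day simply keep counting hours, as the docstring promises:

assert make_timestamp(3725) == '1h 2m 5s'    # 3725 s = 1*3600 + 2*60 + 5
assert make_timestamp(90000) == '25h 0m 0s'  # > 24 h, no day rollover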
def auto_memoize(func):
    """
    Based on django.util.functional.memoize. Automatically memoizes instance
    methods for the lifespan of an object. Only works with methods taking
    non-keyword arguments. Note that the args to the function must be usable
    as dictionary keys. Also, the first argument MUST be self. This decorator
    will not work for functions or class methods, only object methods.
    """
    @wraps(func)
    def wrapper(*args):
        inst = args[0]
        inst._memoized_values = getattr(inst, '_memoized_values', {})
        key = (func, args[1:])
        if key not in inst._memoized_values:
            inst._memoized_values[key] = func(*args)
        return inst._memoized_values[key]
    return wrapper
def function[auto_memoize, parameter[func]]: constant[ Based on django.util.functional.memoize. Automatically memoizes instace methods for the lifespan of an object. Only works with methods taking non-keword arguments. Note that the args to the function must be usable as dictionary keys. Also, the first argument MUST be self. This decorator will not work for functions or class methods, only object methods. ] def function[wrapper, parameter[]]: variable[inst] assign[=] call[name[args]][constant[0]] name[inst]._memoized_values assign[=] call[name[getattr], parameter[name[inst], constant[_memoized_values], dictionary[[], []]]] variable[key] assign[=] tuple[[<ast.Name object at 0x7da18fe925c0>, <ast.Subscript object at 0x7da18fe92590>]] if compare[name[key] <ast.NotIn object at 0x7da2590d7190> name[inst]._memoized_values] begin[:] call[name[inst]._memoized_values][name[key]] assign[=] call[name[func], parameter[<ast.Starred object at 0x7da18fe93e50>]] return[call[name[inst]._memoized_values][name[key]]] return[name[wrapper]]
keyword[def] identifier[auto_memoize] ( identifier[func] ): literal[string] @ identifier[wraps] ( identifier[func] ) keyword[def] identifier[wrapper] (* identifier[args] ): identifier[inst] = identifier[args] [ literal[int] ] identifier[inst] . identifier[_memoized_values] = identifier[getattr] ( identifier[inst] , literal[string] ,{}) identifier[key] =( identifier[func] , identifier[args] [ literal[int] :]) keyword[if] identifier[key] keyword[not] keyword[in] identifier[inst] . identifier[_memoized_values] : identifier[inst] . identifier[_memoized_values] [ identifier[key] ]= identifier[func] (* identifier[args] ) keyword[return] identifier[inst] . identifier[_memoized_values] [ identifier[key] ] keyword[return] identifier[wrapper]
def auto_memoize(func): """ Based on django.util.functional.memoize. Automatically memoizes instace methods for the lifespan of an object. Only works with methods taking non-keword arguments. Note that the args to the function must be usable as dictionary keys. Also, the first argument MUST be self. This decorator will not work for functions or class methods, only object methods. """ @wraps(func) def wrapper(*args): inst = args[0] inst._memoized_values = getattr(inst, '_memoized_values', {}) key = (func, args[1:]) if key not in inst._memoized_values: inst._memoized_values[key] = func(*args) # depends on [control=['if'], data=['key']] return inst._memoized_values[key] return wrapper
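A usage sketch for auto_memoize (``from functools import wraps`` must be in scope, as the decorator body implies); repeat calls with the same positional arguments are served from the per-instance cache:

from functools import wraps

class Circle:
    def __init__(self, r):
        self.r = r

    @auto_memoize
    def area(self, precision):
        return round(3.141592653589793 * self.r ** 2, precision)

c = Circle(2)
assert c.area(3) == 12.566
assert c.area(3) == 12.566           # second call hits c._memoized_values
assert len(c._memoized_values) == 1  # one cached entry for args (3,)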
def list(gandi, id, altnames, csr, cert, all_status, status, dates, limit): """ List certificates. """ options = {'items_per_page': limit} if not all_status: options['status'] = ['valid', 'pending'] output_keys = ['cn', 'plan'] if id: output_keys.append('id') if status: output_keys.append('status') if dates: output_keys.extend(['date_created', 'date_end']) if altnames: output_keys.append('altnames') if csr: output_keys.append('csr') if cert: output_keys.append('cert') result = gandi.certificate.list(options) for num, cert in enumerate(result): if num: gandi.separator_line() cert['plan'] = package_desc(gandi, cert['package']) output_cert(gandi, cert, output_keys) return result
def function[list, parameter[gandi, id, altnames, csr, cert, all_status, status, dates, limit]]: constant[ List certificates. ] variable[options] assign[=] dictionary[[<ast.Constant object at 0x7da18eb54f10>], [<ast.Name object at 0x7da18eb54220>]] if <ast.UnaryOp object at 0x7da18eb57010> begin[:] call[name[options]][constant[status]] assign[=] list[[<ast.Constant object at 0x7da18eb56bc0>, <ast.Constant object at 0x7da18eb565c0>]] variable[output_keys] assign[=] list[[<ast.Constant object at 0x7da18eb563b0>, <ast.Constant object at 0x7da18eb550f0>]] if name[id] begin[:] call[name[output_keys].append, parameter[constant[id]]] if name[status] begin[:] call[name[output_keys].append, parameter[constant[status]]] if name[dates] begin[:] call[name[output_keys].extend, parameter[list[[<ast.Constant object at 0x7da18dc04220>, <ast.Constant object at 0x7da18dc05150>]]]] if name[altnames] begin[:] call[name[output_keys].append, parameter[constant[altnames]]] if name[csr] begin[:] call[name[output_keys].append, parameter[constant[csr]]] if name[cert] begin[:] call[name[output_keys].append, parameter[constant[cert]]] variable[result] assign[=] call[name[gandi].certificate.list, parameter[name[options]]] for taget[tuple[[<ast.Name object at 0x7da20c7ca7d0>, <ast.Name object at 0x7da20c7ca410>]]] in starred[call[name[enumerate], parameter[name[result]]]] begin[:] if name[num] begin[:] call[name[gandi].separator_line, parameter[]] call[name[cert]][constant[plan]] assign[=] call[name[package_desc], parameter[name[gandi], call[name[cert]][constant[package]]]] call[name[output_cert], parameter[name[gandi], name[cert], name[output_keys]]] return[name[result]]
keyword[def] identifier[list] ( identifier[gandi] , identifier[id] , identifier[altnames] , identifier[csr] , identifier[cert] , identifier[all_status] , identifier[status] , identifier[dates] , identifier[limit] ): literal[string] identifier[options] ={ literal[string] : identifier[limit] } keyword[if] keyword[not] identifier[all_status] : identifier[options] [ literal[string] ]=[ literal[string] , literal[string] ] identifier[output_keys] =[ literal[string] , literal[string] ] keyword[if] identifier[id] : identifier[output_keys] . identifier[append] ( literal[string] ) keyword[if] identifier[status] : identifier[output_keys] . identifier[append] ( literal[string] ) keyword[if] identifier[dates] : identifier[output_keys] . identifier[extend] ([ literal[string] , literal[string] ]) keyword[if] identifier[altnames] : identifier[output_keys] . identifier[append] ( literal[string] ) keyword[if] identifier[csr] : identifier[output_keys] . identifier[append] ( literal[string] ) keyword[if] identifier[cert] : identifier[output_keys] . identifier[append] ( literal[string] ) identifier[result] = identifier[gandi] . identifier[certificate] . identifier[list] ( identifier[options] ) keyword[for] identifier[num] , identifier[cert] keyword[in] identifier[enumerate] ( identifier[result] ): keyword[if] identifier[num] : identifier[gandi] . identifier[separator_line] () identifier[cert] [ literal[string] ]= identifier[package_desc] ( identifier[gandi] , identifier[cert] [ literal[string] ]) identifier[output_cert] ( identifier[gandi] , identifier[cert] , identifier[output_keys] ) keyword[return] identifier[result]
def list(gandi, id, altnames, csr, cert, all_status, status, dates, limit): """ List certificates. """ options = {'items_per_page': limit} if not all_status: options['status'] = ['valid', 'pending'] # depends on [control=['if'], data=[]] output_keys = ['cn', 'plan'] if id: output_keys.append('id') # depends on [control=['if'], data=[]] if status: output_keys.append('status') # depends on [control=['if'], data=[]] if dates: output_keys.extend(['date_created', 'date_end']) # depends on [control=['if'], data=[]] if altnames: output_keys.append('altnames') # depends on [control=['if'], data=[]] if csr: output_keys.append('csr') # depends on [control=['if'], data=[]] if cert: output_keys.append('cert') # depends on [control=['if'], data=[]] result = gandi.certificate.list(options) for (num, cert) in enumerate(result): if num: gandi.separator_line() # depends on [control=['if'], data=[]] cert['plan'] = package_desc(gandi, cert['package']) output_cert(gandi, cert, output_keys) # depends on [control=['for'], data=[]] return result
def _int(int_or_str: Any) -> int: "return an integer where a single character string may be expected" if isinstance(int_or_str, str): return ord(int_or_str) if isinstance(int_or_str, bytes): return int_or_str[0] return int(int_or_str)
def function[_int, parameter[int_or_str]]: constant[return an integer where a single character string may be expected] if call[name[isinstance], parameter[name[int_or_str], name[str]]] begin[:] return[call[name[ord], parameter[name[int_or_str]]]] if call[name[isinstance], parameter[name[int_or_str], name[bytes]]] begin[:] return[call[name[int_or_str]][constant[0]]] return[call[name[int], parameter[name[int_or_str]]]]
keyword[def] identifier[_int] ( identifier[int_or_str] : identifier[Any] )-> identifier[int] : literal[string] keyword[if] identifier[isinstance] ( identifier[int_or_str] , identifier[str] ): keyword[return] identifier[ord] ( identifier[int_or_str] ) keyword[if] identifier[isinstance] ( identifier[int_or_str] , identifier[bytes] ): keyword[return] identifier[int_or_str] [ literal[int] ] keyword[return] identifier[int] ( identifier[int_or_str] )
def _int(int_or_str: Any) -> int: """return an integer where a single character string may be expected""" if isinstance(int_or_str, str): return ord(int_or_str) # depends on [control=['if'], data=[]] if isinstance(int_or_str, bytes): return int_or_str[0] # depends on [control=['if'], data=[]] return int(int_or_str)
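The three accepted input kinds of _int all land on the same integer:

assert _int('A') == 65     # single-character str -> code point
assert _int(b'A') == 65    # bytes -> first byte
assert _int(65.0) == 65    # anything else goes through int()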
def unindex_objects(mapping_type, ids, es=None, index=None): """Remove documents of a specified mapping_type from the index. This allows for asynchronous deleting. If a mapping_type extends Indexable, you can add a ``pre_delete`` hook for the model that it's based on like this:: @receiver(dbsignals.pre_delete, sender=MyModel) def remove_from_index(sender, instance, **kw): from elasticutils.contrib.django import tasks tasks.unindex_objects.delay(MyMappingType, [instance.id]) :arg mapping_type: the mapping type for these ids :arg ids: the list of ids of things to remove :arg es: The `Elasticsearch` to use. If you don't specify an `Elasticsearch`, it'll use `mapping_type.get_es()`. :arg index: The name of the index to use. If you don't specify one it'll use `mapping_type.get_index()`. """ if settings.ES_DISABLED: return for id_ in ids: mapping_type.unindex(id_, es=es, index=index)
def function[unindex_objects, parameter[mapping_type, ids, es, index]]: constant[Remove documents of a specified mapping_type from the index. This allows for asynchronous deleting. If a mapping_type extends Indexable, you can add a ``pre_delete`` hook for the model that it's based on like this:: @receiver(dbsignals.pre_delete, sender=MyModel) def remove_from_index(sender, instance, **kw): from elasticutils.contrib.django import tasks tasks.unindex_objects.delay(MyMappingType, [instance.id]) :arg mapping_type: the mapping type for these ids :arg ids: the list of ids of things to remove :arg es: The `Elasticsearch` to use. If you don't specify an `Elasticsearch`, it'll use `mapping_type.get_es()`. :arg index: The name of the index to use. If you don't specify one it'll use `mapping_type.get_index()`. ] if name[settings].ES_DISABLED begin[:] return[None] for taget[name[id_]] in starred[name[ids]] begin[:] call[name[mapping_type].unindex, parameter[name[id_]]]
keyword[def] identifier[unindex_objects] ( identifier[mapping_type] , identifier[ids] , identifier[es] = keyword[None] , identifier[index] = keyword[None] ): literal[string] keyword[if] identifier[settings] . identifier[ES_DISABLED] : keyword[return] keyword[for] identifier[id_] keyword[in] identifier[ids] : identifier[mapping_type] . identifier[unindex] ( identifier[id_] , identifier[es] = identifier[es] , identifier[index] = identifier[index] )
def unindex_objects(mapping_type, ids, es=None, index=None): """Remove documents of a specified mapping_type from the index. This allows for asynchronous deleting. If a mapping_type extends Indexable, you can add a ``pre_delete`` hook for the model that it's based on like this:: @receiver(dbsignals.pre_delete, sender=MyModel) def remove_from_index(sender, instance, **kw): from elasticutils.contrib.django import tasks tasks.unindex_objects.delay(MyMappingType, [instance.id]) :arg mapping_type: the mapping type for these ids :arg ids: the list of ids of things to remove :arg es: The `Elasticsearch` to use. If you don't specify an `Elasticsearch`, it'll use `mapping_type.get_es()`. :arg index: The name of the index to use. If you don't specify one it'll use `mapping_type.get_index()`. """ if settings.ES_DISABLED: return # depends on [control=['if'], data=[]] for id_ in ids: mapping_type.unindex(id_, es=es, index=index) # depends on [control=['for'], data=['id_']]
def _selu(attrs, inputs, proto_obj):
    """Selu activation: translated to a LeakyReLU operator with act_type 'selu'."""
    new_attrs = translation_utils._add_extra_attributes(attrs, {'act_type': 'selu'})
    return 'LeakyReLU', new_attrs, inputs
def function[_selu, parameter[attrs, inputs, proto_obj]]: constant[Selu function] variable[new_attrs] assign[=] call[name[translation_utils]._add_extra_attributes, parameter[name[attrs], dictionary[[<ast.Constant object at 0x7da1b2029ba0>], [<ast.Constant object at 0x7da1b202a740>]]]] return[tuple[[<ast.Constant object at 0x7da1b2028bb0>, <ast.Name object at 0x7da1b2028fa0>, <ast.Name object at 0x7da1b2028490>]]]
keyword[def] identifier[_selu] ( identifier[attrs] , identifier[inputs] , identifier[proto_obj] ): literal[string] identifier[new_attrs] = identifier[translation_utils] . identifier[_add_extra_attributes] ( identifier[attrs] ,{ literal[string] : literal[string] }) keyword[return] literal[string] , identifier[new_attrs] , identifier[inputs]
def _selu(attrs, inputs, proto_obj): """Selu function""" new_attrs = translation_utils._add_extra_attributes(attrs, {'act_type': 'selu'}) return ('LeakyReLU', new_attrs, inputs)
def insert_cylinder(im, xyz0, xyz1, r):
    r"""
    Inserts a cylinder of given radius onto a given image

    Parameters
    ----------
    im : array_like
        Original voxelated image
    xyz0, xyz1 : 3-by-1 array_like
        Voxel coordinates of the two end points of the cylinder
    r : int
        Radius of the cylinder

    Returns
    -------
    im : ND-array
        Original voxelated image overlaid with the cylinder

    Notes
    -----
    This function is only implemented for 3D images
    """
    if im.ndim != 3:
        raise Exception('This function is only implemented for 3D images')
    # Converting coordinates to numpy array
    xyz0, xyz1 = [sp.array(xyz).astype(int) for xyz in (xyz0, xyz1)]
    r = int(r)
    L = sp.absolute(xyz0 - xyz1).max() + 1
    xyz_line = [sp.linspace(xyz0[i], xyz1[i], L).astype(int) for i in range(3)]
    xyz_min = sp.amin(xyz_line, axis=1) - r
    xyz_max = sp.amax(xyz_line, axis=1) + r
    shape_template = xyz_max - xyz_min + 1
    template = sp.zeros(shape=shape_template)
    # Shortcut for orthogonal cylinders
    if (xyz0 == xyz1).sum() == 2:
        unique_dim = [xyz0[i] != xyz1[i] for i in range(3)].index(True)
        shape_template[unique_dim] = 1
        template_2D = disk(radius=r).reshape(shape_template)
        template = sp.repeat(template_2D, repeats=L, axis=unique_dim)
        xyz_min[unique_dim] += r
        xyz_max[unique_dim] += -r
    else:
        xyz_line_in_template_coords = [xyz_line[i] - xyz_min[i] for i in range(3)]
        template[tuple(xyz_line_in_template_coords)] = 1
        template = spim.distance_transform_edt(template == 0) <= r

    im[xyz_min[0]:xyz_max[0]+1,
       xyz_min[1]:xyz_max[1]+1,
       xyz_min[2]:xyz_max[2]+1] += template

    return im
def function[insert_cylinder, parameter[im, xyz0, xyz1, r]]: constant[ Inserts a cylinder of given radius onto a given image Parameters ---------- im : array_like Original voxelated image xyz0, xyz1 : 3-by-1 array_like Voxel coordinates of the two end points of the cylinder r : int Radius of the cylinder Returns ------- im : ND-array Original voxelated image overlayed with the cylinder Notes ----- This function is only implemented for 3D images ] if compare[name[im].ndim not_equal[!=] constant[3]] begin[:] <ast.Raise object at 0x7da1b07f8bb0> <ast.Tuple object at 0x7da1b07fa620> assign[=] <ast.ListComp object at 0x7da1b07fa770> variable[r] assign[=] call[name[int], parameter[name[r]]] variable[L] assign[=] binary_operation[call[call[name[sp].absolute, parameter[binary_operation[name[xyz0] - name[xyz1]]]].max, parameter[]] + constant[1]] variable[xyz_line] assign[=] <ast.ListComp object at 0x7da1b07faad0> variable[xyz_min] assign[=] binary_operation[call[name[sp].amin, parameter[name[xyz_line]]] - name[r]] variable[xyz_max] assign[=] binary_operation[call[name[sp].amax, parameter[name[xyz_line]]] + name[r]] variable[shape_template] assign[=] binary_operation[binary_operation[name[xyz_max] - name[xyz_min]] + constant[1]] variable[template] assign[=] call[name[sp].zeros, parameter[]] if compare[call[compare[name[xyz0] equal[==] name[xyz1]].sum, parameter[]] equal[==] constant[2]] begin[:] variable[unique_dim] assign[=] call[<ast.ListComp object at 0x7da1b07fa5c0>.index, parameter[constant[True]]] call[name[shape_template]][name[unique_dim]] assign[=] constant[1] variable[template_2D] assign[=] call[call[name[disk], parameter[]].reshape, parameter[name[shape_template]]] variable[template] assign[=] call[name[sp].repeat, parameter[name[template_2D]]] <ast.AugAssign object at 0x7da1b07fb8e0> <ast.AugAssign object at 0x7da1b07fbd90> <ast.AugAssign object at 0x7da1b07fa890> return[name[im]]
keyword[def] identifier[insert_cylinder] ( identifier[im] , identifier[xyz0] , identifier[xyz1] , identifier[r] ): literal[string] keyword[if] identifier[im] . identifier[ndim] != literal[int] : keyword[raise] identifier[Exception] ( literal[string] ) identifier[xyz0] , identifier[xyz1] =[ identifier[sp] . identifier[array] ( identifier[xyz] ). identifier[astype] ( identifier[int] ) keyword[for] identifier[xyz] keyword[in] ( identifier[xyz0] , identifier[xyz1] )] identifier[r] = identifier[int] ( identifier[r] ) identifier[L] = identifier[sp] . identifier[absolute] ( identifier[xyz0] - identifier[xyz1] ). identifier[max] ()+ literal[int] identifier[xyz_line] =[ identifier[sp] . identifier[linspace] ( identifier[xyz0] [ identifier[i] ], identifier[xyz1] [ identifier[i] ], identifier[L] ). identifier[astype] ( identifier[int] ) keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] )] identifier[xyz_min] = identifier[sp] . identifier[amin] ( identifier[xyz_line] , identifier[axis] = literal[int] )- identifier[r] identifier[xyz_max] = identifier[sp] . identifier[amax] ( identifier[xyz_line] , identifier[axis] = literal[int] )+ identifier[r] identifier[shape_template] = identifier[xyz_max] - identifier[xyz_min] + literal[int] identifier[template] = identifier[sp] . identifier[zeros] ( identifier[shape] = identifier[shape_template] ) keyword[if] ( identifier[xyz0] == identifier[xyz1] ). identifier[sum] ()== literal[int] : identifier[unique_dim] =[ identifier[xyz0] [ identifier[i] ]!= identifier[xyz1] [ identifier[i] ] keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] )]. identifier[index] ( keyword[True] ) identifier[shape_template] [ identifier[unique_dim] ]= literal[int] identifier[template_2D] = identifier[disk] ( identifier[radius] = identifier[r] ). identifier[reshape] ( identifier[shape_template] ) identifier[template] = identifier[sp] . identifier[repeat] ( identifier[template_2D] , identifier[repeats] = identifier[L] , identifier[axis] = identifier[unique_dim] ) identifier[xyz_min] [ identifier[unique_dim] ]+= identifier[r] identifier[xyz_max] [ identifier[unique_dim] ]+=- identifier[r] keyword[else] : identifier[xyz_line_in_template_coords] =[ identifier[xyz_line] [ identifier[i] ]- identifier[xyz_min] [ identifier[i] ] keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] )] identifier[template] [ identifier[tuple] ( identifier[xyz_line_in_template_coords] )]= literal[int] identifier[template] = identifier[spim] . identifier[distance_transform_edt] ( identifier[template] == literal[int] )<= identifier[r] identifier[im] [ identifier[xyz_min] [ literal[int] ]: identifier[xyz_max] [ literal[int] ]+ literal[int] , identifier[xyz_min] [ literal[int] ]: identifier[xyz_max] [ literal[int] ]+ literal[int] , identifier[xyz_min] [ literal[int] ]: identifier[xyz_max] [ literal[int] ]+ literal[int] ]+= identifier[template] keyword[return] identifier[im]
def insert_cylinder(im, xyz0, xyz1, r): """ Inserts a cylinder of given radius onto a given image Parameters ---------- im : array_like Original voxelated image xyz0, xyz1 : 3-by-1 array_like Voxel coordinates of the two end points of the cylinder r : int Radius of the cylinder Returns ------- im : ND-array Original voxelated image overlayed with the cylinder Notes ----- This function is only implemented for 3D images """ if im.ndim != 3: raise Exception('This function is only implemented for 3D images') # depends on [control=['if'], data=[]] # Converting coordinates to numpy array (xyz0, xyz1) = [sp.array(xyz).astype(int) for xyz in (xyz0, xyz1)] r = int(r) L = sp.absolute(xyz0 - xyz1).max() + 1 xyz_line = [sp.linspace(xyz0[i], xyz1[i], L).astype(int) for i in range(3)] xyz_min = sp.amin(xyz_line, axis=1) - r xyz_max = sp.amax(xyz_line, axis=1) + r shape_template = xyz_max - xyz_min + 1 template = sp.zeros(shape=shape_template) # Shortcut for orthogonal cylinders if (xyz0 == xyz1).sum() == 2: unique_dim = [xyz0[i] != xyz1[i] for i in range(3)].index(True) shape_template[unique_dim] = 1 template_2D = disk(radius=r).reshape(shape_template) template = sp.repeat(template_2D, repeats=L, axis=unique_dim) xyz_min[unique_dim] += r xyz_max[unique_dim] += -r # depends on [control=['if'], data=[]] else: xyz_line_in_template_coords = [xyz_line[i] - xyz_min[i] for i in range(3)] template[tuple(xyz_line_in_template_coords)] = 1 template = spim.distance_transform_edt(template == 0) <= r im[xyz_min[0]:xyz_max[0] + 1, xyz_min[1]:xyz_max[1] + 1, xyz_min[2]:xyz_max[2] + 1] += template return im
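A usage sketch for insert_cylinder's orthogonal shortcut path. The body relies on scipy-style array calls via ``sp``, on ``scipy.ndimage`` as ``spim`` and on ``skimage.morphology.disk``, so those imports are assumptions here (``numpy as sp`` stands in for the old scipy-as-numpy aliasing):

import numpy as sp                 # the snippet uses sp for numpy-style array ops
import scipy.ndimage as spim
from skimage.morphology import disk

im = sp.zeros((11, 11, 11))
out = insert_cylinder(im, xyz0=(5, 5, 2), xyz1=(5, 5, 8), r=2)
assert out[5, 5, 5] == 1           # on-axis voxel is filled
assert out[0, 0, 0] == 0           # far corner is untouched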
def _get_filters(nodes, context): """Get filters to apply to a list of SqlNodes. Args: nodes: List[SqlNode], the SqlNodes to get filters for. context: CompilationContext, global compilation state and metadata. Returns: List[Expression], list of SQLAlchemy expressions. """ filters = [] for node in nodes: for filter_block in sql_context_helpers.get_filters(node, context): filter_sql_expression = _transform_filter_to_sql(filter_block, node, context) filters.append(filter_sql_expression) return filters
def function[_get_filters, parameter[nodes, context]]: constant[Get filters to apply to a list of SqlNodes. Args: nodes: List[SqlNode], the SqlNodes to get filters for. context: CompilationContext, global compilation state and metadata. Returns: List[Expression], list of SQLAlchemy expressions. ] variable[filters] assign[=] list[[]] for taget[name[node]] in starred[name[nodes]] begin[:] for taget[name[filter_block]] in starred[call[name[sql_context_helpers].get_filters, parameter[name[node], name[context]]]] begin[:] variable[filter_sql_expression] assign[=] call[name[_transform_filter_to_sql], parameter[name[filter_block], name[node], name[context]]] call[name[filters].append, parameter[name[filter_sql_expression]]] return[name[filters]]
keyword[def] identifier[_get_filters] ( identifier[nodes] , identifier[context] ): literal[string] identifier[filters] =[] keyword[for] identifier[node] keyword[in] identifier[nodes] : keyword[for] identifier[filter_block] keyword[in] identifier[sql_context_helpers] . identifier[get_filters] ( identifier[node] , identifier[context] ): identifier[filter_sql_expression] = identifier[_transform_filter_to_sql] ( identifier[filter_block] , identifier[node] , identifier[context] ) identifier[filters] . identifier[append] ( identifier[filter_sql_expression] ) keyword[return] identifier[filters]
def _get_filters(nodes, context): """Get filters to apply to a list of SqlNodes. Args: nodes: List[SqlNode], the SqlNodes to get filters for. context: CompilationContext, global compilation state and metadata. Returns: List[Expression], list of SQLAlchemy expressions. """ filters = [] for node in nodes: for filter_block in sql_context_helpers.get_filters(node, context): filter_sql_expression = _transform_filter_to_sql(filter_block, node, context) filters.append(filter_sql_expression) # depends on [control=['for'], data=['filter_block']] # depends on [control=['for'], data=['node']] return filters
def set_item(key, value):
    """Write the JSON-encoded value to the cached file for the given key and return the value."""
    CACHED_KEY_FILE = os.path.join(CURRENT_DIR, key)
    with open(CACHED_KEY_FILE, "wb") as cache_file:
        cache_file.write(json.dumps({"_": value}).encode('UTF-8'))
    return value
def function[set_item, parameter[key, value]]: constant[Write JSON content from value argument to cached file and return] variable[CACHED_KEY_FILE] assign[=] call[name[os].path.join, parameter[name[CURRENT_DIR], name[key]]] call[call[name[open], parameter[name[CACHED_KEY_FILE], constant[wb]]].write, parameter[call[call[name[json].dumps, parameter[dictionary[[<ast.Constant object at 0x7da18eb576a0>], [<ast.Name object at 0x7da18eb54400>]]]].encode, parameter[constant[UTF-8]]]]] return[name[value]]
keyword[def] identifier[set_item] ( identifier[key] , identifier[value] ): literal[string] identifier[CACHED_KEY_FILE] = identifier[os] . identifier[path] . identifier[join] ( identifier[CURRENT_DIR] , identifier[key] ) identifier[open] ( identifier[CACHED_KEY_FILE] , literal[string] ). identifier[write] ( identifier[json] . identifier[dumps] ({ literal[string] : identifier[value] }). identifier[encode] ( literal[string] )) keyword[return] identifier[value]
def set_item(key, value): """Write JSON content from value argument to cached file and return""" CACHED_KEY_FILE = os.path.join(CURRENT_DIR, key) open(CACHED_KEY_FILE, 'wb').write(json.dumps({'_': value}).encode('UTF-8')) return value
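A hedged read-back counterpart for set_item; ``get_item`` below is illustrative, not part of the original module, and mirrors the one-key JSON wrapper that set_item writes (CURRENT_DIR is assumed to be defined at module level, as in the original):

import json
import os

def get_item(key):
    """Illustrative inverse of set_item (hypothetical helper)."""
    with open(os.path.join(CURRENT_DIR, key), "rb") as cache_file:
        return json.loads(cache_file.read().decode('UTF-8'))["_"]

set_item('answer', 42)
assert get_item('answer') == 42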
def add_extra_chain_cert(self, certobj): """ Add certificate to chain :param certobj: The X509 certificate object to add to the chain :return: None """ if not isinstance(certobj, X509): raise TypeError("certobj must be an X509 instance") copy = _lib.X509_dup(certobj._x509) add_result = _lib.SSL_CTX_add_extra_chain_cert(self._context, copy) if not add_result: # TODO: This is untested. _lib.X509_free(copy) _raise_current_error()
def function[add_extra_chain_cert, parameter[self, certobj]]: constant[ Add certificate to chain :param certobj: The X509 certificate object to add to the chain :return: None ] if <ast.UnaryOp object at 0x7da1b025a650> begin[:] <ast.Raise object at 0x7da1b0259870> variable[copy] assign[=] call[name[_lib].X509_dup, parameter[name[certobj]._x509]] variable[add_result] assign[=] call[name[_lib].SSL_CTX_add_extra_chain_cert, parameter[name[self]._context, name[copy]]] if <ast.UnaryOp object at 0x7da1b025a200> begin[:] call[name[_lib].X509_free, parameter[name[copy]]] call[name[_raise_current_error], parameter[]]
keyword[def] identifier[add_extra_chain_cert] ( identifier[self] , identifier[certobj] ): literal[string] keyword[if] keyword[not] identifier[isinstance] ( identifier[certobj] , identifier[X509] ): keyword[raise] identifier[TypeError] ( literal[string] ) identifier[copy] = identifier[_lib] . identifier[X509_dup] ( identifier[certobj] . identifier[_x509] ) identifier[add_result] = identifier[_lib] . identifier[SSL_CTX_add_extra_chain_cert] ( identifier[self] . identifier[_context] , identifier[copy] ) keyword[if] keyword[not] identifier[add_result] : identifier[_lib] . identifier[X509_free] ( identifier[copy] ) identifier[_raise_current_error] ()
def add_extra_chain_cert(self, certobj): """ Add certificate to chain :param certobj: The X509 certificate object to add to the chain :return: None """ if not isinstance(certobj, X509): raise TypeError('certobj must be an X509 instance') # depends on [control=['if'], data=[]] copy = _lib.X509_dup(certobj._x509) add_result = _lib.SSL_CTX_add_extra_chain_cert(self._context, copy) if not add_result: # TODO: This is untested. _lib.X509_free(copy) _raise_current_error() # depends on [control=['if'], data=[]]
def _to_dict(self): """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'response_type') and self.response_type is not None: _dict['response_type'] = self.response_type if hasattr(self, 'text') and self.text is not None: _dict['text'] = self.text if hasattr(self, 'time') and self.time is not None: _dict['time'] = self.time if hasattr(self, 'typing') and self.typing is not None: _dict['typing'] = self.typing if hasattr(self, 'source') and self.source is not None: _dict['source'] = self.source if hasattr(self, 'title') and self.title is not None: _dict['title'] = self.title if hasattr(self, 'description') and self.description is not None: _dict['description'] = self.description if hasattr(self, 'preference') and self.preference is not None: _dict['preference'] = self.preference if hasattr(self, 'options') and self.options is not None: _dict['options'] = [x._to_dict() for x in self.options] if hasattr(self, 'message_to_human_agent' ) and self.message_to_human_agent is not None: _dict['message_to_human_agent'] = self.message_to_human_agent if hasattr(self, 'topic') and self.topic is not None: _dict['topic'] = self.topic if hasattr(self, 'dialog_node') and self.dialog_node is not None: _dict['dialog_node'] = self.dialog_node if hasattr(self, 'suggestions') and self.suggestions is not None: _dict['suggestions'] = [x._to_dict() for x in self.suggestions] return _dict
def function[_to_dict, parameter[self]]: constant[Return a json dictionary representing this model.] variable[_dict] assign[=] dictionary[[], []] if <ast.BoolOp object at 0x7da1b2347af0> begin[:] call[name[_dict]][constant[response_type]] assign[=] name[self].response_type if <ast.BoolOp object at 0x7da1b2347100> begin[:] call[name[_dict]][constant[text]] assign[=] name[self].text if <ast.BoolOp object at 0x7da1b2345f30> begin[:] call[name[_dict]][constant[time]] assign[=] name[self].time if <ast.BoolOp object at 0x7da1b2344df0> begin[:] call[name[_dict]][constant[typing]] assign[=] name[self].typing if <ast.BoolOp object at 0x7da1b2346a10> begin[:] call[name[_dict]][constant[source]] assign[=] name[self].source if <ast.BoolOp object at 0x7da1b2344160> begin[:] call[name[_dict]][constant[title]] assign[=] name[self].title if <ast.BoolOp object at 0x7da1b2347790> begin[:] call[name[_dict]][constant[description]] assign[=] name[self].description if <ast.BoolOp object at 0x7da1b23461d0> begin[:] call[name[_dict]][constant[preference]] assign[=] name[self].preference if <ast.BoolOp object at 0x7da1b2344880> begin[:] call[name[_dict]][constant[options]] assign[=] <ast.ListComp object at 0x7da1b2345330> if <ast.BoolOp object at 0x7da1b2346650> begin[:] call[name[_dict]][constant[message_to_human_agent]] assign[=] name[self].message_to_human_agent if <ast.BoolOp object at 0x7da1b2346ec0> begin[:] call[name[_dict]][constant[topic]] assign[=] name[self].topic if <ast.BoolOp object at 0x7da1b2347880> begin[:] call[name[_dict]][constant[dialog_node]] assign[=] name[self].dialog_node if <ast.BoolOp object at 0x7da1b23449a0> begin[:] call[name[_dict]][constant[suggestions]] assign[=] <ast.ListComp object at 0x7da1b23473d0> return[name[_dict]]
keyword[def] identifier[_to_dict] ( identifier[self] ): literal[string] identifier[_dict] ={} keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ) keyword[and] identifier[self] . identifier[response_type] keyword[is] keyword[not] keyword[None] : identifier[_dict] [ literal[string] ]= identifier[self] . identifier[response_type] keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ) keyword[and] identifier[self] . identifier[text] keyword[is] keyword[not] keyword[None] : identifier[_dict] [ literal[string] ]= identifier[self] . identifier[text] keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ) keyword[and] identifier[self] . identifier[time] keyword[is] keyword[not] keyword[None] : identifier[_dict] [ literal[string] ]= identifier[self] . identifier[time] keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ) keyword[and] identifier[self] . identifier[typing] keyword[is] keyword[not] keyword[None] : identifier[_dict] [ literal[string] ]= identifier[self] . identifier[typing] keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ) keyword[and] identifier[self] . identifier[source] keyword[is] keyword[not] keyword[None] : identifier[_dict] [ literal[string] ]= identifier[self] . identifier[source] keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ) keyword[and] identifier[self] . identifier[title] keyword[is] keyword[not] keyword[None] : identifier[_dict] [ literal[string] ]= identifier[self] . identifier[title] keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ) keyword[and] identifier[self] . identifier[description] keyword[is] keyword[not] keyword[None] : identifier[_dict] [ literal[string] ]= identifier[self] . identifier[description] keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ) keyword[and] identifier[self] . identifier[preference] keyword[is] keyword[not] keyword[None] : identifier[_dict] [ literal[string] ]= identifier[self] . identifier[preference] keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ) keyword[and] identifier[self] . identifier[options] keyword[is] keyword[not] keyword[None] : identifier[_dict] [ literal[string] ]=[ identifier[x] . identifier[_to_dict] () keyword[for] identifier[x] keyword[in] identifier[self] . identifier[options] ] keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ) keyword[and] identifier[self] . identifier[message_to_human_agent] keyword[is] keyword[not] keyword[None] : identifier[_dict] [ literal[string] ]= identifier[self] . identifier[message_to_human_agent] keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ) keyword[and] identifier[self] . identifier[topic] keyword[is] keyword[not] keyword[None] : identifier[_dict] [ literal[string] ]= identifier[self] . identifier[topic] keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ) keyword[and] identifier[self] . identifier[dialog_node] keyword[is] keyword[not] keyword[None] : identifier[_dict] [ literal[string] ]= identifier[self] . identifier[dialog_node] keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ) keyword[and] identifier[self] . identifier[suggestions] keyword[is] keyword[not] keyword[None] : identifier[_dict] [ literal[string] ]=[ identifier[x] . identifier[_to_dict] () keyword[for] identifier[x] keyword[in] identifier[self] . identifier[suggestions] ] keyword[return] identifier[_dict]
def _to_dict(self): """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'response_type') and self.response_type is not None: _dict['response_type'] = self.response_type # depends on [control=['if'], data=[]] if hasattr(self, 'text') and self.text is not None: _dict['text'] = self.text # depends on [control=['if'], data=[]] if hasattr(self, 'time') and self.time is not None: _dict['time'] = self.time # depends on [control=['if'], data=[]] if hasattr(self, 'typing') and self.typing is not None: _dict['typing'] = self.typing # depends on [control=['if'], data=[]] if hasattr(self, 'source') and self.source is not None: _dict['source'] = self.source # depends on [control=['if'], data=[]] if hasattr(self, 'title') and self.title is not None: _dict['title'] = self.title # depends on [control=['if'], data=[]] if hasattr(self, 'description') and self.description is not None: _dict['description'] = self.description # depends on [control=['if'], data=[]] if hasattr(self, 'preference') and self.preference is not None: _dict['preference'] = self.preference # depends on [control=['if'], data=[]] if hasattr(self, 'options') and self.options is not None: _dict['options'] = [x._to_dict() for x in self.options] # depends on [control=['if'], data=[]] if hasattr(self, 'message_to_human_agent') and self.message_to_human_agent is not None: _dict['message_to_human_agent'] = self.message_to_human_agent # depends on [control=['if'], data=[]] if hasattr(self, 'topic') and self.topic is not None: _dict['topic'] = self.topic # depends on [control=['if'], data=[]] if hasattr(self, 'dialog_node') and self.dialog_node is not None: _dict['dialog_node'] = self.dialog_node # depends on [control=['if'], data=[]] if hasattr(self, 'suggestions') and self.suggestions is not None: _dict['suggestions'] = [x._to_dict() for x in self.suggestions] # depends on [control=['if'], data=[]] return _dict
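A minimal standalone sketch of the same hasattr-guarded serialization idiom as `_to_dict` above; the class and field names here are hypothetical, not part of the original SDK.

class Option:
    """Hypothetical model using the _to_dict pattern shown above."""

    def __init__(self, label=None, value=None):
        self.label = label
        self.value = value

    def _to_dict(self):
        # Emit only attributes that exist and are not None, exactly as above.
        _dict = {}
        if hasattr(self, 'label') and self.label is not None:
            _dict['label'] = self.label
        if hasattr(self, 'value') and self.value is not None:
            _dict['value'] = self.value
        return _dict

print(Option(label='yes')._to_dict())  # {'label': 'yes'}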
def update_H(self, mean_field, l): """Updates the spin hamiltonian and recalculates its eigenbasis""" self.H_s = self.spin_hamiltonian(mean_field, l) try: self.eig_energies, self.eig_states = diagonalize(self.H_s) except np.linalg.linalg.LinAlgError: np.savez('errorhamil', H=self.H_s, fiel=mean_field, lamb=l) raise except ValueError: np.savez('errorhamil', H=self.H_s, fiel=mean_field, lamb=l) print(mean_field, l) raise
def function[update_H, parameter[self, mean_field, l]]: constant[Updates the spin hamiltonian and recalculates its eigenbasis] name[self].H_s assign[=] call[name[self].spin_hamiltonian, parameter[name[mean_field], name[l]]] <ast.Try object at 0x7da18fe91bd0>
keyword[def] identifier[update_H] ( identifier[self] , identifier[mean_field] , identifier[l] ): literal[string] identifier[self] . identifier[H_s] = identifier[self] . identifier[spin_hamiltonian] ( identifier[mean_field] , identifier[l] ) keyword[try] : identifier[self] . identifier[eig_energies] , identifier[self] . identifier[eig_states] = identifier[diagonalize] ( identifier[self] . identifier[H_s] ) keyword[except] identifier[np] . identifier[linalg] . identifier[linalg] . identifier[LinAlgError] : identifier[np] . identifier[savez] ( literal[string] , identifier[H] = identifier[self] . identifier[H_s] , identifier[fiel] = identifier[mean_field] , identifier[lamb] = identifier[l] ) keyword[raise] keyword[except] identifier[ValueError] : identifier[np] . identifier[savez] ( literal[string] , identifier[H] = identifier[self] . identifier[H_s] , identifier[fiel] = identifier[mean_field] , identifier[lamb] = identifier[l] ) identifier[print] ( identifier[mean_field] , identifier[l] ) keyword[raise]
def update_H(self, mean_field, l): """Updates the spin hamiltonian and recalculates its eigenbasis""" self.H_s = self.spin_hamiltonian(mean_field, l) try: (self.eig_energies, self.eig_states) = diagonalize(self.H_s) # depends on [control=['try'], data=[]] except np.linalg.linalg.LinAlgError: np.savez('errorhamil', H=self.H_s, fiel=mean_field, lamb=l) raise # depends on [control=['except'], data=[]] except ValueError: np.savez('errorhamil', H=self.H_s, fiel=mean_field, lamb=l) print(mean_field, l) raise # depends on [control=['except'], data=[]]
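The `diagonalize` helper is not shown in this row; a plausible stand-in, assuming it wraps NumPy's Hermitian eigensolver and returns (energies, states):

import numpy as np

def diagonalize(h):
    # Eigenvalues in ascending order, eigenvectors as columns; valid for Hermitian H.
    return np.linalg.eigh(h)

energies, states = diagonalize(np.array([[0.0, 1.0], [1.0, 0.0]]))
print(energies)  # [-1.  1.]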
def convert_environment(datadir, version, always_yes): """ Converts an environment TO the version specified by `version`. :param datadir: The datadir to convert. :param version: The version to convert TO. :param always_yes: True if the user shouldn't be prompted about the migration. """ # Since we don't call either load() or new() we have to call require_images ourselves. require_images() inp = None old_version = _get_current_format(datadir) migration_func = migrations[(old_version, version)] if version > CURRENT_FORMAT_VERSION: raise DatacatsError('Cannot migrate to a version higher than the ' 'current one.') if version < 1: raise DatacatsError('Datadir versioning starts at 1.') if not always_yes: while inp != 'y' and inp != 'n': inp = raw_input(migration_func.__doc__.format(version)) if inp == 'n': sys.exit(1) lockfile = LockFile(path_join(datadir, '.migration_lock')) lockfile.acquire() try: # FIXME: If we wanted to, we could find a set of conversions which # would bring us up to the one we want if there's no direct path. # This isn't necessary with just two formats, but it may be useful # at 3. # Call the appropriate conversion function migration_func(datadir) finally: lockfile.release()
def function[convert_environment, parameter[datadir, version, always_yes]]: constant[ Converts an environment TO the version specified by `version`. :param datadir: The datadir to convert. :param version: The version to convert TO. :param always_yes: True if the user shouldn't be prompted about the migration. ] call[name[require_images], parameter[]] variable[inp] assign[=] constant[None] variable[old_version] assign[=] call[name[_get_current_format], parameter[name[datadir]]] variable[migration_func] assign[=] call[name[migrations]][tuple[[<ast.Name object at 0x7da1b26adb40>, <ast.Name object at 0x7da1b26acfa0>]]] if compare[name[version] greater[>] name[CURRENT_FORMAT_VERSION]] begin[:] <ast.Raise object at 0x7da1b26af5b0> if compare[name[version] less[<] constant[1]] begin[:] <ast.Raise object at 0x7da1b26ae290> if <ast.UnaryOp object at 0x7da1b26afd00> begin[:] while <ast.BoolOp object at 0x7da1b26acd60> begin[:] variable[inp] assign[=] call[name[raw_input], parameter[call[name[migration_func].__doc__.format, parameter[name[version]]]]] if compare[name[inp] equal[==] constant[n]] begin[:] call[name[sys].exit, parameter[constant[1]]] variable[lockfile] assign[=] call[name[LockFile], parameter[call[name[path_join], parameter[name[datadir], constant[.migration_lock]]]]] call[name[lockfile].acquire, parameter[]] <ast.Try object at 0x7da1b26adcc0>
keyword[def] identifier[convert_environment] ( identifier[datadir] , identifier[version] , identifier[always_yes] ): literal[string] identifier[require_images] () identifier[inp] = keyword[None] identifier[old_version] = identifier[_get_current_format] ( identifier[datadir] ) identifier[migration_func] = identifier[migrations] [( identifier[old_version] , identifier[version] )] keyword[if] identifier[version] > identifier[CURRENT_FORMAT_VERSION] : keyword[raise] identifier[DatacatsError] ( literal[string] literal[string] ) keyword[if] identifier[version] < literal[int] : keyword[raise] identifier[DatacatsError] ( literal[string] ) keyword[if] keyword[not] identifier[always_yes] : keyword[while] identifier[inp] != literal[string] keyword[and] identifier[inp] != literal[string] : identifier[inp] = identifier[raw_input] ( identifier[migration_func] . identifier[__doc__] . identifier[format] ( identifier[version] )) keyword[if] identifier[inp] == literal[string] : identifier[sys] . identifier[exit] ( literal[int] ) identifier[lockfile] = identifier[LockFile] ( identifier[path_join] ( identifier[datadir] , literal[string] )) identifier[lockfile] . identifier[acquire] () keyword[try] : identifier[migration_func] ( identifier[datadir] ) keyword[finally] : identifier[lockfile] . identifier[release] ()
def convert_environment(datadir, version, always_yes): """ Converts an environment TO the version specified by `version`. :param datadir: The datadir to convert. :param version: The version to convert TO. :param always_yes: True if the user shouldn't be prompted about the migration. """ # Since we don't call either load() or new() we have to call require_images ourselves. require_images() inp = None old_version = _get_current_format(datadir) migration_func = migrations[old_version, version] if version > CURRENT_FORMAT_VERSION: raise DatacatsError('Cannot migrate to a version higher than the current one.') # depends on [control=['if'], data=[]] if version < 1: raise DatacatsError('Datadir versioning starts at 1.') # depends on [control=['if'], data=[]] if not always_yes: while inp != 'y' and inp != 'n': inp = raw_input(migration_func.__doc__.format(version)) # depends on [control=['while'], data=[]] if inp == 'n': sys.exit(1) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] lockfile = LockFile(path_join(datadir, '.migration_lock')) lockfile.acquire() try: # FIXME: If we wanted to, we could find a set of conversions which # would bring us up to the one we want if there's no direct path. # This isn't necessary with just two formats, but it may be useful # at 3. # Call the appropriate conversion function migration_func(datadir) # depends on [control=['try'], data=[]] finally: lockfile.release()
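The `migrations` lookup table is assumed to map (from_version, to_version) pairs to conversion functions whose docstrings double as the confirmation prompt (`migration_func.__doc__.format(version)` above). A hypothetical sketch:

def _one_to_two(datadir):
    """Convert this environment to format version {0}? (y/n) """
    pass  # move files into the new on-disk layout

# Keyed exactly as migrations[(old_version, version)] expects; raw_input()
# shows the docstring with the target version substituted via str.format.
migrations = {(1, 2): _one_to_two}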
def obj(self):
        """ Returns the passed object, but if the chain method is used,
        returns the last processed result.
        """
        if self._wrapped is not self.Null:
            return self._wrapped
        else:
            return self.object
def function[obj, parameter[self]]:
    constant[ Returns the passed object, but if the chain method is used,
        returns the last processed result.
        ]
    if compare[name[self]._wrapped is_not name[self].Null] begin[:]
    return[name[self]._wrapped]
keyword[def] identifier[obj] ( identifier[self] ): literal[string] keyword[if] identifier[self] . identifier[_wrapped] keyword[is] keyword[not] identifier[self] . identifier[Null] : keyword[return] identifier[self] . identifier[_wrapped] keyword[else] : keyword[return] identifier[self] . identifier[object]
def obj(self):
    """ Returns the passed object, but if the chain method is used,
        returns the last processed result.
        """
    if self._wrapped is not self.Null:
        return self._wrapped # depends on [control=['if'], data=[]]
    else:
        return self.object
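A simplified stand-in for the wrapper class this property belongs to (underscore.py-style chaining); `Null` is a sentinel meaning no chain result is pending. Names mirror the property above, but the class itself is an assumption:

class Wrapper:
    Null = object()  # sentinel: chain() has not produced a result yet

    def __init__(self, obj):
        self.object = obj
        self._wrapped = self.Null

    @property
    def obj(self):
        return self._wrapped if self._wrapped is not self.Null else self.object

w = Wrapper([1, 2, 3])
print(w.obj)         # [1, 2, 3] -- falls back to the original object
w._wrapped = [2, 4]
print(w.obj)         # [2, 4]   -- last processed result wins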
def libvlc_audio_set_format_callbacks(mp, setup, cleanup): '''Set decoded audio format. This only works in combination with L{libvlc_audio_set_callbacks}(). @param mp: the media player. @param setup: callback to select the audio format (cannot be NULL). @param cleanup: callback to release any allocated resources (or NULL). @version: LibVLC 2.0.0 or later. ''' f = _Cfunctions.get('libvlc_audio_set_format_callbacks', None) or \ _Cfunction('libvlc_audio_set_format_callbacks', ((1,), (1,), (1,),), None, None, MediaPlayer, AudioSetupCb, AudioCleanupCb) return f(mp, setup, cleanup)
def function[libvlc_audio_set_format_callbacks, parameter[mp, setup, cleanup]]: constant[Set decoded audio format. This only works in combination with L{libvlc_audio_set_callbacks}(). @param mp: the media player. @param setup: callback to select the audio format (cannot be NULL). @param cleanup: callback to release any allocated resources (or NULL). @version: LibVLC 2.0.0 or later. ] variable[f] assign[=] <ast.BoolOp object at 0x7da1b1600670> return[call[name[f], parameter[name[mp], name[setup], name[cleanup]]]]
keyword[def] identifier[libvlc_audio_set_format_callbacks] ( identifier[mp] , identifier[setup] , identifier[cleanup] ): literal[string] identifier[f] = identifier[_Cfunctions] . identifier[get] ( literal[string] , keyword[None] ) keyword[or] identifier[_Cfunction] ( literal[string] ,(( literal[int] ,),( literal[int] ,),( literal[int] ,),), keyword[None] , keyword[None] , identifier[MediaPlayer] , identifier[AudioSetupCb] , identifier[AudioCleanupCb] ) keyword[return] identifier[f] ( identifier[mp] , identifier[setup] , identifier[cleanup] )
def libvlc_audio_set_format_callbacks(mp, setup, cleanup): """Set decoded audio format. This only works in combination with L{libvlc_audio_set_callbacks}(). @param mp: the media player. @param setup: callback to select the audio format (cannot be NULL). @param cleanup: callback to release any allocated resources (or NULL). @version: LibVLC 2.0.0 or later. """ f = _Cfunctions.get('libvlc_audio_set_format_callbacks', None) or _Cfunction('libvlc_audio_set_format_callbacks', ((1,), (1,), (1,)), None, None, MediaPlayer, AudioSetupCb, AudioCleanupCb) return f(mp, setup, cleanup)
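The `_Cfunctions` cache / `_Cfunction` factory pair is the bindings' lazy-loading idiom: look the wrapper up in a cache and build the ctypes prototype only on first use. A generic POSIX sketch of the same idiom (the cache name, helper, and libc example are illustrative, not the libvlc internals):

import ctypes
import ctypes.util

_cfunctions = {}

def _cfunction(name, restype, *argtypes):
    # Build and memoize a ctypes wrapper on first request.
    if name not in _cfunctions:
        libc = ctypes.CDLL(ctypes.util.find_library('c'))
        f = getattr(libc, name)
        f.restype, f.argtypes = restype, list(argtypes)
        _cfunctions[name] = f
    return _cfunctions[name]

strlen = _cfunction('strlen', ctypes.c_size_t, ctypes.c_char_p)
print(strlen(b'hello'))  # 5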
def path_exists(path): """ Check if file exists either remote or local. Parameters: ----------- path : path to file Returns: -------- exists : bool """ if path.startswith(("http://", "https://")): try: urlopen(path).info() return True except HTTPError as e: if e.code == 404: return False else: raise elif path.startswith("s3://"): bucket = get_boto3_bucket(path.split("/")[2]) key = "/".join(path.split("/")[3:]) for obj in bucket.objects.filter(Prefix=key): if obj.key == key: return True else: return False else: logger.debug("%s exists: %s", path, os.path.exists(path)) return os.path.exists(path)
def function[path_exists, parameter[path]]: constant[ Check if file exists either remote or local. Parameters: ----------- path : path to file Returns: -------- exists : bool ] if call[name[path].startswith, parameter[tuple[[<ast.Constant object at 0x7da1b0016c50>, <ast.Constant object at 0x7da1b00142b0>]]]] begin[:] <ast.Try object at 0x7da1b00150c0>
keyword[def] identifier[path_exists] ( identifier[path] ): literal[string] keyword[if] identifier[path] . identifier[startswith] (( literal[string] , literal[string] )): keyword[try] : identifier[urlopen] ( identifier[path] ). identifier[info] () keyword[return] keyword[True] keyword[except] identifier[HTTPError] keyword[as] identifier[e] : keyword[if] identifier[e] . identifier[code] == literal[int] : keyword[return] keyword[False] keyword[else] : keyword[raise] keyword[elif] identifier[path] . identifier[startswith] ( literal[string] ): identifier[bucket] = identifier[get_boto3_bucket] ( identifier[path] . identifier[split] ( literal[string] )[ literal[int] ]) identifier[key] = literal[string] . identifier[join] ( identifier[path] . identifier[split] ( literal[string] )[ literal[int] :]) keyword[for] identifier[obj] keyword[in] identifier[bucket] . identifier[objects] . identifier[filter] ( identifier[Prefix] = identifier[key] ): keyword[if] identifier[obj] . identifier[key] == identifier[key] : keyword[return] keyword[True] keyword[else] : keyword[return] keyword[False] keyword[else] : identifier[logger] . identifier[debug] ( literal[string] , identifier[path] , identifier[os] . identifier[path] . identifier[exists] ( identifier[path] )) keyword[return] identifier[os] . identifier[path] . identifier[exists] ( identifier[path] )
def path_exists(path): """ Check if file exists either remote or local. Parameters: ----------- path : path to file Returns: -------- exists : bool """ if path.startswith(('http://', 'https://')): try: urlopen(path).info() return True # depends on [control=['try'], data=[]] except HTTPError as e: if e.code == 404: return False # depends on [control=['if'], data=[]] else: raise # depends on [control=['except'], data=['e']] # depends on [control=['if'], data=[]] elif path.startswith('s3://'): bucket = get_boto3_bucket(path.split('/')[2]) key = '/'.join(path.split('/')[3:]) for obj in bucket.objects.filter(Prefix=key): if obj.key == key: return True # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['obj']] else: return False # depends on [control=['if'], data=[]] else: logger.debug('%s exists: %s', path, os.path.exists(path)) return os.path.exists(path)
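Usage sketch, run inside the module above; the paths are illustrative and the s3:// branch assumes boto3 credentials are configured:

print(path_exists('/etc/hosts'))                      # local filesystem check
print(path_exists('https://example.com/index.html'))  # HTTP probe via urlopen
# path_exists('s3://my-bucket/tiles/1/0/0.tif')       # prefix scan via boto3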
def bulk_update(manager, model_objs, fields_to_update):
    """
    Bulk updates a list of model objects that are already saved.

    :type model_objs: list of :class:`Models<django:django.db.models.Model>`
    :param model_objs: A list of model objects that have been updated.

    :param fields_to_update: A list of fields to be updated. Only these fields will be updated

    :signals: Emits a post_bulk_operation signal when completed.

    Examples:

    .. code-block:: python

        # Create a couple test models
        model_obj1 = TestModel.objects.create(int_field=1, float_field=2.0, char_field='Hi')
        model_obj2 = TestModel.objects.create(int_field=3, float_field=4.0, char_field='Hello')

        # Change their fields and do a bulk update
        model_obj1.int_field = 10
        model_obj1.float_field = 20.0
        model_obj2.int_field = 30
        model_obj2.float_field = 40.0
        bulk_update(TestModel.objects, [model_obj1, model_obj2], ['int_field', 'float_field'])

        # Reload the models and view their changes
        model_obj1 = TestModel.objects.get(id=model_obj1.id)
        print(model_obj1.int_field, model_obj1.float_field)
        10, 20.0

        model_obj2 = TestModel.objects.get(id=model_obj2.id)
        print(model_obj2.int_field, model_obj2.float_field)
        30, 40.0
    """
    # Add the pk to the value fields so we can join
    value_fields = [manager.model._meta.pk.attname] + fields_to_update

    # Build the row values
    row_values = [
        [_get_prepped_model_field(model_obj, field_name) for field_name in value_fields]
        for model_obj in model_objs
    ]

    # If we do not have any values or fields to update just return
    if len(row_values) == 0 or len(fields_to_update) == 0:
        return

    # Create a map of db types
    db_types = [
        manager.model._meta.get_field(field).db_type(connection)
        for field in value_fields
    ]

    # Build the value fields sql
    value_fields_sql = ', '.join(
        '"{field}"'.format(field=manager.model._meta.get_field(field).column)
        for field in value_fields
    )

    # Build the set sql
    update_fields_sql = ', '.join([
        '"{field}" = "new_values"."{field}"'.format(
            field=manager.model._meta.get_field(field).column
        )
        for field in fields_to_update
    ])

    # Build the values sql
    values_sql = ', '.join([
        '({0})'.format(
            ', '.join([
                '%s::{0}'.format(
                    db_types[i]
                ) if not row_number and i else '%s'
                for i, _ in enumerate(row)
            ])
        )
        for row_number, row in enumerate(row_values)
    ])

    # Start building the query
    update_sql = (
        'UPDATE {table} '
        'SET {update_fields_sql} '
        'FROM (VALUES {values_sql}) AS new_values ({value_fields_sql}) '
        'WHERE "{table}"."{pk_field}" = "new_values"."{pk_field}"'
    ).format(
        table=manager.model._meta.db_table,
        pk_field=manager.model._meta.pk.column,
        update_fields_sql=update_fields_sql,
        values_sql=values_sql,
        value_fields_sql=value_fields_sql
    )

    # Combine all the row values
    update_sql_params = list(itertools.chain(*row_values))

    # Run the update query
    with connection.cursor() as cursor:
        cursor.execute(update_sql, update_sql_params)

    # call the bulk operation signal
    post_bulk_operation.send(sender=manager.model, model=manager.model)
def function[bulk_update, parameter[manager, model_objs, fields_to_update]]:
    constant[
    Bulk updates a list of model objects that are already saved.

    :type model_objs: list of :class:`Models<django:django.db.models.Model>`
    :param model_objs: A list of model objects that have been updated.

    :param fields_to_update: A list of fields to be updated. Only these fields will be updated

    :signals: Emits a post_bulk_operation signal when completed.

    Examples:

    .. code-block:: python

        # Create a couple test models
        model_obj1 = TestModel.objects.create(int_field=1, float_field=2.0, char_field='Hi')
        model_obj2 = TestModel.objects.create(int_field=3, float_field=4.0, char_field='Hello')

        # Change their fields and do a bulk update
        model_obj1.int_field = 10
        model_obj1.float_field = 20.0
        model_obj2.int_field = 30
        model_obj2.float_field = 40.0
        bulk_update(TestModel.objects, [model_obj1, model_obj2], ['int_field', 'float_field'])

        # Reload the models and view their changes
        model_obj1 = TestModel.objects.get(id=model_obj1.id)
        print(model_obj1.int_field, model_obj1.float_field)
        10, 20.0

        model_obj2 = TestModel.objects.get(id=model_obj2.id)
        print(model_obj2.int_field, model_obj2.float_field)
        30, 40.0
    ]
    variable[value_fields] assign[=] binary_operation[list[[<ast.Attribute object at 0x7da18dc063e0>]] + name[fields_to_update]]
    variable[row_values] assign[=] <ast.ListComp object at 0x7da18dc06aa0>
    if <ast.BoolOp object at 0x7da18dc04100> begin[:]
    return[None]
    variable[db_types] assign[=] <ast.ListComp object at 0x7da18dc05360>
    variable[value_fields_sql] assign[=] call[constant[, ].join, parameter[<ast.GeneratorExp object at 0x7da18dc04790>]]
    variable[update_fields_sql] assign[=] call[constant[, ].join, parameter[<ast.ListComp object at 0x7da18dc04700>]]
    variable[values_sql] assign[=] call[constant[, ].join, parameter[<ast.ListComp object at 0x7da18dc07b80>]]
    variable[update_sql] assign[=] call[constant[UPDATE {table} SET {update_fields_sql} FROM (VALUES {values_sql}) AS new_values ({value_fields_sql}) WHERE "{table}"."{pk_field}" = "new_values"."{pk_field}"].format, parameter[]]
    variable[update_sql_params] assign[=] call[name[list], parameter[call[name[itertools].chain, parameter[<ast.Starred object at 0x7da18dc070d0>]]]]
    with call[name[connection].cursor, parameter[]] begin[:]
    call[name[cursor].execute, parameter[name[update_sql], name[update_sql_params]]]
    call[name[post_bulk_operation].send, parameter[]]
keyword[def] identifier[bulk_update] ( identifier[manager] , identifier[model_objs] , identifier[fields_to_update] ): literal[string] identifier[value_fields] =[ identifier[manager] . identifier[model] . identifier[_meta] . identifier[pk] . identifier[attname] ]+ identifier[fields_to_update] identifier[row_values] =[ [ identifier[_get_prepped_model_field] ( identifier[model_obj] , identifier[field_name] ) keyword[for] identifier[field_name] keyword[in] identifier[value_fields] ] keyword[for] identifier[model_obj] keyword[in] identifier[model_objs] ] keyword[if] identifier[len] ( identifier[row_values] )== literal[int] keyword[or] identifier[len] ( identifier[fields_to_update] )== literal[int] : keyword[return] identifier[db_types] =[ identifier[manager] . identifier[model] . identifier[_meta] . identifier[get_field] ( identifier[field] ). identifier[db_type] ( identifier[connection] ) keyword[for] identifier[field] keyword[in] identifier[value_fields] ] identifier[value_fields_sql] = literal[string] . identifier[join] ( literal[string] . identifier[format] ( identifier[field] = identifier[manager] . identifier[model] . identifier[_meta] . identifier[get_field] ( identifier[field] ). identifier[column] ) keyword[for] identifier[field] keyword[in] identifier[value_fields] ) identifier[update_fields_sql] = literal[string] . identifier[join] ([ literal[string] . identifier[format] ( identifier[field] = identifier[manager] . identifier[model] . identifier[_meta] . identifier[get_field] ( identifier[field] ). identifier[column] ) keyword[for] identifier[field] keyword[in] identifier[fields_to_update] ]) identifier[values_sql] = literal[string] . identifier[join] ([ literal[string] . identifier[format] ( literal[string] . identifier[join] ([ literal[string] . identifier[format] ( identifier[db_types] [ identifier[i] ] ) keyword[if] keyword[not] identifier[row_number] keyword[and] identifier[i] keyword[else] literal[string] keyword[for] identifier[i] , identifier[_] keyword[in] identifier[enumerate] ( identifier[row] ) ]) ) keyword[for] identifier[row_number] , identifier[row] keyword[in] identifier[enumerate] ( identifier[row_values] ) ]) identifier[update_sql] =( literal[string] literal[string] literal[string] literal[string] ). identifier[format] ( identifier[table] = identifier[manager] . identifier[model] . identifier[_meta] . identifier[db_table] , identifier[pk_field] = identifier[manager] . identifier[model] . identifier[_meta] . identifier[pk] . identifier[column] , identifier[update_fields_sql] = identifier[update_fields_sql] , identifier[values_sql] = identifier[values_sql] , identifier[value_fields_sql] = identifier[value_fields_sql] ) identifier[update_sql_params] = identifier[list] ( identifier[itertools] . identifier[chain] (* identifier[row_values] )) keyword[with] identifier[connection] . identifier[cursor] () keyword[as] identifier[cursor] : identifier[cursor] . identifier[execute] ( identifier[update_sql] , identifier[update_sql_params] ) identifier[post_bulk_operation] . identifier[send] ( identifier[sender] = identifier[manager] . identifier[model] , identifier[model] = identifier[manager] . identifier[model] )
def bulk_update(manager, model_objs, fields_to_update):
    """
    Bulk updates a list of model objects that are already saved.

    :type model_objs: list of :class:`Models<django:django.db.models.Model>`
    :param model_objs: A list of model objects that have been updated.

    :param fields_to_update: A list of fields to be updated. Only these fields will be updated

    :signals: Emits a post_bulk_operation signal when completed.

    Examples:

    .. code-block:: python

        # Create a couple test models
        model_obj1 = TestModel.objects.create(int_field=1, float_field=2.0, char_field='Hi')
        model_obj2 = TestModel.objects.create(int_field=3, float_field=4.0, char_field='Hello')

        # Change their fields and do a bulk update
        model_obj1.int_field = 10
        model_obj1.float_field = 20.0
        model_obj2.int_field = 30
        model_obj2.float_field = 40.0
        bulk_update(TestModel.objects, [model_obj1, model_obj2], ['int_field', 'float_field'])

        # Reload the models and view their changes
        model_obj1 = TestModel.objects.get(id=model_obj1.id)
        print(model_obj1.int_field, model_obj1.float_field)
        10, 20.0

        model_obj2 = TestModel.objects.get(id=model_obj2.id)
        print(model_obj2.int_field, model_obj2.float_field)
        30, 40.0
    """
    # Add the pk to the value fields so we can join
    value_fields = [manager.model._meta.pk.attname] + fields_to_update
    # Build the row values
    row_values = [[_get_prepped_model_field(model_obj, field_name) for field_name in value_fields] for model_obj in model_objs]
    # If we do not have any values or fields to update just return
    if len(row_values) == 0 or len(fields_to_update) == 0:
        return # depends on [control=['if'], data=[]]
    # Create a map of db types
    db_types = [manager.model._meta.get_field(field).db_type(connection) for field in value_fields]
    # Build the value fields sql
    value_fields_sql = ', '.join(('"{field}"'.format(field=manager.model._meta.get_field(field).column) for field in value_fields))
    # Build the set sql
    update_fields_sql = ', '.join(['"{field}" = "new_values"."{field}"'.format(field=manager.model._meta.get_field(field).column) for field in fields_to_update])
    # Build the values sql
    values_sql = ', '.join(['({0})'.format(', '.join(['%s::{0}'.format(db_types[i]) if not row_number and i else '%s' for (i, _) in enumerate(row)])) for (row_number, row) in enumerate(row_values)])
    # Start building the query
    update_sql = 'UPDATE {table} SET {update_fields_sql} FROM (VALUES {values_sql}) AS new_values ({value_fields_sql}) WHERE "{table}"."{pk_field}" = "new_values"."{pk_field}"'.format(table=manager.model._meta.db_table, pk_field=manager.model._meta.pk.column, update_fields_sql=update_fields_sql, values_sql=values_sql, value_fields_sql=value_fields_sql)
    # Combine all the row values
    update_sql_params = list(itertools.chain(*row_values))
    # Run the update query
    with connection.cursor() as cursor:
        cursor.execute(update_sql, update_sql_params) # depends on [control=['with'], data=['cursor']]
    # call the bulk operation signal
    post_bulk_operation.send(sender=manager.model, model=manager.model)
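The subtle part of the query construction is the VALUES clause: PostgreSQL type casts are attached only to the non-pk columns of the first row, and the database infers the remaining rows' types from it. A standalone rerun of that comprehension with hypothetical column types:

db_types = ['integer', 'integer', 'double precision']  # pk + two update fields
rows = [[1, 10, 20.0], [2, 30, 40.0]]
values_sql = ', '.join(
    '({0})'.format(', '.join(
        '%s::{0}'.format(db_types[i]) if not row_number and i else '%s'
        for i, _ in enumerate(row)))
    for row_number, row in enumerate(rows))
print(values_sql)
# (%s, %s::integer, %s::double precision), (%s, %s, %s)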
def delete_all(self, criteria: Q = None):
        """Delete dictionary objects matching the criteria, or the whole schema store when none is given"""
        if criteria:
            # Find the matching objects; the deletion count is returned below
            items = self._filter(criteria, self.conn['data'][self.schema_name])

            # Delete all the matching identifiers
            with self.conn['lock']:
                for identifier in items:
                    self.conn['data'][self.schema_name].pop(identifier, None)

            return len(items)
        else:
            with self.conn['lock']:
                if self.schema_name in self.conn['data']:
                    del self.conn['data'][self.schema_name]
def function[delete_all, parameter[self, criteria]]:
    constant[Delete dictionary objects matching the criteria, or the whole schema store when none is given]
    if name[criteria] begin[:]
    variable[items] assign[=] call[name[self]._filter, parameter[name[criteria], call[call[name[self].conn][constant[data]]][name[self].schema_name]]]
    with call[name[self].conn][constant[lock]] begin[:]
    for taget[name[identifier]] in starred[name[items]] begin[:]
    call[call[call[name[self].conn][constant[data]]][name[self].schema_name].pop, parameter[name[identifier], constant[None]]]
    return[call[name[len], parameter[name[items]]]]
keyword[def] identifier[delete_all] ( identifier[self] , identifier[criteria] : identifier[Q] = keyword[None] ): literal[string] keyword[if] identifier[criteria] : identifier[items] = identifier[self] . identifier[_filter] ( identifier[criteria] , identifier[self] . identifier[conn] [ literal[string] ][ identifier[self] . identifier[schema_name] ]) keyword[with] identifier[self] . identifier[conn] [ literal[string] ]: keyword[for] identifier[identifier] keyword[in] identifier[items] : identifier[self] . identifier[conn] [ literal[string] ][ identifier[self] . identifier[schema_name] ]. identifier[pop] ( identifier[identifier] , keyword[None] ) keyword[return] identifier[len] ( identifier[items] ) keyword[else] : keyword[with] identifier[self] . identifier[conn] [ literal[string] ]: keyword[if] identifier[self] . identifier[schema_name] keyword[in] identifier[self] . identifier[conn] [ literal[string] ]: keyword[del] identifier[self] . identifier[conn] [ literal[string] ][ identifier[self] . identifier[schema_name] ]
def delete_all(self, criteria: Q=None):
    """Delete dictionary objects matching the criteria, or the whole schema store when none is given"""
    if criteria:
        # Find the matching objects; the deletion count is returned below
        items = self._filter(criteria, self.conn['data'][self.schema_name])
        # Delete all the matching identifiers
        with self.conn['lock']:
            for identifier in items:
                self.conn['data'][self.schema_name].pop(identifier, None) # depends on [control=['for'], data=['identifier']] # depends on [control=['with'], data=[]]
        return len(items) # depends on [control=['if'], data=[]]
    else:
        with self.conn['lock']:
            if self.schema_name in self.conn['data']:
                del self.conn['data'][self.schema_name] # depends on [control=['if'], data=[]] # depends on [control=['with'], data=[]]
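A minimal sketch of the `conn` structure this repository assumes, inferred from the accesses above: a process-wide dict of schema stores plus a shared lock. The schema name and records are hypothetical:

import threading

conn = {
    'lock': threading.Lock(),
    'data': {
        # schema_name -> {identifier: record}
        'person': {1: {'name': 'Ada'}, 2: {'name': 'Alan'}},
    },
}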
def convert_to_duckling_language_id(cls, lang): """Ensure a language identifier has the correct duckling format and is supported.""" if lang is not None and cls.is_supported(lang): return lang elif lang is not None and cls.is_supported(lang + "$core"): # Support ISO 639-1 Language Codes (e.g. "en") return lang + "$core" else: raise ValueError("Unsupported language '{}'. Supported languages: {}".format( lang, ", ".join(cls.SUPPORTED_LANGUAGES)))
def function[convert_to_duckling_language_id, parameter[cls, lang]]: constant[Ensure a language identifier has the correct duckling format and is supported.] if <ast.BoolOp object at 0x7da2043450c0> begin[:] return[name[lang]]
keyword[def] identifier[convert_to_duckling_language_id] ( identifier[cls] , identifier[lang] ): literal[string] keyword[if] identifier[lang] keyword[is] keyword[not] keyword[None] keyword[and] identifier[cls] . identifier[is_supported] ( identifier[lang] ): keyword[return] identifier[lang] keyword[elif] identifier[lang] keyword[is] keyword[not] keyword[None] keyword[and] identifier[cls] . identifier[is_supported] ( identifier[lang] + literal[string] ): keyword[return] identifier[lang] + literal[string] keyword[else] : keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[lang] , literal[string] . identifier[join] ( identifier[cls] . identifier[SUPPORTED_LANGUAGES] )))
def convert_to_duckling_language_id(cls, lang): """Ensure a language identifier has the correct duckling format and is supported.""" if lang is not None and cls.is_supported(lang): return lang # depends on [control=['if'], data=[]] elif lang is not None and cls.is_supported(lang + '$core'): # Support ISO 639-1 Language Codes (e.g. "en") return lang + '$core' # depends on [control=['if'], data=[]] else: raise ValueError("Unsupported language '{}'. Supported languages: {}".format(lang, ', '.join(cls.SUPPORTED_LANGUAGES)))
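A self-contained stand-in for the normalization contract, assuming SUPPORTED_LANGUAGES holds the "$core"-suffixed ids (the two entries below are illustrative):

SUPPORTED_LANGUAGES = {'en$core', 'de$core'}

def to_duckling_id(lang):
    if lang in SUPPORTED_LANGUAGES:
        return lang                       # already a full duckling id
    if lang is not None and lang + '$core' in SUPPORTED_LANGUAGES:
        return lang + '$core'             # promote an ISO 639-1 code
    raise ValueError("Unsupported language '{}'".format(lang))

print(to_duckling_id('en'))  # en$core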
def MultifactorSchedule(history=None, factors="constant * linear_warmup * rsqrt_decay", constant=0.1, warmup_steps=100, decay_factor=0.5, steps_per_decay=20000): """Factor-based learning rate schedule. Interprets factors in the factors string which can consist of: * constant: interpreted as the constant value, * linear_warmup: interpreted as linear warmup until warmup_steps, * rsqrt_decay: divide by square root of max(step, warmup_steps) * decay_every: Every k steps decay the learning rate by decay_factor. Args: history: the history of training and evaluation (History object). factors: a string with factors separated by "*" that defines the schedule. constant: float, the starting constant for the learning rate schedule. warmup_steps: how many steps to warm up for in the warmup schedule. decay_factor: The amount to decay the learning rate by. steps_per_decay: How often to decay the learning rate. Returns: a function learning_rate(step): float -> float, the step-dependent lr. """ del history cache_args = (factors, constant, warmup_steps) if cache_args in _memoized_multifactor_schedules: return _memoized_multifactor_schedules[cache_args] factors = [n.strip() for n in factors.split("*")] def learning_rate(step): # pylint: disable=invalid-name """Step to learning rate function.""" ret = 1.0 for name in factors: if name == "constant": ret *= constant elif name == "linear_warmup": ret *= np.minimum(1.0, step / warmup_steps) elif name == "rsqrt_decay": ret /= np.sqrt(np.maximum(step, warmup_steps)) elif name == "decay_every": ret *= (decay_factor ** (step//steps_per_decay)) else: raise ValueError("Unknown factor %s." % name) return ret _memoized_multifactor_schedules[cache_args] = learning_rate return learning_rate
def function[MultifactorSchedule, parameter[history, factors, constant, warmup_steps, decay_factor, steps_per_decay]]: constant[Factor-based learning rate schedule. Interprets factors in the factors string which can consist of: * constant: interpreted as the constant value, * linear_warmup: interpreted as linear warmup until warmup_steps, * rsqrt_decay: divide by square root of max(step, warmup_steps) * decay_every: Every k steps decay the learning rate by decay_factor. Args: history: the history of training and evaluation (History object). factors: a string with factors separated by "*" that defines the schedule. constant: float, the starting constant for the learning rate schedule. warmup_steps: how many steps to warm up for in the warmup schedule. decay_factor: The amount to decay the learning rate by. steps_per_decay: How often to decay the learning rate. Returns: a function learning_rate(step): float -> float, the step-dependent lr. ] <ast.Delete object at 0x7da1b203f6d0> variable[cache_args] assign[=] tuple[[<ast.Name object at 0x7da1b203f1c0>, <ast.Name object at 0x7da1b203eb90>, <ast.Name object at 0x7da1b203fb50>]] if compare[name[cache_args] in name[_memoized_multifactor_schedules]] begin[:] return[call[name[_memoized_multifactor_schedules]][name[cache_args]]] variable[factors] assign[=] <ast.ListComp object at 0x7da1b203f010> def function[learning_rate, parameter[step]]: constant[Step to learning rate function.] variable[ret] assign[=] constant[1.0] for taget[name[name]] in starred[name[factors]] begin[:] if compare[name[name] equal[==] constant[constant]] begin[:] <ast.AugAssign object at 0x7da1b203d0c0> return[name[ret]] call[name[_memoized_multifactor_schedules]][name[cache_args]] assign[=] name[learning_rate] return[name[learning_rate]]
keyword[def] identifier[MultifactorSchedule] ( identifier[history] = keyword[None] , identifier[factors] = literal[string] , identifier[constant] = literal[int] , identifier[warmup_steps] = literal[int] , identifier[decay_factor] = literal[int] , identifier[steps_per_decay] = literal[int] ): literal[string] keyword[del] identifier[history] identifier[cache_args] =( identifier[factors] , identifier[constant] , identifier[warmup_steps] ) keyword[if] identifier[cache_args] keyword[in] identifier[_memoized_multifactor_schedules] : keyword[return] identifier[_memoized_multifactor_schedules] [ identifier[cache_args] ] identifier[factors] =[ identifier[n] . identifier[strip] () keyword[for] identifier[n] keyword[in] identifier[factors] . identifier[split] ( literal[string] )] keyword[def] identifier[learning_rate] ( identifier[step] ): literal[string] identifier[ret] = literal[int] keyword[for] identifier[name] keyword[in] identifier[factors] : keyword[if] identifier[name] == literal[string] : identifier[ret] *= identifier[constant] keyword[elif] identifier[name] == literal[string] : identifier[ret] *= identifier[np] . identifier[minimum] ( literal[int] , identifier[step] / identifier[warmup_steps] ) keyword[elif] identifier[name] == literal[string] : identifier[ret] /= identifier[np] . identifier[sqrt] ( identifier[np] . identifier[maximum] ( identifier[step] , identifier[warmup_steps] )) keyword[elif] identifier[name] == literal[string] : identifier[ret] *=( identifier[decay_factor] **( identifier[step] // identifier[steps_per_decay] )) keyword[else] : keyword[raise] identifier[ValueError] ( literal[string] % identifier[name] ) keyword[return] identifier[ret] identifier[_memoized_multifactor_schedules] [ identifier[cache_args] ]= identifier[learning_rate] keyword[return] identifier[learning_rate]
def MultifactorSchedule(history=None, factors='constant * linear_warmup * rsqrt_decay', constant=0.1, warmup_steps=100, decay_factor=0.5, steps_per_decay=20000): """Factor-based learning rate schedule. Interprets factors in the factors string which can consist of: * constant: interpreted as the constant value, * linear_warmup: interpreted as linear warmup until warmup_steps, * rsqrt_decay: divide by square root of max(step, warmup_steps) * decay_every: Every k steps decay the learning rate by decay_factor. Args: history: the history of training and evaluation (History object). factors: a string with factors separated by "*" that defines the schedule. constant: float, the starting constant for the learning rate schedule. warmup_steps: how many steps to warm up for in the warmup schedule. decay_factor: The amount to decay the learning rate by. steps_per_decay: How often to decay the learning rate. Returns: a function learning_rate(step): float -> float, the step-dependent lr. """ del history cache_args = (factors, constant, warmup_steps) if cache_args in _memoized_multifactor_schedules: return _memoized_multifactor_schedules[cache_args] # depends on [control=['if'], data=['cache_args', '_memoized_multifactor_schedules']] factors = [n.strip() for n in factors.split('*')] def learning_rate(step): # pylint: disable=invalid-name 'Step to learning rate function.' ret = 1.0 for name in factors: if name == 'constant': ret *= constant # depends on [control=['if'], data=[]] elif name == 'linear_warmup': ret *= np.minimum(1.0, step / warmup_steps) # depends on [control=['if'], data=[]] elif name == 'rsqrt_decay': ret /= np.sqrt(np.maximum(step, warmup_steps)) # depends on [control=['if'], data=[]] elif name == 'decay_every': ret *= decay_factor ** (step // steps_per_decay) # depends on [control=['if'], data=[]] else: raise ValueError('Unknown factor %s.' % name) # depends on [control=['for'], data=['name']] return ret _memoized_multifactor_schedules[cache_args] = learning_rate return learning_rate
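Usage sketch, evaluated by hand to show how the factors compose (assumes the module context above, with numpy imported as np and the memoization dict defined):

lr = MultifactorSchedule(factors='constant * linear_warmup * rsqrt_decay',
                         constant=0.1, warmup_steps=100)
print(lr(10))   # 0.1 * (10/100) / sqrt(100) = 0.001  (still warming up)
print(lr(400))  # 0.1 * 1.0      / sqrt(400) = 0.005  (past warmup)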
def _create(archive, compression, cmd, format, verbosity, filenames): """Create an LZMA or XZ archive with the lzma Python module.""" if len(filenames) > 1: raise util.PatoolError('multi-file compression not supported in Python lzma') try: with lzma.LZMAFile(archive, mode='wb', **_get_lzma_options(format, preset=9)) as lzmafile: filename = filenames[0] with open(filename, 'rb') as srcfile: data = srcfile.read(READ_SIZE_BYTES) while data: lzmafile.write(data) data = srcfile.read(READ_SIZE_BYTES) except Exception as err: msg = "error creating %s: %s" % (archive, err) raise util.PatoolError(msg) return None
def function[_create, parameter[archive, compression, cmd, format, verbosity, filenames]]: constant[Create an LZMA or XZ archive with the lzma Python module.] if compare[call[name[len], parameter[name[filenames]]] greater[>] constant[1]] begin[:] <ast.Raise object at 0x7da1b07ac3d0> <ast.Try object at 0x7da1b07ae6e0> return[constant[None]]
keyword[def] identifier[_create] ( identifier[archive] , identifier[compression] , identifier[cmd] , identifier[format] , identifier[verbosity] , identifier[filenames] ): literal[string] keyword[if] identifier[len] ( identifier[filenames] )> literal[int] : keyword[raise] identifier[util] . identifier[PatoolError] ( literal[string] ) keyword[try] : keyword[with] identifier[lzma] . identifier[LZMAFile] ( identifier[archive] , identifier[mode] = literal[string] ,** identifier[_get_lzma_options] ( identifier[format] , identifier[preset] = literal[int] )) keyword[as] identifier[lzmafile] : identifier[filename] = identifier[filenames] [ literal[int] ] keyword[with] identifier[open] ( identifier[filename] , literal[string] ) keyword[as] identifier[srcfile] : identifier[data] = identifier[srcfile] . identifier[read] ( identifier[READ_SIZE_BYTES] ) keyword[while] identifier[data] : identifier[lzmafile] . identifier[write] ( identifier[data] ) identifier[data] = identifier[srcfile] . identifier[read] ( identifier[READ_SIZE_BYTES] ) keyword[except] identifier[Exception] keyword[as] identifier[err] : identifier[msg] = literal[string] %( identifier[archive] , identifier[err] ) keyword[raise] identifier[util] . identifier[PatoolError] ( identifier[msg] ) keyword[return] keyword[None]
def _create(archive, compression, cmd, format, verbosity, filenames): """Create an LZMA or XZ archive with the lzma Python module.""" if len(filenames) > 1: raise util.PatoolError('multi-file compression not supported in Python lzma') # depends on [control=['if'], data=[]] try: with lzma.LZMAFile(archive, mode='wb', **_get_lzma_options(format, preset=9)) as lzmafile: filename = filenames[0] with open(filename, 'rb') as srcfile: data = srcfile.read(READ_SIZE_BYTES) while data: lzmafile.write(data) data = srcfile.read(READ_SIZE_BYTES) # depends on [control=['while'], data=[]] # depends on [control=['with'], data=['srcfile']] # depends on [control=['with'], data=['lzmafile']] # depends on [control=['try'], data=[]] except Exception as err: msg = 'error creating %s: %s' % (archive, err) raise util.PatoolError(msg) # depends on [control=['except'], data=['err']] return None
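`_get_lzma_options` lives elsewhere in the module; a plausible sketch of it, mapping the format string to lzma keyword arguments (the exact mapping is an assumption):

import lzma

def _get_lzma_options(format, preset=None):
    # 'xz' -> modern .xz container; anything else -> legacy .lzma stream.
    if format == 'xz':
        return {'format': lzma.FORMAT_XZ, 'preset': preset}
    return {'format': lzma.FORMAT_ALONE, 'preset': preset}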
def wait(self, timeout=None):
        """Wait for the process to terminate and, if the process is a child
        of the current one, also return its exit code, else None.
        """
        if timeout is not None and not timeout >= 0:
            raise ValueError("timeout must be a non-negative number")
        return self._platform_impl.process_wait(timeout)
def function[wait, parameter[self, timeout]]:
    constant[Wait for the process to terminate and, if the process is a child of the current one, also return its exit code, else None.
    ]
    if <ast.BoolOp object at 0x7da1b26aea40> begin[:]
    <ast.Raise object at 0x7da1b26ae9b0>
    return[call[name[self]._platform_impl.process_wait, parameter[name[timeout]]]]
keyword[def] identifier[wait] ( identifier[self] , identifier[timeout] = keyword[None] ): literal[string] keyword[if] identifier[timeout] keyword[is] keyword[not] keyword[None] keyword[and] keyword[not] identifier[timeout] >= literal[int] : keyword[raise] identifier[ValueError] ( literal[string] ) keyword[return] identifier[self] . identifier[_platform_impl] . identifier[process_wait] ( identifier[timeout] )
def wait(self, timeout=None):
        """Wait for the process to terminate and, if the process is a child
        of the current one, also return its exit code, else None.
        """
        if timeout is not None and (not timeout >= 0):
            raise ValueError('timeout must be a non-negative number') # depends on [control=['if'], data=[]]
        return self._platform_impl.process_wait(timeout)
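The timeout contract here mirrors the standard library's child-process wait; a runnable analogue with subprocess, assuming a POSIX system with a sleep binary:

import subprocess

p = subprocess.Popen(['sleep', '0'])
print(p.wait(timeout=5))  # 0 -- exit code of a direct child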
def get_end_cursor(self, project_name, logstore_name, shard_id):
        """ Get end cursor from log service for batch pull logs
        An unsuccessful operation will cause a LogException.

        :type project_name: string
        :param project_name: the Project name

        :type logstore_name: string
        :param logstore_name: the logstore name

        :type shard_id: int
        :param shard_id: the shard id

        :return: GetLogsResponse

        :raise: LogException
        """
        return self.get_cursor(project_name, logstore_name, shard_id, "end")
def function[get_end_cursor, parameter[self, project_name, logstore_name, shard_id]]:
    constant[ Get end cursor from log service for batch pull logs
        An unsuccessful operation will cause a LogException.

        :type project_name: string
        :param project_name: the Project name

        :type logstore_name: string
        :param logstore_name: the logstore name

        :type shard_id: int
        :param shard_id: the shard id

        :return: GetLogsResponse

        :raise: LogException
        ]
    return[call[name[self].get_cursor, parameter[name[project_name], name[logstore_name], name[shard_id], constant[end]]]]
keyword[def] identifier[get_end_cursor] ( identifier[self] , identifier[project_name] , identifier[logstore_name] , identifier[shard_id] ): literal[string] keyword[return] identifier[self] . identifier[get_cursor] ( identifier[project_name] , identifier[logstore_name] , identifier[shard_id] , literal[string] )
def get_end_cursor(self, project_name, logstore_name, shard_id):
    """ Get end cursor from log service for batch pull logs
        An unsuccessful operation will cause a LogException.

        :type project_name: string
        :param project_name: the Project name

        :type logstore_name: string
        :param logstore_name: the logstore name

        :type shard_id: int
        :param shard_id: the shard id

        :return: GetLogsResponse

        :raise: LogException
        """
    return self.get_cursor(project_name, logstore_name, shard_id, 'end')
def uninitialize_ui(self): """ Uninitializes the Component ui. :return: Method success. :rtype: bool """ LOGGER.debug("> Uninitializing '{0}' Component ui.".format(self.__class__.__name__)) # Signals / Slots. self.Port_spinBox.valueChanged.disconnect(self.__Port_spinBox__valueChanged) self.Autostart_TCP_Server_checkBox.stateChanged.disconnect( self.__Autostart_TCP_Server_checkBox__stateChanged) self.Start_TCP_Server_pushButton.clicked.disconnect(self.__Start_TCP_Server_pushButton__clicked) self.Stop_TCP_Server_pushButton.clicked.disconnect(self.__Stop_TCP_Server_pushButton__clicked) self.initialized_ui = False return True
def function[uninitialize_ui, parameter[self]]: constant[ Uninitializes the Component ui. :return: Method success. :rtype: bool ] call[name[LOGGER].debug, parameter[call[constant[> Uninitializing '{0}' Component ui.].format, parameter[name[self].__class__.__name__]]]] call[name[self].Port_spinBox.valueChanged.disconnect, parameter[name[self].__Port_spinBox__valueChanged]] call[name[self].Autostart_TCP_Server_checkBox.stateChanged.disconnect, parameter[name[self].__Autostart_TCP_Server_checkBox__stateChanged]] call[name[self].Start_TCP_Server_pushButton.clicked.disconnect, parameter[name[self].__Start_TCP_Server_pushButton__clicked]] call[name[self].Stop_TCP_Server_pushButton.clicked.disconnect, parameter[name[self].__Stop_TCP_Server_pushButton__clicked]] name[self].initialized_ui assign[=] constant[False] return[constant[True]]
keyword[def] identifier[uninitialize_ui] ( identifier[self] ): literal[string] identifier[LOGGER] . identifier[debug] ( literal[string] . identifier[format] ( identifier[self] . identifier[__class__] . identifier[__name__] )) identifier[self] . identifier[Port_spinBox] . identifier[valueChanged] . identifier[disconnect] ( identifier[self] . identifier[__Port_spinBox__valueChanged] ) identifier[self] . identifier[Autostart_TCP_Server_checkBox] . identifier[stateChanged] . identifier[disconnect] ( identifier[self] . identifier[__Autostart_TCP_Server_checkBox__stateChanged] ) identifier[self] . identifier[Start_TCP_Server_pushButton] . identifier[clicked] . identifier[disconnect] ( identifier[self] . identifier[__Start_TCP_Server_pushButton__clicked] ) identifier[self] . identifier[Stop_TCP_Server_pushButton] . identifier[clicked] . identifier[disconnect] ( identifier[self] . identifier[__Stop_TCP_Server_pushButton__clicked] ) identifier[self] . identifier[initialized_ui] = keyword[False] keyword[return] keyword[True]
def uninitialize_ui(self): """ Uninitializes the Component ui. :return: Method success. :rtype: bool """ LOGGER.debug("> Uninitializing '{0}' Component ui.".format(self.__class__.__name__)) # Signals / Slots. self.Port_spinBox.valueChanged.disconnect(self.__Port_spinBox__valueChanged) self.Autostart_TCP_Server_checkBox.stateChanged.disconnect(self.__Autostart_TCP_Server_checkBox__stateChanged) self.Start_TCP_Server_pushButton.clicked.disconnect(self.__Start_TCP_Server_pushButton__clicked) self.Stop_TCP_Server_pushButton.clicked.disconnect(self.__Stop_TCP_Server_pushButton__clicked) self.initialized_ui = False return True
def shutdown(exiting_interpreter=False): """Disconnect the worker, and terminate processes started by ray.init(). This will automatically run at the end when a Python process that uses Ray exits. It is ok to run this twice in a row. The primary use case for this function is to cleanup state between tests. Note that this will clear any remote function definitions, actor definitions, and existing actors, so if you wish to use any previously defined remote functions or actors after calling ray.shutdown(), then you need to redefine them. If they were defined in an imported module, then you will need to reload the module. Args: exiting_interpreter (bool): True if this is called by the atexit hook and false otherwise. If we are exiting the interpreter, we will wait a little while to print any extra error messages. """ if exiting_interpreter and global_worker.mode == SCRIPT_MODE: # This is a duration to sleep before shutting down everything in order # to make sure that log messages finish printing. time.sleep(0.5) disconnect() # Disconnect global state from GCS. global_state.disconnect() # Shut down the Ray processes. global _global_node if _global_node is not None: _global_node.kill_all_processes(check_alive=False, allow_graceful=True) _global_node = None global_worker.set_mode(None)
def function[shutdown, parameter[exiting_interpreter]]: constant[Disconnect the worker, and terminate processes started by ray.init(). This will automatically run at the end when a Python process that uses Ray exits. It is ok to run this twice in a row. The primary use case for this function is to cleanup state between tests. Note that this will clear any remote function definitions, actor definitions, and existing actors, so if you wish to use any previously defined remote functions or actors after calling ray.shutdown(), then you need to redefine them. If they were defined in an imported module, then you will need to reload the module. Args: exiting_interpreter (bool): True if this is called by the atexit hook and false otherwise. If we are exiting the interpreter, we will wait a little while to print any extra error messages. ] if <ast.BoolOp object at 0x7da20c7c9330> begin[:] call[name[time].sleep, parameter[constant[0.5]]] call[name[disconnect], parameter[]] call[name[global_state].disconnect, parameter[]] <ast.Global object at 0x7da20c7ca680> if compare[name[_global_node] is_not constant[None]] begin[:] call[name[_global_node].kill_all_processes, parameter[]] variable[_global_node] assign[=] constant[None] call[name[global_worker].set_mode, parameter[constant[None]]]
keyword[def] identifier[shutdown] ( identifier[exiting_interpreter] = keyword[False] ): literal[string] keyword[if] identifier[exiting_interpreter] keyword[and] identifier[global_worker] . identifier[mode] == identifier[SCRIPT_MODE] : identifier[time] . identifier[sleep] ( literal[int] ) identifier[disconnect] () identifier[global_state] . identifier[disconnect] () keyword[global] identifier[_global_node] keyword[if] identifier[_global_node] keyword[is] keyword[not] keyword[None] : identifier[_global_node] . identifier[kill_all_processes] ( identifier[check_alive] = keyword[False] , identifier[allow_graceful] = keyword[True] ) identifier[_global_node] = keyword[None] identifier[global_worker] . identifier[set_mode] ( keyword[None] )
def shutdown(exiting_interpreter=False): """Disconnect the worker, and terminate processes started by ray.init(). This will automatically run at the end when a Python process that uses Ray exits. It is ok to run this twice in a row. The primary use case for this function is to cleanup state between tests. Note that this will clear any remote function definitions, actor definitions, and existing actors, so if you wish to use any previously defined remote functions or actors after calling ray.shutdown(), then you need to redefine them. If they were defined in an imported module, then you will need to reload the module. Args: exiting_interpreter (bool): True if this is called by the atexit hook and false otherwise. If we are exiting the interpreter, we will wait a little while to print any extra error messages. """ if exiting_interpreter and global_worker.mode == SCRIPT_MODE: # This is a duration to sleep before shutting down everything in order # to make sure that log messages finish printing. time.sleep(0.5) # depends on [control=['if'], data=[]] disconnect() # Disconnect global state from GCS. global_state.disconnect() # Shut down the Ray processes. global _global_node if _global_node is not None: _global_node.kill_all_processes(check_alive=False, allow_graceful=True) _global_node = None # depends on [control=['if'], data=['_global_node']] global_worker.set_mode(None)
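Typical test-isolation usage of the pair above; left as a commented sketch since it needs a live Ray installation:

# import ray
# ray.init(num_cpus=1)
# ...exercise the code under test...
# ray.shutdown()  # remote functions/actors are cleared; re-define them after this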
def get_default_redirect_uri(self, client_id, request, *args, **kwargs): """Default redirect_uri for the given client.""" request.client = request.client or self._clientgetter(client_id) redirect_uri = request.client.default_redirect_uri log.debug('Found default redirect uri %r', redirect_uri) return redirect_uri
def function[get_default_redirect_uri, parameter[self, client_id, request]]: constant[Default redirect_uri for the given client.] name[request].client assign[=] <ast.BoolOp object at 0x7da1b0247640> variable[redirect_uri] assign[=] name[request].client.default_redirect_uri call[name[log].debug, parameter[constant[Found default redirect uri %r], name[redirect_uri]]] return[name[redirect_uri]]
keyword[def] identifier[get_default_redirect_uri] ( identifier[self] , identifier[client_id] , identifier[request] ,* identifier[args] ,** identifier[kwargs] ): literal[string] identifier[request] . identifier[client] = identifier[request] . identifier[client] keyword[or] identifier[self] . identifier[_clientgetter] ( identifier[client_id] ) identifier[redirect_uri] = identifier[request] . identifier[client] . identifier[default_redirect_uri] identifier[log] . identifier[debug] ( literal[string] , identifier[redirect_uri] ) keyword[return] identifier[redirect_uri]
def get_default_redirect_uri(self, client_id, request, *args, **kwargs): """Default redirect_uri for the given client.""" request.client = request.client or self._clientgetter(client_id) redirect_uri = request.client.default_redirect_uri log.debug('Found default redirect uri %r', redirect_uri) return redirect_uri
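A hypothetical `_clientgetter` backend satisfying the contract this method relies on: given a client_id, return an object exposing `default_redirect_uri`. Both names below are illustrative:

class Client:
    default_redirect_uri = 'https://client.example.com/callback'

def clientgetter(client_id):
    # Real providers would look the client up in persistent storage.
    return Client()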