Dataset columns (all string-valued; lengths as reported by the dataset viewer):

- `code`: string, 75 to 104k characters
- `code_sememe`: string, 47 to 309k characters
- `token_type`: string, 215 to 214k characters
- `code_dependency`: string, 75 to 155k characters
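The column statistics above follow the Hugging Face dataset-viewer layout, so the rows shown below can presumably be loaded with the `datasets` library. Here is a minimal loading sketch, assuming the dataset is hosted on the Hub; the repository ID below is a hypothetical placeholder, not the real one.

```python
# A minimal sketch, assuming this dataset is hosted on the Hugging Face Hub.
# "user/code-sememe-dataset" is a hypothetical repository ID used for illustration.
from datasets import load_dataset

ds = load_dataset("user/code-sememe-dataset", split="train")

row = ds[0]
# Each row carries four string fields describing the same function:
print(row["code"][:200])             # original Python source
print(row["code_sememe"][:200])      # bracketed, AST-like structural rendering
print(row["token_type"][:200])       # flat token stream with keyword/identifier/literal tags
print(row["code_dependency"][:200])  # source annotated with control-flow dependency comments
```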
def get_certificate(self, cert_id, **kwargs): # noqa: E501 """Get trusted certificate by ID. # noqa: E501 An endpoint for retrieving a trusted certificate by ID. **Example usage:** `curl https://api.us-east-1.mbedcloud.com/v3/trusted-certificates/{cert-id} -H 'Authorization: Bearer API_KEY'` # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass asynchronous=True >>> thread = api.get_certificate(cert_id, asynchronous=True) >>> result = thread.get() :param asynchronous bool :param str cert_id: The ID of the trusted certificate to be retrieved. (required) :return: TrustedCertificateResp If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('asynchronous'): return self.get_certificate_with_http_info(cert_id, **kwargs) # noqa: E501 else: (data) = self.get_certificate_with_http_info(cert_id, **kwargs) # noqa: E501 return data
def function[get_certificate, parameter[self, cert_id]]: constant[Get trusted certificate by ID. # noqa: E501 An endpoint for retrieving a trusted certificate by ID. **Example usage:** `curl https://api.us-east-1.mbedcloud.com/v3/trusted-certificates/{cert-id} -H 'Authorization: Bearer API_KEY'` # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass asynchronous=True >>> thread = api.get_certificate(cert_id, asynchronous=True) >>> result = thread.get() :param asynchronous bool :param str cert_id: The ID of the trusted certificate to be retrieved. (required) :return: TrustedCertificateResp If the method is called asynchronously, returns the request thread. ] call[name[kwargs]][constant[_return_http_data_only]] assign[=] constant[True] if call[name[kwargs].get, parameter[constant[asynchronous]]] begin[:] return[call[name[self].get_certificate_with_http_info, parameter[name[cert_id]]]]
keyword[def] identifier[get_certificate] ( identifier[self] , identifier[cert_id] ,** identifier[kwargs] ): literal[string] identifier[kwargs] [ literal[string] ]= keyword[True] keyword[if] identifier[kwargs] . identifier[get] ( literal[string] ): keyword[return] identifier[self] . identifier[get_certificate_with_http_info] ( identifier[cert_id] ,** identifier[kwargs] ) keyword[else] : ( identifier[data] )= identifier[self] . identifier[get_certificate_with_http_info] ( identifier[cert_id] ,** identifier[kwargs] ) keyword[return] identifier[data]
def get_certificate(self, cert_id, **kwargs): # noqa: E501 "Get trusted certificate by ID. # noqa: E501\n\n An endpoint for retrieving a trusted certificate by ID. **Example usage:** `curl https://api.us-east-1.mbedcloud.com/v3/trusted-certificates/{cert-id} -H 'Authorization: Bearer API_KEY'` # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass asynchronous=True\n >>> thread = api.get_certificate(cert_id, asynchronous=True)\n >>> result = thread.get()\n\n :param asynchronous bool\n :param str cert_id: The ID of the trusted certificate to be retrieved. (required)\n :return: TrustedCertificateResp\n If the method is called asynchronously,\n returns the request thread.\n " kwargs['_return_http_data_only'] = True if kwargs.get('asynchronous'): return self.get_certificate_with_http_info(cert_id, **kwargs) # noqa: E501 # depends on [control=['if'], data=[]] else: data = self.get_certificate_with_http_info(cert_id, **kwargs) # noqa: E501 return data
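The `code_sememe` column appears to be a bracketed, AST-like rendering of the source in the `code` column, as the first row above illustrates. The sketch below shows how a similar rendering could be derived with Python's standard-library `ast` module; the bracket style and the set of handled node types are illustrative assumptions, not the dataset's actual pipeline.

```python
import ast

# Shortened version of the get_certificate row above, used as input for the demo.
SRC = '''
def get_certificate(self, cert_id, **kwargs):
    kwargs['_return_http_data_only'] = True
    if kwargs.get('asynchronous'):
        return self.get_certificate_with_http_info(cert_id, **kwargs)
'''

def render(node):
    """Render an AST node in a compact bracketed style (illustrative, not exhaustive)."""
    if isinstance(node, ast.FunctionDef):
        params = ', '.join(a.arg for a in node.args.args)
        body = ' '.join(render(n) for n in node.body)
        return f'function[{node.name}, parameter[{params}]]: {body}'
    if isinstance(node, ast.If):
        return 'if ' + render(node.test) + ' begin[:] ' + ' '.join(render(n) for n in node.body)
    if isinstance(node, ast.Return):
        return f'return[{render(node.value)}]'
    if isinstance(node, ast.Assign):
        return f'{render(node.targets[0])} assign[=] {render(node.value)}'
    if isinstance(node, ast.Call):
        args = ', '.join(render(a) for a in node.args)
        return f'call[{render(node.func)}, parameter[{args}]]'
    if isinstance(node, ast.Attribute):
        return f'{render(node.value)}.{node.attr}'
    if isinstance(node, ast.Subscript):
        return f'{render(node.value)}[{render(node.slice)}]'
    if isinstance(node, ast.Name):
        return f'name[{node.id}]'
    if isinstance(node, ast.Constant):
        return f'constant[{node.value!r}]'
    return type(node).__name__  # fallback: just name the node type

tree = ast.parse(SRC)
print(render(tree.body[0]))
```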
def disable_logging(lines, min_level_value, max_level_value): """Disables logging statements in these lines whose logging level falls between the specified minimum and maximum levels.""" output = '' while lines: line = lines[0] ret = RE_LOGGING_START.match(line) if not ret: # no logging statement here, so just leave the line as-is and keep going output += line lines = lines[1:] else: # a logging call has started: find all the lines it includes and those it does not logging_lines, remaining_lines = split_call(lines) lines = remaining_lines logging_stmt = ''.join(logging_lines) # replace the logging statement if its level falls b/w min and max if not check_level(logging_stmt, False, min_level_value, max_level_value): output += logging_stmt else: # comment out this logging statement and replace it with pass prefix_ws = ret.group(1) pass_stmt = prefix_ws + PASS_LINE_CONTENTS commented_out_logging_lines = comment_lines(logging_lines) new_lines = pass_stmt + commented_out_logging_lines logging.info('replacing:\n%s\nwith this:\n%s' % (logging_stmt.rstrip(), new_lines.rstrip())) output += new_lines return output
def function[disable_logging, parameter[lines, min_level_value, max_level_value]]: constant[Disables logging statements in these lines whose logging level falls between the specified minimum and maximum levels.] variable[output] assign[=] constant[] while name[lines] begin[:] variable[line] assign[=] call[name[lines]][constant[0]] variable[ret] assign[=] call[name[RE_LOGGING_START].match, parameter[name[line]]] if <ast.UnaryOp object at 0x7da1b1df9f90> begin[:] <ast.AugAssign object at 0x7da1b1df9f00> variable[lines] assign[=] call[name[lines]][<ast.Slice object at 0x7da1b1dfa740>] return[name[output]]
keyword[def] identifier[disable_logging] ( identifier[lines] , identifier[min_level_value] , identifier[max_level_value] ): literal[string] identifier[output] = literal[string] keyword[while] identifier[lines] : identifier[line] = identifier[lines] [ literal[int] ] identifier[ret] = identifier[RE_LOGGING_START] . identifier[match] ( identifier[line] ) keyword[if] keyword[not] identifier[ret] : identifier[output] += identifier[line] identifier[lines] = identifier[lines] [ literal[int] :] keyword[else] : identifier[logging_lines] , identifier[remaining_lines] = identifier[split_call] ( identifier[lines] ) identifier[lines] = identifier[remaining_lines] identifier[logging_stmt] = literal[string] . identifier[join] ( identifier[logging_lines] ) keyword[if] keyword[not] identifier[check_level] ( identifier[logging_stmt] , keyword[False] , identifier[min_level_value] , identifier[max_level_value] ): identifier[output] += identifier[logging_stmt] keyword[else] : identifier[prefix_ws] = identifier[ret] . identifier[group] ( literal[int] ) identifier[pass_stmt] = identifier[prefix_ws] + identifier[PASS_LINE_CONTENTS] identifier[commented_out_logging_lines] = identifier[comment_lines] ( identifier[logging_lines] ) identifier[new_lines] = identifier[pass_stmt] + identifier[commented_out_logging_lines] identifier[logging] . identifier[info] ( literal[string] %( identifier[logging_stmt] . identifier[rstrip] (), identifier[new_lines] . identifier[rstrip] ())) identifier[output] += identifier[new_lines] keyword[return] identifier[output]
def disable_logging(lines, min_level_value, max_level_value): """Disables logging statements in these lines whose logging level falls between the specified minimum and maximum levels.""" output = '' while lines: line = lines[0] ret = RE_LOGGING_START.match(line) if not ret: # no logging statement here, so just leave the line as-is and keep going output += line lines = lines[1:] # depends on [control=['if'], data=[]] else: # a logging call has started: find all the lines it includes and those it does not (logging_lines, remaining_lines) = split_call(lines) lines = remaining_lines logging_stmt = ''.join(logging_lines) # replace the logging statement if its level falls b/w min and max if not check_level(logging_stmt, False, min_level_value, max_level_value): output += logging_stmt # depends on [control=['if'], data=[]] else: # comment out this logging statement and replace it with pass prefix_ws = ret.group(1) pass_stmt = prefix_ws + PASS_LINE_CONTENTS commented_out_logging_lines = comment_lines(logging_lines) new_lines = pass_stmt + commented_out_logging_lines logging.info('replacing:\n%s\nwith this:\n%s' % (logging_stmt.rstrip(), new_lines.rstrip())) output += new_lines # depends on [control=['while'], data=[]] return output
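The `code_dependency` column repeats the source with a trailing `# depends on [control=..., data=...]` annotation after each controlled block, as in the row above. Below is a minimal sketch for stripping those annotations back to plain Python; the annotation grammar is inferred from the visible examples, not from a published spec.

```python
import re

# Pattern inferred from the examples above; an assumption, not an official grammar.
DEP_COMMENT = re.compile(r"\s*# depends on \[control=\[[^\]]*\](?:, data=\[[^\]]*\])?\]")

def strip_dependency_comments(code_dependency: str) -> str:
    """Remove the control-flow dependency annotations, leaving ordinary Python."""
    return "\n".join(DEP_COMMENT.sub("", line) for line in code_dependency.splitlines())

annotated = "    lines = lines[1:] # depends on [control=['if'], data=[]]"
print(strip_dependency_comments(annotated))  # -> "    lines = lines[1:]"
```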
def distributeParams(self,param_name,param_count,center,spread,dist_type): ''' Distributes heterogeneous values of one parameter to the AgentTypes in self.agents. Parameters ---------- param_name : string Name of the parameter to be assigned. param_count : int Number of different values the parameter will take on. center : float A measure of centrality for the distribution of the parameter. spread : float A measure of spread or diffusion for the distribution of the parameter. dist_type : string The type of distribution to be used. Can be "lognormal" or "uniform" (can expand). Returns ------- None ''' # Get a list of discrete values for the parameter if dist_type == 'uniform': # If uniform, center is middle of distribution, spread is distance to either edge param_dist = approxUniform(N=param_count,bot=center-spread,top=center+spread) elif dist_type == 'lognormal': # If lognormal, center is the mean and spread is the standard deviation (in log) tail_N = 3 param_dist = approxLognormal(N=param_count-tail_N,mu=np.log(center)-0.5*spread**2,sigma=spread,tail_N=tail_N,tail_bound=[0.0,0.9], tail_order=np.e) # Distribute the parameters to the various types, assigning consecutive types the same # value if there are more types than values replication_factor = len(self.agents) // param_count # Note: the double division is intenger division in Python 3 and 2.7, this makes it explicit j = 0 b = 0 while j < len(self.agents): for n in range(replication_factor): self.agents[j](AgentCount = int(self.Population*param_dist[0][b]*self.TypeWeight[n])) exec('self.agents[j](' + param_name + '= param_dist[1][b])') j += 1 b += 1
def function[distributeParams, parameter[self, param_name, param_count, center, spread, dist_type]]: constant[ Distributes heterogeneous values of one parameter to the AgentTypes in self.agents. Parameters ---------- param_name : string Name of the parameter to be assigned. param_count : int Number of different values the parameter will take on. center : float A measure of centrality for the distribution of the parameter. spread : float A measure of spread or diffusion for the distribution of the parameter. dist_type : string The type of distribution to be used. Can be "lognormal" or "uniform" (can expand). Returns ------- None ] if compare[name[dist_type] equal[==] constant[uniform]] begin[:] variable[param_dist] assign[=] call[name[approxUniform], parameter[]] variable[replication_factor] assign[=] binary_operation[call[name[len], parameter[name[self].agents]] <ast.FloorDiv object at 0x7da2590d6bc0> name[param_count]] variable[j] assign[=] constant[0] variable[b] assign[=] constant[0] while compare[name[j] less[<] call[name[len], parameter[name[self].agents]]] begin[:] for taget[name[n]] in starred[call[name[range], parameter[name[replication_factor]]]] begin[:] call[call[name[self].agents][name[j]], parameter[]] call[name[exec], parameter[binary_operation[binary_operation[constant[self.agents[j](] + name[param_name]] + constant[= param_dist[1][b])]]]] <ast.AugAssign object at 0x7da2043464d0> <ast.AugAssign object at 0x7da204346470>
keyword[def] identifier[distributeParams] ( identifier[self] , identifier[param_name] , identifier[param_count] , identifier[center] , identifier[spread] , identifier[dist_type] ): literal[string] keyword[if] identifier[dist_type] == literal[string] : identifier[param_dist] = identifier[approxUniform] ( identifier[N] = identifier[param_count] , identifier[bot] = identifier[center] - identifier[spread] , identifier[top] = identifier[center] + identifier[spread] ) keyword[elif] identifier[dist_type] == literal[string] : identifier[tail_N] = literal[int] identifier[param_dist] = identifier[approxLognormal] ( identifier[N] = identifier[param_count] - identifier[tail_N] , identifier[mu] = identifier[np] . identifier[log] ( identifier[center] )- literal[int] * identifier[spread] ** literal[int] , identifier[sigma] = identifier[spread] , identifier[tail_N] = identifier[tail_N] , identifier[tail_bound] =[ literal[int] , literal[int] ], identifier[tail_order] = identifier[np] . identifier[e] ) identifier[replication_factor] = identifier[len] ( identifier[self] . identifier[agents] )// identifier[param_count] identifier[j] = literal[int] identifier[b] = literal[int] keyword[while] identifier[j] < identifier[len] ( identifier[self] . identifier[agents] ): keyword[for] identifier[n] keyword[in] identifier[range] ( identifier[replication_factor] ): identifier[self] . identifier[agents] [ identifier[j] ]( identifier[AgentCount] = identifier[int] ( identifier[self] . identifier[Population] * identifier[param_dist] [ literal[int] ][ identifier[b] ]* identifier[self] . identifier[TypeWeight] [ identifier[n] ])) identifier[exec] ( literal[string] + identifier[param_name] + literal[string] ) identifier[j] += literal[int] identifier[b] += literal[int]
def distributeParams(self, param_name, param_count, center, spread, dist_type): """ Distributes heterogeneous values of one parameter to the AgentTypes in self.agents. Parameters ---------- param_name : string Name of the parameter to be assigned. param_count : int Number of different values the parameter will take on. center : float A measure of centrality for the distribution of the parameter. spread : float A measure of spread or diffusion for the distribution of the parameter. dist_type : string The type of distribution to be used. Can be "lognormal" or "uniform" (can expand). Returns ------- None """ # Get a list of discrete values for the parameter if dist_type == 'uniform': # If uniform, center is middle of distribution, spread is distance to either edge param_dist = approxUniform(N=param_count, bot=center - spread, top=center + spread) # depends on [control=['if'], data=[]] elif dist_type == 'lognormal': # If lognormal, center is the mean and spread is the standard deviation (in log) tail_N = 3 param_dist = approxLognormal(N=param_count - tail_N, mu=np.log(center) - 0.5 * spread ** 2, sigma=spread, tail_N=tail_N, tail_bound=[0.0, 0.9], tail_order=np.e) # depends on [control=['if'], data=[]] # Distribute the parameters to the various types, assigning consecutive types the same # value if there are more types than values replication_factor = len(self.agents) // param_count # Note: the double division is intenger division in Python 3 and 2.7, this makes it explicit j = 0 b = 0 while j < len(self.agents): for n in range(replication_factor): self.agents[j](AgentCount=int(self.Population * param_dist[0][b] * self.TypeWeight[n])) exec('self.agents[j](' + param_name + '= param_dist[1][b])') j += 1 # depends on [control=['for'], data=['n']] b += 1 # depends on [control=['while'], data=['j']]
def LEB128toint(LEBinput): ''' Convert unsigned LEB128 hex to integer ''' reversedbytes = hexreverse(LEBinput) binstr = "" for i in range(len(LEBinput) // 2): if i == 0: assert int(reversedbytes[2*i:(2*i + 2)],16) < 128 else: assert int(reversedbytes[2*i:(2*i + 2)],16) >= 128 tempbin = str(bin(int(reversedbytes[2*i:(2*i + 2)],16))) \ .lstrip("0b").replace("b","").replace("L","") \ .replace("'","").replace('"',"") \ .zfill(8) binstr += tempbin[1:] return int(binstr,2)
def function[LEB128toint, parameter[LEBinput]]: constant[ Convert unsigned LEB128 hex to integer ] variable[reversedbytes] assign[=] call[name[hexreverse], parameter[name[LEBinput]]] variable[binstr] assign[=] constant[] for taget[name[i]] in starred[call[name[range], parameter[binary_operation[call[name[len], parameter[name[LEBinput]]] <ast.FloorDiv object at 0x7da2590d6bc0> constant[2]]]]] begin[:] if compare[name[i] equal[==] constant[0]] begin[:] assert[compare[call[name[int], parameter[call[name[reversedbytes]][<ast.Slice object at 0x7da18eb54520>], constant[16]]] less[<] constant[128]]] variable[tempbin] assign[=] call[call[call[call[call[call[call[name[str], parameter[call[name[bin], parameter[call[name[int], parameter[call[name[reversedbytes]][<ast.Slice object at 0x7da1b26ad780>], constant[16]]]]]]].lstrip, parameter[constant[0b]]].replace, parameter[constant[b], constant[]]].replace, parameter[constant[L], constant[]]].replace, parameter[constant['], constant[]]].replace, parameter[constant["], constant[]]].zfill, parameter[constant[8]]] <ast.AugAssign object at 0x7da1b26aff70> return[call[name[int], parameter[name[binstr], constant[2]]]]
keyword[def] identifier[LEB128toint] ( identifier[LEBinput] ): literal[string] identifier[reversedbytes] = identifier[hexreverse] ( identifier[LEBinput] ) identifier[binstr] = literal[string] keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[LEBinput] )// literal[int] ): keyword[if] identifier[i] == literal[int] : keyword[assert] identifier[int] ( identifier[reversedbytes] [ literal[int] * identifier[i] :( literal[int] * identifier[i] + literal[int] )], literal[int] )< literal[int] keyword[else] : keyword[assert] identifier[int] ( identifier[reversedbytes] [ literal[int] * identifier[i] :( literal[int] * identifier[i] + literal[int] )], literal[int] )>= literal[int] identifier[tempbin] = identifier[str] ( identifier[bin] ( identifier[int] ( identifier[reversedbytes] [ literal[int] * identifier[i] :( literal[int] * identifier[i] + literal[int] )], literal[int] ))). identifier[lstrip] ( literal[string] ). identifier[replace] ( literal[string] , literal[string] ). identifier[replace] ( literal[string] , literal[string] ). identifier[replace] ( literal[string] , literal[string] ). identifier[replace] ( literal[string] , literal[string] ). identifier[zfill] ( literal[int] ) identifier[binstr] += identifier[tempbin] [ literal[int] :] keyword[return] identifier[int] ( identifier[binstr] , literal[int] )
def LEB128toint(LEBinput): """ Convert unsigned LEB128 hex to integer """ reversedbytes = hexreverse(LEBinput) binstr = '' for i in range(len(LEBinput) // 2): if i == 0: assert int(reversedbytes[2 * i:2 * i + 2], 16) < 128 # depends on [control=['if'], data=['i']] else: assert int(reversedbytes[2 * i:2 * i + 2], 16) >= 128 tempbin = str(bin(int(reversedbytes[2 * i:2 * i + 2], 16))).lstrip('0b').replace('b', '').replace('L', '').replace("'", '').replace('"', '').zfill(8) binstr += tempbin[1:] # depends on [control=['for'], data=['i']] return int(binstr, 2)
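The `token_type` column renders the same function as a flat token stream in which keywords, identifiers, and literals are tagged while punctuation is kept verbatim. The sketch below shows how such a rendering could be produced with the standard-library `tokenize` module; it mirrors the visible format of the rows above and is not the dataset's actual tokenizer.

```python
import io
import keyword
import tokenize

def to_token_types(source: str) -> str:
    """Tag keywords, identifiers, and literals; keep operators/punctuation as-is."""
    out = []
    for tok in tokenize.generate_tokens(io.StringIO(source).readline):
        if tok.type == tokenize.NAME:
            tag = "keyword" if keyword.iskeyword(tok.string) else "identifier"
            out.append(f"{tag}[{tok.string}]")
        elif tok.type == tokenize.STRING:
            out.append("literal[string]")
        elif tok.type == tokenize.NUMBER:
            out.append("literal[int]")  # the rows above use literal[int] for all numbers
        elif tok.type == tokenize.OP:
            out.append(tok.string)
    return " ".join(out)

print(to_token_types("def f(x):\n    return x + 1\n"))
# -> keyword[def] identifier[f] ( identifier[x] ) : keyword[return] identifier[x] + literal[int]
```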
def fit_from_cfg(cls, choosers, chosen_fname, alternatives, cfgname, outcfgname=None): """ Parameters ---------- choosers : DataFrame A dataframe of rows of agents that have made choices. chosen_fname : string A string indicating the column in the choosers dataframe which gives which alternative the choosers have chosen. alternatives : DataFrame A dataframe of alternatives. It should include the current choices from the choosers dataframe as well as some other alternatives from which to sample. Values in choosers[chosen_fname] should index into the alternatives dataframe. cfgname : string The name of the yaml config file from which to read the discrete choice model. outcfgname : string, optional (default cfgname) The name of the output yaml config file where estimation results are written into. Returns ------- lcm : SegmentedMNLDiscreteChoiceModel which was used to fit """ logger.debug('start: fit from configuration {}'.format(cfgname)) lcm = cls.from_yaml(str_or_buffer=cfgname) lcm.fit(choosers, alternatives, choosers[chosen_fname]) for k, v in lcm._group.models.items(): print("LCM RESULTS FOR SEGMENT %s\n" % str(k)) v.report_fit() outcfgname = outcfgname or cfgname lcm.to_yaml(str_or_buffer=outcfgname) logger.debug('finish: fit into configuration {}'.format(outcfgname)) return lcm
def function[fit_from_cfg, parameter[cls, choosers, chosen_fname, alternatives, cfgname, outcfgname]]: constant[ Parameters ---------- choosers : DataFrame A dataframe of rows of agents that have made choices. chosen_fname : string A string indicating the column in the choosers dataframe which gives which alternative the choosers have chosen. alternatives : DataFrame A dataframe of alternatives. It should include the current choices from the choosers dataframe as well as some other alternatives from which to sample. Values in choosers[chosen_fname] should index into the alternatives dataframe. cfgname : string The name of the yaml config file from which to read the discrete choice model. outcfgname : string, optional (default cfgname) The name of the output yaml config file where estimation results are written into. Returns ------- lcm : SegmentedMNLDiscreteChoiceModel which was used to fit ] call[name[logger].debug, parameter[call[constant[start: fit from configuration {}].format, parameter[name[cfgname]]]]] variable[lcm] assign[=] call[name[cls].from_yaml, parameter[]] call[name[lcm].fit, parameter[name[choosers], name[alternatives], call[name[choosers]][name[chosen_fname]]]] for taget[tuple[[<ast.Name object at 0x7da1b2344c10>, <ast.Name object at 0x7da1b23474c0>]]] in starred[call[name[lcm]._group.models.items, parameter[]]] begin[:] call[name[print], parameter[binary_operation[constant[LCM RESULTS FOR SEGMENT %s ] <ast.Mod object at 0x7da2590d6920> call[name[str], parameter[name[k]]]]]] call[name[v].report_fit, parameter[]] variable[outcfgname] assign[=] <ast.BoolOp object at 0x7da1b2346ec0> call[name[lcm].to_yaml, parameter[]] call[name[logger].debug, parameter[call[constant[finish: fit into configuration {}].format, parameter[name[outcfgname]]]]] return[name[lcm]]
keyword[def] identifier[fit_from_cfg] ( identifier[cls] , identifier[choosers] , identifier[chosen_fname] , identifier[alternatives] , identifier[cfgname] , identifier[outcfgname] = keyword[None] ): literal[string] identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[cfgname] )) identifier[lcm] = identifier[cls] . identifier[from_yaml] ( identifier[str_or_buffer] = identifier[cfgname] ) identifier[lcm] . identifier[fit] ( identifier[choosers] , identifier[alternatives] , identifier[choosers] [ identifier[chosen_fname] ]) keyword[for] identifier[k] , identifier[v] keyword[in] identifier[lcm] . identifier[_group] . identifier[models] . identifier[items] (): identifier[print] ( literal[string] % identifier[str] ( identifier[k] )) identifier[v] . identifier[report_fit] () identifier[outcfgname] = identifier[outcfgname] keyword[or] identifier[cfgname] identifier[lcm] . identifier[to_yaml] ( identifier[str_or_buffer] = identifier[outcfgname] ) identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[outcfgname] )) keyword[return] identifier[lcm]
def fit_from_cfg(cls, choosers, chosen_fname, alternatives, cfgname, outcfgname=None): """ Parameters ---------- choosers : DataFrame A dataframe of rows of agents that have made choices. chosen_fname : string A string indicating the column in the choosers dataframe which gives which alternative the choosers have chosen. alternatives : DataFrame A dataframe of alternatives. It should include the current choices from the choosers dataframe as well as some other alternatives from which to sample. Values in choosers[chosen_fname] should index into the alternatives dataframe. cfgname : string The name of the yaml config file from which to read the discrete choice model. outcfgname : string, optional (default cfgname) The name of the output yaml config file where estimation results are written into. Returns ------- lcm : SegmentedMNLDiscreteChoiceModel which was used to fit """ logger.debug('start: fit from configuration {}'.format(cfgname)) lcm = cls.from_yaml(str_or_buffer=cfgname) lcm.fit(choosers, alternatives, choosers[chosen_fname]) for (k, v) in lcm._group.models.items(): print('LCM RESULTS FOR SEGMENT %s\n' % str(k)) v.report_fit() # depends on [control=['for'], data=[]] outcfgname = outcfgname or cfgname lcm.to_yaml(str_or_buffer=outcfgname) logger.debug('finish: fit into configuration {}'.format(outcfgname)) return lcm
def plot_predict(self,h=5,past_values=20,intervals=True,**kwargs): """ Makes forecast with the estimated model Parameters ---------- h : int (default : 5) How many steps ahead would you like to forecast? past_values : int (default : 20) How many past observations to show on the forecast graph? intervals : Boolean Would you like to show 95% prediction intervals for the forecast? Returns ---------- - Plot of the forecast """ import matplotlib.pyplot as plt import seaborn as sns figsize = kwargs.get('figsize',(10,7)) if self.latent_variables.estimated is False: raise Exception("No latent variables estimated!") else: # Retrieve data, dates and (transformed) latent variables scale, shape, skewness = self._get_scale_and_shape(self.latent_variables.get_z_values(transformed=True)) previous_value = self.data[-1] forecasted_values = np.ones(h)*self.states[-1] date_index = self.shift_dates(h) simulations = 10000 sim_vector = np.zeros([simulations,h]) t_params = self.transform_z() for n in range(0,simulations): rnd_q = np.random.normal(0,np.sqrt(self.latent_variables.get_z_values(transformed=True)[0]),h) exp = forecasted_values.copy() for t in range(0,h): if t == 0: exp[t] = forecasted_values[t] + rnd_q[t] else: exp[t] = exp[t-1] + rnd_q[t] sim_vector[n] = self.family.draw_variable(loc=self.link(exp),shape=shape,scale=scale,skewness=skewness,nsims=exp.shape[0]) sim_vector = np.transpose(sim_vector) forecasted_values = self.link(forecasted_values) if self.model_name2 == 'Skewt': forecasted_values = forecasted_values + ((t_params[-3] - (1.0/t_params[-3]))*t_params[-2]*gas.SkewtScore.tv_variate_exp(t_params[-1])) plt.figure(figsize=figsize) if intervals == True: plt.fill_between(date_index[-h-1:], np.insert([np.percentile(i,5) for i in sim_vector],0,previous_value), np.insert([np.percentile(i,95) for i in sim_vector],0,previous_value), alpha=0.2,label="95 C.I.") plot_values = np.append(self.data[-past_values:],forecasted_values) plot_index = date_index[-h-past_values:] plt.plot(plot_index,plot_values,label=self.data_name) plt.title("Forecast for " + self.data_name) plt.xlabel("Time") plt.ylabel(self.data_name) plt.show()
def function[plot_predict, parameter[self, h, past_values, intervals]]: constant[ Makes forecast with the estimated model Parameters ---------- h : int (default : 5) How many steps ahead would you like to forecast? past_values : int (default : 20) How many past observations to show on the forecast graph? intervals : Boolean Would you like to show 95% prediction intervals for the forecast? Returns ---------- - Plot of the forecast ] import module[matplotlib.pyplot] as alias[plt] import module[seaborn] as alias[sns] variable[figsize] assign[=] call[name[kwargs].get, parameter[constant[figsize], tuple[[<ast.Constant object at 0x7da18dc06680>, <ast.Constant object at 0x7da18dc074c0>]]]] if compare[name[self].latent_variables.estimated is constant[False]] begin[:] <ast.Raise object at 0x7da1b18af490>
keyword[def] identifier[plot_predict] ( identifier[self] , identifier[h] = literal[int] , identifier[past_values] = literal[int] , identifier[intervals] = keyword[True] ,** identifier[kwargs] ): literal[string] keyword[import] identifier[matplotlib] . identifier[pyplot] keyword[as] identifier[plt] keyword[import] identifier[seaborn] keyword[as] identifier[sns] identifier[figsize] = identifier[kwargs] . identifier[get] ( literal[string] ,( literal[int] , literal[int] )) keyword[if] identifier[self] . identifier[latent_variables] . identifier[estimated] keyword[is] keyword[False] : keyword[raise] identifier[Exception] ( literal[string] ) keyword[else] : identifier[scale] , identifier[shape] , identifier[skewness] = identifier[self] . identifier[_get_scale_and_shape] ( identifier[self] . identifier[latent_variables] . identifier[get_z_values] ( identifier[transformed] = keyword[True] )) identifier[previous_value] = identifier[self] . identifier[data] [- literal[int] ] identifier[forecasted_values] = identifier[np] . identifier[ones] ( identifier[h] )* identifier[self] . identifier[states] [- literal[int] ] identifier[date_index] = identifier[self] . identifier[shift_dates] ( identifier[h] ) identifier[simulations] = literal[int] identifier[sim_vector] = identifier[np] . identifier[zeros] ([ identifier[simulations] , identifier[h] ]) identifier[t_params] = identifier[self] . identifier[transform_z] () keyword[for] identifier[n] keyword[in] identifier[range] ( literal[int] , identifier[simulations] ): identifier[rnd_q] = identifier[np] . identifier[random] . identifier[normal] ( literal[int] , identifier[np] . identifier[sqrt] ( identifier[self] . identifier[latent_variables] . identifier[get_z_values] ( identifier[transformed] = keyword[True] )[ literal[int] ]), identifier[h] ) identifier[exp] = identifier[forecasted_values] . identifier[copy] () keyword[for] identifier[t] keyword[in] identifier[range] ( literal[int] , identifier[h] ): keyword[if] identifier[t] == literal[int] : identifier[exp] [ identifier[t] ]= identifier[forecasted_values] [ identifier[t] ]+ identifier[rnd_q] [ identifier[t] ] keyword[else] : identifier[exp] [ identifier[t] ]= identifier[exp] [ identifier[t] - literal[int] ]+ identifier[rnd_q] [ identifier[t] ] identifier[sim_vector] [ identifier[n] ]= identifier[self] . identifier[family] . identifier[draw_variable] ( identifier[loc] = identifier[self] . identifier[link] ( identifier[exp] ), identifier[shape] = identifier[shape] , identifier[scale] = identifier[scale] , identifier[skewness] = identifier[skewness] , identifier[nsims] = identifier[exp] . identifier[shape] [ literal[int] ]) identifier[sim_vector] = identifier[np] . identifier[transpose] ( identifier[sim_vector] ) identifier[forecasted_values] = identifier[self] . identifier[link] ( identifier[forecasted_values] ) keyword[if] identifier[self] . identifier[model_name2] == literal[string] : identifier[forecasted_values] = identifier[forecasted_values] +(( identifier[t_params] [- literal[int] ]-( literal[int] / identifier[t_params] [- literal[int] ]))* identifier[t_params] [- literal[int] ]* identifier[gas] . identifier[SkewtScore] . identifier[tv_variate_exp] ( identifier[t_params] [- literal[int] ])) identifier[plt] . identifier[figure] ( identifier[figsize] = identifier[figsize] ) keyword[if] identifier[intervals] == keyword[True] : identifier[plt] . identifier[fill_between] ( identifier[date_index] [- identifier[h] - literal[int] :], identifier[np] . identifier[insert] ([ identifier[np] . 
identifier[percentile] ( identifier[i] , literal[int] ) keyword[for] identifier[i] keyword[in] identifier[sim_vector] ], literal[int] , identifier[previous_value] ), identifier[np] . identifier[insert] ([ identifier[np] . identifier[percentile] ( identifier[i] , literal[int] ) keyword[for] identifier[i] keyword[in] identifier[sim_vector] ], literal[int] , identifier[previous_value] ), identifier[alpha] = literal[int] , identifier[label] = literal[string] ) identifier[plot_values] = identifier[np] . identifier[append] ( identifier[self] . identifier[data] [- identifier[past_values] :], identifier[forecasted_values] ) identifier[plot_index] = identifier[date_index] [- identifier[h] - identifier[past_values] :] identifier[plt] . identifier[plot] ( identifier[plot_index] , identifier[plot_values] , identifier[label] = identifier[self] . identifier[data_name] ) identifier[plt] . identifier[title] ( literal[string] + identifier[self] . identifier[data_name] ) identifier[plt] . identifier[xlabel] ( literal[string] ) identifier[plt] . identifier[ylabel] ( identifier[self] . identifier[data_name] ) identifier[plt] . identifier[show] ()
def plot_predict(self, h=5, past_values=20, intervals=True, **kwargs): """ Makes forecast with the estimated model Parameters ---------- h : int (default : 5) How many steps ahead would you like to forecast? past_values : int (default : 20) How many past observations to show on the forecast graph? intervals : Boolean Would you like to show 95% prediction intervals for the forecast? Returns ---------- - Plot of the forecast """ import matplotlib.pyplot as plt import seaborn as sns figsize = kwargs.get('figsize', (10, 7)) if self.latent_variables.estimated is False: raise Exception('No latent variables estimated!') # depends on [control=['if'], data=[]] else: # Retrieve data, dates and (transformed) latent variables (scale, shape, skewness) = self._get_scale_and_shape(self.latent_variables.get_z_values(transformed=True)) previous_value = self.data[-1] forecasted_values = np.ones(h) * self.states[-1] date_index = self.shift_dates(h) simulations = 10000 sim_vector = np.zeros([simulations, h]) t_params = self.transform_z() for n in range(0, simulations): rnd_q = np.random.normal(0, np.sqrt(self.latent_variables.get_z_values(transformed=True)[0]), h) exp = forecasted_values.copy() for t in range(0, h): if t == 0: exp[t] = forecasted_values[t] + rnd_q[t] # depends on [control=['if'], data=['t']] else: exp[t] = exp[t - 1] + rnd_q[t] # depends on [control=['for'], data=['t']] sim_vector[n] = self.family.draw_variable(loc=self.link(exp), shape=shape, scale=scale, skewness=skewness, nsims=exp.shape[0]) # depends on [control=['for'], data=['n']] sim_vector = np.transpose(sim_vector) forecasted_values = self.link(forecasted_values) if self.model_name2 == 'Skewt': forecasted_values = forecasted_values + (t_params[-3] - 1.0 / t_params[-3]) * t_params[-2] * gas.SkewtScore.tv_variate_exp(t_params[-1]) # depends on [control=['if'], data=[]] plt.figure(figsize=figsize) if intervals == True: plt.fill_between(date_index[-h - 1:], np.insert([np.percentile(i, 5) for i in sim_vector], 0, previous_value), np.insert([np.percentile(i, 95) for i in sim_vector], 0, previous_value), alpha=0.2, label='95 C.I.') # depends on [control=['if'], data=[]] plot_values = np.append(self.data[-past_values:], forecasted_values) plot_index = date_index[-h - past_values:] plt.plot(plot_index, plot_values, label=self.data_name) plt.title('Forecast for ' + self.data_name) plt.xlabel('Time') plt.ylabel(self.data_name) plt.show()
def replace_namespaced_controller_revision(self, name, namespace, body, **kwargs): # noqa: E501 """replace_namespaced_controller_revision # noqa: E501 replace the specified ControllerRevision # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.replace_namespaced_controller_revision(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the ControllerRevision (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param V1ControllerRevision body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1ControllerRevision If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.replace_namespaced_controller_revision_with_http_info(name, namespace, body, **kwargs) # noqa: E501 else: (data) = self.replace_namespaced_controller_revision_with_http_info(name, namespace, body, **kwargs) # noqa: E501 return data
def function[replace_namespaced_controller_revision, parameter[self, name, namespace, body]]: constant[replace_namespaced_controller_revision # noqa: E501 replace the specified ControllerRevision # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.replace_namespaced_controller_revision(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the ControllerRevision (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param V1ControllerRevision body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1ControllerRevision If the method is called asynchronously, returns the request thread. ] call[name[kwargs]][constant[_return_http_data_only]] assign[=] constant[True] if call[name[kwargs].get, parameter[constant[async_req]]] begin[:] return[call[name[self].replace_namespaced_controller_revision_with_http_info, parameter[name[name], name[namespace], name[body]]]]
keyword[def] identifier[replace_namespaced_controller_revision] ( identifier[self] , identifier[name] , identifier[namespace] , identifier[body] ,** identifier[kwargs] ): literal[string] identifier[kwargs] [ literal[string] ]= keyword[True] keyword[if] identifier[kwargs] . identifier[get] ( literal[string] ): keyword[return] identifier[self] . identifier[replace_namespaced_controller_revision_with_http_info] ( identifier[name] , identifier[namespace] , identifier[body] ,** identifier[kwargs] ) keyword[else] : ( identifier[data] )= identifier[self] . identifier[replace_namespaced_controller_revision_with_http_info] ( identifier[name] , identifier[namespace] , identifier[body] ,** identifier[kwargs] ) keyword[return] identifier[data]
def replace_namespaced_controller_revision(self, name, namespace, body, **kwargs): # noqa: E501 "replace_namespaced_controller_revision # noqa: E501\n\n replace the specified ControllerRevision # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.replace_namespaced_controller_revision(name, namespace, body, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param str name: name of the ControllerRevision (required)\n :param str namespace: object name and auth scope, such as for teams and projects (required)\n :param V1ControllerRevision body: (required)\n :param str pretty: If 'true', then the output is pretty printed.\n :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed\n :return: V1ControllerRevision\n If the method is called asynchronously,\n returns the request thread.\n " kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.replace_namespaced_controller_revision_with_http_info(name, namespace, body, **kwargs) # noqa: E501 # depends on [control=['if'], data=[]] else: data = self.replace_namespaced_controller_revision_with_http_info(name, namespace, body, **kwargs) # noqa: E501 return data
def _get_address_family(table, instance): """ Function to derive address family from a junos table name. :params table: The name of the routing table :returns: address family """ address_family_mapping = {"inet": "ipv4", "inet6": "ipv6", "inetflow": "flow"} if instance == "master": family = table.rsplit(".", 1)[-2] else: family = table.split(".")[-2] try: address_family = address_family_mapping[family] except KeyError: address_family = None return address_family
def function[_get_address_family, parameter[table, instance]]: constant[ Function to derive address family from a junos table name. :params table: The name of the routing table :returns: address family ] variable[address_family_mapping] assign[=] dictionary[[<ast.Constant object at 0x7da1b1d90100>, <ast.Constant object at 0x7da1b1d900d0>, <ast.Constant object at 0x7da1b1d93cd0>], [<ast.Constant object at 0x7da1b1d93d00>, <ast.Constant object at 0x7da1b1d93c10>, <ast.Constant object at 0x7da1b1d93ca0>]] if compare[name[instance] equal[==] constant[master]] begin[:] variable[family] assign[=] call[call[name[table].rsplit, parameter[constant[.], constant[1]]]][<ast.UnaryOp object at 0x7da1b1d93820>] <ast.Try object at 0x7da1b1d93760> return[name[address_family]]
keyword[def] identifier[_get_address_family] ( identifier[table] , identifier[instance] ): literal[string] identifier[address_family_mapping] ={ literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] } keyword[if] identifier[instance] == literal[string] : identifier[family] = identifier[table] . identifier[rsplit] ( literal[string] , literal[int] )[- literal[int] ] keyword[else] : identifier[family] = identifier[table] . identifier[split] ( literal[string] )[- literal[int] ] keyword[try] : identifier[address_family] = identifier[address_family_mapping] [ identifier[family] ] keyword[except] identifier[KeyError] : identifier[address_family] = keyword[None] keyword[return] identifier[address_family]
def _get_address_family(table, instance): """ Function to derive address family from a junos table name. :params table: The name of the routing table :returns: address family """ address_family_mapping = {'inet': 'ipv4', 'inet6': 'ipv6', 'inetflow': 'flow'} if instance == 'master': family = table.rsplit('.', 1)[-2] # depends on [control=['if'], data=[]] else: family = table.split('.')[-2] try: address_family = address_family_mapping[family] # depends on [control=['try'], data=[]] except KeyError: address_family = None # depends on [control=['except'], data=[]] return address_family
def wait_for_master_death(self): """Wait for a master timeout and take the lead if necessary :return: None """ logger.info("Waiting for master death") timeout = 1.0 self.last_master_ping = time.time() master_timeout = 300 for arbiter_link in self.conf.arbiters: if not arbiter_link.spare: master_timeout = \ arbiter_link.spare_check_interval * arbiter_link.spare_max_check_attempts logger.info("I'll wait master death for %d seconds", master_timeout) while not self.interrupted: # Make a pause and check if the system time changed _, tcdiff = self.make_a_pause(timeout) # If there was a system time change then we have to adapt last_master_ping: if tcdiff: self.last_master_ping += tcdiff if self.new_conf: self.setup_new_conf() sys.stdout.write(".") sys.stdout.flush() # Now check if master is dead or not now = time.time() if now - self.last_master_ping > master_timeout: logger.info("Arbiter Master is dead. The arbiter %s takes the lead!", self.link_to_myself.name) for arbiter_link in self.conf.arbiters: if not arbiter_link.spare: arbiter_link.alive = False self.must_run = True break
def function[wait_for_master_death, parameter[self]]: constant[Wait for a master timeout and take the lead if necessary :return: None ] call[name[logger].info, parameter[constant[Waiting for master death]]] variable[timeout] assign[=] constant[1.0] name[self].last_master_ping assign[=] call[name[time].time, parameter[]] variable[master_timeout] assign[=] constant[300] for taget[name[arbiter_link]] in starred[name[self].conf.arbiters] begin[:] if <ast.UnaryOp object at 0x7da18bc73ee0> begin[:] variable[master_timeout] assign[=] binary_operation[name[arbiter_link].spare_check_interval * name[arbiter_link].spare_max_check_attempts] call[name[logger].info, parameter[constant[I'll wait master death for %d seconds], name[master_timeout]]] while <ast.UnaryOp object at 0x7da18bc714b0> begin[:] <ast.Tuple object at 0x7da18bc70280> assign[=] call[name[self].make_a_pause, parameter[name[timeout]]] if name[tcdiff] begin[:] <ast.AugAssign object at 0x7da18bc72920> if name[self].new_conf begin[:] call[name[self].setup_new_conf, parameter[]] call[name[sys].stdout.write, parameter[constant[.]]] call[name[sys].stdout.flush, parameter[]] variable[now] assign[=] call[name[time].time, parameter[]] if compare[binary_operation[name[now] - name[self].last_master_ping] greater[>] name[master_timeout]] begin[:] call[name[logger].info, parameter[constant[Arbiter Master is dead. The arbiter %s takes the lead!], name[self].link_to_myself.name]] for taget[name[arbiter_link]] in starred[name[self].conf.arbiters] begin[:] if <ast.UnaryOp object at 0x7da1b0ff0220> begin[:] name[arbiter_link].alive assign[=] constant[False] name[self].must_run assign[=] constant[True] break
keyword[def] identifier[wait_for_master_death] ( identifier[self] ): literal[string] identifier[logger] . identifier[info] ( literal[string] ) identifier[timeout] = literal[int] identifier[self] . identifier[last_master_ping] = identifier[time] . identifier[time] () identifier[master_timeout] = literal[int] keyword[for] identifier[arbiter_link] keyword[in] identifier[self] . identifier[conf] . identifier[arbiters] : keyword[if] keyword[not] identifier[arbiter_link] . identifier[spare] : identifier[master_timeout] = identifier[arbiter_link] . identifier[spare_check_interval] * identifier[arbiter_link] . identifier[spare_max_check_attempts] identifier[logger] . identifier[info] ( literal[string] , identifier[master_timeout] ) keyword[while] keyword[not] identifier[self] . identifier[interrupted] : identifier[_] , identifier[tcdiff] = identifier[self] . identifier[make_a_pause] ( identifier[timeout] ) keyword[if] identifier[tcdiff] : identifier[self] . identifier[last_master_ping] += identifier[tcdiff] keyword[if] identifier[self] . identifier[new_conf] : identifier[self] . identifier[setup_new_conf] () identifier[sys] . identifier[stdout] . identifier[write] ( literal[string] ) identifier[sys] . identifier[stdout] . identifier[flush] () identifier[now] = identifier[time] . identifier[time] () keyword[if] identifier[now] - identifier[self] . identifier[last_master_ping] > identifier[master_timeout] : identifier[logger] . identifier[info] ( literal[string] , identifier[self] . identifier[link_to_myself] . identifier[name] ) keyword[for] identifier[arbiter_link] keyword[in] identifier[self] . identifier[conf] . identifier[arbiters] : keyword[if] keyword[not] identifier[arbiter_link] . identifier[spare] : identifier[arbiter_link] . identifier[alive] = keyword[False] identifier[self] . identifier[must_run] = keyword[True] keyword[break]
def wait_for_master_death(self): """Wait for a master timeout and take the lead if necessary :return: None """ logger.info('Waiting for master death') timeout = 1.0 self.last_master_ping = time.time() master_timeout = 300 for arbiter_link in self.conf.arbiters: if not arbiter_link.spare: master_timeout = arbiter_link.spare_check_interval * arbiter_link.spare_max_check_attempts # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['arbiter_link']] logger.info("I'll wait master death for %d seconds", master_timeout) while not self.interrupted: # Make a pause and check if the system time changed (_, tcdiff) = self.make_a_pause(timeout) # If there was a system time change then we have to adapt last_master_ping: if tcdiff: self.last_master_ping += tcdiff # depends on [control=['if'], data=[]] if self.new_conf: self.setup_new_conf() # depends on [control=['if'], data=[]] sys.stdout.write('.') sys.stdout.flush() # Now check if master is dead or not now = time.time() if now - self.last_master_ping > master_timeout: logger.info('Arbiter Master is dead. The arbiter %s takes the lead!', self.link_to_myself.name) for arbiter_link in self.conf.arbiters: if not arbiter_link.spare: arbiter_link.alive = False # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['arbiter_link']] self.must_run = True break # depends on [control=['if'], data=[]] # depends on [control=['while'], data=[]]
def copy_from_scratch(file_mapping, dry_run=True): """Copy output files from scratch area """ for key, value in file_mapping.items(): if dry_run: print ("copy %s %s" % (value, key)) else: try: outdir = os.path.dirname(key) os.makedirs(outdir) except OSError: pass print ("copy %s %s" % (value, key)) copyfile(value, key) return file_mapping
def function[copy_from_scratch, parameter[file_mapping, dry_run]]: constant[Copy output files from scratch area ] for taget[tuple[[<ast.Name object at 0x7da18f8103a0>, <ast.Name object at 0x7da18f8127a0>]]] in starred[call[name[file_mapping].items, parameter[]]] begin[:] if name[dry_run] begin[:] call[name[print], parameter[binary_operation[constant[copy %s %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da18f812c80>, <ast.Name object at 0x7da18f813fd0>]]]]] return[name[file_mapping]]
keyword[def] identifier[copy_from_scratch] ( identifier[file_mapping] , identifier[dry_run] = keyword[True] ): literal[string] keyword[for] identifier[key] , identifier[value] keyword[in] identifier[file_mapping] . identifier[items] (): keyword[if] identifier[dry_run] : identifier[print] ( literal[string] %( identifier[value] , identifier[key] )) keyword[else] : keyword[try] : identifier[outdir] = identifier[os] . identifier[path] . identifier[dirname] ( identifier[key] ) identifier[os] . identifier[makedirs] ( identifier[outdir] ) keyword[except] identifier[OSError] : keyword[pass] identifier[print] ( literal[string] %( identifier[value] , identifier[key] )) identifier[copyfile] ( identifier[value] , identifier[key] ) keyword[return] identifier[file_mapping]
def copy_from_scratch(file_mapping, dry_run=True): """Copy output files from scratch area """ for (key, value) in file_mapping.items(): if dry_run: print('copy %s %s' % (value, key)) # depends on [control=['if'], data=[]] else: try: outdir = os.path.dirname(key) os.makedirs(outdir) # depends on [control=['try'], data=[]] except OSError: pass # depends on [control=['except'], data=[]] print('copy %s %s' % (value, key)) copyfile(value, key) # depends on [control=['for'], data=[]] return file_mapping
def get(self): """Get the first public IP address returned by one of the online services.""" q = queue.Queue() for u, j, k in urls: t = threading.Thread(target=self._get_ip_public, args=(q, u, j, k)) t.daemon = True t.start() timer = Timer(self.timeout) ip = None while not timer.finished() and ip is None: if q.qsize() > 0: ip = q.get() return ', '.join(set([x.strip() for x in ip.split(',')]))
def function[get, parameter[self]]: constant[Get the first public IP address returned by one of the online services.] variable[q] assign[=] call[name[queue].Queue, parameter[]] for taget[tuple[[<ast.Name object at 0x7da18f58d5a0>, <ast.Name object at 0x7da18f58cb50>, <ast.Name object at 0x7da18f58f910>]]] in starred[name[urls]] begin[:] variable[t] assign[=] call[name[threading].Thread, parameter[]] name[t].daemon assign[=] constant[True] call[name[t].start, parameter[]] variable[timer] assign[=] call[name[Timer], parameter[name[self].timeout]] variable[ip] assign[=] constant[None] while <ast.BoolOp object at 0x7da18f58cca0> begin[:] if compare[call[name[q].qsize, parameter[]] greater[>] constant[0]] begin[:] variable[ip] assign[=] call[name[q].get, parameter[]] return[call[constant[, ].join, parameter[call[name[set], parameter[<ast.ListComp object at 0x7da18f58d6f0>]]]]]
keyword[def] identifier[get] ( identifier[self] ): literal[string] identifier[q] = identifier[queue] . identifier[Queue] () keyword[for] identifier[u] , identifier[j] , identifier[k] keyword[in] identifier[urls] : identifier[t] = identifier[threading] . identifier[Thread] ( identifier[target] = identifier[self] . identifier[_get_ip_public] , identifier[args] =( identifier[q] , identifier[u] , identifier[j] , identifier[k] )) identifier[t] . identifier[daemon] = keyword[True] identifier[t] . identifier[start] () identifier[timer] = identifier[Timer] ( identifier[self] . identifier[timeout] ) identifier[ip] = keyword[None] keyword[while] keyword[not] identifier[timer] . identifier[finished] () keyword[and] identifier[ip] keyword[is] keyword[None] : keyword[if] identifier[q] . identifier[qsize] ()> literal[int] : identifier[ip] = identifier[q] . identifier[get] () keyword[return] literal[string] . identifier[join] ( identifier[set] ([ identifier[x] . identifier[strip] () keyword[for] identifier[x] keyword[in] identifier[ip] . identifier[split] ( literal[string] )]))
def get(self): """Get the first public IP address returned by one of the online services.""" q = queue.Queue() for (u, j, k) in urls: t = threading.Thread(target=self._get_ip_public, args=(q, u, j, k)) t.daemon = True t.start() # depends on [control=['for'], data=[]] timer = Timer(self.timeout) ip = None while not timer.finished() and ip is None: if q.qsize() > 0: ip = q.get() # depends on [control=['if'], data=[]] # depends on [control=['while'], data=[]] return ', '.join(set([x.strip() for x in ip.split(',')]))
def union(self, other): """ Compute the union bounding box of this bounding box and another one. This is equivalent to drawing a bounding box around all corners points of both bounding boxes. Parameters ---------- other : imgaug.BoundingBox Other bounding box with which to generate the union. Returns ------- imgaug.BoundingBox Union bounding box of the two bounding boxes. """ return BoundingBox( x1=min(self.x1, other.x1), y1=min(self.y1, other.y1), x2=max(self.x2, other.x2), y2=max(self.y2, other.y2), )
def function[union, parameter[self, other]]: constant[ Compute the union bounding box of this bounding box and another one. This is equivalent to drawing a bounding box around all corners points of both bounding boxes. Parameters ---------- other : imgaug.BoundingBox Other bounding box with which to generate the union. Returns ------- imgaug.BoundingBox Union bounding box of the two bounding boxes. ] return[call[name[BoundingBox], parameter[]]]
keyword[def] identifier[union] ( identifier[self] , identifier[other] ): literal[string] keyword[return] identifier[BoundingBox] ( identifier[x1] = identifier[min] ( identifier[self] . identifier[x1] , identifier[other] . identifier[x1] ), identifier[y1] = identifier[min] ( identifier[self] . identifier[y1] , identifier[other] . identifier[y1] ), identifier[x2] = identifier[max] ( identifier[self] . identifier[x2] , identifier[other] . identifier[x2] ), identifier[y2] = identifier[max] ( identifier[self] . identifier[y2] , identifier[other] . identifier[y2] ), )
def union(self, other): """ Compute the union bounding box of this bounding box and another one. This is equivalent to drawing a bounding box around all corners points of both bounding boxes. Parameters ---------- other : imgaug.BoundingBox Other bounding box with which to generate the union. Returns ------- imgaug.BoundingBox Union bounding box of the two bounding boxes. """ return BoundingBox(x1=min(self.x1, other.x1), y1=min(self.y1, other.y1), x2=max(self.x2, other.x2), y2=max(self.y2, other.y2))
def _increase_logging(self, loggers): """! @brief Increase logging level for a set of subloggers.""" if self._log_level_delta <= 0: level = max(1, self._default_log_level + self._log_level_delta - 10) for logger in loggers: logging.getLogger(logger).setLevel(level)
def function[_increase_logging, parameter[self, loggers]]: constant[! @brief Increase logging level for a set of subloggers.] if compare[name[self]._log_level_delta less_or_equal[<=] constant[0]] begin[:] variable[level] assign[=] call[name[max], parameter[constant[1], binary_operation[binary_operation[name[self]._default_log_level + name[self]._log_level_delta] - constant[10]]]] for taget[name[logger]] in starred[name[loggers]] begin[:] call[call[name[logging].getLogger, parameter[name[logger]]].setLevel, parameter[name[level]]]
keyword[def] identifier[_increase_logging] ( identifier[self] , identifier[loggers] ): literal[string] keyword[if] identifier[self] . identifier[_log_level_delta] <= literal[int] : identifier[level] = identifier[max] ( literal[int] , identifier[self] . identifier[_default_log_level] + identifier[self] . identifier[_log_level_delta] - literal[int] ) keyword[for] identifier[logger] keyword[in] identifier[loggers] : identifier[logging] . identifier[getLogger] ( identifier[logger] ). identifier[setLevel] ( identifier[level] )
def _increase_logging(self, loggers): """! @brief Increase logging level for a set of subloggers.""" if self._log_level_delta <= 0: level = max(1, self._default_log_level + self._log_level_delta - 10) for logger in loggers: logging.getLogger(logger).setLevel(level) # depends on [control=['for'], data=['logger']] # depends on [control=['if'], data=[]]
def generate_yaml_file(filename, contents): """Creates a yaml file with the given content.""" with open(filename, 'w') as file: file.write(yaml.dump(contents, default_flow_style=False))
def function[generate_yaml_file, parameter[filename, contents]]: constant[Creates a yaml file with the given content.] with call[name[open], parameter[name[filename], constant[w]]] begin[:] call[name[file].write, parameter[call[name[yaml].dump, parameter[name[contents]]]]]
keyword[def] identifier[generate_yaml_file] ( identifier[filename] , identifier[contents] ): literal[string] keyword[with] identifier[open] ( identifier[filename] , literal[string] ) keyword[as] identifier[file] : identifier[file] . identifier[write] ( identifier[yaml] . identifier[dump] ( identifier[contents] , identifier[default_flow_style] = keyword[False] ))
def generate_yaml_file(filename, contents): """Creates a yaml file with the given content.""" with open(filename, 'w') as file: file.write(yaml.dump(contents, default_flow_style=False)) # depends on [control=['with'], data=['file']]
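A minimal usage sketch of generate_yaml_file, assuming PyYAML is available as `yaml`; passing default_flow_style=False is what makes the dump come out in block style rather than inline mappings. The file name and payload are hypothetical.

import yaml

contents = {'name': 'demo', 'replicas': 2}   # hypothetical payload
generate_yaml_file('demo.yml', contents)
# demo.yml now contains block-style YAML:
#   name: demo
#   replicas: 2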
def construct_scalar(self, node):
    '''
    Verify integers and pass them in correctly if they are declared
    as octal
    '''
    if node.tag == 'tag:yaml.org,2002:int':
        if node.value == '0':
            pass
        elif node.value.startswith('0') and not node.value.startswith(('0b', '0x')):
            node.value = node.value.lstrip('0')
            # If value was all zeros, node.value would have been reduced to
            # an empty string. Change it to '0'.
            if node.value == '':
                node.value = '0'
    return super(SaltYamlSafeLoader, self).construct_scalar(node)
def function[construct_scalar, parameter[self, node]]: constant[ Verify integers and pass them in correctly is they are declared as octal ] if compare[name[node].tag equal[==] constant[tag:yaml.org,2002:int]] begin[:] if compare[name[node].value equal[==] constant[0]] begin[:] pass return[call[call[name[super], parameter[name[SaltYamlSafeLoader], name[self]]].construct_scalar, parameter[name[node]]]]
keyword[def] identifier[construct_scalar] ( identifier[self] , identifier[node] ): literal[string] keyword[if] identifier[node] . identifier[tag] == literal[string] : keyword[if] identifier[node] . identifier[value] == literal[string] : keyword[pass] keyword[elif] identifier[node] . identifier[value] . identifier[startswith] ( literal[string] ) keyword[and] keyword[not] identifier[node] . identifier[value] . identifier[startswith] (( literal[string] , literal[string] )): identifier[node] . identifier[value] = identifier[node] . identifier[value] . identifier[lstrip] ( literal[string] ) keyword[if] identifier[node] . identifier[value] == literal[string] : identifier[node] . identifier[value] = literal[string] keyword[return] identifier[super] ( identifier[SaltYamlSafeLoader] , identifier[self] ). identifier[construct_scalar] ( identifier[node] )
def construct_scalar(self, node):
    """
    Verify integers and pass them in correctly if they are declared
    as octal
    """
    if node.tag == 'tag:yaml.org,2002:int': if node.value == '0': pass # depends on [control=['if'], data=[]] elif node.value.startswith('0') and (not node.value.startswith(('0b', '0x'))): node.value = node.value.lstrip('0') # If value was all zeros, node.value would have been reduced to # an empty string. Change it to '0'. if node.value == '': node.value = '0' # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] return super(SaltYamlSafeLoader, self).construct_scalar(node)
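The leading-zero handling in construct_scalar above can be illustrated on its own, outside the loader class; the variable and values below are purely illustrative.

value = '0644'   # YAML 1.1 octal-looking scalar
if value.startswith('0') and not value.startswith(('0b', '0x')):
    value = value.lstrip('0') or '0'   # the loader does this in two steps: strip, then restore '0' for all-zero input
print(value)   # '644'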
def term(term, xpath=None, escape=True): """ Escapes <, > and & characters in the given term for inclusion into XML (like the search query). Also wrap the term in XML tags if xpath is specified. Note that this function doesn't escape the @, $, " and other symbols that are meaningful in a search query. Args: term -- The term text to be escaped (e.g. a search query term). Keyword args: xpath -- An optional xpath, to be specified if the term is to wraped in tags. escape -- An optional parameter - whether to escape the term's XML characters. Default is True. Returns: Properly escaped xml string for queries. >>> term('lorem<4') 'lorem&lt;4' >>> term('3 < bar < 5 $$ True', 'document/foo', False) '<document><foo>3 < bar < 5 $$ True</foo></document>' >>> term('3 < bar < 5 $$ True', 'document/foo') '<document><foo>3 &lt; bar &lt; 5 $$ True</foo></document>' """ prefix = [] postfix = [] if xpath: tags = xpath.split('/') for tag in tags: if tag: prefix.append('<{0}>'.format(tag)) postfix.insert(0, '</{0}>'.format(tag)) if escape: term = cgi.escape(term) return ''.join(prefix + [term] + postfix)
def function[term, parameter[term, xpath, escape]]: constant[ Escapes <, > and & characters in the given term for inclusion into XML (like the search query). Also wrap the term in XML tags if xpath is specified. Note that this function doesn't escape the @, $, " and other symbols that are meaningful in a search query. Args: term -- The term text to be escaped (e.g. a search query term). Keyword args: xpath -- An optional xpath, to be specified if the term is to wraped in tags. escape -- An optional parameter - whether to escape the term's XML characters. Default is True. Returns: Properly escaped xml string for queries. >>> term('lorem<4') 'lorem&lt;4' >>> term('3 < bar < 5 $$ True', 'document/foo', False) '<document><foo>3 < bar < 5 $$ True</foo></document>' >>> term('3 < bar < 5 $$ True', 'document/foo') '<document><foo>3 &lt; bar &lt; 5 $$ True</foo></document>' ] variable[prefix] assign[=] list[[]] variable[postfix] assign[=] list[[]] if name[xpath] begin[:] variable[tags] assign[=] call[name[xpath].split, parameter[constant[/]]] for taget[name[tag]] in starred[name[tags]] begin[:] if name[tag] begin[:] call[name[prefix].append, parameter[call[constant[<{0}>].format, parameter[name[tag]]]]] call[name[postfix].insert, parameter[constant[0], call[constant[</{0}>].format, parameter[name[tag]]]]] if name[escape] begin[:] variable[term] assign[=] call[name[cgi].escape, parameter[name[term]]] return[call[constant[].join, parameter[binary_operation[binary_operation[name[prefix] + list[[<ast.Name object at 0x7da20c76da80>]]] + name[postfix]]]]]
keyword[def] identifier[term] ( identifier[term] , identifier[xpath] = keyword[None] , identifier[escape] = keyword[True] ): literal[string] identifier[prefix] =[] identifier[postfix] =[] keyword[if] identifier[xpath] : identifier[tags] = identifier[xpath] . identifier[split] ( literal[string] ) keyword[for] identifier[tag] keyword[in] identifier[tags] : keyword[if] identifier[tag] : identifier[prefix] . identifier[append] ( literal[string] . identifier[format] ( identifier[tag] )) identifier[postfix] . identifier[insert] ( literal[int] , literal[string] . identifier[format] ( identifier[tag] )) keyword[if] identifier[escape] : identifier[term] = identifier[cgi] . identifier[escape] ( identifier[term] ) keyword[return] literal[string] . identifier[join] ( identifier[prefix] +[ identifier[term] ]+ identifier[postfix] )
def term(term, xpath=None, escape=True): """ Escapes <, > and & characters in the given term for inclusion into XML (like the search query). Also wrap the term in XML tags if xpath is specified. Note that this function doesn't escape the @, $, " and other symbols that are meaningful in a search query. Args: term -- The term text to be escaped (e.g. a search query term). Keyword args: xpath -- An optional xpath, to be specified if the term is to wraped in tags. escape -- An optional parameter - whether to escape the term's XML characters. Default is True. Returns: Properly escaped xml string for queries. >>> term('lorem<4') 'lorem&lt;4' >>> term('3 < bar < 5 $$ True', 'document/foo', False) '<document><foo>3 < bar < 5 $$ True</foo></document>' >>> term('3 < bar < 5 $$ True', 'document/foo') '<document><foo>3 &lt; bar &lt; 5 $$ True</foo></document>' """ prefix = [] postfix = [] if xpath: tags = xpath.split('/') for tag in tags: if tag: prefix.append('<{0}>'.format(tag)) postfix.insert(0, '</{0}>'.format(tag)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['tag']] # depends on [control=['if'], data=[]] if escape: term = cgi.escape(term) # depends on [control=['if'], data=[]] return ''.join(prefix + [term] + postfix)
def estimate_shift(signal, genome=None, windowsize=5000, thresh=None, nwindows=1000, maxlag=500, array_kwargs=None, verbose=False): """ Experimental: cross-correlation to estimate the shift width of ChIP-seq data This can be interpreted as the binding site footprint. For ChIP-seq, the plus and minus strand reads tend to be shifted in the 5' direction away from each other. Various ChIP-seq peak-callers estimate this distance; this function provides a quick, tunable way to do so using cross-correlation. The resulting shift can then be incorporated into subsequent calls to `array` by adding the shift_width kwarg. :param signal: genomic_signal object :param genome: String assembly for constructing windows :param nwindows: Number of windows to compute cross-correlation on :param windowsize: Size of each window to compute cross-correlation on. :param thresh: Threshold read coverage to run cross-correlation on. This is likely to be a function of the fragment size provided in `array_kwargs` `windowsize`. If `thresh` is small, then the cross correlation can be noisy. :param maxlag: Max shift to look for :param array_kwargs: Kwargs passed directly to genomic_signal.array, with the default of `bins=windowsize` for single-bp resolution, and `read_strand` will be overwritten. :param verbose: Be verbose. Returns lags and a `maxlag*2+1` x `nwindows` matrix of cross-correlations. You can then plot the average cross-correlation function with:: plt.plot(lags, shift.mean(axis=0)) and get the distance to shift with:: d = lags[np.argmax(shift.mean(axis=0))] and then plot that with:: plt.axvline(d, color='k', linestyle='--') The number of windows with at least `thresh` coverage is:: shift.shape[0] """ if thresh is None: thresh = 0 if genome is None: genome = signal.genome() if array_kwargs is None: array_kwargs = {} array_kwargs.pop('read_strand', None) if 'bins' not in array_kwargs: array_kwargs['bins'] = windowsize def add_strand(f, strand): fields = f.fields[:] while len(fields) < 5: fields.append('.') fields.append(strand) return pybedtools.create_interval_from_list(fields) windows = pybedtools.BedTool()\ .window_maker(genome=genome, w=windowsize) random_subset = pybedtools.BedTool(windows[:nwindows])\ .shuffle(genome=genome).saveas() if verbose: sys.stderr.write("Getting plus-strand signal for %s regions...\n" % nwindows) sys.stderr.flush() plus = signal.array( features=random_subset, read_strand="+", **array_kwargs).astype(float) if verbose: sys.stderr.write("Getting minus-strand signal for %s regions...\n" % nwindows) sys.stderr.flush() minus = signal.array( features=random_subset, read_strand="-", **array_kwargs).astype(float) # only do cross-correlation if you have enough reads to do so enough = ((plus.sum(axis=1) / windowsize) > thresh) \ & ((minus.sum(axis=1) / windowsize) > thresh) if verbose: sys.stderr.write( "Running cross-correlation on %s regions that passed " "threshold\n" % sum(enough)) results = np.zeros((sum(enough), 2 * maxlag + 1)) for i, xy in enumerate(izip(plus[enough], minus[enough])): x, y = xy results[i] = xcorr(x, y, maxlag) lags = np.arange(-maxlag, maxlag + 1) return lags, results
def function[estimate_shift, parameter[signal, genome, windowsize, thresh, nwindows, maxlag, array_kwargs, verbose]]: constant[ Experimental: cross-correlation to estimate the shift width of ChIP-seq data This can be interpreted as the binding site footprint. For ChIP-seq, the plus and minus strand reads tend to be shifted in the 5' direction away from each other. Various ChIP-seq peak-callers estimate this distance; this function provides a quick, tunable way to do so using cross-correlation. The resulting shift can then be incorporated into subsequent calls to `array` by adding the shift_width kwarg. :param signal: genomic_signal object :param genome: String assembly for constructing windows :param nwindows: Number of windows to compute cross-correlation on :param windowsize: Size of each window to compute cross-correlation on. :param thresh: Threshold read coverage to run cross-correlation on. This is likely to be a function of the fragment size provided in `array_kwargs` `windowsize`. If `thresh` is small, then the cross correlation can be noisy. :param maxlag: Max shift to look for :param array_kwargs: Kwargs passed directly to genomic_signal.array, with the default of `bins=windowsize` for single-bp resolution, and `read_strand` will be overwritten. :param verbose: Be verbose. Returns lags and a `maxlag*2+1` x `nwindows` matrix of cross-correlations. You can then plot the average cross-correlation function with:: plt.plot(lags, shift.mean(axis=0)) and get the distance to shift with:: d = lags[np.argmax(shift.mean(axis=0))] and then plot that with:: plt.axvline(d, color='k', linestyle='--') The number of windows with at least `thresh` coverage is:: shift.shape[0] ] if compare[name[thresh] is constant[None]] begin[:] variable[thresh] assign[=] constant[0] if compare[name[genome] is constant[None]] begin[:] variable[genome] assign[=] call[name[signal].genome, parameter[]] if compare[name[array_kwargs] is constant[None]] begin[:] variable[array_kwargs] assign[=] dictionary[[], []] call[name[array_kwargs].pop, parameter[constant[read_strand], constant[None]]] if compare[constant[bins] <ast.NotIn object at 0x7da2590d7190> name[array_kwargs]] begin[:] call[name[array_kwargs]][constant[bins]] assign[=] name[windowsize] def function[add_strand, parameter[f, strand]]: variable[fields] assign[=] call[name[f].fields][<ast.Slice object at 0x7da18bcca620>] while compare[call[name[len], parameter[name[fields]]] less[<] constant[5]] begin[:] call[name[fields].append, parameter[constant[.]]] call[name[fields].append, parameter[name[strand]]] return[call[name[pybedtools].create_interval_from_list, parameter[name[fields]]]] variable[windows] assign[=] call[call[name[pybedtools].BedTool, parameter[]].window_maker, parameter[]] variable[random_subset] assign[=] call[call[call[name[pybedtools].BedTool, parameter[call[name[windows]][<ast.Slice object at 0x7da18f00f160>]]].shuffle, parameter[]].saveas, parameter[]] if name[verbose] begin[:] call[name[sys].stderr.write, parameter[binary_operation[constant[Getting plus-strand signal for %s regions... ] <ast.Mod object at 0x7da2590d6920> name[nwindows]]]] call[name[sys].stderr.flush, parameter[]] variable[plus] assign[=] call[call[name[signal].array, parameter[]].astype, parameter[name[float]]] if name[verbose] begin[:] call[name[sys].stderr.write, parameter[binary_operation[constant[Getting minus-strand signal for %s regions... 
] <ast.Mod object at 0x7da2590d6920> name[nwindows]]]] call[name[sys].stderr.flush, parameter[]] variable[minus] assign[=] call[call[name[signal].array, parameter[]].astype, parameter[name[float]]] variable[enough] assign[=] binary_operation[compare[binary_operation[call[name[plus].sum, parameter[]] / name[windowsize]] greater[>] name[thresh]] <ast.BitAnd object at 0x7da2590d6b60> compare[binary_operation[call[name[minus].sum, parameter[]] / name[windowsize]] greater[>] name[thresh]]] if name[verbose] begin[:] call[name[sys].stderr.write, parameter[binary_operation[constant[Running cross-correlation on %s regions that passed threshold ] <ast.Mod object at 0x7da2590d6920> call[name[sum], parameter[name[enough]]]]]] variable[results] assign[=] call[name[np].zeros, parameter[tuple[[<ast.Call object at 0x7da2054a6680>, <ast.BinOp object at 0x7da2054a7a90>]]]] for taget[tuple[[<ast.Name object at 0x7da2054a64a0>, <ast.Name object at 0x7da2054a5810>]]] in starred[call[name[enumerate], parameter[call[name[izip], parameter[call[name[plus]][name[enough]], call[name[minus]][name[enough]]]]]]] begin[:] <ast.Tuple object at 0x7da2054a55d0> assign[=] name[xy] call[name[results]][name[i]] assign[=] call[name[xcorr], parameter[name[x], name[y], name[maxlag]]] variable[lags] assign[=] call[name[np].arange, parameter[<ast.UnaryOp object at 0x7da2054a7190>, binary_operation[name[maxlag] + constant[1]]]] return[tuple[[<ast.Name object at 0x7da2054a5540>, <ast.Name object at 0x7da2054a7b50>]]]
keyword[def] identifier[estimate_shift] ( identifier[signal] , identifier[genome] = keyword[None] , identifier[windowsize] = literal[int] , identifier[thresh] = keyword[None] , identifier[nwindows] = literal[int] , identifier[maxlag] = literal[int] , identifier[array_kwargs] = keyword[None] , identifier[verbose] = keyword[False] ): literal[string] keyword[if] identifier[thresh] keyword[is] keyword[None] : identifier[thresh] = literal[int] keyword[if] identifier[genome] keyword[is] keyword[None] : identifier[genome] = identifier[signal] . identifier[genome] () keyword[if] identifier[array_kwargs] keyword[is] keyword[None] : identifier[array_kwargs] ={} identifier[array_kwargs] . identifier[pop] ( literal[string] , keyword[None] ) keyword[if] literal[string] keyword[not] keyword[in] identifier[array_kwargs] : identifier[array_kwargs] [ literal[string] ]= identifier[windowsize] keyword[def] identifier[add_strand] ( identifier[f] , identifier[strand] ): identifier[fields] = identifier[f] . identifier[fields] [:] keyword[while] identifier[len] ( identifier[fields] )< literal[int] : identifier[fields] . identifier[append] ( literal[string] ) identifier[fields] . identifier[append] ( identifier[strand] ) keyword[return] identifier[pybedtools] . identifier[create_interval_from_list] ( identifier[fields] ) identifier[windows] = identifier[pybedtools] . identifier[BedTool] (). identifier[window_maker] ( identifier[genome] = identifier[genome] , identifier[w] = identifier[windowsize] ) identifier[random_subset] = identifier[pybedtools] . identifier[BedTool] ( identifier[windows] [: identifier[nwindows] ]). identifier[shuffle] ( identifier[genome] = identifier[genome] ). identifier[saveas] () keyword[if] identifier[verbose] : identifier[sys] . identifier[stderr] . identifier[write] ( literal[string] % identifier[nwindows] ) identifier[sys] . identifier[stderr] . identifier[flush] () identifier[plus] = identifier[signal] . identifier[array] ( identifier[features] = identifier[random_subset] , identifier[read_strand] = literal[string] , ** identifier[array_kwargs] ). identifier[astype] ( identifier[float] ) keyword[if] identifier[verbose] : identifier[sys] . identifier[stderr] . identifier[write] ( literal[string] % identifier[nwindows] ) identifier[sys] . identifier[stderr] . identifier[flush] () identifier[minus] = identifier[signal] . identifier[array] ( identifier[features] = identifier[random_subset] , identifier[read_strand] = literal[string] , ** identifier[array_kwargs] ). identifier[astype] ( identifier[float] ) identifier[enough] =(( identifier[plus] . identifier[sum] ( identifier[axis] = literal[int] )/ identifier[windowsize] )> identifier[thresh] )&(( identifier[minus] . identifier[sum] ( identifier[axis] = literal[int] )/ identifier[windowsize] )> identifier[thresh] ) keyword[if] identifier[verbose] : identifier[sys] . identifier[stderr] . identifier[write] ( literal[string] literal[string] % identifier[sum] ( identifier[enough] )) identifier[results] = identifier[np] . identifier[zeros] (( identifier[sum] ( identifier[enough] ), literal[int] * identifier[maxlag] + literal[int] )) keyword[for] identifier[i] , identifier[xy] keyword[in] identifier[enumerate] ( identifier[izip] ( identifier[plus] [ identifier[enough] ], identifier[minus] [ identifier[enough] ])): identifier[x] , identifier[y] = identifier[xy] identifier[results] [ identifier[i] ]= identifier[xcorr] ( identifier[x] , identifier[y] , identifier[maxlag] ) identifier[lags] = identifier[np] . 
identifier[arange] (- identifier[maxlag] , identifier[maxlag] + literal[int] ) keyword[return] identifier[lags] , identifier[results]
def estimate_shift(signal, genome=None, windowsize=5000, thresh=None, nwindows=1000, maxlag=500, array_kwargs=None, verbose=False): """ Experimental: cross-correlation to estimate the shift width of ChIP-seq data This can be interpreted as the binding site footprint. For ChIP-seq, the plus and minus strand reads tend to be shifted in the 5' direction away from each other. Various ChIP-seq peak-callers estimate this distance; this function provides a quick, tunable way to do so using cross-correlation. The resulting shift can then be incorporated into subsequent calls to `array` by adding the shift_width kwarg. :param signal: genomic_signal object :param genome: String assembly for constructing windows :param nwindows: Number of windows to compute cross-correlation on :param windowsize: Size of each window to compute cross-correlation on. :param thresh: Threshold read coverage to run cross-correlation on. This is likely to be a function of the fragment size provided in `array_kwargs` `windowsize`. If `thresh` is small, then the cross correlation can be noisy. :param maxlag: Max shift to look for :param array_kwargs: Kwargs passed directly to genomic_signal.array, with the default of `bins=windowsize` for single-bp resolution, and `read_strand` will be overwritten. :param verbose: Be verbose. Returns lags and a `maxlag*2+1` x `nwindows` matrix of cross-correlations. You can then plot the average cross-correlation function with:: plt.plot(lags, shift.mean(axis=0)) and get the distance to shift with:: d = lags[np.argmax(shift.mean(axis=0))] and then plot that with:: plt.axvline(d, color='k', linestyle='--') The number of windows with at least `thresh` coverage is:: shift.shape[0] """ if thresh is None: thresh = 0 # depends on [control=['if'], data=['thresh']] if genome is None: genome = signal.genome() # depends on [control=['if'], data=['genome']] if array_kwargs is None: array_kwargs = {} # depends on [control=['if'], data=['array_kwargs']] array_kwargs.pop('read_strand', None) if 'bins' not in array_kwargs: array_kwargs['bins'] = windowsize # depends on [control=['if'], data=['array_kwargs']] def add_strand(f, strand): fields = f.fields[:] while len(fields) < 5: fields.append('.') # depends on [control=['while'], data=[]] fields.append(strand) return pybedtools.create_interval_from_list(fields) windows = pybedtools.BedTool().window_maker(genome=genome, w=windowsize) random_subset = pybedtools.BedTool(windows[:nwindows]).shuffle(genome=genome).saveas() if verbose: sys.stderr.write('Getting plus-strand signal for %s regions...\n' % nwindows) sys.stderr.flush() # depends on [control=['if'], data=[]] plus = signal.array(features=random_subset, read_strand='+', **array_kwargs).astype(float) if verbose: sys.stderr.write('Getting minus-strand signal for %s regions...\n' % nwindows) sys.stderr.flush() # depends on [control=['if'], data=[]] minus = signal.array(features=random_subset, read_strand='-', **array_kwargs).astype(float) # only do cross-correlation if you have enough reads to do so enough = (plus.sum(axis=1) / windowsize > thresh) & (minus.sum(axis=1) / windowsize > thresh) if verbose: sys.stderr.write('Running cross-correlation on %s regions that passed threshold\n' % sum(enough)) # depends on [control=['if'], data=[]] results = np.zeros((sum(enough), 2 * maxlag + 1)) for (i, xy) in enumerate(izip(plus[enough], minus[enough])): (x, y) = xy results[i] = xcorr(x, y, maxlag) # depends on [control=['for'], data=[]] lags = np.arange(-maxlag, maxlag + 1) return (lags, results)
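A hedged sketch of the lag-picking step only, using a naive dot-product cross-correlation in place of the module's own xcorr() helper (whose exact normalization is not shown here). The synthetic plus/minus coverage vectors are assumptions for illustration.

import numpy as np

def xcorr_sketch(x, y, maxlag):
    # Correlation of x with y shifted by each lag in [-maxlag, maxlag].
    return np.array([np.sum(x * np.roll(y, lag)) for lag in range(-maxlag, maxlag + 1)])

x = np.zeros(200); x[50:60] = 1.0    # plus-strand coverage
y = np.zeros(200); y[65:75] = 1.0    # minus-strand coverage, offset by 15 bp
lags = np.arange(-50, 51)
cc = xcorr_sketch(x, y, maxlag=50)
print(lags[np.argmax(cc)])           # -15 with this convention; the magnitude (15) approximates the strand shift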
def change_crypto_domain_config(self, crypto_domain_index, access_mode): """ Change the access mode for a crypto domain that is currently included in the crypto configuration of this partition. The access mode will be changed for the specified crypto domain on all crypto adapters currently included in the crypto configuration of this partition. For the general principle for maintaining crypto configurations of partitions, see :meth:`~zhmcclient.Partition.increase_crypto_config`. Authorization requirements: * Object-access permission to this Partition. * Task permission to the "Partition Details" task. Parameters: crypto_domain_index (:term:`integer`): Domain index of the crypto domain to be changed. For values, see :meth:`~zhmcclient.Partition.increase_crypto_config`. access_mode (:term:`string`): The new access mode for the crypto domain. For values, see :meth:`~zhmcclient.Partition.increase_crypto_config`. Raises: :exc:`~zhmcclient.HTTPError` :exc:`~zhmcclient.ParseError` :exc:`~zhmcclient.AuthError` :exc:`~zhmcclient.ConnectionError` """ body = {'domain-index': crypto_domain_index, 'access-mode': access_mode} self.manager.session.post( self.uri + '/operations/change-crypto-domain-configuration', body)
def function[change_crypto_domain_config, parameter[self, crypto_domain_index, access_mode]]: constant[ Change the access mode for a crypto domain that is currently included in the crypto configuration of this partition. The access mode will be changed for the specified crypto domain on all crypto adapters currently included in the crypto configuration of this partition. For the general principle for maintaining crypto configurations of partitions, see :meth:`~zhmcclient.Partition.increase_crypto_config`. Authorization requirements: * Object-access permission to this Partition. * Task permission to the "Partition Details" task. Parameters: crypto_domain_index (:term:`integer`): Domain index of the crypto domain to be changed. For values, see :meth:`~zhmcclient.Partition.increase_crypto_config`. access_mode (:term:`string`): The new access mode for the crypto domain. For values, see :meth:`~zhmcclient.Partition.increase_crypto_config`. Raises: :exc:`~zhmcclient.HTTPError` :exc:`~zhmcclient.ParseError` :exc:`~zhmcclient.AuthError` :exc:`~zhmcclient.ConnectionError` ] variable[body] assign[=] dictionary[[<ast.Constant object at 0x7da18f723460>, <ast.Constant object at 0x7da18f7222f0>], [<ast.Name object at 0x7da18f721e70>, <ast.Name object at 0x7da18f722d10>]] call[name[self].manager.session.post, parameter[binary_operation[name[self].uri + constant[/operations/change-crypto-domain-configuration]], name[body]]]
keyword[def] identifier[change_crypto_domain_config] ( identifier[self] , identifier[crypto_domain_index] , identifier[access_mode] ): literal[string] identifier[body] ={ literal[string] : identifier[crypto_domain_index] , literal[string] : identifier[access_mode] } identifier[self] . identifier[manager] . identifier[session] . identifier[post] ( identifier[self] . identifier[uri] + literal[string] , identifier[body] )
def change_crypto_domain_config(self, crypto_domain_index, access_mode): """ Change the access mode for a crypto domain that is currently included in the crypto configuration of this partition. The access mode will be changed for the specified crypto domain on all crypto adapters currently included in the crypto configuration of this partition. For the general principle for maintaining crypto configurations of partitions, see :meth:`~zhmcclient.Partition.increase_crypto_config`. Authorization requirements: * Object-access permission to this Partition. * Task permission to the "Partition Details" task. Parameters: crypto_domain_index (:term:`integer`): Domain index of the crypto domain to be changed. For values, see :meth:`~zhmcclient.Partition.increase_crypto_config`. access_mode (:term:`string`): The new access mode for the crypto domain. For values, see :meth:`~zhmcclient.Partition.increase_crypto_config`. Raises: :exc:`~zhmcclient.HTTPError` :exc:`~zhmcclient.ParseError` :exc:`~zhmcclient.AuthError` :exc:`~zhmcclient.ConnectionError` """ body = {'domain-index': crypto_domain_index, 'access-mode': access_mode} self.manager.session.post(self.uri + '/operations/change-crypto-domain-configuration', body)
def frange(start, stop, step, precision): """A generator that will generate a range of floats.""" value = start while round(value, precision) < stop: yield round(value, precision) value += step
def function[frange, parameter[start, stop, step, precision]]: constant[A generator that will generate a range of floats.] variable[value] assign[=] name[start] while compare[call[name[round], parameter[name[value], name[precision]]] less[<] name[stop]] begin[:] <ast.Yield object at 0x7da18bccafb0> <ast.AugAssign object at 0x7da18bcc9c60>
keyword[def] identifier[frange] ( identifier[start] , identifier[stop] , identifier[step] , identifier[precision] ): literal[string] identifier[value] = identifier[start] keyword[while] identifier[round] ( identifier[value] , identifier[precision] )< identifier[stop] : keyword[yield] identifier[round] ( identifier[value] , identifier[precision] ) identifier[value] += identifier[step]
def frange(start, stop, step, precision): """A generator that will generate a range of floats.""" value = start while round(value, precision) < stop: yield round(value, precision) value += step # depends on [control=['while'], data=[]]
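Usage sketch of the frange generator above: four steps of 0.25 starting at 0, rounded to two decimals, stopping before the stop value is reached.

print(list(frange(0, 1, 0.25, 2)))   # [0, 0.25, 0.5, 0.75]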
def _add_jobs(self): """ Add configured jobs. """ for name, params in self.jobs.items(): if params.active: params.handler = params.handler(params) self.sched.add_cron_job(params.handler.run, **params.schedule)
def function[_add_jobs, parameter[self]]: constant[ Add configured jobs. ] for taget[tuple[[<ast.Name object at 0x7da207f009d0>, <ast.Name object at 0x7da207f03370>]]] in starred[call[name[self].jobs.items, parameter[]]] begin[:] if name[params].active begin[:] name[params].handler assign[=] call[name[params].handler, parameter[name[params]]] call[name[self].sched.add_cron_job, parameter[name[params].handler.run]]
keyword[def] identifier[_add_jobs] ( identifier[self] ): literal[string] keyword[for] identifier[name] , identifier[params] keyword[in] identifier[self] . identifier[jobs] . identifier[items] (): keyword[if] identifier[params] . identifier[active] : identifier[params] . identifier[handler] = identifier[params] . identifier[handler] ( identifier[params] ) identifier[self] . identifier[sched] . identifier[add_cron_job] ( identifier[params] . identifier[handler] . identifier[run] ,** identifier[params] . identifier[schedule] )
def _add_jobs(self): """ Add configured jobs. """ for (name, params) in self.jobs.items(): if params.active: params.handler = params.handler(params) self.sched.add_cron_job(params.handler.run, **params.schedule) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
def parseSearchTerm(term): """ Turn a string search query into a two-tuple of a search term and a dictionary of search keywords. """ terms = [] keywords = {} for word in term.split(): if word.count(':') == 1: k, v = word.split(u':') if k and v: keywords[k] = v elif k or v: terms.append(k or v) else: terms.append(word) term = u' '.join(terms) if keywords: return term, keywords return term, None
def function[parseSearchTerm, parameter[term]]: constant[ Turn a string search query into a two-tuple of a search term and a dictionary of search keywords. ] variable[terms] assign[=] list[[]] variable[keywords] assign[=] dictionary[[], []] for taget[name[word]] in starred[call[name[term].split, parameter[]]] begin[:] if compare[call[name[word].count, parameter[constant[:]]] equal[==] constant[1]] begin[:] <ast.Tuple object at 0x7da1b0ba9ae0> assign[=] call[name[word].split, parameter[constant[:]]] if <ast.BoolOp object at 0x7da1b0bab2e0> begin[:] call[name[keywords]][name[k]] assign[=] name[v] variable[term] assign[=] call[constant[ ].join, parameter[name[terms]]] if name[keywords] begin[:] return[tuple[[<ast.Name object at 0x7da1b0baa680>, <ast.Name object at 0x7da1b0a21c90>]]] return[tuple[[<ast.Name object at 0x7da1b0a23310>, <ast.Constant object at 0x7da1b0a22110>]]]
keyword[def] identifier[parseSearchTerm] ( identifier[term] ): literal[string] identifier[terms] =[] identifier[keywords] ={} keyword[for] identifier[word] keyword[in] identifier[term] . identifier[split] (): keyword[if] identifier[word] . identifier[count] ( literal[string] )== literal[int] : identifier[k] , identifier[v] = identifier[word] . identifier[split] ( literal[string] ) keyword[if] identifier[k] keyword[and] identifier[v] : identifier[keywords] [ identifier[k] ]= identifier[v] keyword[elif] identifier[k] keyword[or] identifier[v] : identifier[terms] . identifier[append] ( identifier[k] keyword[or] identifier[v] ) keyword[else] : identifier[terms] . identifier[append] ( identifier[word] ) identifier[term] = literal[string] . identifier[join] ( identifier[terms] ) keyword[if] identifier[keywords] : keyword[return] identifier[term] , identifier[keywords] keyword[return] identifier[term] , keyword[None]
def parseSearchTerm(term): """ Turn a string search query into a two-tuple of a search term and a dictionary of search keywords. """ terms = [] keywords = {} for word in term.split(): if word.count(':') == 1: (k, v) = word.split(u':') if k and v: keywords[k] = v # depends on [control=['if'], data=[]] elif k or v: terms.append(k or v) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] else: terms.append(word) # depends on [control=['for'], data=['word']] term = u' '.join(terms) if keywords: return (term, keywords) # depends on [control=['if'], data=[]] return (term, None)
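A usage sketch of the term/keyword split in parseSearchTerm above; note that a lone `key:` with no value falls back into the plain terms.

print(parseSearchTerm(u'hello tag:music by:'))
# -> ('hello by', {'tag': 'music'})   (u-prefixed strings under Python 2)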
def replace(self): """ Used to replace a matched string with another. :return: The data after replacement. :rtype: str """ if self.replace_with: # pylint: disable=no-member return substrings( self.regex, self.replace_with, # pylint: disable=no-member self.data, self.occurences, # pylint: disable=no-member ) return self.data
def function[replace, parameter[self]]: constant[ Used to replace a matched string with another. :return: The data after replacement. :rtype: str ] if name[self].replace_with begin[:] return[call[name[substrings], parameter[name[self].regex, name[self].replace_with, name[self].data, name[self].occurences]]] return[name[self].data]
keyword[def] identifier[replace] ( identifier[self] ): literal[string] keyword[if] identifier[self] . identifier[replace_with] : keyword[return] identifier[substrings] ( identifier[self] . identifier[regex] , identifier[self] . identifier[replace_with] , identifier[self] . identifier[data] , identifier[self] . identifier[occurences] , ) keyword[return] identifier[self] . identifier[data]
def replace(self): """ Used to replace a matched string with another. :return: The data after replacement. :rtype: str """ if self.replace_with: # pylint: disable=no-member # pylint: disable=no-member # pylint: disable=no-member return substrings(self.regex, self.replace_with, self.data, self.occurences) # depends on [control=['if'], data=[]] return self.data
def check_no_element_by_selector(self, selector): """Assert an element does not exist matching the given selector.""" elems = find_elements_by_jquery(world.browser, selector) if elems: raise AssertionError("Expected no matching elements, found {}.".format( len(elems)))
def function[check_no_element_by_selector, parameter[self, selector]]: constant[Assert an element does not exist matching the given selector.] variable[elems] assign[=] call[name[find_elements_by_jquery], parameter[name[world].browser, name[selector]]] if name[elems] begin[:] <ast.Raise object at 0x7da18c4cd0c0>
keyword[def] identifier[check_no_element_by_selector] ( identifier[self] , identifier[selector] ): literal[string] identifier[elems] = identifier[find_elements_by_jquery] ( identifier[world] . identifier[browser] , identifier[selector] ) keyword[if] identifier[elems] : keyword[raise] identifier[AssertionError] ( literal[string] . identifier[format] ( identifier[len] ( identifier[elems] )))
def check_no_element_by_selector(self, selector): """Assert an element does not exist matching the given selector.""" elems = find_elements_by_jquery(world.browser, selector) if elems: raise AssertionError('Expected no matching elements, found {}.'.format(len(elems))) # depends on [control=['if'], data=[]]
def define_threshold(dat, s_freq, method, value, nbins=120): """Return the value of the threshold based on relative values. Parameters ---------- dat : ndarray (dtype='float') vector with the data after selection-transformation s_freq : float sampling frequency method : str one of 'mean', 'median', 'std', 'mean+std', 'median+std', 'histmax' value : float value to multiply the values for nbins : int for histmax method, number of bins in the histogram Returns ------- float threshold in useful units. """ if method == 'mean': value = value * mean(dat) elif method == 'median': value = value * median(dat) elif method == 'std': value = value * std(dat) elif method == 'mean+std': value = mean(dat) + value * std(dat) elif method == 'median+std': value = median(dat) + value * std(dat) elif method == 'histmax': hist = histogram(dat, bins=nbins) idx_maxbin = argmax(hist[0]) maxamp = mean((hist[1][idx_maxbin], hist[1][idx_maxbin + 1])) value = value * maxamp return value
def function[define_threshold, parameter[dat, s_freq, method, value, nbins]]: constant[Return the value of the threshold based on relative values. Parameters ---------- dat : ndarray (dtype='float') vector with the data after selection-transformation s_freq : float sampling frequency method : str one of 'mean', 'median', 'std', 'mean+std', 'median+std', 'histmax' value : float value to multiply the values for nbins : int for histmax method, number of bins in the histogram Returns ------- float threshold in useful units. ] if compare[name[method] equal[==] constant[mean]] begin[:] variable[value] assign[=] binary_operation[name[value] * call[name[mean], parameter[name[dat]]]] return[name[value]]
keyword[def] identifier[define_threshold] ( identifier[dat] , identifier[s_freq] , identifier[method] , identifier[value] , identifier[nbins] = literal[int] ): literal[string] keyword[if] identifier[method] == literal[string] : identifier[value] = identifier[value] * identifier[mean] ( identifier[dat] ) keyword[elif] identifier[method] == literal[string] : identifier[value] = identifier[value] * identifier[median] ( identifier[dat] ) keyword[elif] identifier[method] == literal[string] : identifier[value] = identifier[value] * identifier[std] ( identifier[dat] ) keyword[elif] identifier[method] == literal[string] : identifier[value] = identifier[mean] ( identifier[dat] )+ identifier[value] * identifier[std] ( identifier[dat] ) keyword[elif] identifier[method] == literal[string] : identifier[value] = identifier[median] ( identifier[dat] )+ identifier[value] * identifier[std] ( identifier[dat] ) keyword[elif] identifier[method] == literal[string] : identifier[hist] = identifier[histogram] ( identifier[dat] , identifier[bins] = identifier[nbins] ) identifier[idx_maxbin] = identifier[argmax] ( identifier[hist] [ literal[int] ]) identifier[maxamp] = identifier[mean] (( identifier[hist] [ literal[int] ][ identifier[idx_maxbin] ], identifier[hist] [ literal[int] ][ identifier[idx_maxbin] + literal[int] ])) identifier[value] = identifier[value] * identifier[maxamp] keyword[return] identifier[value]
def define_threshold(dat, s_freq, method, value, nbins=120): """Return the value of the threshold based on relative values. Parameters ---------- dat : ndarray (dtype='float') vector with the data after selection-transformation s_freq : float sampling frequency method : str one of 'mean', 'median', 'std', 'mean+std', 'median+std', 'histmax' value : float value to multiply the values for nbins : int for histmax method, number of bins in the histogram Returns ------- float threshold in useful units. """ if method == 'mean': value = value * mean(dat) # depends on [control=['if'], data=[]] elif method == 'median': value = value * median(dat) # depends on [control=['if'], data=[]] elif method == 'std': value = value * std(dat) # depends on [control=['if'], data=[]] elif method == 'mean+std': value = mean(dat) + value * std(dat) # depends on [control=['if'], data=[]] elif method == 'median+std': value = median(dat) + value * std(dat) # depends on [control=['if'], data=[]] elif method == 'histmax': hist = histogram(dat, bins=nbins) idx_maxbin = argmax(hist[0]) maxamp = mean((hist[1][idx_maxbin], hist[1][idx_maxbin + 1])) value = value * maxamp # depends on [control=['if'], data=[]] return value
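A hedged sketch of the 'mean+std' branch of define_threshold, spelling the arithmetic out with explicit numpy calls (the function itself relies on bare mean/std/median/histogram/argmax names imported elsewhere in its module). The data vector is an assumption for illustration.

import numpy as np

dat = np.array([1.0, 2.0, 3.0, 4.0, 5.0])     # assumed transformed signal
value = 2.0
threshold = np.mean(dat) + value * np.std(dat)
print(round(threshold, 3))   # 3.0 + 2 * ~1.414 -> 5.828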
def mp_spawn(self): """ Spawn worker processes (using multiprocessing) """ processes = [] for x in range(self.queue_worker_amount): process = multiprocessing.Process(target=self.mp_worker) process.start() processes.append(process) for process in processes: process.join()
def function[mp_spawn, parameter[self]]: constant[ Spawn worker processes (using multiprocessing) ] variable[processes] assign[=] list[[]] for taget[name[x]] in starred[call[name[range], parameter[name[self].queue_worker_amount]]] begin[:] variable[process] assign[=] call[name[multiprocessing].Process, parameter[]] call[name[process].start, parameter[]] call[name[processes].append, parameter[name[process]]] for taget[name[process]] in starred[name[processes]] begin[:] call[name[process].join, parameter[]]
keyword[def] identifier[mp_spawn] ( identifier[self] ): literal[string] identifier[processes] =[] keyword[for] identifier[x] keyword[in] identifier[range] ( identifier[self] . identifier[queue_worker_amount] ): identifier[process] = identifier[multiprocessing] . identifier[Process] ( identifier[target] = identifier[self] . identifier[mp_worker] ) identifier[process] . identifier[start] () identifier[processes] . identifier[append] ( identifier[process] ) keyword[for] identifier[process] keyword[in] identifier[processes] : identifier[process] . identifier[join] ()
def mp_spawn(self): """ Spawn worker processes (using multiprocessing) """ processes = [] for x in range(self.queue_worker_amount): process = multiprocessing.Process(target=self.mp_worker) process.start() processes.append(process) # depends on [control=['for'], data=[]] for process in processes: process.join() # depends on [control=['for'], data=['process']]
def prefix_iter(self, ns_uri): """Gets an iterator over the prefixes for the given namespace.""" ni = self.__lookup_uri(ns_uri) return iter(ni.prefixes)
def function[prefix_iter, parameter[self, ns_uri]]: constant[Gets an iterator over the prefixes for the given namespace.] variable[ni] assign[=] call[name[self].__lookup_uri, parameter[name[ns_uri]]] return[call[name[iter], parameter[name[ni].prefixes]]]
keyword[def] identifier[prefix_iter] ( identifier[self] , identifier[ns_uri] ): literal[string] identifier[ni] = identifier[self] . identifier[__lookup_uri] ( identifier[ns_uri] ) keyword[return] identifier[iter] ( identifier[ni] . identifier[prefixes] )
def prefix_iter(self, ns_uri): """Gets an iterator over the prefixes for the given namespace.""" ni = self.__lookup_uri(ns_uri) return iter(ni.prefixes)
def create(self, client=None):
    """API call: create the zone via a POST request

    See
    https://cloud.google.com/dns/api/v1/managedZones/create

    :type client: :class:`google.cloud.dns.client.Client`
    :param client:
        (Optional) the client to use. If not passed, falls back to the
        ``client`` stored on the current zone.
    """
    client = self._require_client(client)
    path = "/projects/%s/managedZones" % (self.project,)
    api_response = client._connection.api_request(
        method="POST", path=path, data=self._build_resource()
    )
    self._set_properties(api_response)
def function[create, parameter[self, client]]: constant[API call: create the zone via a PUT request See https://cloud.google.com/dns/api/v1/managedZones/create :type client: :class:`google.cloud.dns.client.Client` :param client: (Optional) the client to use. If not passed, falls back to the ``client`` stored on the current zone. ] variable[client] assign[=] call[name[self]._require_client, parameter[name[client]]] variable[path] assign[=] binary_operation[constant[/projects/%s/managedZones] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da207f03e50>]]] variable[api_response] assign[=] call[name[client]._connection.api_request, parameter[]] call[name[self]._set_properties, parameter[name[api_response]]]
keyword[def] identifier[create] ( identifier[self] , identifier[client] = keyword[None] ): literal[string] identifier[client] = identifier[self] . identifier[_require_client] ( identifier[client] ) identifier[path] = literal[string] %( identifier[self] . identifier[project] ,) identifier[api_response] = identifier[client] . identifier[_connection] . identifier[api_request] ( identifier[method] = literal[string] , identifier[path] = identifier[path] , identifier[data] = identifier[self] . identifier[_build_resource] () ) identifier[self] . identifier[_set_properties] ( identifier[api_response] )
def create(self, client=None):
    """API call: create the zone via a POST request

    See
    https://cloud.google.com/dns/api/v1/managedZones/create

    :type client: :class:`google.cloud.dns.client.Client`
    :param client:
        (Optional) the client to use. If not passed, falls back to the
        ``client`` stored on the current zone.
    """
    client = self._require_client(client)
    path = '/projects/%s/managedZones' % (self.project,)
    api_response = client._connection.api_request(method='POST', path=path, data=self._build_resource())
    self._set_properties(api_response)
def update_deployment_targets(self, machines, project, deployment_group_id):
    """UpdateDeploymentTargets.
    [Preview API] Update tags of a list of deployment targets in a deployment group.
    :param [DeploymentTargetUpdateParameter] machines: Deployment targets with tags to update.
    :param str project: Project ID or project name
    :param int deployment_group_id: ID of the deployment group in which deployment targets are updated.
    :rtype: [DeploymentMachine]
    """
    route_values = {}
    if project is not None:
        route_values['project'] = self._serialize.url('project', project, 'str')
    if deployment_group_id is not None:
        route_values['deploymentGroupId'] = self._serialize.url('deployment_group_id', deployment_group_id, 'int')
    content = self._serialize.body(machines, '[DeploymentTargetUpdateParameter]')
    response = self._send(http_method='PATCH',
                          location_id='2f0aa599-c121-4256-a5fd-ba370e0ae7b6',
                          version='5.0-preview.1',
                          route_values=route_values,
                          content=content)
    return self._deserialize('[DeploymentMachine]', self._unwrap_collection(response))
def function[update_deployment_targets, parameter[self, machines, project, deployment_group_id]]: constant[UpdateDeploymentTargets. [Preview API] Update tags of a list of deployment targets in a deployment group. :param [DeploymentTargetUpdateParameter] machines: Deployment targets with tags to udpdate. :param str project: Project ID or project name :param int deployment_group_id: ID of the deployment group in which deployment targets are updated. :rtype: [DeploymentMachine] ] variable[route_values] assign[=] dictionary[[], []] if compare[name[project] is_not constant[None]] begin[:] call[name[route_values]][constant[project]] assign[=] call[name[self]._serialize.url, parameter[constant[project], name[project], constant[str]]] if compare[name[deployment_group_id] is_not constant[None]] begin[:] call[name[route_values]][constant[deploymentGroupId]] assign[=] call[name[self]._serialize.url, parameter[constant[deployment_group_id], name[deployment_group_id], constant[int]]] variable[content] assign[=] call[name[self]._serialize.body, parameter[name[machines], constant[[DeploymentTargetUpdateParameter]]]] variable[response] assign[=] call[name[self]._send, parameter[]] return[call[name[self]._deserialize, parameter[constant[[DeploymentMachine]], call[name[self]._unwrap_collection, parameter[name[response]]]]]]
keyword[def] identifier[update_deployment_targets] ( identifier[self] , identifier[machines] , identifier[project] , identifier[deployment_group_id] ): literal[string] identifier[route_values] ={} keyword[if] identifier[project] keyword[is] keyword[not] keyword[None] : identifier[route_values] [ literal[string] ]= identifier[self] . identifier[_serialize] . identifier[url] ( literal[string] , identifier[project] , literal[string] ) keyword[if] identifier[deployment_group_id] keyword[is] keyword[not] keyword[None] : identifier[route_values] [ literal[string] ]= identifier[self] . identifier[_serialize] . identifier[url] ( literal[string] , identifier[deployment_group_id] , literal[string] ) identifier[content] = identifier[self] . identifier[_serialize] . identifier[body] ( identifier[machines] , literal[string] ) identifier[response] = identifier[self] . identifier[_send] ( identifier[http_method] = literal[string] , identifier[location_id] = literal[string] , identifier[version] = literal[string] , identifier[route_values] = identifier[route_values] , identifier[content] = identifier[content] ) keyword[return] identifier[self] . identifier[_deserialize] ( literal[string] , identifier[self] . identifier[_unwrap_collection] ( identifier[response] ))
def update_deployment_targets(self, machines, project, deployment_group_id):
    """UpdateDeploymentTargets.
    [Preview API] Update tags of a list of deployment targets in a deployment group.
    :param [DeploymentTargetUpdateParameter] machines: Deployment targets with tags to update.
    :param str project: Project ID or project name
    :param int deployment_group_id: ID of the deployment group in which deployment targets are updated.
    :rtype: [DeploymentMachine]
    """
    route_values = {}
    if project is not None:
        route_values['project'] = self._serialize.url('project', project, 'str') # depends on [control=['if'], data=['project']]
    if deployment_group_id is not None:
        route_values['deploymentGroupId'] = self._serialize.url('deployment_group_id', deployment_group_id, 'int') # depends on [control=['if'], data=['deployment_group_id']]
    content = self._serialize.body(machines, '[DeploymentTargetUpdateParameter]')
    response = self._send(http_method='PATCH', location_id='2f0aa599-c121-4256-a5fd-ba370e0ae7b6', version='5.0-preview.1', route_values=route_values, content=content)
    return self._deserialize('[DeploymentMachine]', self._unwrap_collection(response))
def get_sort_field(attr, model):
    """
    Gets the field to sort on for the given attr. Currently returns attr
    if it is a field on the given model. If the model has an attribute
    matching that name and that value has an attribute 'sort_field' then
    that value is used.

    TODO: Provide a way to sort based on a non-field attribute.
    """
    try:
        if model._meta.get_field(attr):
            return attr
    except FieldDoesNotExist:
        if isinstance(attr, basestring):
            val = getattr(model, attr, None)
            if val and hasattr(val, 'sort_field'):
                return getattr(model, attr).sort_field
        return None
def function[get_sort_field, parameter[attr, model]]: constant[ Get's the field to sort on for the given attr. Currently returns attr if it is a field on the given model. If the models has an attribute matching that name and that value has an attribute 'sort_field' than that value is used. TODO: Provide a way to sort based on a non field attribute. ] <ast.Try object at 0x7da1b0b73280>
keyword[def] identifier[get_sort_field] ( identifier[attr] , identifier[model] ): literal[string] keyword[try] : keyword[if] identifier[model] . identifier[_meta] . identifier[get_field] ( identifier[attr] ): keyword[return] identifier[attr] keyword[except] identifier[FieldDoesNotExist] : keyword[if] identifier[isinstance] ( identifier[attr] , identifier[basestring] ): identifier[val] = identifier[getattr] ( identifier[model] , identifier[attr] , keyword[None] ) keyword[if] identifier[val] keyword[and] identifier[hasattr] ( identifier[val] , literal[string] ): keyword[return] identifier[getattr] ( identifier[model] , identifier[attr] ). identifier[sort_field] keyword[return] keyword[None]
def get_sort_field(attr, model):
    """
    Gets the field to sort on for the given attr. Currently returns attr
    if it is a field on the given model. If the model has an attribute
    matching that name and that value has an attribute 'sort_field' then
    that value is used.

    TODO: Provide a way to sort based on a non-field attribute.
    """
    try:
        if model._meta.get_field(attr):
            return attr # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
    except FieldDoesNotExist:
        if isinstance(attr, basestring):
            val = getattr(model, attr, None)
            if val and hasattr(val, 'sort_field'):
                return getattr(model, attr).sort_field # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
        return None # depends on [control=['except'], data=[]]
def _sample_actions(self, state: Sequence[tf.Tensor]) -> Tuple[Sequence[tf.Tensor], tf.Tensor, tf.Tensor]: '''Returns sampled action fluents and tensors related to the sampling. Args: state (Sequence[tf.Tensor]): A list of state fluents. Returns: Tuple[Sequence[tf.Tensor], tf.Tensor, tf.Tensor]: A tuple with action fluents, an integer tensor for the number of samples, and a boolean tensor for checking all action preconditions. ''' default = self.compiler.compile_default_action(self.batch_size) bound_constraints = self.compiler.compile_action_bound_constraints(state) action = self._sample_action(bound_constraints, default) n, action, checking = self._check_preconditions(state, action, bound_constraints, default) return action, n, checking
def function[_sample_actions, parameter[self, state]]: constant[Returns sampled action fluents and tensors related to the sampling. Args: state (Sequence[tf.Tensor]): A list of state fluents. Returns: Tuple[Sequence[tf.Tensor], tf.Tensor, tf.Tensor]: A tuple with action fluents, an integer tensor for the number of samples, and a boolean tensor for checking all action preconditions. ] variable[default] assign[=] call[name[self].compiler.compile_default_action, parameter[name[self].batch_size]] variable[bound_constraints] assign[=] call[name[self].compiler.compile_action_bound_constraints, parameter[name[state]]] variable[action] assign[=] call[name[self]._sample_action, parameter[name[bound_constraints], name[default]]] <ast.Tuple object at 0x7da18f00c2e0> assign[=] call[name[self]._check_preconditions, parameter[name[state], name[action], name[bound_constraints], name[default]]] return[tuple[[<ast.Name object at 0x7da18f00eef0>, <ast.Name object at 0x7da18f00fe50>, <ast.Name object at 0x7da18f00c820>]]]
keyword[def] identifier[_sample_actions] ( identifier[self] , identifier[state] : identifier[Sequence] [ identifier[tf] . identifier[Tensor] ])-> identifier[Tuple] [ identifier[Sequence] [ identifier[tf] . identifier[Tensor] ], identifier[tf] . identifier[Tensor] , identifier[tf] . identifier[Tensor] ]: literal[string] identifier[default] = identifier[self] . identifier[compiler] . identifier[compile_default_action] ( identifier[self] . identifier[batch_size] ) identifier[bound_constraints] = identifier[self] . identifier[compiler] . identifier[compile_action_bound_constraints] ( identifier[state] ) identifier[action] = identifier[self] . identifier[_sample_action] ( identifier[bound_constraints] , identifier[default] ) identifier[n] , identifier[action] , identifier[checking] = identifier[self] . identifier[_check_preconditions] ( identifier[state] , identifier[action] , identifier[bound_constraints] , identifier[default] ) keyword[return] identifier[action] , identifier[n] , identifier[checking]
def _sample_actions(self, state: Sequence[tf.Tensor]) -> Tuple[Sequence[tf.Tensor], tf.Tensor, tf.Tensor]: """Returns sampled action fluents and tensors related to the sampling. Args: state (Sequence[tf.Tensor]): A list of state fluents. Returns: Tuple[Sequence[tf.Tensor], tf.Tensor, tf.Tensor]: A tuple with action fluents, an integer tensor for the number of samples, and a boolean tensor for checking all action preconditions. """ default = self.compiler.compile_default_action(self.batch_size) bound_constraints = self.compiler.compile_action_bound_constraints(state) action = self._sample_action(bound_constraints, default) (n, action, checking) = self._check_preconditions(state, action, bound_constraints, default) return (action, n, checking)
def makeringlatticeCIJ(n, k, seed=None): ''' This function generates a directed lattice network with toroidal boundary counditions (i.e. with ring-like "wrapping around"). Parameters ---------- N : int number of vertices K : int number of edges seed : hashable, optional If None (default), use the np.random's global random state to generate random numbers. Otherwise, use a new np.random.RandomState instance seeded with the given value. Returns ------- CIJ : NxN np.ndarray connection matrix Notes ----- The lattice is made by placing connections as close as possible to the main diagonal, with wrapping around. No connections are made on the main diagonal. In/Outdegree is kept approx. constant at K/N. ''' rng = get_rng(seed) # initialize CIJ = np.zeros((n, n)) CIJ1 = np.ones((n, n)) kk = 0 count = 0 seq = range(1, n) seq2 = range(n - 1, 0, -1) # fill in while kk < k: count += 1 dCIJ = np.triu(CIJ1, seq[count]) - np.triu(CIJ1, seq[count] + 1) dCIJ2 = np.triu(CIJ1, seq2[count]) - np.triu(CIJ1, seq2[count] + 1) dCIJ = dCIJ + dCIJ.T + dCIJ2 + dCIJ2.T CIJ += dCIJ kk = int(np.sum(CIJ)) # remove excess connections overby = kk - k if overby: i, j = np.where(dCIJ) rp = rng.permutation(np.size(i)) for ii in range(overby): CIJ[i[rp[ii]], j[rp[ii]]] = 0 return CIJ
def function[makeringlatticeCIJ, parameter[n, k, seed]]: constant[ This function generates a directed lattice network with toroidal boundary counditions (i.e. with ring-like "wrapping around"). Parameters ---------- N : int number of vertices K : int number of edges seed : hashable, optional If None (default), use the np.random's global random state to generate random numbers. Otherwise, use a new np.random.RandomState instance seeded with the given value. Returns ------- CIJ : NxN np.ndarray connection matrix Notes ----- The lattice is made by placing connections as close as possible to the main diagonal, with wrapping around. No connections are made on the main diagonal. In/Outdegree is kept approx. constant at K/N. ] variable[rng] assign[=] call[name[get_rng], parameter[name[seed]]] variable[CIJ] assign[=] call[name[np].zeros, parameter[tuple[[<ast.Name object at 0x7da2041db310>, <ast.Name object at 0x7da2041d8d60>]]]] variable[CIJ1] assign[=] call[name[np].ones, parameter[tuple[[<ast.Name object at 0x7da2041d98a0>, <ast.Name object at 0x7da2041d80d0>]]]] variable[kk] assign[=] constant[0] variable[count] assign[=] constant[0] variable[seq] assign[=] call[name[range], parameter[constant[1], name[n]]] variable[seq2] assign[=] call[name[range], parameter[binary_operation[name[n] - constant[1]], constant[0], <ast.UnaryOp object at 0x7da1b08d2f50>]] while compare[name[kk] less[<] name[k]] begin[:] <ast.AugAssign object at 0x7da1b08d30a0> variable[dCIJ] assign[=] binary_operation[call[name[np].triu, parameter[name[CIJ1], call[name[seq]][name[count]]]] - call[name[np].triu, parameter[name[CIJ1], binary_operation[call[name[seq]][name[count]] + constant[1]]]]] variable[dCIJ2] assign[=] binary_operation[call[name[np].triu, parameter[name[CIJ1], call[name[seq2]][name[count]]]] - call[name[np].triu, parameter[name[CIJ1], binary_operation[call[name[seq2]][name[count]] + constant[1]]]]] variable[dCIJ] assign[=] binary_operation[binary_operation[binary_operation[name[dCIJ] + name[dCIJ].T] + name[dCIJ2]] + name[dCIJ2].T] <ast.AugAssign object at 0x7da1b084fb20> variable[kk] assign[=] call[name[int], parameter[call[name[np].sum, parameter[name[CIJ]]]]] variable[overby] assign[=] binary_operation[name[kk] - name[k]] if name[overby] begin[:] <ast.Tuple object at 0x7da1b084cbb0> assign[=] call[name[np].where, parameter[name[dCIJ]]] variable[rp] assign[=] call[name[rng].permutation, parameter[call[name[np].size, parameter[name[i]]]]] for taget[name[ii]] in starred[call[name[range], parameter[name[overby]]]] begin[:] call[name[CIJ]][tuple[[<ast.Subscript object at 0x7da1b08bb700>, <ast.Subscript object at 0x7da1b08b82b0>]]] assign[=] constant[0] return[name[CIJ]]
keyword[def] identifier[makeringlatticeCIJ] ( identifier[n] , identifier[k] , identifier[seed] = keyword[None] ): literal[string] identifier[rng] = identifier[get_rng] ( identifier[seed] ) identifier[CIJ] = identifier[np] . identifier[zeros] (( identifier[n] , identifier[n] )) identifier[CIJ1] = identifier[np] . identifier[ones] (( identifier[n] , identifier[n] )) identifier[kk] = literal[int] identifier[count] = literal[int] identifier[seq] = identifier[range] ( literal[int] , identifier[n] ) identifier[seq2] = identifier[range] ( identifier[n] - literal[int] , literal[int] ,- literal[int] ) keyword[while] identifier[kk] < identifier[k] : identifier[count] += literal[int] identifier[dCIJ] = identifier[np] . identifier[triu] ( identifier[CIJ1] , identifier[seq] [ identifier[count] ])- identifier[np] . identifier[triu] ( identifier[CIJ1] , identifier[seq] [ identifier[count] ]+ literal[int] ) identifier[dCIJ2] = identifier[np] . identifier[triu] ( identifier[CIJ1] , identifier[seq2] [ identifier[count] ])- identifier[np] . identifier[triu] ( identifier[CIJ1] , identifier[seq2] [ identifier[count] ]+ literal[int] ) identifier[dCIJ] = identifier[dCIJ] + identifier[dCIJ] . identifier[T] + identifier[dCIJ2] + identifier[dCIJ2] . identifier[T] identifier[CIJ] += identifier[dCIJ] identifier[kk] = identifier[int] ( identifier[np] . identifier[sum] ( identifier[CIJ] )) identifier[overby] = identifier[kk] - identifier[k] keyword[if] identifier[overby] : identifier[i] , identifier[j] = identifier[np] . identifier[where] ( identifier[dCIJ] ) identifier[rp] = identifier[rng] . identifier[permutation] ( identifier[np] . identifier[size] ( identifier[i] )) keyword[for] identifier[ii] keyword[in] identifier[range] ( identifier[overby] ): identifier[CIJ] [ identifier[i] [ identifier[rp] [ identifier[ii] ]], identifier[j] [ identifier[rp] [ identifier[ii] ]]]= literal[int] keyword[return] identifier[CIJ]
def makeringlatticeCIJ(n, k, seed=None): """ This function generates a directed lattice network with toroidal boundary counditions (i.e. with ring-like "wrapping around"). Parameters ---------- N : int number of vertices K : int number of edges seed : hashable, optional If None (default), use the np.random's global random state to generate random numbers. Otherwise, use a new np.random.RandomState instance seeded with the given value. Returns ------- CIJ : NxN np.ndarray connection matrix Notes ----- The lattice is made by placing connections as close as possible to the main diagonal, with wrapping around. No connections are made on the main diagonal. In/Outdegree is kept approx. constant at K/N. """ rng = get_rng(seed) # initialize CIJ = np.zeros((n, n)) CIJ1 = np.ones((n, n)) kk = 0 count = 0 seq = range(1, n) seq2 = range(n - 1, 0, -1) # fill in while kk < k: count += 1 dCIJ = np.triu(CIJ1, seq[count]) - np.triu(CIJ1, seq[count] + 1) dCIJ2 = np.triu(CIJ1, seq2[count]) - np.triu(CIJ1, seq2[count] + 1) dCIJ = dCIJ + dCIJ.T + dCIJ2 + dCIJ2.T CIJ += dCIJ kk = int(np.sum(CIJ)) # depends on [control=['while'], data=['kk']] # remove excess connections overby = kk - k if overby: (i, j) = np.where(dCIJ) rp = rng.permutation(np.size(i)) for ii in range(overby): CIJ[i[rp[ii]], j[rp[ii]]] = 0 # depends on [control=['for'], data=['ii']] # depends on [control=['if'], data=[]] return CIJ
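A minimal usage sketch for the lattice generator above. It assumes the function and its get_rng helper are already importable in the session (as in bctpy) and that numpy is installed; the node and edge counts below are arbitrary.

import numpy as np

CIJ = makeringlatticeCIJ(20, 80, seed=42)   # 20 nodes, roughly 80 directed edges
print(CIJ.shape)                   # expected: (20, 20)
print(int(np.sum(CIJ)))            # expected: 80, excess connections are pruned back to k
print(bool(np.any(np.diag(CIJ))))  # expected: False, no self-connections on the main diagonal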
def utf8(value): """Converts a string argument to a byte string. If the argument is already a byte string or None, it is returned unchanged. Otherwise it must be a unicode string and is encoded as utf8. """ if isinstance(value, _UTF8_TYPES): return value elif isinstance(value, unicode_type): return value.encode("utf-8") else: return str(value)
def function[utf8, parameter[value]]: constant[Converts a string argument to a byte string. If the argument is already a byte string or None, it is returned unchanged. Otherwise it must be a unicode string and is encoded as utf8. ] if call[name[isinstance], parameter[name[value], name[_UTF8_TYPES]]] begin[:] return[name[value]]
keyword[def] identifier[utf8] ( identifier[value] ): literal[string] keyword[if] identifier[isinstance] ( identifier[value] , identifier[_UTF8_TYPES] ): keyword[return] identifier[value] keyword[elif] identifier[isinstance] ( identifier[value] , identifier[unicode_type] ): keyword[return] identifier[value] . identifier[encode] ( literal[string] ) keyword[else] : keyword[return] identifier[str] ( identifier[value] )
def utf8(value): """Converts a string argument to a byte string. If the argument is already a byte string or None, it is returned unchanged. Otherwise it must be a unicode string and is encoded as utf8. """ if isinstance(value, _UTF8_TYPES): return value # depends on [control=['if'], data=[]] elif isinstance(value, unicode_type): return value.encode('utf-8') # depends on [control=['if'], data=[]] else: return str(value)
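A short illustration of the coercion rules above. The module-level names _UTF8_TYPES and unicode_type are not shown in this snippet, so hypothetical stand-ins (matching Tornado's definitions) are declared first; the sketch assumes utf8 is defined in the same session.

# Hypothetical stand-ins for the module-level names utf8() relies on.
_UTF8_TYPES = (bytes, type(None))
unicode_type = str

print(utf8(u"caf\u00e9"))        # b'caf\xc3\xa9', text is encoded as UTF-8
print(utf8(b"already-bytes"))    # returned unchanged
print(utf8(None))                # None passes through via _UTF8_TYPES
print(utf8(42))                  # '42', any other type falls back to str()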
def get(key, default=''): ''' .. versionadded: 0.14.0 Attempt to retrieve the named value from opts, pillar, grains of the master config, if the named value is not available return the passed default. The default return is an empty string. The value can also represent a value in a nested dict using a ":" delimiter for the dict. This means that if a dict looks like this:: {'pkg': {'apache': 'httpd'}} To retrieve the value associated with the apache key in the pkg dict this key can be passed:: pkg:apache This routine traverses these data stores in this order: - Local minion config (opts) - Minion's grains - Minion's pillar - Master config CLI Example: .. code-block:: bash salt '*' config.get pkg:apache ''' ret = salt.utils.data.traverse_dict_and_list(__opts__, key, '_|-') if ret != '_|-': return ret ret = salt.utils.data.traverse_dict_and_list(__grains__, key, '_|-') if ret != '_|-': return ret ret = salt.utils.data.traverse_dict_and_list(__pillar__, key, '_|-') if ret != '_|-': return ret ret = salt.utils.data.traverse_dict_and_list(__pillar__.get('master', {}), key, '_|-') if ret != '_|-': return ret return default
def function[get, parameter[key, default]]: constant[ .. versionadded: 0.14.0 Attempt to retrieve the named value from opts, pillar, grains of the master config, if the named value is not available return the passed default. The default return is an empty string. The value can also represent a value in a nested dict using a ":" delimiter for the dict. This means that if a dict looks like this:: {'pkg': {'apache': 'httpd'}} To retrieve the value associated with the apache key in the pkg dict this key can be passed:: pkg:apache This routine traverses these data stores in this order: - Local minion config (opts) - Minion's grains - Minion's pillar - Master config CLI Example: .. code-block:: bash salt '*' config.get pkg:apache ] variable[ret] assign[=] call[name[salt].utils.data.traverse_dict_and_list, parameter[name[__opts__], name[key], constant[_|-]]] if compare[name[ret] not_equal[!=] constant[_|-]] begin[:] return[name[ret]] variable[ret] assign[=] call[name[salt].utils.data.traverse_dict_and_list, parameter[name[__grains__], name[key], constant[_|-]]] if compare[name[ret] not_equal[!=] constant[_|-]] begin[:] return[name[ret]] variable[ret] assign[=] call[name[salt].utils.data.traverse_dict_and_list, parameter[name[__pillar__], name[key], constant[_|-]]] if compare[name[ret] not_equal[!=] constant[_|-]] begin[:] return[name[ret]] variable[ret] assign[=] call[name[salt].utils.data.traverse_dict_and_list, parameter[call[name[__pillar__].get, parameter[constant[master], dictionary[[], []]]], name[key], constant[_|-]]] if compare[name[ret] not_equal[!=] constant[_|-]] begin[:] return[name[ret]] return[name[default]]
keyword[def] identifier[get] ( identifier[key] , identifier[default] = literal[string] ): literal[string] identifier[ret] = identifier[salt] . identifier[utils] . identifier[data] . identifier[traverse_dict_and_list] ( identifier[__opts__] , identifier[key] , literal[string] ) keyword[if] identifier[ret] != literal[string] : keyword[return] identifier[ret] identifier[ret] = identifier[salt] . identifier[utils] . identifier[data] . identifier[traverse_dict_and_list] ( identifier[__grains__] , identifier[key] , literal[string] ) keyword[if] identifier[ret] != literal[string] : keyword[return] identifier[ret] identifier[ret] = identifier[salt] . identifier[utils] . identifier[data] . identifier[traverse_dict_and_list] ( identifier[__pillar__] , identifier[key] , literal[string] ) keyword[if] identifier[ret] != literal[string] : keyword[return] identifier[ret] identifier[ret] = identifier[salt] . identifier[utils] . identifier[data] . identifier[traverse_dict_and_list] ( identifier[__pillar__] . identifier[get] ( literal[string] ,{}), identifier[key] , literal[string] ) keyword[if] identifier[ret] != literal[string] : keyword[return] identifier[ret] keyword[return] identifier[default]
def get(key, default=''): """ .. versionadded: 0.14.0 Attempt to retrieve the named value from opts, pillar, grains of the master config, if the named value is not available return the passed default. The default return is an empty string. The value can also represent a value in a nested dict using a ":" delimiter for the dict. This means that if a dict looks like this:: {'pkg': {'apache': 'httpd'}} To retrieve the value associated with the apache key in the pkg dict this key can be passed:: pkg:apache This routine traverses these data stores in this order: - Local minion config (opts) - Minion's grains - Minion's pillar - Master config CLI Example: .. code-block:: bash salt '*' config.get pkg:apache """ ret = salt.utils.data.traverse_dict_and_list(__opts__, key, '_|-') if ret != '_|-': return ret # depends on [control=['if'], data=['ret']] ret = salt.utils.data.traverse_dict_and_list(__grains__, key, '_|-') if ret != '_|-': return ret # depends on [control=['if'], data=['ret']] ret = salt.utils.data.traverse_dict_and_list(__pillar__, key, '_|-') if ret != '_|-': return ret # depends on [control=['if'], data=['ret']] ret = salt.utils.data.traverse_dict_and_list(__pillar__.get('master', {}), key, '_|-') if ret != '_|-': return ret # depends on [control=['if'], data=['ret']] return default
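The docstring already gives the CLI form; the sketch below only demonstrates the nested-key traversal that get() delegates to, using the same '_|-' sentinel. It assumes a SaltStack installation so that salt.utils.data is importable.

import salt.utils.data

opts = {'pkg': {'apache': 'httpd'}}
# 'pkg:apache' walks into the nested dict; a miss returns the sentinel,
# which get() treats as "not found" and falls through to the next data store.
print(salt.utils.data.traverse_dict_and_list(opts, 'pkg:apache', '_|-'))  # httpd
print(salt.utils.data.traverse_dict_and_list(opts, 'pkg:nginx', '_|-'))   # _|-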
def ndims(self): """Returns the rank of this shape, or None if it is unspecified.""" if self._dims is None: return None else: if self._ndims is None: self._ndims = len(self._dims) return self._ndims
def function[ndims, parameter[self]]: constant[Returns the rank of this shape, or None if it is unspecified.] if compare[name[self]._dims is constant[None]] begin[:] return[constant[None]]
keyword[def] identifier[ndims] ( identifier[self] ): literal[string] keyword[if] identifier[self] . identifier[_dims] keyword[is] keyword[None] : keyword[return] keyword[None] keyword[else] : keyword[if] identifier[self] . identifier[_ndims] keyword[is] keyword[None] : identifier[self] . identifier[_ndims] = identifier[len] ( identifier[self] . identifier[_dims] ) keyword[return] identifier[self] . identifier[_ndims]
def ndims(self): """Returns the rank of this shape, or None if it is unspecified.""" if self._dims is None: return None # depends on [control=['if'], data=[]] else: if self._ndims is None: self._ndims = len(self._dims) # depends on [control=['if'], data=[]] return self._ndims
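A tiny stand-in object to exercise the accessor above. The real owner is a TensorShape-like class; the stub below is hypothetical, provides only the two attributes the method reads, and assumes ndims is available as a plain function in the session.

class _ShapeStub(object):
    def __init__(self, dims):
        self._dims = dims     # None means the rank is unknown
        self._ndims = None    # memoized by the accessor on first use
    get_ndims = ndims         # reuse the function above as a method

print(_ShapeStub([2, 3, 4]).get_ndims())  # 3
print(_ShapeStub(None).get_ndims())       # None, rank unspecified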
def deduplicate(pairs, aa=False, ignore_primer_regions=False): ''' Removes duplicate sequences from a list of Pair objects. If a Pair has heavy and light chains, both chains must identically match heavy and light chains from another Pair to be considered a duplicate. If a Pair has only a single chain, identical matches to that chain will cause the single chain Pair to be considered a duplicate, even if the comparison Pair has both chains. Note that identical sequences are identified by simple string comparison, so sequences of different length that are identical over the entirety of the shorter sequence are not considered duplicates. By default, comparison is made on the nucleotide sequence. To use the amino acid sequence instead, set aa=True. ''' nr_pairs = [] just_pairs = [p for p in pairs if p.is_pair] single_chains = [p for p in pairs if not p.is_pair] _pairs = just_pairs + single_chains for p in _pairs: duplicates = [] for nr in nr_pairs: identical = True vdj = 'vdj_aa' if aa else 'vdj_nt' offset = 4 if aa else 12 if p.heavy is not None: if nr.heavy is None: identical = False else: heavy = p.heavy[vdj][offset:-offset] if ignore_primer_regions else p.heavy[vdj] nr_heavy = nr.heavy[vdj][offset:-offset] if ignore_primer_regions else nr.heavy[vdj] if heavy != nr_heavy: identical = False if p.light is not None: if nr.light is None: identical = False else: light = p.light[vdj][offset:-offset] if ignore_primer_regions else p.light[vdj] nr_light = nr.light[vdj][offset:-offset] if ignore_primer_regions else nr.light[vdj] if light != nr_light: identical = False duplicates.append(identical) if any(duplicates): continue else: nr_pairs.append(p) return nr_pairs
def function[deduplicate, parameter[pairs, aa, ignore_primer_regions]]: constant[ Removes duplicate sequences from a list of Pair objects. If a Pair has heavy and light chains, both chains must identically match heavy and light chains from another Pair to be considered a duplicate. If a Pair has only a single chain, identical matches to that chain will cause the single chain Pair to be considered a duplicate, even if the comparison Pair has both chains. Note that identical sequences are identified by simple string comparison, so sequences of different length that are identical over the entirety of the shorter sequence are not considered duplicates. By default, comparison is made on the nucleotide sequence. To use the amino acid sequence instead, set aa=True. ] variable[nr_pairs] assign[=] list[[]] variable[just_pairs] assign[=] <ast.ListComp object at 0x7da207f9b430> variable[single_chains] assign[=] <ast.ListComp object at 0x7da207f9a740> variable[_pairs] assign[=] binary_operation[name[just_pairs] + name[single_chains]] for taget[name[p]] in starred[name[_pairs]] begin[:] variable[duplicates] assign[=] list[[]] for taget[name[nr]] in starred[name[nr_pairs]] begin[:] variable[identical] assign[=] constant[True] variable[vdj] assign[=] <ast.IfExp object at 0x7da207f9b730> variable[offset] assign[=] <ast.IfExp object at 0x7da207f984c0> if compare[name[p].heavy is_not constant[None]] begin[:] if compare[name[nr].heavy is constant[None]] begin[:] variable[identical] assign[=] constant[False] if compare[name[p].light is_not constant[None]] begin[:] if compare[name[nr].light is constant[None]] begin[:] variable[identical] assign[=] constant[False] call[name[duplicates].append, parameter[name[identical]]] if call[name[any], parameter[name[duplicates]]] begin[:] continue return[name[nr_pairs]]
keyword[def] identifier[deduplicate] ( identifier[pairs] , identifier[aa] = keyword[False] , identifier[ignore_primer_regions] = keyword[False] ): literal[string] identifier[nr_pairs] =[] identifier[just_pairs] =[ identifier[p] keyword[for] identifier[p] keyword[in] identifier[pairs] keyword[if] identifier[p] . identifier[is_pair] ] identifier[single_chains] =[ identifier[p] keyword[for] identifier[p] keyword[in] identifier[pairs] keyword[if] keyword[not] identifier[p] . identifier[is_pair] ] identifier[_pairs] = identifier[just_pairs] + identifier[single_chains] keyword[for] identifier[p] keyword[in] identifier[_pairs] : identifier[duplicates] =[] keyword[for] identifier[nr] keyword[in] identifier[nr_pairs] : identifier[identical] = keyword[True] identifier[vdj] = literal[string] keyword[if] identifier[aa] keyword[else] literal[string] identifier[offset] = literal[int] keyword[if] identifier[aa] keyword[else] literal[int] keyword[if] identifier[p] . identifier[heavy] keyword[is] keyword[not] keyword[None] : keyword[if] identifier[nr] . identifier[heavy] keyword[is] keyword[None] : identifier[identical] = keyword[False] keyword[else] : identifier[heavy] = identifier[p] . identifier[heavy] [ identifier[vdj] ][ identifier[offset] :- identifier[offset] ] keyword[if] identifier[ignore_primer_regions] keyword[else] identifier[p] . identifier[heavy] [ identifier[vdj] ] identifier[nr_heavy] = identifier[nr] . identifier[heavy] [ identifier[vdj] ][ identifier[offset] :- identifier[offset] ] keyword[if] identifier[ignore_primer_regions] keyword[else] identifier[nr] . identifier[heavy] [ identifier[vdj] ] keyword[if] identifier[heavy] != identifier[nr_heavy] : identifier[identical] = keyword[False] keyword[if] identifier[p] . identifier[light] keyword[is] keyword[not] keyword[None] : keyword[if] identifier[nr] . identifier[light] keyword[is] keyword[None] : identifier[identical] = keyword[False] keyword[else] : identifier[light] = identifier[p] . identifier[light] [ identifier[vdj] ][ identifier[offset] :- identifier[offset] ] keyword[if] identifier[ignore_primer_regions] keyword[else] identifier[p] . identifier[light] [ identifier[vdj] ] identifier[nr_light] = identifier[nr] . identifier[light] [ identifier[vdj] ][ identifier[offset] :- identifier[offset] ] keyword[if] identifier[ignore_primer_regions] keyword[else] identifier[nr] . identifier[light] [ identifier[vdj] ] keyword[if] identifier[light] != identifier[nr_light] : identifier[identical] = keyword[False] identifier[duplicates] . identifier[append] ( identifier[identical] ) keyword[if] identifier[any] ( identifier[duplicates] ): keyword[continue] keyword[else] : identifier[nr_pairs] . identifier[append] ( identifier[p] ) keyword[return] identifier[nr_pairs]
def deduplicate(pairs, aa=False, ignore_primer_regions=False): """ Removes duplicate sequences from a list of Pair objects. If a Pair has heavy and light chains, both chains must identically match heavy and light chains from another Pair to be considered a duplicate. If a Pair has only a single chain, identical matches to that chain will cause the single chain Pair to be considered a duplicate, even if the comparison Pair has both chains. Note that identical sequences are identified by simple string comparison, so sequences of different length that are identical over the entirety of the shorter sequence are not considered duplicates. By default, comparison is made on the nucleotide sequence. To use the amino acid sequence instead, set aa=True. """ nr_pairs = [] just_pairs = [p for p in pairs if p.is_pair] single_chains = [p for p in pairs if not p.is_pair] _pairs = just_pairs + single_chains for p in _pairs: duplicates = [] for nr in nr_pairs: identical = True vdj = 'vdj_aa' if aa else 'vdj_nt' offset = 4 if aa else 12 if p.heavy is not None: if nr.heavy is None: identical = False # depends on [control=['if'], data=[]] else: heavy = p.heavy[vdj][offset:-offset] if ignore_primer_regions else p.heavy[vdj] nr_heavy = nr.heavy[vdj][offset:-offset] if ignore_primer_regions else nr.heavy[vdj] if heavy != nr_heavy: identical = False # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] if p.light is not None: if nr.light is None: identical = False # depends on [control=['if'], data=[]] else: light = p.light[vdj][offset:-offset] if ignore_primer_regions else p.light[vdj] nr_light = nr.light[vdj][offset:-offset] if ignore_primer_regions else nr.light[vdj] if light != nr_light: identical = False # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] duplicates.append(identical) # depends on [control=['for'], data=['nr']] if any(duplicates): continue # depends on [control=['if'], data=[]] else: nr_pairs.append(p) # depends on [control=['for'], data=['p']] return nr_pairs
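A hedged sketch of the de-duplication behaviour. The _PairStub class below is hypothetical: it models only the attributes deduplicate() actually reads (heavy, light, is_pair), not the real Pair objects this code was written for.

class _PairStub(object):
    def __init__(self, heavy=None, light=None):
        self.heavy = heavy    # dict with a 'vdj_nt' (or 'vdj_aa') entry, or None
        self.light = light
        self.is_pair = heavy is not None and light is not None

a = _PairStub(heavy={'vdj_nt': 'ATGGTGCAG'}, light={'vdj_nt': 'GACATCCAG'})
b = _PairStub(heavy={'vdj_nt': 'ATGGTGCAG'}, light={'vdj_nt': 'GACATCCAG'})  # duplicate of a
c = _PairStub(heavy={'vdj_nt': 'ATGTTTCAG'})                                 # unpaired heavy chain

print(len(deduplicate([a, b, c])))  # 2, b is dropped as an exact duplicate of a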
async def get_firmware_version(self): """ This method retrieves the Firmata firmware version :returns: Firmata firmware version """ current_time = time.time() if self.query_reply_data.get(PrivateConstants.REPORT_FIRMWARE) == '': await self._send_sysex(PrivateConstants.REPORT_FIRMWARE) while self.query_reply_data.get( PrivateConstants.REPORT_FIRMWARE) == '': elapsed_time = time.time() if elapsed_time - current_time > 4: return None await asyncio.sleep(self.sleep_tune) return self.query_reply_data.get(PrivateConstants.REPORT_FIRMWARE)
<ast.AsyncFunctionDef object at 0x7da18dc9b5b0>
keyword[async] keyword[def] identifier[get_firmware_version] ( identifier[self] ): literal[string] identifier[current_time] = identifier[time] . identifier[time] () keyword[if] identifier[self] . identifier[query_reply_data] . identifier[get] ( identifier[PrivateConstants] . identifier[REPORT_FIRMWARE] )== literal[string] : keyword[await] identifier[self] . identifier[_send_sysex] ( identifier[PrivateConstants] . identifier[REPORT_FIRMWARE] ) keyword[while] identifier[self] . identifier[query_reply_data] . identifier[get] ( identifier[PrivateConstants] . identifier[REPORT_FIRMWARE] )== literal[string] : identifier[elapsed_time] = identifier[time] . identifier[time] () keyword[if] identifier[elapsed_time] - identifier[current_time] > literal[int] : keyword[return] keyword[None] keyword[await] identifier[asyncio] . identifier[sleep] ( identifier[self] . identifier[sleep_tune] ) keyword[return] identifier[self] . identifier[query_reply_data] . identifier[get] ( identifier[PrivateConstants] . identifier[REPORT_FIRMWARE] )
async def get_firmware_version(self): """ This method retrieves the Firmata firmware version :returns: Firmata firmware version """ current_time = time.time() if self.query_reply_data.get(PrivateConstants.REPORT_FIRMWARE) == '': await self._send_sysex(PrivateConstants.REPORT_FIRMWARE) while self.query_reply_data.get(PrivateConstants.REPORT_FIRMWARE) == '': elapsed_time = time.time() if elapsed_time - current_time > 4: return None # depends on [control=['if'], data=[]] await asyncio.sleep(self.sleep_tune) # depends on [control=['while'], data=[]] # depends on [control=['if'], data=[]] return self.query_reply_data.get(PrivateConstants.REPORT_FIRMWARE)
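A usage sketch for the coroutine above, assuming a pymata-express style board object that exposes this method; board construction is omitted, so the board argument is a placeholder.

import asyncio

async def report_firmware(board):
    # Returns None if the sysex reply does not arrive within roughly 4 seconds.
    version = await board.get_firmware_version()
    print('Firmata firmware version:', version)

# asyncio.get_event_loop().run_until_complete(report_firmware(board))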
def _expand_alts_and_remove_duplicates_in_list(cls, vcf_records, ref_seq, indel_gap=100): '''Input: list of VCF records, all from the same CHROM. ref_seq = sequence of that CHROM. Expands any record in the list that has >ALT, into one record per ALT. Removes duplicated records, where REF and ALT are the same (at the same position!), or where there is the same indel more than once, but written in a different way (eg indel in homopolymer run can be put in >1 way in a VCF. Checks indels are the same within indel_gap nucleotides of each other''' expanded_vcf_records = VcfClusterer._expand_alts_in_vcf_record_list(vcf_records) new_vcf_records = [x for x in expanded_vcf_records if not x.is_snp()] for i in range(len(new_vcf_records) - 1): j = i + 1 while j < len(new_vcf_records) and new_vcf_records[i].ref_end_pos() + indel_gap > new_vcf_records[j].POS: if new_vcf_records[i].is_the_same_indel(new_vcf_records[j], ref_seq): new_vcf_records.pop(j) else: j += 1 new_vcf_records.extend([x for x in expanded_vcf_records if x.is_snp()]) new_vcf_records.sort(key=operator.attrgetter('POS')) return new_vcf_records
def function[_expand_alts_and_remove_duplicates_in_list, parameter[cls, vcf_records, ref_seq, indel_gap]]: constant[Input: list of VCF records, all from the same CHROM. ref_seq = sequence of that CHROM. Expands any record in the list that has >ALT, into one record per ALT. Removes duplicated records, where REF and ALT are the same (at the same position!), or where there is the same indel more than once, but written in a different way (eg indel in homopolymer run can be put in >1 way in a VCF. Checks indels are the same within indel_gap nucleotides of each other] variable[expanded_vcf_records] assign[=] call[name[VcfClusterer]._expand_alts_in_vcf_record_list, parameter[name[vcf_records]]] variable[new_vcf_records] assign[=] <ast.ListComp object at 0x7da1b1c3cdf0> for taget[name[i]] in starred[call[name[range], parameter[binary_operation[call[name[len], parameter[name[new_vcf_records]]] - constant[1]]]]] begin[:] variable[j] assign[=] binary_operation[name[i] + constant[1]] while <ast.BoolOp object at 0x7da1b1c3da20> begin[:] if call[call[name[new_vcf_records]][name[i]].is_the_same_indel, parameter[call[name[new_vcf_records]][name[j]], name[ref_seq]]] begin[:] call[name[new_vcf_records].pop, parameter[name[j]]] call[name[new_vcf_records].extend, parameter[<ast.ListComp object at 0x7da1b1c3fa60>]] call[name[new_vcf_records].sort, parameter[]] return[name[new_vcf_records]]
keyword[def] identifier[_expand_alts_and_remove_duplicates_in_list] ( identifier[cls] , identifier[vcf_records] , identifier[ref_seq] , identifier[indel_gap] = literal[int] ): literal[string] identifier[expanded_vcf_records] = identifier[VcfClusterer] . identifier[_expand_alts_in_vcf_record_list] ( identifier[vcf_records] ) identifier[new_vcf_records] =[ identifier[x] keyword[for] identifier[x] keyword[in] identifier[expanded_vcf_records] keyword[if] keyword[not] identifier[x] . identifier[is_snp] ()] keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[new_vcf_records] )- literal[int] ): identifier[j] = identifier[i] + literal[int] keyword[while] identifier[j] < identifier[len] ( identifier[new_vcf_records] ) keyword[and] identifier[new_vcf_records] [ identifier[i] ]. identifier[ref_end_pos] ()+ identifier[indel_gap] > identifier[new_vcf_records] [ identifier[j] ]. identifier[POS] : keyword[if] identifier[new_vcf_records] [ identifier[i] ]. identifier[is_the_same_indel] ( identifier[new_vcf_records] [ identifier[j] ], identifier[ref_seq] ): identifier[new_vcf_records] . identifier[pop] ( identifier[j] ) keyword[else] : identifier[j] += literal[int] identifier[new_vcf_records] . identifier[extend] ([ identifier[x] keyword[for] identifier[x] keyword[in] identifier[expanded_vcf_records] keyword[if] identifier[x] . identifier[is_snp] ()]) identifier[new_vcf_records] . identifier[sort] ( identifier[key] = identifier[operator] . identifier[attrgetter] ( literal[string] )) keyword[return] identifier[new_vcf_records]
def _expand_alts_and_remove_duplicates_in_list(cls, vcf_records, ref_seq, indel_gap=100): """Input: list of VCF records, all from the same CHROM. ref_seq = sequence of that CHROM. Expands any record in the list that has >ALT, into one record per ALT. Removes duplicated records, where REF and ALT are the same (at the same position!), or where there is the same indel more than once, but written in a different way (eg indel in homopolymer run can be put in >1 way in a VCF. Checks indels are the same within indel_gap nucleotides of each other""" expanded_vcf_records = VcfClusterer._expand_alts_in_vcf_record_list(vcf_records) new_vcf_records = [x for x in expanded_vcf_records if not x.is_snp()] for i in range(len(new_vcf_records) - 1): j = i + 1 while j < len(new_vcf_records) and new_vcf_records[i].ref_end_pos() + indel_gap > new_vcf_records[j].POS: if new_vcf_records[i].is_the_same_indel(new_vcf_records[j], ref_seq): new_vcf_records.pop(j) # depends on [control=['if'], data=[]] else: j += 1 # depends on [control=['while'], data=[]] # depends on [control=['for'], data=['i']] new_vcf_records.extend([x for x in expanded_vcf_records if x.is_snp()]) new_vcf_records.sort(key=operator.attrgetter('POS')) return new_vcf_records
def run(self, arguments, show_help=True): """ Program entry point. Please note that the first item in ``arguments`` is discarded, as it is assumed to be the script/invocation name; pass a "dumb" placeholder if you call this method with an argument different that ``sys.argv``. :param arguments: the list of arguments :type arguments: list :param show_help: if ``False``, do not show help on ``-h`` and ``--help`` :type show_help: bool :rtype: int """ # convert arguments into Unicode strings if self.use_sys: # check that sys.stdin.encoding and sys.stdout.encoding are set to utf-8 if not gf.FROZEN: if sys.stdin.encoding not in ["UTF-8", "UTF8"]: self.print_warning(u"The default input encoding is not UTF-8.") self.print_warning(u"You might want to set 'PYTHONIOENCODING=UTF-8' in your shell.") if sys.stdout.encoding not in ["UTF-8", "UTF8"]: self.print_warning(u"The default output encoding is not UTF-8.") self.print_warning(u"You might want to set 'PYTHONIOENCODING=UTF-8' in your shell.") # decode using sys.stdin.encoding args = [gf.safe_unicode_stdin(arg) for arg in arguments] else: # decode using utf-8 (but you should pass Unicode strings as parameters anyway) args = [gf.safe_unicode(arg) for arg in arguments] if show_help: if u"-h" in args: return self.print_help(short=True) if u"--help" in args: return self.print_help(short=False) if u"--help-rconf" in args: return self.print_rconf_parameters() if u"--version" in args: return self.print_name_version() # store formal arguments self.formal_arguments_raw = arguments self.formal_arguments = args # to obtain the actual arguments, # remove the first one and "special" switches args = args[1:] set_args = set(args) # set verbosity, if requested for flag in set([u"-v", u"--verbose"]) & set_args: self.verbose = True args.remove(flag) for flag in set([u"-vv", u"--very-verbose"]) & set_args: self.verbose = True self.very_verbose = True args.remove(flag) # set RuntimeConfiguration string, if specified for flag in [u"-r", u"--runtime-configuration"]: rconf_string = self.has_option_with_value(flag, actual_arguments=False) if rconf_string is not None: self.rconf = RuntimeConfiguration(rconf_string) args.remove("%s=%s" % (flag, rconf_string)) # set log file path, if requested log_path = None for flag in [u"-l", u"--log"]: log_path = self.has_option_with_value(flag, actual_arguments=False) if log_path is not None: args.remove("%s=%s" % (flag, log_path)) elif flag in set_args: handler, log_path = gf.tmp_file(suffix=u".log", root=self.rconf[RuntimeConfiguration.TMP_PATH]) args.remove(flag) if log_path is not None: self.log_file_path = log_path # if no actual arguments left, print help if (len(args) < 1) and (show_help): return self.print_help(short=True) # store actual arguments self.actual_arguments = args # create logger self.logger = Logger(tee=self.verbose, tee_show_datetime=self.very_verbose) self.log([u"Running aeneas %s", aeneas_version]) self.log([u"Formal arguments: %s", self.formal_arguments]) self.log([u"Actual arguments: %s", self.actual_arguments]) self.log([u"Runtime configuration: '%s'", self.rconf.config_string]) # perform command exit_code = self.perform_command() self.log([u"Execution completed with code %d", exit_code]) # output log if requested if self.log_file_path is not None: self.log([u"User requested saving log to file '%s'", self.log_file_path]) self.logger.write(self.log_file_path) if self.use_sys: self.print_info(u"Log written to file '%s'" % self.log_file_path) return self.exit(exit_code)
def function[run, parameter[self, arguments, show_help]]: constant[ Program entry point. Please note that the first item in ``arguments`` is discarded, as it is assumed to be the script/invocation name; pass a "dumb" placeholder if you call this method with an argument different that ``sys.argv``. :param arguments: the list of arguments :type arguments: list :param show_help: if ``False``, do not show help on ``-h`` and ``--help`` :type show_help: bool :rtype: int ] if name[self].use_sys begin[:] if <ast.UnaryOp object at 0x7da1b1511480> begin[:] if compare[name[sys].stdin.encoding <ast.NotIn object at 0x7da2590d7190> list[[<ast.Constant object at 0x7da1b1511150>, <ast.Constant object at 0x7da1b15110c0>]]] begin[:] call[name[self].print_warning, parameter[constant[The default input encoding is not UTF-8.]]] call[name[self].print_warning, parameter[constant[You might want to set 'PYTHONIOENCODING=UTF-8' in your shell.]]] if compare[name[sys].stdout.encoding <ast.NotIn object at 0x7da2590d7190> list[[<ast.Constant object at 0x7da1b1510550>, <ast.Constant object at 0x7da1b15104f0>]]] begin[:] call[name[self].print_warning, parameter[constant[The default output encoding is not UTF-8.]]] call[name[self].print_warning, parameter[constant[You might want to set 'PYTHONIOENCODING=UTF-8' in your shell.]]] variable[args] assign[=] <ast.ListComp object at 0x7da1b1510640> if name[show_help] begin[:] if compare[constant[-h] in name[args]] begin[:] return[call[name[self].print_help, parameter[]]] if compare[constant[--help] in name[args]] begin[:] return[call[name[self].print_help, parameter[]]] if compare[constant[--help-rconf] in name[args]] begin[:] return[call[name[self].print_rconf_parameters, parameter[]]] if compare[constant[--version] in name[args]] begin[:] return[call[name[self].print_name_version, parameter[]]] name[self].formal_arguments_raw assign[=] name[arguments] name[self].formal_arguments assign[=] name[args] variable[args] assign[=] call[name[args]][<ast.Slice object at 0x7da20e9b22f0>] variable[set_args] assign[=] call[name[set], parameter[name[args]]] for taget[name[flag]] in starred[binary_operation[call[name[set], parameter[list[[<ast.Constant object at 0x7da18f813a00>, <ast.Constant object at 0x7da18f8106d0>]]]] <ast.BitAnd object at 0x7da2590d6b60> name[set_args]]] begin[:] name[self].verbose assign[=] constant[True] call[name[args].remove, parameter[name[flag]]] for taget[name[flag]] in starred[binary_operation[call[name[set], parameter[list[[<ast.Constant object at 0x7da18f810e20>, <ast.Constant object at 0x7da18f812380>]]]] <ast.BitAnd object at 0x7da2590d6b60> name[set_args]]] begin[:] name[self].verbose assign[=] constant[True] name[self].very_verbose assign[=] constant[True] call[name[args].remove, parameter[name[flag]]] for taget[name[flag]] in starred[list[[<ast.Constant object at 0x7da18f8124d0>, <ast.Constant object at 0x7da18f813010>]]] begin[:] variable[rconf_string] assign[=] call[name[self].has_option_with_value, parameter[name[flag]]] if compare[name[rconf_string] is_not constant[None]] begin[:] name[self].rconf assign[=] call[name[RuntimeConfiguration], parameter[name[rconf_string]]] call[name[args].remove, parameter[binary_operation[constant[%s=%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da18f810040>, <ast.Name object at 0x7da18f8103d0>]]]]] variable[log_path] assign[=] constant[None] for taget[name[flag]] in starred[list[[<ast.Constant object at 0x7da18f813b20>, <ast.Constant object at 0x7da18f8118d0>]]] begin[:] variable[log_path] 
assign[=] call[name[self].has_option_with_value, parameter[name[flag]]] if compare[name[log_path] is_not constant[None]] begin[:] call[name[args].remove, parameter[binary_operation[constant[%s=%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da18f811570>, <ast.Name object at 0x7da18f812e60>]]]]] if compare[name[log_path] is_not constant[None]] begin[:] name[self].log_file_path assign[=] name[log_path] if <ast.BoolOp object at 0x7da18f812680> begin[:] return[call[name[self].print_help, parameter[]]] name[self].actual_arguments assign[=] name[args] name[self].logger assign[=] call[name[Logger], parameter[]] call[name[self].log, parameter[list[[<ast.Constant object at 0x7da18c4ced10>, <ast.Name object at 0x7da18c4cf670>]]]] call[name[self].log, parameter[list[[<ast.Constant object at 0x7da18c4cc640>, <ast.Attribute object at 0x7da18c4cf550>]]]] call[name[self].log, parameter[list[[<ast.Constant object at 0x7da1b17f9cf0>, <ast.Attribute object at 0x7da1b17f9ea0>]]]] call[name[self].log, parameter[list[[<ast.Constant object at 0x7da20c993340>, <ast.Attribute object at 0x7da20c991180>]]]] variable[exit_code] assign[=] call[name[self].perform_command, parameter[]] call[name[self].log, parameter[list[[<ast.Constant object at 0x7da18eb56ce0>, <ast.Name object at 0x7da18eb54fd0>]]]] if compare[name[self].log_file_path is_not constant[None]] begin[:] call[name[self].log, parameter[list[[<ast.Constant object at 0x7da18eb561d0>, <ast.Attribute object at 0x7da18eb558d0>]]]] call[name[self].logger.write, parameter[name[self].log_file_path]] if name[self].use_sys begin[:] call[name[self].print_info, parameter[binary_operation[constant[Log written to file '%s'] <ast.Mod object at 0x7da2590d6920> name[self].log_file_path]]] return[call[name[self].exit, parameter[name[exit_code]]]]
keyword[def] identifier[run] ( identifier[self] , identifier[arguments] , identifier[show_help] = keyword[True] ): literal[string] keyword[if] identifier[self] . identifier[use_sys] : keyword[if] keyword[not] identifier[gf] . identifier[FROZEN] : keyword[if] identifier[sys] . identifier[stdin] . identifier[encoding] keyword[not] keyword[in] [ literal[string] , literal[string] ]: identifier[self] . identifier[print_warning] ( literal[string] ) identifier[self] . identifier[print_warning] ( literal[string] ) keyword[if] identifier[sys] . identifier[stdout] . identifier[encoding] keyword[not] keyword[in] [ literal[string] , literal[string] ]: identifier[self] . identifier[print_warning] ( literal[string] ) identifier[self] . identifier[print_warning] ( literal[string] ) identifier[args] =[ identifier[gf] . identifier[safe_unicode_stdin] ( identifier[arg] ) keyword[for] identifier[arg] keyword[in] identifier[arguments] ] keyword[else] : identifier[args] =[ identifier[gf] . identifier[safe_unicode] ( identifier[arg] ) keyword[for] identifier[arg] keyword[in] identifier[arguments] ] keyword[if] identifier[show_help] : keyword[if] literal[string] keyword[in] identifier[args] : keyword[return] identifier[self] . identifier[print_help] ( identifier[short] = keyword[True] ) keyword[if] literal[string] keyword[in] identifier[args] : keyword[return] identifier[self] . identifier[print_help] ( identifier[short] = keyword[False] ) keyword[if] literal[string] keyword[in] identifier[args] : keyword[return] identifier[self] . identifier[print_rconf_parameters] () keyword[if] literal[string] keyword[in] identifier[args] : keyword[return] identifier[self] . identifier[print_name_version] () identifier[self] . identifier[formal_arguments_raw] = identifier[arguments] identifier[self] . identifier[formal_arguments] = identifier[args] identifier[args] = identifier[args] [ literal[int] :] identifier[set_args] = identifier[set] ( identifier[args] ) keyword[for] identifier[flag] keyword[in] identifier[set] ([ literal[string] , literal[string] ])& identifier[set_args] : identifier[self] . identifier[verbose] = keyword[True] identifier[args] . identifier[remove] ( identifier[flag] ) keyword[for] identifier[flag] keyword[in] identifier[set] ([ literal[string] , literal[string] ])& identifier[set_args] : identifier[self] . identifier[verbose] = keyword[True] identifier[self] . identifier[very_verbose] = keyword[True] identifier[args] . identifier[remove] ( identifier[flag] ) keyword[for] identifier[flag] keyword[in] [ literal[string] , literal[string] ]: identifier[rconf_string] = identifier[self] . identifier[has_option_with_value] ( identifier[flag] , identifier[actual_arguments] = keyword[False] ) keyword[if] identifier[rconf_string] keyword[is] keyword[not] keyword[None] : identifier[self] . identifier[rconf] = identifier[RuntimeConfiguration] ( identifier[rconf_string] ) identifier[args] . identifier[remove] ( literal[string] %( identifier[flag] , identifier[rconf_string] )) identifier[log_path] = keyword[None] keyword[for] identifier[flag] keyword[in] [ literal[string] , literal[string] ]: identifier[log_path] = identifier[self] . identifier[has_option_with_value] ( identifier[flag] , identifier[actual_arguments] = keyword[False] ) keyword[if] identifier[log_path] keyword[is] keyword[not] keyword[None] : identifier[args] . 
identifier[remove] ( literal[string] %( identifier[flag] , identifier[log_path] )) keyword[elif] identifier[flag] keyword[in] identifier[set_args] : identifier[handler] , identifier[log_path] = identifier[gf] . identifier[tmp_file] ( identifier[suffix] = literal[string] , identifier[root] = identifier[self] . identifier[rconf] [ identifier[RuntimeConfiguration] . identifier[TMP_PATH] ]) identifier[args] . identifier[remove] ( identifier[flag] ) keyword[if] identifier[log_path] keyword[is] keyword[not] keyword[None] : identifier[self] . identifier[log_file_path] = identifier[log_path] keyword[if] ( identifier[len] ( identifier[args] )< literal[int] ) keyword[and] ( identifier[show_help] ): keyword[return] identifier[self] . identifier[print_help] ( identifier[short] = keyword[True] ) identifier[self] . identifier[actual_arguments] = identifier[args] identifier[self] . identifier[logger] = identifier[Logger] ( identifier[tee] = identifier[self] . identifier[verbose] , identifier[tee_show_datetime] = identifier[self] . identifier[very_verbose] ) identifier[self] . identifier[log] ([ literal[string] , identifier[aeneas_version] ]) identifier[self] . identifier[log] ([ literal[string] , identifier[self] . identifier[formal_arguments] ]) identifier[self] . identifier[log] ([ literal[string] , identifier[self] . identifier[actual_arguments] ]) identifier[self] . identifier[log] ([ literal[string] , identifier[self] . identifier[rconf] . identifier[config_string] ]) identifier[exit_code] = identifier[self] . identifier[perform_command] () identifier[self] . identifier[log] ([ literal[string] , identifier[exit_code] ]) keyword[if] identifier[self] . identifier[log_file_path] keyword[is] keyword[not] keyword[None] : identifier[self] . identifier[log] ([ literal[string] , identifier[self] . identifier[log_file_path] ]) identifier[self] . identifier[logger] . identifier[write] ( identifier[self] . identifier[log_file_path] ) keyword[if] identifier[self] . identifier[use_sys] : identifier[self] . identifier[print_info] ( literal[string] % identifier[self] . identifier[log_file_path] ) keyword[return] identifier[self] . identifier[exit] ( identifier[exit_code] )
def run(self, arguments, show_help=True): """ Program entry point. Please note that the first item in ``arguments`` is discarded, as it is assumed to be the script/invocation name; pass a "dumb" placeholder if you call this method with an argument different that ``sys.argv``. :param arguments: the list of arguments :type arguments: list :param show_help: if ``False``, do not show help on ``-h`` and ``--help`` :type show_help: bool :rtype: int """ # convert arguments into Unicode strings if self.use_sys: # check that sys.stdin.encoding and sys.stdout.encoding are set to utf-8 if not gf.FROZEN: if sys.stdin.encoding not in ['UTF-8', 'UTF8']: self.print_warning(u'The default input encoding is not UTF-8.') self.print_warning(u"You might want to set 'PYTHONIOENCODING=UTF-8' in your shell.") # depends on [control=['if'], data=[]] if sys.stdout.encoding not in ['UTF-8', 'UTF8']: self.print_warning(u'The default output encoding is not UTF-8.') self.print_warning(u"You might want to set 'PYTHONIOENCODING=UTF-8' in your shell.") # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # decode using sys.stdin.encoding args = [gf.safe_unicode_stdin(arg) for arg in arguments] # depends on [control=['if'], data=[]] else: # decode using utf-8 (but you should pass Unicode strings as parameters anyway) args = [gf.safe_unicode(arg) for arg in arguments] if show_help: if u'-h' in args: return self.print_help(short=True) # depends on [control=['if'], data=[]] if u'--help' in args: return self.print_help(short=False) # depends on [control=['if'], data=[]] if u'--help-rconf' in args: return self.print_rconf_parameters() # depends on [control=['if'], data=[]] if u'--version' in args: return self.print_name_version() # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # store formal arguments self.formal_arguments_raw = arguments self.formal_arguments = args # to obtain the actual arguments, # remove the first one and "special" switches args = args[1:] set_args = set(args) # set verbosity, if requested for flag in set([u'-v', u'--verbose']) & set_args: self.verbose = True args.remove(flag) # depends on [control=['for'], data=['flag']] for flag in set([u'-vv', u'--very-verbose']) & set_args: self.verbose = True self.very_verbose = True args.remove(flag) # depends on [control=['for'], data=['flag']] # set RuntimeConfiguration string, if specified for flag in [u'-r', u'--runtime-configuration']: rconf_string = self.has_option_with_value(flag, actual_arguments=False) if rconf_string is not None: self.rconf = RuntimeConfiguration(rconf_string) args.remove('%s=%s' % (flag, rconf_string)) # depends on [control=['if'], data=['rconf_string']] # depends on [control=['for'], data=['flag']] # set log file path, if requested log_path = None for flag in [u'-l', u'--log']: log_path = self.has_option_with_value(flag, actual_arguments=False) if log_path is not None: args.remove('%s=%s' % (flag, log_path)) # depends on [control=['if'], data=['log_path']] elif flag in set_args: (handler, log_path) = gf.tmp_file(suffix=u'.log', root=self.rconf[RuntimeConfiguration.TMP_PATH]) args.remove(flag) # depends on [control=['if'], data=['flag']] if log_path is not None: self.log_file_path = log_path # depends on [control=['if'], data=['log_path']] # depends on [control=['for'], data=['flag']] # if no actual arguments left, print help if len(args) < 1 and show_help: return self.print_help(short=True) # depends on [control=['if'], data=[]] # store actual arguments self.actual_arguments = args # 
create logger self.logger = Logger(tee=self.verbose, tee_show_datetime=self.very_verbose) self.log([u'Running aeneas %s', aeneas_version]) self.log([u'Formal arguments: %s', self.formal_arguments]) self.log([u'Actual arguments: %s', self.actual_arguments]) self.log([u"Runtime configuration: '%s'", self.rconf.config_string]) # perform command exit_code = self.perform_command() self.log([u'Execution completed with code %d', exit_code]) # output log if requested if self.log_file_path is not None: self.log([u"User requested saving log to file '%s'", self.log_file_path]) self.logger.write(self.log_file_path) if self.use_sys: self.print_info(u"Log written to file '%s'" % self.log_file_path) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] return self.exit(exit_code)
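A usage sketch for the CLI entry point above. ExecuteTaskCLI and its use_sys keyword are assumptions about one concrete aeneas subclass of this base class; the placeholder first argument follows the docstring's advice that the first item is discarded.

import sys
from aeneas.tools.execute_task import ExecuteTaskCLI  # assumed concrete subclass

exit_code = ExecuteTaskCLI(use_sys=True).run(arguments=["placeholder", "--help"])
sys.exit(exit_code)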
def delete_collection(db_name, collection_name, host='localhost', port=27017): """Almost exclusively for testing.""" client = MongoClient("mongodb://%s:%d" % (host, port)) client[db_name].drop_collection(collection_name)
def function[delete_collection, parameter[db_name, collection_name, host, port]]: constant[Almost exclusively for testing.] variable[client] assign[=] call[name[MongoClient], parameter[binary_operation[constant[mongodb://%s:%d] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da2047eb1c0>, <ast.Name object at 0x7da2047ea470>]]]]] call[call[name[client]][name[db_name]].drop_collection, parameter[name[collection_name]]]
keyword[def] identifier[delete_collection] ( identifier[db_name] , identifier[collection_name] , identifier[host] = literal[string] , identifier[port] = literal[int] ): literal[string] identifier[client] = identifier[MongoClient] ( literal[string] %( identifier[host] , identifier[port] )) identifier[client] [ identifier[db_name] ]. identifier[drop_collection] ( identifier[collection_name] )
def delete_collection(db_name, collection_name, host='localhost', port=27017): """Almost exclusively for testing.""" client = MongoClient('mongodb://%s:%d' % (host, port)) client[db_name].drop_collection(collection_name)
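Usage is direct; the call below assumes pymongo is installed and a MongoDB server is reachable at the given host and port.

# Drop a scratch collection left over from a test run.
delete_collection('test_db', 'scratch_results', host='localhost', port=27017)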
def transformer_latent_decoder(x, encoder_output, ed_attention_bias, hparams, name=None): """Transformer decoder over latents using latent_attention_type. Args: x: Tensor of shape [batch, length_q, hparams.hidden_size]. length_q is the latent length, which is height * width * hparams.num_latents / (2**hparams.num_compress_steps). encoder_output: Tensor of shape [batch, length_kv, hparams.hidden_size]. ed_attention_bias: Tensor which broadcasts with shape [batch, hparams.num_heads, length_q, length_kv]. Encoder-decoder attention bias. hparams: HParams. name: string, variable scope. Returns: Tensor of shape [batch, length_q, hparams.hidden_size]. """ with tf.variable_scope(name, default_name="transformer_latent_dec"): batch_size = common_layers.shape_list(x)[0] compressed_img_len = (hparams.img_len // 2**(hparams.num_compress_steps // 2)) x = tf.reshape(x, [batch_size, compressed_img_len, compressed_img_len * hparams.num_latents, hparams.hidden_size]) decoder_input, _, _ = cia.prepare_decoder(x, hparams) decoder_output = cia.transformer_decoder_layers( decoder_input, encoder_output, hparams.num_latent_layers or hparams.num_hidden_layers, hparams, attention_type=hparams.latent_attention_type, encoder_decoder_attention_bias=ed_attention_bias, name="decoder") decoder_output = tf.reshape(decoder_output, [batch_size, compressed_img_len**2 * hparams.num_latents, hparams.hidden_size]) return decoder_output
def function[transformer_latent_decoder, parameter[x, encoder_output, ed_attention_bias, hparams, name]]: constant[Transformer decoder over latents using latent_attention_type. Args: x: Tensor of shape [batch, length_q, hparams.hidden_size]. length_q is the latent length, which is height * width * hparams.num_latents / (2**hparams.num_compress_steps). encoder_output: Tensor of shape [batch, length_kv, hparams.hidden_size]. ed_attention_bias: Tensor which broadcasts with shape [batch, hparams.num_heads, length_q, length_kv]. Encoder-decoder attention bias. hparams: HParams. name: string, variable scope. Returns: Tensor of shape [batch, length_q, hparams.hidden_size]. ] with call[name[tf].variable_scope, parameter[name[name]]] begin[:] variable[batch_size] assign[=] call[call[name[common_layers].shape_list, parameter[name[x]]]][constant[0]] variable[compressed_img_len] assign[=] binary_operation[name[hparams].img_len <ast.FloorDiv object at 0x7da2590d6bc0> binary_operation[constant[2] ** binary_operation[name[hparams].num_compress_steps <ast.FloorDiv object at 0x7da2590d6bc0> constant[2]]]] variable[x] assign[=] call[name[tf].reshape, parameter[name[x], list[[<ast.Name object at 0x7da18fe91690>, <ast.Name object at 0x7da18fe90850>, <ast.BinOp object at 0x7da18fe91780>, <ast.Attribute object at 0x7da18fe92c20>]]]] <ast.Tuple object at 0x7da18fe922c0> assign[=] call[name[cia].prepare_decoder, parameter[name[x], name[hparams]]] variable[decoder_output] assign[=] call[name[cia].transformer_decoder_layers, parameter[name[decoder_input], name[encoder_output], <ast.BoolOp object at 0x7da18fe92dd0>, name[hparams]]] variable[decoder_output] assign[=] call[name[tf].reshape, parameter[name[decoder_output], list[[<ast.Name object at 0x7da18fe91960>, <ast.BinOp object at 0x7da18fe90d90>, <ast.Attribute object at 0x7da18fe93190>]]]] return[name[decoder_output]]
keyword[def] identifier[transformer_latent_decoder] ( identifier[x] , identifier[encoder_output] , identifier[ed_attention_bias] , identifier[hparams] , identifier[name] = keyword[None] ): literal[string] keyword[with] identifier[tf] . identifier[variable_scope] ( identifier[name] , identifier[default_name] = literal[string] ): identifier[batch_size] = identifier[common_layers] . identifier[shape_list] ( identifier[x] )[ literal[int] ] identifier[compressed_img_len] =( identifier[hparams] . identifier[img_len] // literal[int] **( identifier[hparams] . identifier[num_compress_steps] // literal[int] )) identifier[x] = identifier[tf] . identifier[reshape] ( identifier[x] ,[ identifier[batch_size] , identifier[compressed_img_len] , identifier[compressed_img_len] * identifier[hparams] . identifier[num_latents] , identifier[hparams] . identifier[hidden_size] ]) identifier[decoder_input] , identifier[_] , identifier[_] = identifier[cia] . identifier[prepare_decoder] ( identifier[x] , identifier[hparams] ) identifier[decoder_output] = identifier[cia] . identifier[transformer_decoder_layers] ( identifier[decoder_input] , identifier[encoder_output] , identifier[hparams] . identifier[num_latent_layers] keyword[or] identifier[hparams] . identifier[num_hidden_layers] , identifier[hparams] , identifier[attention_type] = identifier[hparams] . identifier[latent_attention_type] , identifier[encoder_decoder_attention_bias] = identifier[ed_attention_bias] , identifier[name] = literal[string] ) identifier[decoder_output] = identifier[tf] . identifier[reshape] ( identifier[decoder_output] , [ identifier[batch_size] , identifier[compressed_img_len] ** literal[int] * identifier[hparams] . identifier[num_latents] , identifier[hparams] . identifier[hidden_size] ]) keyword[return] identifier[decoder_output]
def transformer_latent_decoder(x, encoder_output, ed_attention_bias, hparams, name=None): """Transformer decoder over latents using latent_attention_type. Args: x: Tensor of shape [batch, length_q, hparams.hidden_size]. length_q is the latent length, which is height * width * hparams.num_latents / (2**hparams.num_compress_steps). encoder_output: Tensor of shape [batch, length_kv, hparams.hidden_size]. ed_attention_bias: Tensor which broadcasts with shape [batch, hparams.num_heads, length_q, length_kv]. Encoder-decoder attention bias. hparams: HParams. name: string, variable scope. Returns: Tensor of shape [batch, length_q, hparams.hidden_size]. """ with tf.variable_scope(name, default_name='transformer_latent_dec'): batch_size = common_layers.shape_list(x)[0] compressed_img_len = hparams.img_len // 2 ** (hparams.num_compress_steps // 2) x = tf.reshape(x, [batch_size, compressed_img_len, compressed_img_len * hparams.num_latents, hparams.hidden_size]) (decoder_input, _, _) = cia.prepare_decoder(x, hparams) decoder_output = cia.transformer_decoder_layers(decoder_input, encoder_output, hparams.num_latent_layers or hparams.num_hidden_layers, hparams, attention_type=hparams.latent_attention_type, encoder_decoder_attention_bias=ed_attention_bias, name='decoder') decoder_output = tf.reshape(decoder_output, [batch_size, compressed_img_len ** 2 * hparams.num_latents, hparams.hidden_size]) return decoder_output # depends on [control=['with'], data=[]]
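A plain-Python check of the shape bookkeeping above (no TensorFlow required). The hparams values are hypothetical and chosen only to make the latent length from the docstring concrete.

img_len, num_compress_steps, num_latents = 32, 4, 2   # assumed hparams values

compressed_img_len = img_len // 2 ** (num_compress_steps // 2)
length_q = compressed_img_len ** 2 * num_latents                                  # reshape target used above
docstring_length_q = img_len * img_len * num_latents // 2 ** num_compress_steps   # formula from the docstring

print(compressed_img_len, length_q, docstring_length_q)   # 8 128 128, both formulas agree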
def set_log_type_name(self, logType, name): """ Set a logtype name. :Parameters: #. logType (string): A defined logging type. #. name (string): The logtype new name. """ assert logType in self.__logTypeStdoutFlags.keys(), "logType '%s' not defined" %logType assert isinstance(name, basestring), "name must be a string" name = str(name) self.__logTypeNames[logType] = name
def function[set_log_type_name, parameter[self, logType, name]]: constant[ Set a logtype name. :Parameters: #. logType (string): A defined logging type. #. name (string): The logtype new name. ] assert[compare[name[logType] in call[name[self].__logTypeStdoutFlags.keys, parameter[]]]] assert[call[name[isinstance], parameter[name[name], name[basestring]]]] variable[name] assign[=] call[name[str], parameter[name[name]]] call[name[self].__logTypeNames][name[logType]] assign[=] name[name]
keyword[def] identifier[set_log_type_name] ( identifier[self] , identifier[logType] , identifier[name] ): literal[string] keyword[assert] identifier[logType] keyword[in] identifier[self] . identifier[__logTypeStdoutFlags] . identifier[keys] (), literal[string] % identifier[logType] keyword[assert] identifier[isinstance] ( identifier[name] , identifier[basestring] ), literal[string] identifier[name] = identifier[str] ( identifier[name] ) identifier[self] . identifier[__logTypeNames] [ identifier[logType] ]= identifier[name]
def set_log_type_name(self, logType, name): """ Set a logtype name. :Parameters: #. logType (string): A defined logging type. #. name (string): The logtype new name. """ assert logType in self.__logTypeStdoutFlags.keys(), "logType '%s' not defined" % logType assert isinstance(name, basestring), 'name must be a string' name = str(name) self.__logTypeNames[logType] = name
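A hedged usage sketch, assuming this method belongs to pysimplelog's Logger class (the import path and constructor argument are assumptions) and that 'info' is one of the pre-defined log types.

from pysimplelog import Logger

log = Logger('demo')
log.set_log_type_name('info', 'INFORMATION')   # 'info' messages are now labelled INFORMATION
log.info('log type renamed')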
def as_dict(self):
		"""
		Return a dictionary mapping time slide IDs to offset
		dictionaries.
		"""
		d = {}
		for row in self:
			if row.time_slide_id not in d:
				d[row.time_slide_id] = offsetvector.offsetvector()
			if row.instrument in d[row.time_slide_id]:
				raise KeyError("'%s': duplicate instrument '%s'" % (row.time_slide_id, row.instrument))
			d[row.time_slide_id][row.instrument] = row.offset
		return d
def function[as_dict, parameter[self]]: constant[
		Return a dictionary mapping time slide IDs to offset
		dictionaries.
		] variable[d] assign[=] dictionary[[], []] for taget[name[row]] in starred[name[self]] begin[:] if compare[name[row].time_slide_id <ast.NotIn object at 0x7da2590d7190> name[d]] begin[:] call[name[d]][name[row].time_slide_id] assign[=] call[name[offsetvector].offsetvector, parameter[]] if compare[name[row].instrument in call[name[d]][name[row].time_slide_id]] begin[:] <ast.Raise object at 0x7da20c7c81f0> call[call[name[d]][name[row].time_slide_id]][name[row].instrument] assign[=] name[row].offset return[name[d]]
keyword[def] identifier[as_dict] ( identifier[self] ): literal[string] identifier[d] ={} keyword[for] identifier[row] keyword[in] identifier[self] : keyword[if] identifier[row] . identifier[time_slide_id] keyword[not] keyword[in] identifier[d] : identifier[d] [ identifier[row] . identifier[time_slide_id] ]= identifier[offsetvector] . identifier[offsetvector] () keyword[if] identifier[row] . identifier[instrument] keyword[in] identifier[d] [ identifier[row] . identifier[time_slide_id] ]: keyword[raise] identifier[KeyError] ( literal[string] %( identifier[row] . identifier[time_slide_id] , identifier[row] . identifier[instrument] )) identifier[d] [ identifier[row] . identifier[time_slide_id] ][ identifier[row] . identifier[instrument] ]= identifier[row] . identifier[offset] keyword[return] identifier[d]
def as_dict(self):
    """
	Return a dictionary mapping time slide IDs to offset
	dictionaries.
	"""
    d = {}
    for row in self:
        if row.time_slide_id not in d:
            d[row.time_slide_id] = offsetvector.offsetvector() # depends on [control=['if'], data=['d']]
        if row.instrument in d[row.time_slide_id]:
            raise KeyError("'%s': duplicate instrument '%s'" % (row.time_slide_id, row.instrument)) # depends on [control=['if'], data=[]]
        d[row.time_slide_id][row.instrument] = row.offset # depends on [control=['for'], data=['row']]
    return d
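To make the shape of the returned mapping concrete, here is a small self-contained sketch that mirrors the loop above; plain dicts stand in for offsetvector.offsetvector() and the rows are invented for illustration.

class Row(object):
    def __init__(self, time_slide_id, instrument, offset):
        self.time_slide_id, self.instrument, self.offset = time_slide_id, instrument, offset

rows = [Row(0, 'H1', 0.0), Row(0, 'L1', 5.0), Row(1, 'H1', 0.0)]
d = {}
for row in rows:
    if row.time_slide_id not in d:
        d[row.time_slide_id] = {}  # offsetvector.offsetvector() in the real table
    if row.instrument in d[row.time_slide_id]:
        raise KeyError("'%s': duplicate instrument '%s'" % (row.time_slide_id, row.instrument))
    d[row.time_slide_id][row.instrument] = row.offset
print(d)  # {0: {'H1': 0.0, 'L1': 5.0}, 1: {'H1': 0.0}}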
def create_info_endpoint(self, name, data): """Create an endpoint to serve info GET requests.""" # make sure data is serializable data = make_serializable(data) # create generic restful resource to serve static JSON data class InfoBase(Resource): @staticmethod def get(): return data def info_factory(name): """Return an Info derivative resource.""" class NewClass(InfoBase): pass NewClass.__name__ = "{}_{}".format(name, InfoBase.__name__) return NewClass path = '/info/{}'.format(name) self.api.add_resource(info_factory(name), path) logger.info('Regestered informational resource to {} (available via GET)'.format(path)) logger.debug('Endpoint {} will now serve the following static data:\n{}'.format(path, data))
def function[create_info_endpoint, parameter[self, name, data]]: constant[Create an endpoint to serve info GET requests.] variable[data] assign[=] call[name[make_serializable], parameter[name[data]]] class class[InfoBase, parameter[]] begin[:] def function[get, parameter[]]: return[name[data]] def function[info_factory, parameter[name]]: constant[Return an Info derivative resource.] class class[NewClass, parameter[]] begin[:] pass name[NewClass].__name__ assign[=] call[constant[{}_{}].format, parameter[name[name], name[InfoBase].__name__]] return[name[NewClass]] variable[path] assign[=] call[constant[/info/{}].format, parameter[name[name]]] call[name[self].api.add_resource, parameter[call[name[info_factory], parameter[name[name]]], name[path]]] call[name[logger].info, parameter[call[constant[Regestered informational resource to {} (available via GET)].format, parameter[name[path]]]]] call[name[logger].debug, parameter[call[constant[Endpoint {} will now serve the following static data: {}].format, parameter[name[path], name[data]]]]]
keyword[def] identifier[create_info_endpoint] ( identifier[self] , identifier[name] , identifier[data] ): literal[string] identifier[data] = identifier[make_serializable] ( identifier[data] ) keyword[class] identifier[InfoBase] ( identifier[Resource] ): @ identifier[staticmethod] keyword[def] identifier[get] (): keyword[return] identifier[data] keyword[def] identifier[info_factory] ( identifier[name] ): literal[string] keyword[class] identifier[NewClass] ( identifier[InfoBase] ): keyword[pass] identifier[NewClass] . identifier[__name__] = literal[string] . identifier[format] ( identifier[name] , identifier[InfoBase] . identifier[__name__] ) keyword[return] identifier[NewClass] identifier[path] = literal[string] . identifier[format] ( identifier[name] ) identifier[self] . identifier[api] . identifier[add_resource] ( identifier[info_factory] ( identifier[name] ), identifier[path] ) identifier[logger] . identifier[info] ( literal[string] . identifier[format] ( identifier[path] )) identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[path] , identifier[data] ))
def create_info_endpoint(self, name, data): """Create an endpoint to serve info GET requests.""" # make sure data is serializable data = make_serializable(data) # create generic restful resource to serve static JSON data class InfoBase(Resource): @staticmethod def get(): return data def info_factory(name): """Return an Info derivative resource.""" class NewClass(InfoBase): pass NewClass.__name__ = '{}_{}'.format(name, InfoBase.__name__) return NewClass path = '/info/{}'.format(name) self.api.add_resource(info_factory(name), path) logger.info('Regestered informational resource to {} (available via GET)'.format(path)) logger.debug('Endpoint {} will now serve the following static data:\n{}'.format(path, data))
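A hedged usage sketch: `server` below stands for an instance of the class this method belongs to, with self.api already wired to a Flask-RESTful Api; the endpoint name and payload are illustrative.

server.create_info_endpoint('model', {'name': 'classifier', 'version': '1.2.0'})
# A GET to /info/model then returns the JSON above, e.g. with the requests library:
#   requests.get('http://localhost:5000/info/model').json()
#   -> {'name': 'classifier', 'version': '1.2.0'}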
def fromtabix(filename, reference=None, start=None, stop=None, region=None, header=None): """ Extract rows from a tabix indexed file, e.g.:: >>> import petl as etl >>> # activate bio extensions ... import petlx.bio >>> table1 = etl.fromtabix('fixture/test.bed.gz', ... region='Pf3D7_02_v3') >>> table1 +---------------+----------+----------+-----------------------------+ | #chrom | start | end | region | +===============+==========+==========+=============================+ | 'Pf3D7_02_v3' | '0' | '23100' | 'SubtelomericRepeat' | +---------------+----------+----------+-----------------------------+ | 'Pf3D7_02_v3' | '23100' | '105800' | 'SubtelomericHypervariable' | +---------------+----------+----------+-----------------------------+ | 'Pf3D7_02_v3' | '105800' | '447300' | 'Core' | +---------------+----------+----------+-----------------------------+ | 'Pf3D7_02_v3' | '447300' | '450450' | 'Centromere' | +---------------+----------+----------+-----------------------------+ | 'Pf3D7_02_v3' | '450450' | '862500' | 'Core' | +---------------+----------+----------+-----------------------------+ ... >>> table2 = etl.fromtabix('fixture/test.bed.gz', ... region='Pf3D7_02_v3:110000-120000') >>> table2 +---------------+----------+----------+--------+ | #chrom | start | end | region | +===============+==========+==========+========+ | 'Pf3D7_02_v3' | '105800' | '447300' | 'Core' | +---------------+----------+----------+--------+ """ return TabixView(filename, reference, start, stop, region, header)
def function[fromtabix, parameter[filename, reference, start, stop, region, header]]: constant[ Extract rows from a tabix indexed file, e.g.:: >>> import petl as etl >>> # activate bio extensions ... import petlx.bio >>> table1 = etl.fromtabix('fixture/test.bed.gz', ... region='Pf3D7_02_v3') >>> table1 +---------------+----------+----------+-----------------------------+ | #chrom | start | end | region | +===============+==========+==========+=============================+ | 'Pf3D7_02_v3' | '0' | '23100' | 'SubtelomericRepeat' | +---------------+----------+----------+-----------------------------+ | 'Pf3D7_02_v3' | '23100' | '105800' | 'SubtelomericHypervariable' | +---------------+----------+----------+-----------------------------+ | 'Pf3D7_02_v3' | '105800' | '447300' | 'Core' | +---------------+----------+----------+-----------------------------+ | 'Pf3D7_02_v3' | '447300' | '450450' | 'Centromere' | +---------------+----------+----------+-----------------------------+ | 'Pf3D7_02_v3' | '450450' | '862500' | 'Core' | +---------------+----------+----------+-----------------------------+ ... >>> table2 = etl.fromtabix('fixture/test.bed.gz', ... region='Pf3D7_02_v3:110000-120000') >>> table2 +---------------+----------+----------+--------+ | #chrom | start | end | region | +===============+==========+==========+========+ | 'Pf3D7_02_v3' | '105800' | '447300' | 'Core' | +---------------+----------+----------+--------+ ] return[call[name[TabixView], parameter[name[filename], name[reference], name[start], name[stop], name[region], name[header]]]]
keyword[def] identifier[fromtabix] ( identifier[filename] , identifier[reference] = keyword[None] , identifier[start] = keyword[None] , identifier[stop] = keyword[None] , identifier[region] = keyword[None] , identifier[header] = keyword[None] ): literal[string] keyword[return] identifier[TabixView] ( identifier[filename] , identifier[reference] , identifier[start] , identifier[stop] , identifier[region] , identifier[header] )
def fromtabix(filename, reference=None, start=None, stop=None, region=None, header=None): """ Extract rows from a tabix indexed file, e.g.:: >>> import petl as etl >>> # activate bio extensions ... import petlx.bio >>> table1 = etl.fromtabix('fixture/test.bed.gz', ... region='Pf3D7_02_v3') >>> table1 +---------------+----------+----------+-----------------------------+ | #chrom | start | end | region | +===============+==========+==========+=============================+ | 'Pf3D7_02_v3' | '0' | '23100' | 'SubtelomericRepeat' | +---------------+----------+----------+-----------------------------+ | 'Pf3D7_02_v3' | '23100' | '105800' | 'SubtelomericHypervariable' | +---------------+----------+----------+-----------------------------+ | 'Pf3D7_02_v3' | '105800' | '447300' | 'Core' | +---------------+----------+----------+-----------------------------+ | 'Pf3D7_02_v3' | '447300' | '450450' | 'Centromere' | +---------------+----------+----------+-----------------------------+ | 'Pf3D7_02_v3' | '450450' | '862500' | 'Core' | +---------------+----------+----------+-----------------------------+ ... >>> table2 = etl.fromtabix('fixture/test.bed.gz', ... region='Pf3D7_02_v3:110000-120000') >>> table2 +---------------+----------+----------+--------+ | #chrom | start | end | region | +===============+==========+==========+========+ | 'Pf3D7_02_v3' | '105800' | '447300' | 'Core' | +---------------+----------+----------+--------+ """ return TabixView(filename, reference, start, stop, region, header)
def word_similarity_explorer(corpus, category, category_name, not_category_name, target_term, nlp=None, alpha=0.01, max_p_val=0.1, **kwargs): ''' Parameters ---------- corpus : Corpus Corpus to use. category : str Name of category column as it appears in original data frame. category_name : str Name of category to use. E.g., "5-star reviews." not_category_name : str Name of everything that isn't in category. E.g., "Below 5-star reviews". target_term : str Word or phrase for semantic similarity comparison nlp : spaCy-like parsing function E.g., spacy.load('en'), whitespace_nlp, etc... alpha : float, default = 0.01 Uniform dirichlet prior for p-value calculation max_p_val : float, default = 0.1 Max p-val to use find set of terms for similarity calculation Remaining arguments are from `produce_scattertext_explorer`. Returns ------- str, html of visualization ''' if nlp is None: import spacy nlp = spacy.load('en') base_term = nlp(target_term) scores = np.array([base_term.similarity(nlp(tok)) for tok in corpus._term_idx_store._i2val]) return produce_scattertext_explorer(corpus, category, category_name, not_category_name, scores=scores, sort_by_dist=False, reverse_sort_scores_for_not_category=False, word_vec_use_p_vals=True, term_significance=LogOddsRatioUninformativeDirichletPrior(alpha), max_p_val=max_p_val, p_value_colors=True, **kwargs)
def function[word_similarity_explorer, parameter[corpus, category, category_name, not_category_name, target_term, nlp, alpha, max_p_val]]: constant[ Parameters ---------- corpus : Corpus Corpus to use. category : str Name of category column as it appears in original data frame. category_name : str Name of category to use. E.g., "5-star reviews." not_category_name : str Name of everything that isn't in category. E.g., "Below 5-star reviews". target_term : str Word or phrase for semantic similarity comparison nlp : spaCy-like parsing function E.g., spacy.load('en'), whitespace_nlp, etc... alpha : float, default = 0.01 Uniform dirichlet prior for p-value calculation max_p_val : float, default = 0.1 Max p-val to use find set of terms for similarity calculation Remaining arguments are from `produce_scattertext_explorer`. Returns ------- str, html of visualization ] if compare[name[nlp] is constant[None]] begin[:] import module[spacy] variable[nlp] assign[=] call[name[spacy].load, parameter[constant[en]]] variable[base_term] assign[=] call[name[nlp], parameter[name[target_term]]] variable[scores] assign[=] call[name[np].array, parameter[<ast.ListComp object at 0x7da1b1a102b0>]] return[call[name[produce_scattertext_explorer], parameter[name[corpus], name[category], name[category_name], name[not_category_name]]]]
keyword[def] identifier[word_similarity_explorer] ( identifier[corpus] , identifier[category] , identifier[category_name] , identifier[not_category_name] , identifier[target_term] , identifier[nlp] = keyword[None] , identifier[alpha] = literal[int] , identifier[max_p_val] = literal[int] , ** identifier[kwargs] ): literal[string] keyword[if] identifier[nlp] keyword[is] keyword[None] : keyword[import] identifier[spacy] identifier[nlp] = identifier[spacy] . identifier[load] ( literal[string] ) identifier[base_term] = identifier[nlp] ( identifier[target_term] ) identifier[scores] = identifier[np] . identifier[array] ([ identifier[base_term] . identifier[similarity] ( identifier[nlp] ( identifier[tok] )) keyword[for] identifier[tok] keyword[in] identifier[corpus] . identifier[_term_idx_store] . identifier[_i2val] ]) keyword[return] identifier[produce_scattertext_explorer] ( identifier[corpus] , identifier[category] , identifier[category_name] , identifier[not_category_name] , identifier[scores] = identifier[scores] , identifier[sort_by_dist] = keyword[False] , identifier[reverse_sort_scores_for_not_category] = keyword[False] , identifier[word_vec_use_p_vals] = keyword[True] , identifier[term_significance] = identifier[LogOddsRatioUninformativeDirichletPrior] ( identifier[alpha] ), identifier[max_p_val] = identifier[max_p_val] , identifier[p_value_colors] = keyword[True] , ** identifier[kwargs] )
def word_similarity_explorer(corpus, category, category_name, not_category_name, target_term, nlp=None, alpha=0.01, max_p_val=0.1, **kwargs): """ Parameters ---------- corpus : Corpus Corpus to use. category : str Name of category column as it appears in original data frame. category_name : str Name of category to use. E.g., "5-star reviews." not_category_name : str Name of everything that isn't in category. E.g., "Below 5-star reviews". target_term : str Word or phrase for semantic similarity comparison nlp : spaCy-like parsing function E.g., spacy.load('en'), whitespace_nlp, etc... alpha : float, default = 0.01 Uniform dirichlet prior for p-value calculation max_p_val : float, default = 0.1 Max p-val to use find set of terms for similarity calculation Remaining arguments are from `produce_scattertext_explorer`. Returns ------- str, html of visualization """ if nlp is None: import spacy nlp = spacy.load('en') # depends on [control=['if'], data=['nlp']] base_term = nlp(target_term) scores = np.array([base_term.similarity(nlp(tok)) for tok in corpus._term_idx_store._i2val]) return produce_scattertext_explorer(corpus, category, category_name, not_category_name, scores=scores, sort_by_dist=False, reverse_sort_scores_for_not_category=False, word_vec_use_p_vals=True, term_significance=LogOddsRatioUninformativeDirichletPrior(alpha), max_p_val=max_p_val, p_value_colors=True, **kwargs)
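A usage sketch for the explorer above. It assumes `corpus` is a scattertext Corpus that has already been built (not shown here), and the category labels and target term are illustrative only.

html = word_similarity_explorer(corpus,
                                category='democrat',
                                category_name='Democratic',
                                not_category_name='Republican',
                                target_term='jobs',
                                max_p_val=0.05)
with open('jobs_similarity.html', 'w') as f:
    f.write(html)   # open the file in a browser to explore the plot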
def load_system_host_keys(self, filename=None): """ Load host keys from a system (read-only) file. Host keys read with this method will not be saved back by `save_host_keys`. This method can be called multiple times. Each new set of host keys will be merged with the existing set (new replacing old if there are conflicts). If ``filename`` is left as ``None``, an attempt will be made to read keys from the user's local "known hosts" file, as used by OpenSSH, and no exception will be raised if the file can't be read. This is probably only useful on posix. :param str filename: the filename to read, or ``None`` :raises: ``IOError`` -- if a filename was provided and the file could not be read """ if filename is None: # try the user's .ssh key file, and mask exceptions filename = os.path.expanduser("~/.ssh/known_hosts") try: self._system_host_keys.load(filename) except IOError: pass return self._system_host_keys.load(filename)
def function[load_system_host_keys, parameter[self, filename]]: constant[ Load host keys from a system (read-only) file. Host keys read with this method will not be saved back by `save_host_keys`. This method can be called multiple times. Each new set of host keys will be merged with the existing set (new replacing old if there are conflicts). If ``filename`` is left as ``None``, an attempt will be made to read keys from the user's local "known hosts" file, as used by OpenSSH, and no exception will be raised if the file can't be read. This is probably only useful on posix. :param str filename: the filename to read, or ``None`` :raises: ``IOError`` -- if a filename was provided and the file could not be read ] if compare[name[filename] is constant[None]] begin[:] variable[filename] assign[=] call[name[os].path.expanduser, parameter[constant[~/.ssh/known_hosts]]] <ast.Try object at 0x7da20e9b2860> return[None] call[name[self]._system_host_keys.load, parameter[name[filename]]]
keyword[def] identifier[load_system_host_keys] ( identifier[self] , identifier[filename] = keyword[None] ): literal[string] keyword[if] identifier[filename] keyword[is] keyword[None] : identifier[filename] = identifier[os] . identifier[path] . identifier[expanduser] ( literal[string] ) keyword[try] : identifier[self] . identifier[_system_host_keys] . identifier[load] ( identifier[filename] ) keyword[except] identifier[IOError] : keyword[pass] keyword[return] identifier[self] . identifier[_system_host_keys] . identifier[load] ( identifier[filename] )
def load_system_host_keys(self, filename=None): """ Load host keys from a system (read-only) file. Host keys read with this method will not be saved back by `save_host_keys`. This method can be called multiple times. Each new set of host keys will be merged with the existing set (new replacing old if there are conflicts). If ``filename`` is left as ``None``, an attempt will be made to read keys from the user's local "known hosts" file, as used by OpenSSH, and no exception will be raised if the file can't be read. This is probably only useful on posix. :param str filename: the filename to read, or ``None`` :raises: ``IOError`` -- if a filename was provided and the file could not be read """ if filename is None: # try the user's .ssh key file, and mask exceptions filename = os.path.expanduser('~/.ssh/known_hosts') try: self._system_host_keys.load(filename) # depends on [control=['try'], data=[]] except IOError: pass # depends on [control=['except'], data=[]] return # depends on [control=['if'], data=['filename']] self._system_host_keys.load(filename)
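For context, this is paramiko's SSHClient method; a typical call sequence looks like the sketch below (host name and user are placeholders).

import paramiko

client = paramiko.SSHClient()
client.load_system_host_keys()  # falls back to ~/.ssh/known_hosts, read errors masked
# client.load_system_host_keys('/etc/ssh/ssh_known_hosts')  # explicit path: IOError if unreadable
client.connect('example.com', username='me')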
def _get_changes_from_diff_dict(diff_dict):
    '''
    Returns a list of string messages of the differences in a diff dict.

    Each inner message is tabulated one tab deeper
    '''
    changes_strings = []
    for p in diff_dict.keys():
        if not isinstance(diff_dict[p], dict):
            raise ValueError('Unexpected diff dict \'{0}\''.format(diff_dict))
        if sorted(diff_dict[p].keys()) == ['val1', 'val2']:
            # Some string formatting
            from_str = diff_dict[p]['val1']
            if isinstance(diff_dict[p]['val1'], six.string_types):
                from_str = '\'{0}\''.format(diff_dict[p]['val1'])
            elif isinstance(diff_dict[p]['val1'], list):
                from_str = '\'{0}\''.format(', '.join(diff_dict[p]['val1']))
            to_str = diff_dict[p]['val2']
            if isinstance(diff_dict[p]['val2'], six.string_types):
                to_str = '\'{0}\''.format(diff_dict[p]['val2'])
            elif isinstance(diff_dict[p]['val2'], list):
                to_str = '\'{0}\''.format(', '.join(diff_dict[p]['val2']))
            changes_strings.append('{0} from {1} to {2}'.format(
                p, from_str, to_str))
        else:
            sub_changes = _get_changes_from_diff_dict(diff_dict[p])
            if sub_changes:
                changes_strings.append('{0}:'.format(p))
                changes_strings.extend(['\t{0}'.format(c) for c in sub_changes])
    return changes_strings
def function[_get_changes_from_diff_dict, parameter[diff_dict]]: constant[ Returns a list of string message of the differences in a diff dict. Each inner message is tabulated one tab deeper ] variable[changes_strings] assign[=] list[[]] for taget[name[p]] in starred[call[name[diff_dict].keys, parameter[]]] begin[:] if <ast.UnaryOp object at 0x7da20e74b790> begin[:] <ast.Raise object at 0x7da20e74ad70> if compare[call[name[sorted], parameter[call[call[name[diff_dict]][name[p]].keys, parameter[]]]] equal[==] list[[<ast.Constant object at 0x7da20e74b220>, <ast.Constant object at 0x7da20e74bf70>]]] begin[:] variable[from_str] assign[=] call[call[name[diff_dict]][name[p]]][constant[val1]] if call[name[isinstance], parameter[call[call[name[diff_dict]][name[p]]][constant[val1]], name[six].string_types]] begin[:] variable[from_str] assign[=] call[constant['{0}'].format, parameter[call[call[name[diff_dict]][name[p]]][constant[val1]]]] variable[to_str] assign[=] call[call[name[diff_dict]][name[p]]][constant[val2]] if call[name[isinstance], parameter[call[call[name[diff_dict]][name[p]]][constant[val2]], name[six].string_types]] begin[:] variable[to_str] assign[=] call[constant['{0}'].format, parameter[call[call[name[diff_dict]][name[p]]][constant[val2]]]] call[name[changes_strings].append, parameter[call[constant[{0} from {1} to {2}].format, parameter[name[p], name[from_str], name[to_str]]]]] return[name[changes_strings]]
keyword[def] identifier[_get_changes_from_diff_dict] ( identifier[diff_dict] ): literal[string] identifier[changes_strings] =[] keyword[for] identifier[p] keyword[in] identifier[diff_dict] . identifier[keys] (): keyword[if] keyword[not] identifier[isinstance] ( identifier[diff_dict] [ identifier[p] ], identifier[dict] ): keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[diff_dict] )) keyword[if] identifier[sorted] ( identifier[diff_dict] [ identifier[p] ]. identifier[keys] ())==[ literal[string] , literal[string] ]: identifier[from_str] = identifier[diff_dict] [ identifier[p] ][ literal[string] ] keyword[if] identifier[isinstance] ( identifier[diff_dict] [ identifier[p] ][ literal[string] ], identifier[six] . identifier[string_types] ): identifier[from_str] = literal[string] . identifier[format] ( identifier[diff_dict] [ identifier[p] ][ literal[string] ]) keyword[elif] identifier[isinstance] ( identifier[diff_dict] [ identifier[p] ][ literal[string] ], identifier[list] ): identifier[from_str] = literal[string] . identifier[format] ( literal[string] . identifier[join] ( identifier[diff_dict] [ identifier[p] ][ literal[string] ])) identifier[to_str] = identifier[diff_dict] [ identifier[p] ][ literal[string] ] keyword[if] identifier[isinstance] ( identifier[diff_dict] [ identifier[p] ][ literal[string] ], identifier[six] . identifier[string_types] ): identifier[to_str] = literal[string] . identifier[format] ( identifier[diff_dict] [ identifier[p] ][ literal[string] ]) keyword[elif] identifier[isinstance] ( identifier[diff_dict] [ identifier[p] ][ literal[string] ], identifier[list] ): identifier[to_str] = literal[string] . identifier[format] ( literal[string] . identifier[join] ( identifier[diff_dict] [ identifier[p] ][ literal[string] ])) identifier[changes_strings] . identifier[append] ( literal[string] . identifier[format] ( identifier[p] , identifier[from_str] , identifier[to_str] )) keyword[else] : identifier[sub_changes] = identifier[_get_changes_from_diff_dict] ( identifier[diff_dict] [ identifier[p] ]) keyword[if] identifier[sub_changes] : identifier[changes_strings] . identifier[append] ( literal[string] . identifier[format] ( identifier[p] )) identifier[changes_strings] . identifier[extend] ([ literal[string] . identifier[format] ( identifier[c] ) keyword[for] identifier[c] keyword[in] identifier[sub_changes] ]) keyword[return] identifier[changes_strings]
def _get_changes_from_diff_dict(diff_dict): """ Returns a list of string messages of the differences in a diff dict. Each inner message is tabulated one tab deeper """ changes_strings = [] for p in diff_dict.keys(): if not isinstance(diff_dict[p], dict): raise ValueError("Unexpected diff dict '{0}'".format(diff_dict)) # depends on [control=['if'], data=[]] if sorted(diff_dict[p].keys()) == ['val1', 'val2']: # Some string formatting from_str = diff_dict[p]['val1'] if isinstance(diff_dict[p]['val1'], six.string_types): from_str = "'{0}'".format(diff_dict[p]['val1']) # depends on [control=['if'], data=[]] elif isinstance(diff_dict[p]['val1'], list): from_str = "'{0}'".format(', '.join(diff_dict[p]['val1'])) # depends on [control=['if'], data=[]] to_str = diff_dict[p]['val2'] if isinstance(diff_dict[p]['val2'], six.string_types): to_str = "'{0}'".format(diff_dict[p]['val2']) # depends on [control=['if'], data=[]] elif isinstance(diff_dict[p]['val2'], list): to_str = "'{0}'".format(', '.join(diff_dict[p]['val2'])) # depends on [control=['if'], data=[]] changes_strings.append('{0} from {1} to {2}'.format(p, from_str, to_str)) # depends on [control=['if'], data=[]] else: sub_changes = _get_changes_from_diff_dict(diff_dict[p]) if sub_changes: changes_strings.append('{0}:'.format(p)) changes_strings.extend(['\t{0}'.format(c) for c in sub_changes]) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['p']] return changes_strings
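A worked example of the recursion above with an invented diff dict; the expected output follows directly from the code (only string and list values get quoted, nested keys are indented one tab).

diff = {
    'memory_mb': {'val1': 4096, 'val2': 8192},
    'devices': {
        'network_adapter': {'val1': 'e1000', 'val2': 'vmxnet3'},
    },
}
print(_get_changes_from_diff_dict(diff))
# ['memory_mb from 4096 to 8192',
#  'devices:',
#  "\tnetwork_adapter from 'e1000' to 'vmxnet3'"]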
def filter_image(im_name, out_base, step_size=None, box_size=None, twopass=False, cores=None, mask=True, compressed=False, nslice=None): """ Create a background and noise image from an input image. Resulting images are written to `outbase_bkg.fits` and `outbase_rms.fits` Parameters ---------- im_name : str or HDUList Image to filter. Either a string filename or an astropy.io.fits.HDUList. out_base : str The output filename base. Will be modified to make _bkg and _rms files. step_size : (int,int) Tuple of the x,y step size in pixels box_size : (int,int) The size of the box in piexls twopass : bool Perform a second pass calculation to ensure that the noise is not contaminated by the background. Default = False cores : int Number of CPU corse to use. Default = all available nslice : int The image will be divided into this many horizontal stripes for processing. Default = None = equal to cores mask : bool Mask the output array to contain np.nna wherever the input array is nan or not finite. Default = true compressed : bool Return a compressed version of the background/noise images. Default = False Returns ------- None """ header = fits.getheader(im_name) shape = (header['NAXIS2'],header['NAXIS1']) if step_size is None: if 'BMAJ' in header and 'BMIN' in header: beam_size = np.sqrt(abs(header['BMAJ']*header['BMIN'])) if 'CDELT1' in header: pix_scale = np.sqrt(abs(header['CDELT1']*header['CDELT2'])) elif 'CD1_1' in header: pix_scale = np.sqrt(abs(header['CD1_1']*header['CD2_2'])) if 'CD1_2' in header and 'CD2_1' in header: if header['CD1_2'] != 0 or header['CD2_1']!=0: logging.warning("CD1_2 and/or CD2_1 are non-zero and I don't know what to do with them") logging.warning("Ingoring them") else: logging.warning("Cannot determine pixel scale, assuming 4 pixels per beam") pix_scale = beam_size/4. # default to 4x the synthesized beam width step_size = int(np.ceil(4*beam_size/pix_scale)) else: logging.info("BMAJ and/or BMIN not in fits header.") logging.info("Assuming 4 pix/beam, so we have step_size = 16 pixels") step_size = 16 step_size = (step_size, step_size) if box_size is None: # default to 6x the step size so we have ~ 30beams box_size = (step_size[0]*6, step_size[1]*6) if compressed: if not step_size[0] == step_size[1]: step_size = (min(step_size), min(step_size)) logging.info("Changing grid to be {0} so we can compress the output".format(step_size)) logging.info("using grid_size {0}, box_size {1}".format(step_size,box_size)) logging.info("on data shape {0}".format(shape)) bkg, rms = filter_mc_sharemem(im_name, step_size=step_size, box_size=box_size, cores=cores, shape=shape, nslice=nslice, domask=mask) logging.info("done") bkg_out = '_'.join([os.path.expanduser(out_base), 'bkg.fits']) rms_out = '_'.join([os.path.expanduser(out_base), 'rms.fits']) # add a comment to the fits header header['HISTORY'] = 'BANE {0}-({1})'.format(__version__, __date__) # compress if compressed: hdu = fits.PrimaryHDU(bkg) hdu.header = copy.deepcopy(header) hdulist = fits.HDUList([hdu]) compress(hdulist, step_size[0], bkg_out) hdulist[0].header = copy.deepcopy(header) hdulist[0].data = rms compress(hdulist, step_size[0], rms_out) return write_fits(bkg, header, bkg_out) write_fits(rms, header, rms_out)
def function[filter_image, parameter[im_name, out_base, step_size, box_size, twopass, cores, mask, compressed, nslice]]: constant[ Create a background and noise image from an input image. Resulting images are written to `outbase_bkg.fits` and `outbase_rms.fits` Parameters ---------- im_name : str or HDUList Image to filter. Either a string filename or an astropy.io.fits.HDUList. out_base : str The output filename base. Will be modified to make _bkg and _rms files. step_size : (int,int) Tuple of the x,y step size in pixels box_size : (int,int) The size of the box in piexls twopass : bool Perform a second pass calculation to ensure that the noise is not contaminated by the background. Default = False cores : int Number of CPU corse to use. Default = all available nslice : int The image will be divided into this many horizontal stripes for processing. Default = None = equal to cores mask : bool Mask the output array to contain np.nna wherever the input array is nan or not finite. Default = true compressed : bool Return a compressed version of the background/noise images. Default = False Returns ------- None ] variable[header] assign[=] call[name[fits].getheader, parameter[name[im_name]]] variable[shape] assign[=] tuple[[<ast.Subscript object at 0x7da18f721600>, <ast.Subscript object at 0x7da18f7225f0>]] if compare[name[step_size] is constant[None]] begin[:] if <ast.BoolOp object at 0x7da18f722e00> begin[:] variable[beam_size] assign[=] call[name[np].sqrt, parameter[call[name[abs], parameter[binary_operation[call[name[header]][constant[BMAJ]] * call[name[header]][constant[BMIN]]]]]]] if compare[constant[CDELT1] in name[header]] begin[:] variable[pix_scale] assign[=] call[name[np].sqrt, parameter[call[name[abs], parameter[binary_operation[call[name[header]][constant[CDELT1]] * call[name[header]][constant[CDELT2]]]]]]] variable[step_size] assign[=] call[name[int], parameter[call[name[np].ceil, parameter[binary_operation[binary_operation[constant[4] * name[beam_size]] / name[pix_scale]]]]]] variable[step_size] assign[=] tuple[[<ast.Name object at 0x7da20c76e110>, <ast.Name object at 0x7da20c76e650>]] if compare[name[box_size] is constant[None]] begin[:] variable[box_size] assign[=] tuple[[<ast.BinOp object at 0x7da20c76cbb0>, <ast.BinOp object at 0x7da20c76d600>]] if name[compressed] begin[:] if <ast.UnaryOp object at 0x7da20c76e5f0> begin[:] variable[step_size] assign[=] tuple[[<ast.Call object at 0x7da20c76fc70>, <ast.Call object at 0x7da20c76d060>]] call[name[logging].info, parameter[call[constant[Changing grid to be {0} so we can compress the output].format, parameter[name[step_size]]]]] call[name[logging].info, parameter[call[constant[using grid_size {0}, box_size {1}].format, parameter[name[step_size], name[box_size]]]]] call[name[logging].info, parameter[call[constant[on data shape {0}].format, parameter[name[shape]]]]] <ast.Tuple object at 0x7da20c76f520> assign[=] call[name[filter_mc_sharemem], parameter[name[im_name]]] call[name[logging].info, parameter[constant[done]]] variable[bkg_out] assign[=] call[constant[_].join, parameter[list[[<ast.Call object at 0x7da20c76e620>, <ast.Constant object at 0x7da20c76d450>]]]] variable[rms_out] assign[=] call[constant[_].join, parameter[list[[<ast.Call object at 0x7da20c76dde0>, <ast.Constant object at 0x7da20c76faf0>]]]] call[name[header]][constant[HISTORY]] assign[=] call[constant[BANE {0}-({1})].format, parameter[name[__version__], name[__date__]]] if name[compressed] begin[:] variable[hdu] assign[=] call[name[fits].PrimaryHDU, 
parameter[name[bkg]]] name[hdu].header assign[=] call[name[copy].deepcopy, parameter[name[header]]] variable[hdulist] assign[=] call[name[fits].HDUList, parameter[list[[<ast.Name object at 0x7da237eef0d0>]]]] call[name[compress], parameter[name[hdulist], call[name[step_size]][constant[0]], name[bkg_out]]] call[name[hdulist]][constant[0]].header assign[=] call[name[copy].deepcopy, parameter[name[header]]] call[name[hdulist]][constant[0]].data assign[=] name[rms] call[name[compress], parameter[name[hdulist], call[name[step_size]][constant[0]], name[rms_out]]] return[None] call[name[write_fits], parameter[name[bkg], name[header], name[bkg_out]]] call[name[write_fits], parameter[name[rms], name[header], name[rms_out]]]
keyword[def] identifier[filter_image] ( identifier[im_name] , identifier[out_base] , identifier[step_size] = keyword[None] , identifier[box_size] = keyword[None] , identifier[twopass] = keyword[False] , identifier[cores] = keyword[None] , identifier[mask] = keyword[True] , identifier[compressed] = keyword[False] , identifier[nslice] = keyword[None] ): literal[string] identifier[header] = identifier[fits] . identifier[getheader] ( identifier[im_name] ) identifier[shape] =( identifier[header] [ literal[string] ], identifier[header] [ literal[string] ]) keyword[if] identifier[step_size] keyword[is] keyword[None] : keyword[if] literal[string] keyword[in] identifier[header] keyword[and] literal[string] keyword[in] identifier[header] : identifier[beam_size] = identifier[np] . identifier[sqrt] ( identifier[abs] ( identifier[header] [ literal[string] ]* identifier[header] [ literal[string] ])) keyword[if] literal[string] keyword[in] identifier[header] : identifier[pix_scale] = identifier[np] . identifier[sqrt] ( identifier[abs] ( identifier[header] [ literal[string] ]* identifier[header] [ literal[string] ])) keyword[elif] literal[string] keyword[in] identifier[header] : identifier[pix_scale] = identifier[np] . identifier[sqrt] ( identifier[abs] ( identifier[header] [ literal[string] ]* identifier[header] [ literal[string] ])) keyword[if] literal[string] keyword[in] identifier[header] keyword[and] literal[string] keyword[in] identifier[header] : keyword[if] identifier[header] [ literal[string] ]!= literal[int] keyword[or] identifier[header] [ literal[string] ]!= literal[int] : identifier[logging] . identifier[warning] ( literal[string] ) identifier[logging] . identifier[warning] ( literal[string] ) keyword[else] : identifier[logging] . identifier[warning] ( literal[string] ) identifier[pix_scale] = identifier[beam_size] / literal[int] identifier[step_size] = identifier[int] ( identifier[np] . identifier[ceil] ( literal[int] * identifier[beam_size] / identifier[pix_scale] )) keyword[else] : identifier[logging] . identifier[info] ( literal[string] ) identifier[logging] . identifier[info] ( literal[string] ) identifier[step_size] = literal[int] identifier[step_size] =( identifier[step_size] , identifier[step_size] ) keyword[if] identifier[box_size] keyword[is] keyword[None] : identifier[box_size] =( identifier[step_size] [ literal[int] ]* literal[int] , identifier[step_size] [ literal[int] ]* literal[int] ) keyword[if] identifier[compressed] : keyword[if] keyword[not] identifier[step_size] [ literal[int] ]== identifier[step_size] [ literal[int] ]: identifier[step_size] =( identifier[min] ( identifier[step_size] ), identifier[min] ( identifier[step_size] )) identifier[logging] . identifier[info] ( literal[string] . identifier[format] ( identifier[step_size] )) identifier[logging] . identifier[info] ( literal[string] . identifier[format] ( identifier[step_size] , identifier[box_size] )) identifier[logging] . identifier[info] ( literal[string] . identifier[format] ( identifier[shape] )) identifier[bkg] , identifier[rms] = identifier[filter_mc_sharemem] ( identifier[im_name] , identifier[step_size] = identifier[step_size] , identifier[box_size] = identifier[box_size] , identifier[cores] = identifier[cores] , identifier[shape] = identifier[shape] , identifier[nslice] = identifier[nslice] , identifier[domask] = identifier[mask] ) identifier[logging] . identifier[info] ( literal[string] ) identifier[bkg_out] = literal[string] . identifier[join] ([ identifier[os] . identifier[path] . 
identifier[expanduser] ( identifier[out_base] ), literal[string] ]) identifier[rms_out] = literal[string] . identifier[join] ([ identifier[os] . identifier[path] . identifier[expanduser] ( identifier[out_base] ), literal[string] ]) identifier[header] [ literal[string] ]= literal[string] . identifier[format] ( identifier[__version__] , identifier[__date__] ) keyword[if] identifier[compressed] : identifier[hdu] = identifier[fits] . identifier[PrimaryHDU] ( identifier[bkg] ) identifier[hdu] . identifier[header] = identifier[copy] . identifier[deepcopy] ( identifier[header] ) identifier[hdulist] = identifier[fits] . identifier[HDUList] ([ identifier[hdu] ]) identifier[compress] ( identifier[hdulist] , identifier[step_size] [ literal[int] ], identifier[bkg_out] ) identifier[hdulist] [ literal[int] ]. identifier[header] = identifier[copy] . identifier[deepcopy] ( identifier[header] ) identifier[hdulist] [ literal[int] ]. identifier[data] = identifier[rms] identifier[compress] ( identifier[hdulist] , identifier[step_size] [ literal[int] ], identifier[rms_out] ) keyword[return] identifier[write_fits] ( identifier[bkg] , identifier[header] , identifier[bkg_out] ) identifier[write_fits] ( identifier[rms] , identifier[header] , identifier[rms_out] )
def filter_image(im_name, out_base, step_size=None, box_size=None, twopass=False, cores=None, mask=True, compressed=False, nslice=None): """ Create a background and noise image from an input image. Resulting images are written to `outbase_bkg.fits` and `outbase_rms.fits` Parameters ---------- im_name : str or HDUList Image to filter. Either a string filename or an astropy.io.fits.HDUList. out_base : str The output filename base. Will be modified to make _bkg and _rms files. step_size : (int,int) Tuple of the x,y step size in pixels box_size : (int,int) The size of the box in piexls twopass : bool Perform a second pass calculation to ensure that the noise is not contaminated by the background. Default = False cores : int Number of CPU corse to use. Default = all available nslice : int The image will be divided into this many horizontal stripes for processing. Default = None = equal to cores mask : bool Mask the output array to contain np.nna wherever the input array is nan or not finite. Default = true compressed : bool Return a compressed version of the background/noise images. Default = False Returns ------- None """ header = fits.getheader(im_name) shape = (header['NAXIS2'], header['NAXIS1']) if step_size is None: if 'BMAJ' in header and 'BMIN' in header: beam_size = np.sqrt(abs(header['BMAJ'] * header['BMIN'])) if 'CDELT1' in header: pix_scale = np.sqrt(abs(header['CDELT1'] * header['CDELT2'])) # depends on [control=['if'], data=['header']] elif 'CD1_1' in header: pix_scale = np.sqrt(abs(header['CD1_1'] * header['CD2_2'])) if 'CD1_2' in header and 'CD2_1' in header: if header['CD1_2'] != 0 or header['CD2_1'] != 0: logging.warning("CD1_2 and/or CD2_1 are non-zero and I don't know what to do with them") logging.warning('Ingoring them') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['header']] else: logging.warning('Cannot determine pixel scale, assuming 4 pixels per beam') pix_scale = beam_size / 4.0 # default to 4x the synthesized beam width step_size = int(np.ceil(4 * beam_size / pix_scale)) # depends on [control=['if'], data=[]] else: logging.info('BMAJ and/or BMIN not in fits header.') logging.info('Assuming 4 pix/beam, so we have step_size = 16 pixels') step_size = 16 step_size = (step_size, step_size) # depends on [control=['if'], data=['step_size']] if box_size is None: # default to 6x the step size so we have ~ 30beams box_size = (step_size[0] * 6, step_size[1] * 6) # depends on [control=['if'], data=['box_size']] if compressed: if not step_size[0] == step_size[1]: step_size = (min(step_size), min(step_size)) logging.info('Changing grid to be {0} so we can compress the output'.format(step_size)) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] logging.info('using grid_size {0}, box_size {1}'.format(step_size, box_size)) logging.info('on data shape {0}'.format(shape)) (bkg, rms) = filter_mc_sharemem(im_name, step_size=step_size, box_size=box_size, cores=cores, shape=shape, nslice=nslice, domask=mask) logging.info('done') bkg_out = '_'.join([os.path.expanduser(out_base), 'bkg.fits']) rms_out = '_'.join([os.path.expanduser(out_base), 'rms.fits']) # add a comment to the fits header header['HISTORY'] = 'BANE {0}-({1})'.format(__version__, __date__) # compress if compressed: hdu = fits.PrimaryHDU(bkg) hdu.header = copy.deepcopy(header) hdulist = fits.HDUList([hdu]) compress(hdulist, step_size[0], bkg_out) hdulist[0].header = copy.deepcopy(header) hdulist[0].data = rms compress(hdulist, 
step_size[0], rms_out) return # depends on [control=['if'], data=[]] write_fits(bkg, header, bkg_out) write_fits(rms, header, rms_out)
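A usage sketch for the filter above. The function ships with the BANE tool from AegeanTools, but the import path is stated here as an assumption; given image.fits, the call writes image_bkg.fits and image_rms.fits next to it.

from AegeanTools.BANE import filter_image  # import path assumed

filter_image('image.fits', 'image',
             cores=4,            # worker processes
             nslice=4,           # horizontal stripes to process
             compressed=False)   # True writes the compressed HDU variant instead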
def get_task(self, task_id): """Get task meta for task by ``task_id``. :keyword exception_retry_count: How many times to retry by transaction rollback on exception. This could theoretically happen in a race condition if another worker is trying to create the same task. The default is to retry once. """ try: return self.get(task_id=task_id) except self.model.DoesNotExist: if self._last_id == task_id: self.warn_if_repeatable_read() self._last_id = task_id return self.model(task_id=task_id)
def function[get_task, parameter[self, task_id]]: constant[Get task meta for task by ``task_id``. :keyword exception_retry_count: How many times to retry by transaction rollback on exception. This could theoretically happen in a race condition if another worker is trying to create the same task. The default is to retry once. ] <ast.Try object at 0x7da18f09cf10>
keyword[def] identifier[get_task] ( identifier[self] , identifier[task_id] ): literal[string] keyword[try] : keyword[return] identifier[self] . identifier[get] ( identifier[task_id] = identifier[task_id] ) keyword[except] identifier[self] . identifier[model] . identifier[DoesNotExist] : keyword[if] identifier[self] . identifier[_last_id] == identifier[task_id] : identifier[self] . identifier[warn_if_repeatable_read] () identifier[self] . identifier[_last_id] = identifier[task_id] keyword[return] identifier[self] . identifier[model] ( identifier[task_id] = identifier[task_id] )
def get_task(self, task_id): """Get task meta for task by ``task_id``. :keyword exception_retry_count: How many times to retry by transaction rollback on exception. This could theoretically happen in a race condition if another worker is trying to create the same task. The default is to retry once. """ try: return self.get(task_id=task_id) # depends on [control=['try'], data=[]] except self.model.DoesNotExist: if self._last_id == task_id: self.warn_if_repeatable_read() # depends on [control=['if'], data=[]] self._last_id = task_id return self.model(task_id=task_id) # depends on [control=['except'], data=[]]
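A hedged sketch of calling the manager method above; TaskMeta is a placeholder name for whichever Django model this manager is attached to.

meta = TaskMeta.objects.get_task('7f6d1c52-9c2e-4e61-8e0a-2f35bb1f2d10')
# If no row exists yet, an unsaved model instance carrying that task_id comes
# back instead of DoesNotExist being raised, so the caller can populate and save it.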
def get_reminders_per_page(self, per_page=1000, page=1, params=None): """ Get reminders per page :param per_page: How many objects per page. Default: 1000 :param page: Which page. Default: 1 :param params: Search parameters. Default: {} :return: list """ return self._get_resource_per_page(resource=REMINDERS, per_page=per_page, page=page, params=params)
def function[get_reminders_per_page, parameter[self, per_page, page, params]]: constant[ Get reminders per page :param per_page: How many objects per page. Default: 1000 :param page: Which page. Default: 1 :param params: Search parameters. Default: {} :return: list ] return[call[name[self]._get_resource_per_page, parameter[]]]
keyword[def] identifier[get_reminders_per_page] ( identifier[self] , identifier[per_page] = literal[int] , identifier[page] = literal[int] , identifier[params] = keyword[None] ): literal[string] keyword[return] identifier[self] . identifier[_get_resource_per_page] ( identifier[resource] = identifier[REMINDERS] , identifier[per_page] = identifier[per_page] , identifier[page] = identifier[page] , identifier[params] = identifier[params] )
def get_reminders_per_page(self, per_page=1000, page=1, params=None): """ Get reminders per page :param per_page: How many objects per page. Default: 1000 :param page: Which page. Default: 1 :param params: Search parameters. Default: {} :return: list """ return self._get_resource_per_page(resource=REMINDERS, per_page=per_page, page=page, params=params)
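Since the call above returns a single page, callers usually loop until a short page signals the end; `client` is assumed to be an instance of the surrounding API wrapper.

page = 1
reminders = []
while True:
    batch = client.get_reminders_per_page(per_page=500, page=page)
    reminders.extend(batch)
    if len(batch) < 500:   # short (or empty) page: nothing left to fetch
        break
    page += 1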
def add_cset_entries(self, ordered_rev_list, timestamp=False, number_forward=True): ''' Adds a list of revisions to the table. Assumes ordered_rev_list is an ordered based on how changesets are found in the changelog. Going forwards or backwards is dealt with by flipping the list :param ordered_cset_list: Order given from changeset log searching. :param timestamp: If false, records are kept indefinitely but if holes exist: (delete, None, delete, None) those delete's with None's around them will not be deleted. :param numbered: If True, this function will number the revision list by going forward from max(revNum), else it'll go backwards from revNum, then add X to all revnums and self.next_revnum where X is the length of ordered_rev_list :return: ''' with self.conn.transaction() as t: current_min = t.get_one("SELECT min(revnum) FROM csetlog")[0] current_max = t.get_one("SELECT max(revnum) FROM csetlog")[0] if not current_min or not current_max: current_min = 0 current_max = 0 direction = -1 start = current_min - 1 if number_forward: direction = 1 start = current_max + 1 ordered_rev_list = ordered_rev_list[::-1] insert_list = [ ( start + direction * count, rev, int(time.time()) if timestamp else -1 ) for count, rev in enumerate(ordered_rev_list) ] # In case of overlapping requests fmt_insert_list = [] for cset_entry in insert_list: tmp = self._get_one_revision(t, cset_entry) if not tmp: fmt_insert_list.append(cset_entry) for _, tmp_insert_list in jx.groupby(fmt_insert_list, size=SQL_CSET_BATCH_SIZE): t.execute( "INSERT INTO csetLog (revnum, revision, timestamp)" + " VALUES " + sql_list( quote_set((revnum, revision, timestamp)) for revnum, revision, timestamp in tmp_insert_list ) ) # Move the revision numbers forward if needed self.recompute_table_revnums() # Start a maintenance run if needed if self.check_for_maintenance(): self.maintenance_signal.go()
def function[add_cset_entries, parameter[self, ordered_rev_list, timestamp, number_forward]]: constant[ Adds a list of revisions to the table. Assumes ordered_rev_list is an ordered based on how changesets are found in the changelog. Going forwards or backwards is dealt with by flipping the list :param ordered_cset_list: Order given from changeset log searching. :param timestamp: If false, records are kept indefinitely but if holes exist: (delete, None, delete, None) those delete's with None's around them will not be deleted. :param numbered: If True, this function will number the revision list by going forward from max(revNum), else it'll go backwards from revNum, then add X to all revnums and self.next_revnum where X is the length of ordered_rev_list :return: ] with call[name[self].conn.transaction, parameter[]] begin[:] variable[current_min] assign[=] call[call[name[t].get_one, parameter[constant[SELECT min(revnum) FROM csetlog]]]][constant[0]] variable[current_max] assign[=] call[call[name[t].get_one, parameter[constant[SELECT max(revnum) FROM csetlog]]]][constant[0]] if <ast.BoolOp object at 0x7da1b2344eb0> begin[:] variable[current_min] assign[=] constant[0] variable[current_max] assign[=] constant[0] variable[direction] assign[=] <ast.UnaryOp object at 0x7da1b23451b0> variable[start] assign[=] binary_operation[name[current_min] - constant[1]] if name[number_forward] begin[:] variable[direction] assign[=] constant[1] variable[start] assign[=] binary_operation[name[current_max] + constant[1]] variable[ordered_rev_list] assign[=] call[name[ordered_rev_list]][<ast.Slice object at 0x7da1b23441c0>] variable[insert_list] assign[=] <ast.ListComp object at 0x7da1b2345480> variable[fmt_insert_list] assign[=] list[[]] for taget[name[cset_entry]] in starred[name[insert_list]] begin[:] variable[tmp] assign[=] call[name[self]._get_one_revision, parameter[name[t], name[cset_entry]]] if <ast.UnaryOp object at 0x7da1b2346ec0> begin[:] call[name[fmt_insert_list].append, parameter[name[cset_entry]]] for taget[tuple[[<ast.Name object at 0x7da1b23443d0>, <ast.Name object at 0x7da1b2345c90>]]] in starred[call[name[jx].groupby, parameter[name[fmt_insert_list]]]] begin[:] call[name[t].execute, parameter[binary_operation[binary_operation[constant[INSERT INTO csetLog (revnum, revision, timestamp)] + constant[ VALUES ]] + call[name[sql_list], parameter[<ast.GeneratorExp object at 0x7da1b23476d0>]]]]] call[name[self].recompute_table_revnums, parameter[]] if call[name[self].check_for_maintenance, parameter[]] begin[:] call[name[self].maintenance_signal.go, parameter[]]
keyword[def] identifier[add_cset_entries] ( identifier[self] , identifier[ordered_rev_list] , identifier[timestamp] = keyword[False] , identifier[number_forward] = keyword[True] ): literal[string] keyword[with] identifier[self] . identifier[conn] . identifier[transaction] () keyword[as] identifier[t] : identifier[current_min] = identifier[t] . identifier[get_one] ( literal[string] )[ literal[int] ] identifier[current_max] = identifier[t] . identifier[get_one] ( literal[string] )[ literal[int] ] keyword[if] keyword[not] identifier[current_min] keyword[or] keyword[not] identifier[current_max] : identifier[current_min] = literal[int] identifier[current_max] = literal[int] identifier[direction] =- literal[int] identifier[start] = identifier[current_min] - literal[int] keyword[if] identifier[number_forward] : identifier[direction] = literal[int] identifier[start] = identifier[current_max] + literal[int] identifier[ordered_rev_list] = identifier[ordered_rev_list] [::- literal[int] ] identifier[insert_list] =[ ( identifier[start] + identifier[direction] * identifier[count] , identifier[rev] , identifier[int] ( identifier[time] . identifier[time] ()) keyword[if] identifier[timestamp] keyword[else] - literal[int] ) keyword[for] identifier[count] , identifier[rev] keyword[in] identifier[enumerate] ( identifier[ordered_rev_list] ) ] identifier[fmt_insert_list] =[] keyword[for] identifier[cset_entry] keyword[in] identifier[insert_list] : identifier[tmp] = identifier[self] . identifier[_get_one_revision] ( identifier[t] , identifier[cset_entry] ) keyword[if] keyword[not] identifier[tmp] : identifier[fmt_insert_list] . identifier[append] ( identifier[cset_entry] ) keyword[for] identifier[_] , identifier[tmp_insert_list] keyword[in] identifier[jx] . identifier[groupby] ( identifier[fmt_insert_list] , identifier[size] = identifier[SQL_CSET_BATCH_SIZE] ): identifier[t] . identifier[execute] ( literal[string] + literal[string] + identifier[sql_list] ( identifier[quote_set] (( identifier[revnum] , identifier[revision] , identifier[timestamp] )) keyword[for] identifier[revnum] , identifier[revision] , identifier[timestamp] keyword[in] identifier[tmp_insert_list] ) ) identifier[self] . identifier[recompute_table_revnums] () keyword[if] identifier[self] . identifier[check_for_maintenance] (): identifier[self] . identifier[maintenance_signal] . identifier[go] ()
def add_cset_entries(self, ordered_rev_list, timestamp=False, number_forward=True): """ Adds a list of revisions to the table. Assumes ordered_rev_list is an ordered based on how changesets are found in the changelog. Going forwards or backwards is dealt with by flipping the list :param ordered_cset_list: Order given from changeset log searching. :param timestamp: If false, records are kept indefinitely but if holes exist: (delete, None, delete, None) those delete's with None's around them will not be deleted. :param numbered: If True, this function will number the revision list by going forward from max(revNum), else it'll go backwards from revNum, then add X to all revnums and self.next_revnum where X is the length of ordered_rev_list :return: """ with self.conn.transaction() as t: current_min = t.get_one('SELECT min(revnum) FROM csetlog')[0] current_max = t.get_one('SELECT max(revnum) FROM csetlog')[0] if not current_min or not current_max: current_min = 0 current_max = 0 # depends on [control=['if'], data=[]] direction = -1 start = current_min - 1 if number_forward: direction = 1 start = current_max + 1 ordered_rev_list = ordered_rev_list[::-1] # depends on [control=['if'], data=[]] insert_list = [(start + direction * count, rev, int(time.time()) if timestamp else -1) for (count, rev) in enumerate(ordered_rev_list)] # In case of overlapping requests fmt_insert_list = [] for cset_entry in insert_list: tmp = self._get_one_revision(t, cset_entry) if not tmp: fmt_insert_list.append(cset_entry) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['cset_entry']] for (_, tmp_insert_list) in jx.groupby(fmt_insert_list, size=SQL_CSET_BATCH_SIZE): t.execute('INSERT INTO csetLog (revnum, revision, timestamp)' + ' VALUES ' + sql_list((quote_set((revnum, revision, timestamp)) for (revnum, revision, timestamp) in tmp_insert_list))) # depends on [control=['for'], data=[]] # Move the revision numbers forward if needed self.recompute_table_revnums() # depends on [control=['with'], data=['t']] # Start a maintenance run if needed if self.check_for_maintenance(): self.maintenance_signal.go() # depends on [control=['if'], data=[]]
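A usage sketch, assuming `csetlog` is an instance of the surrounding class with its database connection already open; the revision hashes are invented.

# Revisions as they come out of changelog searching, i.e. newest first.
new_csets = ['b3f1a9d401c2', '9e8d7c6b5a41', '0123456789ab']
# Record them with wall-clock timestamps, numbering forward from max(revnum).
csetlog.add_cset_entries(new_csets, timestamp=True, number_forward=True)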
def get_branch_mutation_matrix(self, node, full_sequence=False): """uses results from marginal ancestral inference to return a joint distribution of the sequence states at both ends of the branch. Parameters ---------- node : Phylo.clade node of the tree full_sequence : bool, optional expand the sequence to the full sequence, if false (default) the there will be one mutation matrix for each column in the reduced alignment Returns ------- numpy.array an Lxqxq stack of matrices (q=alphabet size, L (reduced)sequence length) """ pp,pc = self.marginal_branch_profile(node) # calculate pc_i [e^Qt]_ij pp_j for each site expQt = self.gtr.expQt(self._branch_length_to_gtr(node)) if len(expQt.shape)==3: # site specific model mut_matrix_stack = np.einsum('ai,aj,ija->aij', pc, pp, expQt) else: mut_matrix_stack = np.einsum('ai,aj,ij->aij', pc, pp, expQt) # normalize this distribution normalizer = mut_matrix_stack.sum(axis=2).sum(axis=1) mut_matrix_stack = np.einsum('aij,a->aij', mut_matrix_stack, 1.0/normalizer) # expand to full sequence if requested if full_sequence: return mut_matrix_stack[self.full_to_reduced_sequence_map] else: return mut_matrix_stack
def function[get_branch_mutation_matrix, parameter[self, node, full_sequence]]: constant[uses results from marginal ancestral inference to return a joint distribution of the sequence states at both ends of the branch. Parameters ---------- node : Phylo.clade node of the tree full_sequence : bool, optional expand the sequence to the full sequence, if false (default) the there will be one mutation matrix for each column in the reduced alignment Returns ------- numpy.array an Lxqxq stack of matrices (q=alphabet size, L (reduced)sequence length) ] <ast.Tuple object at 0x7da1b02a7ac0> assign[=] call[name[self].marginal_branch_profile, parameter[name[node]]] variable[expQt] assign[=] call[name[self].gtr.expQt, parameter[call[name[self]._branch_length_to_gtr, parameter[name[node]]]]] if compare[call[name[len], parameter[name[expQt].shape]] equal[==] constant[3]] begin[:] variable[mut_matrix_stack] assign[=] call[name[np].einsum, parameter[constant[ai,aj,ija->aij], name[pc], name[pp], name[expQt]]] variable[normalizer] assign[=] call[call[name[mut_matrix_stack].sum, parameter[]].sum, parameter[]] variable[mut_matrix_stack] assign[=] call[name[np].einsum, parameter[constant[aij,a->aij], name[mut_matrix_stack], binary_operation[constant[1.0] / name[normalizer]]]] if name[full_sequence] begin[:] return[call[name[mut_matrix_stack]][name[self].full_to_reduced_sequence_map]]
keyword[def] identifier[get_branch_mutation_matrix] ( identifier[self] , identifier[node] , identifier[full_sequence] = keyword[False] ): literal[string] identifier[pp] , identifier[pc] = identifier[self] . identifier[marginal_branch_profile] ( identifier[node] ) identifier[expQt] = identifier[self] . identifier[gtr] . identifier[expQt] ( identifier[self] . identifier[_branch_length_to_gtr] ( identifier[node] )) keyword[if] identifier[len] ( identifier[expQt] . identifier[shape] )== literal[int] : identifier[mut_matrix_stack] = identifier[np] . identifier[einsum] ( literal[string] , identifier[pc] , identifier[pp] , identifier[expQt] ) keyword[else] : identifier[mut_matrix_stack] = identifier[np] . identifier[einsum] ( literal[string] , identifier[pc] , identifier[pp] , identifier[expQt] ) identifier[normalizer] = identifier[mut_matrix_stack] . identifier[sum] ( identifier[axis] = literal[int] ). identifier[sum] ( identifier[axis] = literal[int] ) identifier[mut_matrix_stack] = identifier[np] . identifier[einsum] ( literal[string] , identifier[mut_matrix_stack] , literal[int] / identifier[normalizer] ) keyword[if] identifier[full_sequence] : keyword[return] identifier[mut_matrix_stack] [ identifier[self] . identifier[full_to_reduced_sequence_map] ] keyword[else] : keyword[return] identifier[mut_matrix_stack]
def get_branch_mutation_matrix(self, node, full_sequence=False): """uses results from marginal ancestral inference to return a joint distribution of the sequence states at both ends of the branch. Parameters ---------- node : Phylo.clade node of the tree full_sequence : bool, optional expand the sequence to the full sequence, if false (default) the there will be one mutation matrix for each column in the reduced alignment Returns ------- numpy.array an Lxqxq stack of matrices (q=alphabet size, L (reduced)sequence length) """ (pp, pc) = self.marginal_branch_profile(node) # calculate pc_i [e^Qt]_ij pp_j for each site expQt = self.gtr.expQt(self._branch_length_to_gtr(node)) if len(expQt.shape) == 3: # site specific model mut_matrix_stack = np.einsum('ai,aj,ija->aij', pc, pp, expQt) # depends on [control=['if'], data=[]] else: mut_matrix_stack = np.einsum('ai,aj,ij->aij', pc, pp, expQt) # normalize this distribution normalizer = mut_matrix_stack.sum(axis=2).sum(axis=1) mut_matrix_stack = np.einsum('aij,a->aij', mut_matrix_stack, 1.0 / normalizer) # expand to full sequence if requested if full_sequence: return mut_matrix_stack[self.full_to_reduced_sequence_map] # depends on [control=['if'], data=[]] else: return mut_matrix_stack
def extractClips(self, specsFilePathOrStr, outputDir=None, zipOutput=False): """Extract clips according to the specification file or string. Arguments: specsFilePathOrStr (str): Specification file path or string outputDir (str): Location of the extracted clips zipOutput (bool): Archive extracted clips' flag Specifications format: <begin:seconds> <end:seconds> [<text_metadata>] 20.5 59.75 Discussion about dogs 105.3 200.3 Cat story Notes: <text_metadata> is completely optional """ clips = SpecsParser.parse(specsFilePathOrStr) # Output to current working directory if no outputDir was provided if not outputDir: outputDir = os.path.abspath('.') zipFile = None if zipOutput: bname = os.path.splitext(os.path.basename(specsFilePathOrStr))[0] zipPath = "%s_clips.zip" % bname zipFile = zipfile.ZipFile(os.path.join(outputDir, zipPath), mode='w') for i, clip in enumerate(clips): # 13 clips => clip01.mp3, clip12.mp3... filenameFormat = 'clip%%0%dd.mp3' % len(str(len(clips))) filepath = os.path.join(outputDir, filenameFormat % (i+1)) clipData = self._extractClipData(clip) with open(filepath, 'wb') as f_out: f_out.write(clipData) if zipFile: zipFile.write(filepath, arcname=os.path.basename(filepath)) os.unlink(filepath) if zipFile: zipFile.close()
def function[extractClips, parameter[self, specsFilePathOrStr, outputDir, zipOutput]]: constant[Extract clips according to the specification file or string. Arguments: specsFilePathOrStr (str): Specification file path or string outputDir (str): Location of the extracted clips zipOutput (bool): Archive extracted clips' flag Specifications format: <begin:seconds> <end:seconds> [<text_metadata>] 20.5 59.75 Discussion about dogs 105.3 200.3 Cat story Notes: <text_metadata> is completely optional ] variable[clips] assign[=] call[name[SpecsParser].parse, parameter[name[specsFilePathOrStr]]] if <ast.UnaryOp object at 0x7da18f810310> begin[:] variable[outputDir] assign[=] call[name[os].path.abspath, parameter[constant[.]]] variable[zipFile] assign[=] constant[None] if name[zipOutput] begin[:] variable[bname] assign[=] call[call[name[os].path.splitext, parameter[call[name[os].path.basename, parameter[name[specsFilePathOrStr]]]]]][constant[0]] variable[zipPath] assign[=] binary_operation[constant[%s_clips.zip] <ast.Mod object at 0x7da2590d6920> name[bname]] variable[zipFile] assign[=] call[name[zipfile].ZipFile, parameter[call[name[os].path.join, parameter[name[outputDir], name[zipPath]]]]] for taget[tuple[[<ast.Name object at 0x7da18f810970>, <ast.Name object at 0x7da18f811780>]]] in starred[call[name[enumerate], parameter[name[clips]]]] begin[:] variable[filenameFormat] assign[=] binary_operation[constant[clip%%0%dd.mp3] <ast.Mod object at 0x7da2590d6920> call[name[len], parameter[call[name[str], parameter[call[name[len], parameter[name[clips]]]]]]]] variable[filepath] assign[=] call[name[os].path.join, parameter[name[outputDir], binary_operation[name[filenameFormat] <ast.Mod object at 0x7da2590d6920> binary_operation[name[i] + constant[1]]]]] variable[clipData] assign[=] call[name[self]._extractClipData, parameter[name[clip]]] with call[name[open], parameter[name[filepath], constant[wb]]] begin[:] call[name[f_out].write, parameter[name[clipData]]] if name[zipFile] begin[:] call[name[zipFile].write, parameter[name[filepath]]] call[name[os].unlink, parameter[name[filepath]]] if name[zipFile] begin[:] call[name[zipFile].close, parameter[]]
keyword[def] identifier[extractClips] ( identifier[self] , identifier[specsFilePathOrStr] , identifier[outputDir] = keyword[None] , identifier[zipOutput] = keyword[False] ): literal[string] identifier[clips] = identifier[SpecsParser] . identifier[parse] ( identifier[specsFilePathOrStr] ) keyword[if] keyword[not] identifier[outputDir] : identifier[outputDir] = identifier[os] . identifier[path] . identifier[abspath] ( literal[string] ) identifier[zipFile] = keyword[None] keyword[if] identifier[zipOutput] : identifier[bname] = identifier[os] . identifier[path] . identifier[splitext] ( identifier[os] . identifier[path] . identifier[basename] ( identifier[specsFilePathOrStr] ))[ literal[int] ] identifier[zipPath] = literal[string] % identifier[bname] identifier[zipFile] = identifier[zipfile] . identifier[ZipFile] ( identifier[os] . identifier[path] . identifier[join] ( identifier[outputDir] , identifier[zipPath] ), identifier[mode] = literal[string] ) keyword[for] identifier[i] , identifier[clip] keyword[in] identifier[enumerate] ( identifier[clips] ): identifier[filenameFormat] = literal[string] % identifier[len] ( identifier[str] ( identifier[len] ( identifier[clips] ))) identifier[filepath] = identifier[os] . identifier[path] . identifier[join] ( identifier[outputDir] , identifier[filenameFormat] %( identifier[i] + literal[int] )) identifier[clipData] = identifier[self] . identifier[_extractClipData] ( identifier[clip] ) keyword[with] identifier[open] ( identifier[filepath] , literal[string] ) keyword[as] identifier[f_out] : identifier[f_out] . identifier[write] ( identifier[clipData] ) keyword[if] identifier[zipFile] : identifier[zipFile] . identifier[write] ( identifier[filepath] , identifier[arcname] = identifier[os] . identifier[path] . identifier[basename] ( identifier[filepath] )) identifier[os] . identifier[unlink] ( identifier[filepath] ) keyword[if] identifier[zipFile] : identifier[zipFile] . identifier[close] ()
def extractClips(self, specsFilePathOrStr, outputDir=None, zipOutput=False): """Extract clips according to the specification file or string. Arguments: specsFilePathOrStr (str): Specification file path or string outputDir (str): Location of the extracted clips zipOutput (bool): Archive extracted clips' flag Specifications format: <begin:seconds> <end:seconds> [<text_metadata>] 20.5 59.75 Discussion about dogs 105.3 200.3 Cat story Notes: <text_metadata> is completely optional """ clips = SpecsParser.parse(specsFilePathOrStr) # Output to current working directory if no outputDir was provided if not outputDir: outputDir = os.path.abspath('.') # depends on [control=['if'], data=[]] zipFile = None if zipOutput: bname = os.path.splitext(os.path.basename(specsFilePathOrStr))[0] zipPath = '%s_clips.zip' % bname zipFile = zipfile.ZipFile(os.path.join(outputDir, zipPath), mode='w') # depends on [control=['if'], data=[]] for (i, clip) in enumerate(clips): # 13 clips => clip01.mp3, clip12.mp3... filenameFormat = 'clip%%0%dd.mp3' % len(str(len(clips))) filepath = os.path.join(outputDir, filenameFormat % (i + 1)) clipData = self._extractClipData(clip) with open(filepath, 'wb') as f_out: f_out.write(clipData) # depends on [control=['with'], data=['f_out']] if zipFile: zipFile.write(filepath, arcname=os.path.basename(filepath)) os.unlink(filepath) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] if zipFile: zipFile.close() # depends on [control=['if'], data=[]]
def _get_base_defaultLayer(self): """ This is the environment implementation of :attr:`BaseFont.defaultLayer`. Return the default layer as a :class:`BaseLayer` object. The layer will be normalized with :func:`normalizers.normalizeLayer`. Subclasses must override this method. """ name = self.defaultLayerName layer = self.getLayer(name) return layer
def function[_get_base_defaultLayer, parameter[self]]: constant[ This is the environment implementation of :attr:`BaseFont.defaultLayer`. Return the default layer as a :class:`BaseLayer` object. The layer will be normalized with :func:`normalizers.normalizeLayer`. Subclasses must override this method. ] variable[name] assign[=] name[self].defaultLayerName variable[layer] assign[=] call[name[self].getLayer, parameter[name[name]]] return[name[layer]]
keyword[def] identifier[_get_base_defaultLayer] ( identifier[self] ): literal[string] identifier[name] = identifier[self] . identifier[defaultLayerName] identifier[layer] = identifier[self] . identifier[getLayer] ( identifier[name] ) keyword[return] identifier[layer]
def _get_base_defaultLayer(self): """ This is the environment implementation of :attr:`BaseFont.defaultLayer`. Return the default layer as a :class:`BaseLayer` object. The layer will be normalized with :func:`normalizers.normalizeLayer`. Subclasses must override this method. """ name = self.defaultLayerName layer = self.getLayer(name) return layer
def decode(codec, stream, image): """Reads an entire image. Wraps the openjp2 library function opj_decode. Parameters ---------- codec : CODEC_TYPE The JPEG2000 codec stream : STREAM_TYPE_P The stream to decode. image : ImageType Output image structure. Raises ------ RuntimeError If the OpenJPEG library routine opj_decode fails. """ OPENJP2.opj_decode.argtypes = [CODEC_TYPE, STREAM_TYPE_P, ctypes.POINTER(ImageType)] OPENJP2.opj_decode.restype = check_error OPENJP2.opj_decode(codec, stream, image)
def function[decode, parameter[codec, stream, image]]: constant[Reads an entire image. Wraps the openjp2 library function opj_decode. Parameters ---------- codec : CODEC_TYPE The JPEG2000 codec stream : STREAM_TYPE_P The stream to decode. image : ImageType Output image structure. Raises ------ RuntimeError If the OpenJPEG library routine opj_decode fails. ] name[OPENJP2].opj_decode.argtypes assign[=] list[[<ast.Name object at 0x7da20c6c5870>, <ast.Name object at 0x7da20c6c45b0>, <ast.Call object at 0x7da20c6c4430>]] name[OPENJP2].opj_decode.restype assign[=] name[check_error] call[name[OPENJP2].opj_decode, parameter[name[codec], name[stream], name[image]]]
keyword[def] identifier[decode] ( identifier[codec] , identifier[stream] , identifier[image] ): literal[string] identifier[OPENJP2] . identifier[opj_decode] . identifier[argtypes] =[ identifier[CODEC_TYPE] , identifier[STREAM_TYPE_P] , identifier[ctypes] . identifier[POINTER] ( identifier[ImageType] )] identifier[OPENJP2] . identifier[opj_decode] . identifier[restype] = identifier[check_error] identifier[OPENJP2] . identifier[opj_decode] ( identifier[codec] , identifier[stream] , identifier[image] )
def decode(codec, stream, image): """Reads an entire image. Wraps the openjp2 library function opj_decode. Parameters ---------- codec : CODEC_TYPE The JPEG2000 codec stream : STREAM_TYPE_P The stream to decode. image : ImageType Output image structure. Raises ------ RuntimeError If the OpenJPEG library routine opj_decode fails. """ OPENJP2.opj_decode.argtypes = [CODEC_TYPE, STREAM_TYPE_P, ctypes.POINTER(ImageType)] OPENJP2.opj_decode.restype = check_error OPENJP2.opj_decode(codec, stream, image)
def read_file(self, file_name, section=None): """Read settings from specified ``section`` of config file.""" file_name, section = self.parse_file_name_and_section(file_name, section) if not os.path.isfile(file_name): raise SettingsFileNotFoundError(file_name) parser = self.make_parser() with open(file_name) as fp: parser.read_file(fp) settings = OrderedDict() if parser.has_section(section): section_dict = parser[section] self.section_found_while_reading = True else: section_dict = parser.defaults().copy() extends = section_dict.get('extends') if extends: extends = self.decode_value(extends) extends, extends_section = self.parse_file_name_and_section( extends, extender=file_name, extender_section=section) settings.update(self.read_file(extends, extends_section)) settings.update(section_dict) if not self.section_found_while_reading: raise SettingsFileSectionNotFoundError(section) return settings
def function[read_file, parameter[self, file_name, section]]: constant[Read settings from specified ``section`` of config file.] <ast.Tuple object at 0x7da204960070> assign[=] call[name[self].parse_file_name_and_section, parameter[name[file_name], name[section]]] if <ast.UnaryOp object at 0x7da204963070> begin[:] <ast.Raise object at 0x7da204960d90> variable[parser] assign[=] call[name[self].make_parser, parameter[]] with call[name[open], parameter[name[file_name]]] begin[:] call[name[parser].read_file, parameter[name[fp]]] variable[settings] assign[=] call[name[OrderedDict], parameter[]] if call[name[parser].has_section, parameter[name[section]]] begin[:] variable[section_dict] assign[=] call[name[parser]][name[section]] name[self].section_found_while_reading assign[=] constant[True] variable[extends] assign[=] call[name[section_dict].get, parameter[constant[extends]]] if name[extends] begin[:] variable[extends] assign[=] call[name[self].decode_value, parameter[name[extends]]] <ast.Tuple object at 0x7da20c6c69e0> assign[=] call[name[self].parse_file_name_and_section, parameter[name[extends]]] call[name[settings].update, parameter[call[name[self].read_file, parameter[name[extends], name[extends_section]]]]] call[name[settings].update, parameter[name[section_dict]]] if <ast.UnaryOp object at 0x7da20c6c5f90> begin[:] <ast.Raise object at 0x7da20c6c7460> return[name[settings]]
keyword[def] identifier[read_file] ( identifier[self] , identifier[file_name] , identifier[section] = keyword[None] ): literal[string] identifier[file_name] , identifier[section] = identifier[self] . identifier[parse_file_name_and_section] ( identifier[file_name] , identifier[section] ) keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[isfile] ( identifier[file_name] ): keyword[raise] identifier[SettingsFileNotFoundError] ( identifier[file_name] ) identifier[parser] = identifier[self] . identifier[make_parser] () keyword[with] identifier[open] ( identifier[file_name] ) keyword[as] identifier[fp] : identifier[parser] . identifier[read_file] ( identifier[fp] ) identifier[settings] = identifier[OrderedDict] () keyword[if] identifier[parser] . identifier[has_section] ( identifier[section] ): identifier[section_dict] = identifier[parser] [ identifier[section] ] identifier[self] . identifier[section_found_while_reading] = keyword[True] keyword[else] : identifier[section_dict] = identifier[parser] . identifier[defaults] (). identifier[copy] () identifier[extends] = identifier[section_dict] . identifier[get] ( literal[string] ) keyword[if] identifier[extends] : identifier[extends] = identifier[self] . identifier[decode_value] ( identifier[extends] ) identifier[extends] , identifier[extends_section] = identifier[self] . identifier[parse_file_name_and_section] ( identifier[extends] , identifier[extender] = identifier[file_name] , identifier[extender_section] = identifier[section] ) identifier[settings] . identifier[update] ( identifier[self] . identifier[read_file] ( identifier[extends] , identifier[extends_section] )) identifier[settings] . identifier[update] ( identifier[section_dict] ) keyword[if] keyword[not] identifier[self] . identifier[section_found_while_reading] : keyword[raise] identifier[SettingsFileSectionNotFoundError] ( identifier[section] ) keyword[return] identifier[settings]
def read_file(self, file_name, section=None): """Read settings from specified ``section`` of config file.""" (file_name, section) = self.parse_file_name_and_section(file_name, section) if not os.path.isfile(file_name): raise SettingsFileNotFoundError(file_name) # depends on [control=['if'], data=[]] parser = self.make_parser() with open(file_name) as fp: parser.read_file(fp) # depends on [control=['with'], data=['fp']] settings = OrderedDict() if parser.has_section(section): section_dict = parser[section] self.section_found_while_reading = True # depends on [control=['if'], data=[]] else: section_dict = parser.defaults().copy() extends = section_dict.get('extends') if extends: extends = self.decode_value(extends) (extends, extends_section) = self.parse_file_name_and_section(extends, extender=file_name, extender_section=section) settings.update(self.read_file(extends, extends_section)) # depends on [control=['if'], data=[]] settings.update(section_dict) if not self.section_found_while_reading: raise SettingsFileSectionNotFoundError(section) # depends on [control=['if'], data=[]] return settings
def create_dataset(self, owner_id, **kwargs): """Create a new dataset :param owner_id: Username of the owner of the new dataset :type owner_id: str :param title: Dataset title (will be used to generate dataset id on creation) :type title: str :param description: Dataset description :type description: str, optional :param summary: Dataset summary markdown :type summary: str, optional :param tags: Dataset tags :type tags: list, optional :param license: Dataset license :type license: {'Public Domain', 'PDDL', 'CC-0', 'CC-BY', 'ODC-BY', 'CC-BY-SA', 'ODC-ODbL', 'CC BY-NC', 'CC BY-NC-SA', 'Other'} :param visibility: Dataset visibility :type visibility: {'OPEN', 'PRIVATE'} :param files: File name as dict, source URLs, description and labels() as properties :type files: dict, optional *Description and labels are optional* :returns: Newly created dataset key :rtype: str :raises RestApiException: If a server error occurs Examples -------- >>> import datadotworld as dw >>> api_client = dw.api_client() >>> url = 'http://www.acme.inc/example.csv' >>> api_client.create_dataset( ... 'username', title='Test dataset', visibility='PRIVATE', ... license='Public Domain', ... files={'dataset.csv':{'url': url}}) # doctest: +SKIP """ request = self.__build_dataset_obj( lambda: _swagger.DatasetCreateRequest( title=kwargs.get('title'), visibility=kwargs.get('visibility')), lambda name, url, expand_archive, description, labels: _swagger.FileCreateRequest( name=name, source=_swagger.FileSourceCreateRequest( url=url, expand_archive=expand_archive), description=description, labels=labels), kwargs) try: (_, _, headers) = self._datasets_api.create_dataset_with_http_info( owner_id, request, _return_http_data_only=False) if 'Location' in headers: return headers['Location'] except _swagger.rest.ApiException as e: raise RestApiError(cause=e)
def function[create_dataset, parameter[self, owner_id]]: constant[Create a new dataset :param owner_id: Username of the owner of the new dataset :type owner_id: str :param title: Dataset title (will be used to generate dataset id on creation) :type title: str :param description: Dataset description :type description: str, optional :param summary: Dataset summary markdown :type summary: str, optional :param tags: Dataset tags :type tags: list, optional :param license: Dataset license :type license: {'Public Domain', 'PDDL', 'CC-0', 'CC-BY', 'ODC-BY', 'CC-BY-SA', 'ODC-ODbL', 'CC BY-NC', 'CC BY-NC-SA', 'Other'} :param visibility: Dataset visibility :type visibility: {'OPEN', 'PRIVATE'} :param files: File name as dict, source URLs, description and labels() as properties :type files: dict, optional *Description and labels are optional* :returns: Newly created dataset key :rtype: str :raises RestApiException: If a server error occurs Examples -------- >>> import datadotworld as dw >>> api_client = dw.api_client() >>> url = 'http://www.acme.inc/example.csv' >>> api_client.create_dataset( ... 'username', title='Test dataset', visibility='PRIVATE', ... license='Public Domain', ... files={'dataset.csv':{'url': url}}) # doctest: +SKIP ] variable[request] assign[=] call[name[self].__build_dataset_obj, parameter[<ast.Lambda object at 0x7da1b04b6410>, <ast.Lambda object at 0x7da1b04b4ca0>, name[kwargs]]] <ast.Try object at 0x7da1b04b50f0>
keyword[def] identifier[create_dataset] ( identifier[self] , identifier[owner_id] ,** identifier[kwargs] ): literal[string] identifier[request] = identifier[self] . identifier[__build_dataset_obj] ( keyword[lambda] : identifier[_swagger] . identifier[DatasetCreateRequest] ( identifier[title] = identifier[kwargs] . identifier[get] ( literal[string] ), identifier[visibility] = identifier[kwargs] . identifier[get] ( literal[string] )), keyword[lambda] identifier[name] , identifier[url] , identifier[expand_archive] , identifier[description] , identifier[labels] : identifier[_swagger] . identifier[FileCreateRequest] ( identifier[name] = identifier[name] , identifier[source] = identifier[_swagger] . identifier[FileSourceCreateRequest] ( identifier[url] = identifier[url] , identifier[expand_archive] = identifier[expand_archive] ), identifier[description] = identifier[description] , identifier[labels] = identifier[labels] ), identifier[kwargs] ) keyword[try] : ( identifier[_] , identifier[_] , identifier[headers] )= identifier[self] . identifier[_datasets_api] . identifier[create_dataset_with_http_info] ( identifier[owner_id] , identifier[request] , identifier[_return_http_data_only] = keyword[False] ) keyword[if] literal[string] keyword[in] identifier[headers] : keyword[return] identifier[headers] [ literal[string] ] keyword[except] identifier[_swagger] . identifier[rest] . identifier[ApiException] keyword[as] identifier[e] : keyword[raise] identifier[RestApiError] ( identifier[cause] = identifier[e] )
def create_dataset(self, owner_id, **kwargs): """Create a new dataset :param owner_id: Username of the owner of the new dataset :type owner_id: str :param title: Dataset title (will be used to generate dataset id on creation) :type title: str :param description: Dataset description :type description: str, optional :param summary: Dataset summary markdown :type summary: str, optional :param tags: Dataset tags :type tags: list, optional :param license: Dataset license :type license: {'Public Domain', 'PDDL', 'CC-0', 'CC-BY', 'ODC-BY', 'CC-BY-SA', 'ODC-ODbL', 'CC BY-NC', 'CC BY-NC-SA', 'Other'} :param visibility: Dataset visibility :type visibility: {'OPEN', 'PRIVATE'} :param files: File name as dict, source URLs, description and labels() as properties :type files: dict, optional *Description and labels are optional* :returns: Newly created dataset key :rtype: str :raises RestApiException: If a server error occurs Examples -------- >>> import datadotworld as dw >>> api_client = dw.api_client() >>> url = 'http://www.acme.inc/example.csv' >>> api_client.create_dataset( ... 'username', title='Test dataset', visibility='PRIVATE', ... license='Public Domain', ... files={'dataset.csv':{'url': url}}) # doctest: +SKIP """ request = self.__build_dataset_obj(lambda : _swagger.DatasetCreateRequest(title=kwargs.get('title'), visibility=kwargs.get('visibility')), lambda name, url, expand_archive, description, labels: _swagger.FileCreateRequest(name=name, source=_swagger.FileSourceCreateRequest(url=url, expand_archive=expand_archive), description=description, labels=labels), kwargs) try: (_, _, headers) = self._datasets_api.create_dataset_with_http_info(owner_id, request, _return_http_data_only=False) if 'Location' in headers: return headers['Location'] # depends on [control=['if'], data=['headers']] # depends on [control=['try'], data=[]] except _swagger.rest.ApiException as e: raise RestApiError(cause=e) # depends on [control=['except'], data=['e']]
def _import_templates(force=False): """Import templates from disk into database Reads all templates from disk and adds them to the database. By default, any template that has been modified by the user will not be updated. This can however be changed by setting `force` to `True`, which causes all templates to be imported regardless of status Args: force (`bool`): Force overwrite any templates with local changes made. Default: `False` Returns: `None` """ tmplpath = os.path.join(resource_filename('cloud_inquisitor', 'data'), 'templates') disk_templates = {f: os.path.join(root, f) for root, directory, files in os.walk(tmplpath) for f in files} db_templates = {tmpl.template_name: tmpl for tmpl in db.Template.find()} for name, template_file in disk_templates.items(): with open(template_file, 'r') as f: body = f.read() disk_hash = get_hash(body) if name not in db_templates: template = Template() template.template_name = name template.template = body db.session.add(template) auditlog( event='template.import', actor='init', data={ 'template_name': name, 'template': body } ) logger.info('Imported template {}'.format(name)) else: template = db_templates[name] db_hash = get_hash(template.template) if db_hash != disk_hash: if force or not db_templates[name].is_modified: template.template = body db.session.add(template) auditlog( event='template.update', actor='init', data={ 'template_name': name, 'template_diff': diff(template.template, body) } ) logger.info('Updated template {}'.format(name)) else: logger.warning( 'Updated template available for {}. Will not import as it would' ' overwrite user edited content and force is not enabled'.format(name) )
def function[_import_templates, parameter[force]]: constant[Import templates from disk into database Reads all templates from disk and adds them to the database. By default, any template that has been modified by the user will not be updated. This can however be changed by setting `force` to `True`, which causes all templates to be imported regardless of status Args: force (`bool`): Force overwrite any templates with local changes made. Default: `False` Returns: `None` ] variable[tmplpath] assign[=] call[name[os].path.join, parameter[call[name[resource_filename], parameter[constant[cloud_inquisitor], constant[data]]], constant[templates]]] variable[disk_templates] assign[=] <ast.DictComp object at 0x7da1b20967a0> variable[db_templates] assign[=] <ast.DictComp object at 0x7da1b2095540> for taget[tuple[[<ast.Name object at 0x7da1b20957b0>, <ast.Name object at 0x7da1b2095c00>]]] in starred[call[name[disk_templates].items, parameter[]]] begin[:] with call[name[open], parameter[name[template_file], constant[r]]] begin[:] variable[body] assign[=] call[name[f].read, parameter[]] variable[disk_hash] assign[=] call[name[get_hash], parameter[name[body]]] if compare[name[name] <ast.NotIn object at 0x7da2590d7190> name[db_templates]] begin[:] variable[template] assign[=] call[name[Template], parameter[]] name[template].template_name assign[=] name[name] name[template].template assign[=] name[body] call[name[db].session.add, parameter[name[template]]] call[name[auditlog], parameter[]] call[name[logger].info, parameter[call[constant[Imported template {}].format, parameter[name[name]]]]]
keyword[def] identifier[_import_templates] ( identifier[force] = keyword[False] ): literal[string] identifier[tmplpath] = identifier[os] . identifier[path] . identifier[join] ( identifier[resource_filename] ( literal[string] , literal[string] ), literal[string] ) identifier[disk_templates] ={ identifier[f] : identifier[os] . identifier[path] . identifier[join] ( identifier[root] , identifier[f] ) keyword[for] identifier[root] , identifier[directory] , identifier[files] keyword[in] identifier[os] . identifier[walk] ( identifier[tmplpath] ) keyword[for] identifier[f] keyword[in] identifier[files] } identifier[db_templates] ={ identifier[tmpl] . identifier[template_name] : identifier[tmpl] keyword[for] identifier[tmpl] keyword[in] identifier[db] . identifier[Template] . identifier[find] ()} keyword[for] identifier[name] , identifier[template_file] keyword[in] identifier[disk_templates] . identifier[items] (): keyword[with] identifier[open] ( identifier[template_file] , literal[string] ) keyword[as] identifier[f] : identifier[body] = identifier[f] . identifier[read] () identifier[disk_hash] = identifier[get_hash] ( identifier[body] ) keyword[if] identifier[name] keyword[not] keyword[in] identifier[db_templates] : identifier[template] = identifier[Template] () identifier[template] . identifier[template_name] = identifier[name] identifier[template] . identifier[template] = identifier[body] identifier[db] . identifier[session] . identifier[add] ( identifier[template] ) identifier[auditlog] ( identifier[event] = literal[string] , identifier[actor] = literal[string] , identifier[data] ={ literal[string] : identifier[name] , literal[string] : identifier[body] } ) identifier[logger] . identifier[info] ( literal[string] . identifier[format] ( identifier[name] )) keyword[else] : identifier[template] = identifier[db_templates] [ identifier[name] ] identifier[db_hash] = identifier[get_hash] ( identifier[template] . identifier[template] ) keyword[if] identifier[db_hash] != identifier[disk_hash] : keyword[if] identifier[force] keyword[or] keyword[not] identifier[db_templates] [ identifier[name] ]. identifier[is_modified] : identifier[template] . identifier[template] = identifier[body] identifier[db] . identifier[session] . identifier[add] ( identifier[template] ) identifier[auditlog] ( identifier[event] = literal[string] , identifier[actor] = literal[string] , identifier[data] ={ literal[string] : identifier[name] , literal[string] : identifier[diff] ( identifier[template] . identifier[template] , identifier[body] ) } ) identifier[logger] . identifier[info] ( literal[string] . identifier[format] ( identifier[name] )) keyword[else] : identifier[logger] . identifier[warning] ( literal[string] literal[string] . identifier[format] ( identifier[name] ) )
def _import_templates(force=False): """Import templates from disk into database Reads all templates from disk and adds them to the database. By default, any template that has been modified by the user will not be updated. This can however be changed by setting `force` to `True`, which causes all templates to be imported regardless of status Args: force (`bool`): Force overwrite any templates with local changes made. Default: `False` Returns: `None` """ tmplpath = os.path.join(resource_filename('cloud_inquisitor', 'data'), 'templates') disk_templates = {f: os.path.join(root, f) for (root, directory, files) in os.walk(tmplpath) for f in files} db_templates = {tmpl.template_name: tmpl for tmpl in db.Template.find()} for (name, template_file) in disk_templates.items(): with open(template_file, 'r') as f: body = f.read() # depends on [control=['with'], data=['f']] disk_hash = get_hash(body) if name not in db_templates: template = Template() template.template_name = name template.template = body db.session.add(template) auditlog(event='template.import', actor='init', data={'template_name': name, 'template': body}) logger.info('Imported template {}'.format(name)) # depends on [control=['if'], data=['name']] else: template = db_templates[name] db_hash = get_hash(template.template) if db_hash != disk_hash: if force or not db_templates[name].is_modified: template.template = body db.session.add(template) auditlog(event='template.update', actor='init', data={'template_name': name, 'template_diff': diff(template.template, body)}) logger.info('Updated template {}'.format(name)) # depends on [control=['if'], data=[]] else: logger.warning('Updated template available for {}. Will not import as it would overwrite user edited content and force is not enabled'.format(name)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
def evaluate_service_changes(services, envs, repo_root, func): """ Given a dict of services, and a list of environments, apply the diff function to evaluate the differences between the target environments and the rendered templates. Sub-services (names with '.' in them) are skipped. """ for service_name, service in services.iteritems(): for env_category in service['environments']: if env_category not in get_env_categories(envs): logger.debug('Skipping not-included environment `%s` for service `%s`', env_category, service_name) continue environment = generate_test_environment_name(env_category) cf_client = get_cloudformation_client(service_name, environment) func(service_name, service, environment, cf_client, repo_root)
def function[evaluate_service_changes, parameter[services, envs, repo_root, func]]: constant[ Given a dict of services, and a list of environments, apply the diff function to evaluate the differences between the target environments and the rendered templates. Sub-services (names with '.' in them) are skipped. ] for taget[tuple[[<ast.Name object at 0x7da1b1b350c0>, <ast.Name object at 0x7da1b1b346a0>]]] in starred[call[name[services].iteritems, parameter[]]] begin[:] for taget[name[env_category]] in starred[call[name[service]][constant[environments]]] begin[:] if compare[name[env_category] <ast.NotIn object at 0x7da2590d7190> call[name[get_env_categories], parameter[name[envs]]]] begin[:] call[name[logger].debug, parameter[constant[Skipping not-included environment `%s` for service `%s`], name[env_category], name[service_name]]] continue variable[environment] assign[=] call[name[generate_test_environment_name], parameter[name[env_category]]] variable[cf_client] assign[=] call[name[get_cloudformation_client], parameter[name[service_name], name[environment]]] call[name[func], parameter[name[service_name], name[service], name[environment], name[cf_client], name[repo_root]]]
keyword[def] identifier[evaluate_service_changes] ( identifier[services] , identifier[envs] , identifier[repo_root] , identifier[func] ): literal[string] keyword[for] identifier[service_name] , identifier[service] keyword[in] identifier[services] . identifier[iteritems] (): keyword[for] identifier[env_category] keyword[in] identifier[service] [ literal[string] ]: keyword[if] identifier[env_category] keyword[not] keyword[in] identifier[get_env_categories] ( identifier[envs] ): identifier[logger] . identifier[debug] ( literal[string] , identifier[env_category] , identifier[service_name] ) keyword[continue] identifier[environment] = identifier[generate_test_environment_name] ( identifier[env_category] ) identifier[cf_client] = identifier[get_cloudformation_client] ( identifier[service_name] , identifier[environment] ) identifier[func] ( identifier[service_name] , identifier[service] , identifier[environment] , identifier[cf_client] , identifier[repo_root] )
def evaluate_service_changes(services, envs, repo_root, func): """ Given a dict of services, and a list of environments, apply the diff function to evaluate the differences between the target environments and the rendered templates. Sub-services (names with '.' in them) are skipped. """ for (service_name, service) in services.iteritems(): for env_category in service['environments']: if env_category not in get_env_categories(envs): logger.debug('Skipping not-included environment `%s` for service `%s`', env_category, service_name) continue # depends on [control=['if'], data=['env_category']] environment = generate_test_environment_name(env_category) cf_client = get_cloudformation_client(service_name, environment) func(service_name, service, environment, cf_client, repo_root) # depends on [control=['for'], data=['env_category']] # depends on [control=['for'], data=[]]
def get_function_from_bot_intent_trigger(self, event):
        """
        For the given Lex bot event, look up and return the function
        configured for the event's intent name and invocation source
        """
        intent = event.get('currentIntent')
        if intent:
            intent = intent.get('name')
            if intent:
                return self.settings.AWS_BOT_EVENT_MAPPING.get(
                    "{}:{}".format(intent, event.get('invocationSource'))
                )
def function[get_function_from_bot_intent_trigger, parameter[self, event]]: constant[ For the given event build ARN and return the configured function ] variable[intent] assign[=] call[name[event].get, parameter[constant[currentIntent]]] if name[intent] begin[:] variable[intent] assign[=] call[name[intent].get, parameter[constant[name]]] if name[intent] begin[:] return[call[name[self].settings.AWS_BOT_EVENT_MAPPING.get, parameter[call[constant[{}:{}].format, parameter[name[intent], call[name[event].get, parameter[constant[invocationSource]]]]]]]]
keyword[def] identifier[get_function_from_bot_intent_trigger] ( identifier[self] , identifier[event] ): literal[string] identifier[intent] = identifier[event] . identifier[get] ( literal[string] ) keyword[if] identifier[intent] : identifier[intent] = identifier[intent] . identifier[get] ( literal[string] ) keyword[if] identifier[intent] : keyword[return] identifier[self] . identifier[settings] . identifier[AWS_BOT_EVENT_MAPPING] . identifier[get] ( literal[string] . identifier[format] ( identifier[intent] , identifier[event] . identifier[get] ( literal[string] )) )
def get_function_from_bot_intent_trigger(self, event): """ For the given event build ARN and return the configured function """ intent = event.get('currentIntent') if intent: intent = intent.get('name') if intent: return self.settings.AWS_BOT_EVENT_MAPPING.get('{}:{}'.format(intent, event.get('invocationSource'))) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
def add( self, method=None, # method or ``Response`` url=None, body="", adding_headers=None, *args, **kwargs ): """ A basic request: >>> responses.add(responses.GET, 'http://example.com') You can also directly pass an object which implements the ``BaseResponse`` interface: >>> responses.add(Response(...)) A JSON payload: >>> responses.add( >>> method='GET', >>> url='http://example.com', >>> json={'foo': 'bar'}, >>> ) Custom headers: >>> responses.add( >>> method='GET', >>> url='http://example.com', >>> headers={'X-Header': 'foo'}, >>> ) Strict query string matching: >>> responses.add( >>> method='GET', >>> url='http://example.com?foo=bar', >>> match_querystring=True >>> ) """ if isinstance(method, BaseResponse): self._matches.append(method) return if adding_headers is not None: kwargs.setdefault("headers", adding_headers) self._matches.append(Response(method=method, url=url, body=body, **kwargs))
def function[add, parameter[self, method, url, body, adding_headers]]: constant[ A basic request: >>> responses.add(responses.GET, 'http://example.com') You can also directly pass an object which implements the ``BaseResponse`` interface: >>> responses.add(Response(...)) A JSON payload: >>> responses.add( >>> method='GET', >>> url='http://example.com', >>> json={'foo': 'bar'}, >>> ) Custom headers: >>> responses.add( >>> method='GET', >>> url='http://example.com', >>> headers={'X-Header': 'foo'}, >>> ) Strict query string matching: >>> responses.add( >>> method='GET', >>> url='http://example.com?foo=bar', >>> match_querystring=True >>> ) ] if call[name[isinstance], parameter[name[method], name[BaseResponse]]] begin[:] call[name[self]._matches.append, parameter[name[method]]] return[None] if compare[name[adding_headers] is_not constant[None]] begin[:] call[name[kwargs].setdefault, parameter[constant[headers], name[adding_headers]]] call[name[self]._matches.append, parameter[call[name[Response], parameter[]]]]
keyword[def] identifier[add] ( identifier[self] , identifier[method] = keyword[None] , identifier[url] = keyword[None] , identifier[body] = literal[string] , identifier[adding_headers] = keyword[None] , * identifier[args] , ** identifier[kwargs] ): literal[string] keyword[if] identifier[isinstance] ( identifier[method] , identifier[BaseResponse] ): identifier[self] . identifier[_matches] . identifier[append] ( identifier[method] ) keyword[return] keyword[if] identifier[adding_headers] keyword[is] keyword[not] keyword[None] : identifier[kwargs] . identifier[setdefault] ( literal[string] , identifier[adding_headers] ) identifier[self] . identifier[_matches] . identifier[append] ( identifier[Response] ( identifier[method] = identifier[method] , identifier[url] = identifier[url] , identifier[body] = identifier[body] ,** identifier[kwargs] ))
def add(self, method=None, url=None, body='', adding_headers=None, *args, **kwargs): # method or ``Response`` "\n A basic request:\n\n >>> responses.add(responses.GET, 'http://example.com')\n\n You can also directly pass an object which implements the\n ``BaseResponse`` interface:\n\n >>> responses.add(Response(...))\n\n A JSON payload:\n\n >>> responses.add(\n >>> method='GET',\n >>> url='http://example.com',\n >>> json={'foo': 'bar'},\n >>> )\n\n Custom headers:\n\n >>> responses.add(\n >>> method='GET',\n >>> url='http://example.com',\n >>> headers={'X-Header': 'foo'},\n >>> )\n\n\n Strict query string matching:\n\n >>> responses.add(\n >>> method='GET',\n >>> url='http://example.com?foo=bar',\n >>> match_querystring=True\n >>> )\n " if isinstance(method, BaseResponse): self._matches.append(method) return # depends on [control=['if'], data=[]] if adding_headers is not None: kwargs.setdefault('headers', adding_headers) # depends on [control=['if'], data=['adding_headers']] self._matches.append(Response(method=method, url=url, body=body, **kwargs))
def check_dependencies(dependencies, module): """ Ensure dependencies of a module are listed in settings.INSTALLED_APPS :dependencies string | list: list of dependencies to check :module string: string representing the path to the current app """ if type(dependencies) == str: dependencies = [dependencies] elif type(dependencies) != list: raise TypeError('dependencies argument must be of type list or string') for dependency in dependencies: if dependency not in settings.INSTALLED_APPS: raise DependencyError('%s depends on %s, which should be in settings.INSTALLED_APPS' % (module, dependency))
def function[check_dependencies, parameter[dependencies, module]]: constant[ Ensure dependencies of a module are listed in settings.INSTALLED_APPS :dependencies string | list: list of dependencies to check :module string: string representing the path to the current app ] if compare[call[name[type], parameter[name[dependencies]]] equal[==] name[str]] begin[:] variable[dependencies] assign[=] list[[<ast.Name object at 0x7da20c6e4c10>]] for taget[name[dependency]] in starred[name[dependencies]] begin[:] if compare[name[dependency] <ast.NotIn object at 0x7da2590d7190> name[settings].INSTALLED_APPS] begin[:] <ast.Raise object at 0x7da20c6e4a30>
keyword[def] identifier[check_dependencies] ( identifier[dependencies] , identifier[module] ): literal[string] keyword[if] identifier[type] ( identifier[dependencies] )== identifier[str] : identifier[dependencies] =[ identifier[dependencies] ] keyword[elif] identifier[type] ( identifier[dependencies] )!= identifier[list] : keyword[raise] identifier[TypeError] ( literal[string] ) keyword[for] identifier[dependency] keyword[in] identifier[dependencies] : keyword[if] identifier[dependency] keyword[not] keyword[in] identifier[settings] . identifier[INSTALLED_APPS] : keyword[raise] identifier[DependencyError] ( literal[string] %( identifier[module] , identifier[dependency] ))
def check_dependencies(dependencies, module): """ Ensure dependencies of a module are listed in settings.INSTALLED_APPS :dependencies string | list: list of dependencies to check :module string: string representing the path to the current app """ if type(dependencies) == str: dependencies = [dependencies] # depends on [control=['if'], data=[]] elif type(dependencies) != list: raise TypeError('dependencies argument must be of type list or string') # depends on [control=['if'], data=[]] for dependency in dependencies: if dependency not in settings.INSTALLED_APPS: raise DependencyError('%s depends on %s, which should be in settings.INSTALLED_APPS' % (module, dependency)) # depends on [control=['if'], data=['dependency']] # depends on [control=['for'], data=['dependency']]
def validate_svc_catalog_endpoint_data(self, expected, actual, openstack_release=None):
        """Validate service catalog endpoint data. Pick the correct validator
        for the OpenStack version. Expected data should be in the v2 format:
        {
            'service_name1': [
                {
                    'adminURL': adminURL,
                    'id': id,
                    'region': region,
                    'publicURL': publicURL,
                    'internalURL': internalURL
                }],
            'service_name2': [
                {
                    'adminURL': adminURL,
                    'id': id,
                    'region': region,
                    'publicURL': publicURL,
                    'internalURL': internalURL
                }],
        }
        """
        validation_function = self.validate_v2_svc_catalog_endpoint_data
        xenial_queens = OPENSTACK_RELEASES_PAIRS.index('xenial_queens')
        if openstack_release and openstack_release >= xenial_queens:
            validation_function = self.validate_v3_svc_catalog_endpoint_data
            expected = self.convert_svc_catalog_endpoint_data_to_v3(expected)
        return validation_function(expected, actual)
def function[validate_svc_catalog_endpoint_data, parameter[self, expected, actual, openstack_release]]: constant[Validate service catalog endpoint data. Pick the correct validator for the OpenStack version. Expected data should be in the v2 format: { 'service_name1': [ { 'adminURL': adminURL, 'id': id, 'region': region. 'publicURL': publicURL, 'internalURL': internalURL }], 'service_name2': [ { 'adminURL': adminURL, 'id': id, 'region': region. 'publicURL': publicURL, 'internalURL': internalURL }], } ] variable[validation_function] assign[=] name[self].validate_v2_svc_catalog_endpoint_data variable[xenial_queens] assign[=] call[name[OPENSTACK_RELEASES_PAIRS].index, parameter[constant[xenial_queens]]] if <ast.BoolOp object at 0x7da207f03fd0> begin[:] variable[validation_function] assign[=] name[self].validate_v3_svc_catalog_endpoint_data variable[expected] assign[=] call[name[self].convert_svc_catalog_endpoint_data_to_v3, parameter[name[expected]]] return[call[name[validation_function], parameter[name[expected], name[actual]]]]
keyword[def] identifier[validate_svc_catalog_endpoint_data] ( identifier[self] , identifier[expected] , identifier[actual] , identifier[openstack_release] = keyword[None] ): literal[string] identifier[validation_function] = identifier[self] . identifier[validate_v2_svc_catalog_endpoint_data] identifier[xenial_queens] = identifier[OPENSTACK_RELEASES_PAIRS] . identifier[index] ( literal[string] ) keyword[if] identifier[openstack_release] keyword[and] identifier[openstack_release] >= identifier[xenial_queens] : identifier[validation_function] = identifier[self] . identifier[validate_v3_svc_catalog_endpoint_data] identifier[expected] = identifier[self] . identifier[convert_svc_catalog_endpoint_data_to_v3] ( identifier[expected] ) keyword[return] identifier[validation_function] ( identifier[expected] , identifier[actual] )
def validate_svc_catalog_endpoint_data(self, expected, actual, openstack_release=None): """Validate service catalog endpoint data. Pick the correct validator for the OpenStack version. Expected data should be in the v2 format: { 'service_name1': [ { 'adminURL': adminURL, 'id': id, 'region': region. 'publicURL': publicURL, 'internalURL': internalURL }], 'service_name2': [ { 'adminURL': adminURL, 'id': id, 'region': region. 'publicURL': publicURL, 'internalURL': internalURL }], } """ validation_function = self.validate_v2_svc_catalog_endpoint_data xenial_queens = OPENSTACK_RELEASES_PAIRS.index('xenial_queens') if openstack_release and openstack_release >= xenial_queens: validation_function = self.validate_v3_svc_catalog_endpoint_data expected = self.convert_svc_catalog_endpoint_data_to_v3(expected) # depends on [control=['if'], data=[]] return validation_function(expected, actual)
def walk_paths(self, base: Optional[pathlib.PurePath] = pathlib.PurePath()) \ -> Iterator[pathlib.PurePath]: """ Recursively traverse all paths inside this entity, including the entity itself. :param base: The base path to prepend to the entity name. :return: An iterator of paths. """ raise NotImplementedError()
def function[walk_paths, parameter[self, base]]: constant[ Recursively traverse all paths inside this entity, including the entity itself. :param base: The base path to prepend to the entity name. :return: An iterator of paths. ] <ast.Raise object at 0x7da1b242ae00>
keyword[def] identifier[walk_paths] ( identifier[self] , identifier[base] : identifier[Optional] [ identifier[pathlib] . identifier[PurePath] ]= identifier[pathlib] . identifier[PurePath] ())-> identifier[Iterator] [ identifier[pathlib] . identifier[PurePath] ]: literal[string] keyword[raise] identifier[NotImplementedError] ()
def walk_paths(self, base: Optional[pathlib.PurePath]=pathlib.PurePath()) -> Iterator[pathlib.PurePath]: """ Recursively traverse all paths inside this entity, including the entity itself. :param base: The base path to prepend to the entity name. :return: An iterator of paths. """ raise NotImplementedError()
def run(self, node):
        """
        Captures the use of exclude or fields = '__all__' in ModelForm Meta
        """
        if not self.checker_applies(node):
            return

        issues = []
        for body in node.body:
            if not isinstance(body, ast.ClassDef):
                continue
            for element in body.body:
                if not isinstance(element, ast.Assign):
                    continue
                for target in element.targets:
                    if target.id == 'fields' and self.is_string_dunder_all(element):
                        issues.append(
                            DJ07(
                                lineno=node.lineno,
                                col=node.col_offset,
                            )
                        )
                    elif target.id == 'exclude':
                        issues.append(
                            DJ06(
                                lineno=node.lineno,
                                col=node.col_offset,
                            )
                        )
        return issues
def function[run, parameter[self, node]]: constant[ Captures the use of exclude in ModelForm Meta ] if <ast.UnaryOp object at 0x7da1b0780970> begin[:] return[None] variable[issues] assign[=] list[[]] for taget[name[body]] in starred[name[node].body] begin[:] if <ast.UnaryOp object at 0x7da18dc07100> begin[:] continue for taget[name[element]] in starred[name[body].body] begin[:] if <ast.UnaryOp object at 0x7da18dc06320> begin[:] continue for taget[name[target]] in starred[name[element].targets] begin[:] if <ast.BoolOp object at 0x7da18dc079d0> begin[:] call[name[issues].append, parameter[call[name[DJ07], parameter[]]]] return[name[issues]]
keyword[def] identifier[run] ( identifier[self] , identifier[node] ): literal[string] keyword[if] keyword[not] identifier[self] . identifier[checker_applies] ( identifier[node] ): keyword[return] identifier[issues] =[] keyword[for] identifier[body] keyword[in] identifier[node] . identifier[body] : keyword[if] keyword[not] identifier[isinstance] ( identifier[body] , identifier[ast] . identifier[ClassDef] ): keyword[continue] keyword[for] identifier[element] keyword[in] identifier[body] . identifier[body] : keyword[if] keyword[not] identifier[isinstance] ( identifier[element] , identifier[ast] . identifier[Assign] ): keyword[continue] keyword[for] identifier[target] keyword[in] identifier[element] . identifier[targets] : keyword[if] identifier[target] . identifier[id] == literal[string] keyword[and] identifier[self] . identifier[is_string_dunder_all] ( identifier[element] ): identifier[issues] . identifier[append] ( identifier[DJ07] ( identifier[lineno] = identifier[node] . identifier[lineno] , identifier[col] = identifier[node] . identifier[col_offset] , ) ) keyword[elif] identifier[target] . identifier[id] == literal[string] : identifier[issues] . identifier[append] ( identifier[DJ06] ( identifier[lineno] = identifier[node] . identifier[lineno] , identifier[col] = identifier[node] . identifier[col_offset] , ) ) keyword[return] identifier[issues]
def run(self, node): """ Captures the use of exclude in ModelForm Meta """ if not self.checker_applies(node): return # depends on [control=['if'], data=[]] issues = [] for body in node.body: if not isinstance(body, ast.ClassDef): continue # depends on [control=['if'], data=[]] for element in body.body: if not isinstance(element, ast.Assign): continue # depends on [control=['if'], data=[]] for target in element.targets: if target.id == 'fields' and self.is_string_dunder_all(element): issues.append(DJ07(lineno=node.lineno, col=node.col_offset)) # depends on [control=['if'], data=[]] elif target.id == 'exclude': issues.append(DJ06(lineno=node.lineno, col=node.col_offset)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['target']] # depends on [control=['for'], data=['element']] # depends on [control=['for'], data=['body']] return issues
def print_usage(actions):
    """Print the usage information.  (Help screen)"""
    # dict.items() returns a view on Python 3, so sort a copy via sorted()
    actions = sorted(actions.items())
    print('usage: %s <action> [<options>]' % basename(sys.argv[0]))
    print('       %s --help' % basename(sys.argv[0]))
    print()
    print('actions:')
    for name, (func, doc, arguments) in actions:
        print('  %s:' % name)
        for line in doc.splitlines():
            print('    %s' % line)
        if arguments:
            print()
        for arg, shortcut, default, argtype in arguments:
            if isinstance(default, bool):
                print('    %s' % (
                    (shortcut and '-%s, ' % shortcut or '') + '--' + arg
                ))
            else:
                print('    %-30s%-10s%s' % (
                    (shortcut and '-%s, ' % shortcut or '') + '--' + arg,
                    argtype, default
                ))
        print()
def function[print_usage, parameter[actions]]: constant[Print the usage information. (Help screen)] variable[actions] assign[=] call[name[actions].items, parameter[]] call[name[actions].sort, parameter[]] call[name[print], parameter[binary_operation[constant[usage: %s <action> [<options>]] <ast.Mod object at 0x7da2590d6920> call[name[basename], parameter[call[name[sys].argv][constant[0]]]]]]] call[name[print], parameter[binary_operation[constant[ %s --help] <ast.Mod object at 0x7da2590d6920> call[name[basename], parameter[call[name[sys].argv][constant[0]]]]]]] call[name[print], parameter[]] call[name[print], parameter[constant[actions:]]] for taget[tuple[[<ast.Name object at 0x7da18f723700>, <ast.Tuple object at 0x7da18f720310>]]] in starred[name[actions]] begin[:] call[name[print], parameter[binary_operation[constant[ %s:] <ast.Mod object at 0x7da2590d6920> name[name]]]] for taget[name[line]] in starred[call[name[doc].splitlines, parameter[]]] begin[:] call[name[print], parameter[binary_operation[constant[ %s] <ast.Mod object at 0x7da2590d6920> name[line]]]] if name[arguments] begin[:] call[name[print], parameter[]] for taget[tuple[[<ast.Name object at 0x7da18f7203d0>, <ast.Name object at 0x7da18f722320>, <ast.Name object at 0x7da18f722110>, <ast.Name object at 0x7da18f7229e0>]]] in starred[name[arguments]] begin[:] if call[name[isinstance], parameter[name[default], name[bool]]] begin[:] call[name[print], parameter[binary_operation[constant[ %s] <ast.Mod object at 0x7da2590d6920> binary_operation[binary_operation[<ast.BoolOp object at 0x7da1b11129b0> + constant[--]] + name[arg]]]]] call[name[print], parameter[]]
keyword[def] identifier[print_usage] ( identifier[actions] ): literal[string] identifier[actions] = identifier[actions] . identifier[items] () identifier[actions] . identifier[sort] () identifier[print] ( literal[string] % identifier[basename] ( identifier[sys] . identifier[argv] [ literal[int] ])) identifier[print] ( literal[string] % identifier[basename] ( identifier[sys] . identifier[argv] [ literal[int] ])) identifier[print] () identifier[print] ( literal[string] ) keyword[for] identifier[name] ,( identifier[func] , identifier[doc] , identifier[arguments] ) keyword[in] identifier[actions] : identifier[print] ( literal[string] % identifier[name] ) keyword[for] identifier[line] keyword[in] identifier[doc] . identifier[splitlines] (): identifier[print] ( literal[string] % identifier[line] ) keyword[if] identifier[arguments] : identifier[print] () keyword[for] identifier[arg] , identifier[shortcut] , identifier[default] , identifier[argtype] keyword[in] identifier[arguments] : keyword[if] identifier[isinstance] ( identifier[default] , identifier[bool] ): identifier[print] ( literal[string] %( ( identifier[shortcut] keyword[and] literal[string] % identifier[shortcut] keyword[or] literal[string] )+ literal[string] + identifier[arg] )) keyword[else] : identifier[print] ( literal[string] %( ( identifier[shortcut] keyword[and] literal[string] % identifier[shortcut] keyword[or] literal[string] )+ literal[string] + identifier[arg] , identifier[argtype] , identifier[default] )) identifier[print] ()
def print_usage(actions): """Print the usage information. (Help screen)""" actions = actions.items() actions.sort() print('usage: %s <action> [<options>]' % basename(sys.argv[0])) print(' %s --help' % basename(sys.argv[0])) print() print('actions:') for (name, (func, doc, arguments)) in actions: print(' %s:' % name) for line in doc.splitlines(): print(' %s' % line) # depends on [control=['for'], data=['line']] if arguments: print() # depends on [control=['if'], data=[]] for (arg, shortcut, default, argtype) in arguments: if isinstance(default, bool): print(' %s' % ((shortcut and '-%s, ' % shortcut or '') + '--' + arg)) # depends on [control=['if'], data=[]] else: print(' %-30s%-10s%s' % ((shortcut and '-%s, ' % shortcut or '') + '--' + arg, argtype, default)) # depends on [control=['for'], data=[]] print() # depends on [control=['for'], data=[]]
def _symbol_bottom_simple(x, model_hparams, vocab_size, name, reuse): """Bottom transformation for symbols.""" with tf.variable_scope(name, reuse=reuse): # Ensure the inputs are 3-D if len(x.get_shape()) == 4: x = tf.squeeze(x, axis=3) while len(x.get_shape()) < 3: x = tf.expand_dims(x, axis=-1) var = get_weights(model_hparams, vocab_size) x = common_layers.dropout_no_scaling( x, 1.0 - model_hparams.symbol_dropout) ret = common_layers.gather(var, x) if model_hparams.multiply_embedding_mode == "sqrt_depth": ret *= model_hparams.hidden_size**0.5 ret *= tf.expand_dims( common_layers.cast_like(tf.not_equal(x, 0), ret), -1) return ret
def function[_symbol_bottom_simple, parameter[x, model_hparams, vocab_size, name, reuse]]: constant[Bottom transformation for symbols.] with call[name[tf].variable_scope, parameter[name[name]]] begin[:] if compare[call[name[len], parameter[call[name[x].get_shape, parameter[]]]] equal[==] constant[4]] begin[:] variable[x] assign[=] call[name[tf].squeeze, parameter[name[x]]] while compare[call[name[len], parameter[call[name[x].get_shape, parameter[]]]] less[<] constant[3]] begin[:] variable[x] assign[=] call[name[tf].expand_dims, parameter[name[x]]] variable[var] assign[=] call[name[get_weights], parameter[name[model_hparams], name[vocab_size]]] variable[x] assign[=] call[name[common_layers].dropout_no_scaling, parameter[name[x], binary_operation[constant[1.0] - name[model_hparams].symbol_dropout]]] variable[ret] assign[=] call[name[common_layers].gather, parameter[name[var], name[x]]] if compare[name[model_hparams].multiply_embedding_mode equal[==] constant[sqrt_depth]] begin[:] <ast.AugAssign object at 0x7da204622380> <ast.AugAssign object at 0x7da204622b90> return[name[ret]]
keyword[def] identifier[_symbol_bottom_simple] ( identifier[x] , identifier[model_hparams] , identifier[vocab_size] , identifier[name] , identifier[reuse] ): literal[string] keyword[with] identifier[tf] . identifier[variable_scope] ( identifier[name] , identifier[reuse] = identifier[reuse] ): keyword[if] identifier[len] ( identifier[x] . identifier[get_shape] ())== literal[int] : identifier[x] = identifier[tf] . identifier[squeeze] ( identifier[x] , identifier[axis] = literal[int] ) keyword[while] identifier[len] ( identifier[x] . identifier[get_shape] ())< literal[int] : identifier[x] = identifier[tf] . identifier[expand_dims] ( identifier[x] , identifier[axis] =- literal[int] ) identifier[var] = identifier[get_weights] ( identifier[model_hparams] , identifier[vocab_size] ) identifier[x] = identifier[common_layers] . identifier[dropout_no_scaling] ( identifier[x] , literal[int] - identifier[model_hparams] . identifier[symbol_dropout] ) identifier[ret] = identifier[common_layers] . identifier[gather] ( identifier[var] , identifier[x] ) keyword[if] identifier[model_hparams] . identifier[multiply_embedding_mode] == literal[string] : identifier[ret] *= identifier[model_hparams] . identifier[hidden_size] ** literal[int] identifier[ret] *= identifier[tf] . identifier[expand_dims] ( identifier[common_layers] . identifier[cast_like] ( identifier[tf] . identifier[not_equal] ( identifier[x] , literal[int] ), identifier[ret] ),- literal[int] ) keyword[return] identifier[ret]
def _symbol_bottom_simple(x, model_hparams, vocab_size, name, reuse): """Bottom transformation for symbols.""" with tf.variable_scope(name, reuse=reuse): # Ensure the inputs are 3-D if len(x.get_shape()) == 4: x = tf.squeeze(x, axis=3) # depends on [control=['if'], data=[]] while len(x.get_shape()) < 3: x = tf.expand_dims(x, axis=-1) # depends on [control=['while'], data=[]] var = get_weights(model_hparams, vocab_size) x = common_layers.dropout_no_scaling(x, 1.0 - model_hparams.symbol_dropout) ret = common_layers.gather(var, x) if model_hparams.multiply_embedding_mode == 'sqrt_depth': ret *= model_hparams.hidden_size ** 0.5 # depends on [control=['if'], data=[]] ret *= tf.expand_dims(common_layers.cast_like(tf.not_equal(x, 0), ret), -1) return ret # depends on [control=['with'], data=[]]
def get_constituents(self, index_ticker, date=None, only_list=False): """ Get a list of all constituents of a given index. index_ticker - Datastream ticker for index date - date for which list should be retrieved (if None then list of present constituents is retrieved) only_list - request only list of symbols. By default the method retrieves many extra fields with information (various mnemonics and codes). This might pose some problems for large indices like Russel-3000. If only_list=True, then only the list of symbols and names are retrieved. """ if date is not None: str_date = pd.to_datetime(date).strftime('%m%y') else: str_date = '' # Note: ~XREF is equal to the following large request # ~REP~=DSCD,EXMNEM,GEOG,GEOGC,IBTKR,INDC,INDG,INDM,INDX,INDXEG,INDXFS,INDXL, # INDXS,ISIN,ISINID,LOC,MNEM,NAME,SECD,TYPE fields = '~REP~=NAME' if only_list else '~XREF' query = 'L' + index_ticker + str_date + fields raw = self.request(query) res, metadata = self.parse_record_static(raw) return res
def function[get_constituents, parameter[self, index_ticker, date, only_list]]: constant[ Get a list of all constituents of a given index. index_ticker - Datastream ticker for index date - date for which list should be retrieved (if None then list of present constituents is retrieved) only_list - request only list of symbols. By default the method retrieves many extra fields with information (various mnemonics and codes). This might pose some problems for large indices like Russel-3000. If only_list=True, then only the list of symbols and names are retrieved. ] if compare[name[date] is_not constant[None]] begin[:] variable[str_date] assign[=] call[call[name[pd].to_datetime, parameter[name[date]]].strftime, parameter[constant[%m%y]]] variable[fields] assign[=] <ast.IfExp object at 0x7da1b0d8a500> variable[query] assign[=] binary_operation[binary_operation[binary_operation[constant[L] + name[index_ticker]] + name[str_date]] + name[fields]] variable[raw] assign[=] call[name[self].request, parameter[name[query]]] <ast.Tuple object at 0x7da1b0d88e80> assign[=] call[name[self].parse_record_static, parameter[name[raw]]] return[name[res]]
keyword[def] identifier[get_constituents] ( identifier[self] , identifier[index_ticker] , identifier[date] = keyword[None] , identifier[only_list] = keyword[False] ): literal[string] keyword[if] identifier[date] keyword[is] keyword[not] keyword[None] : identifier[str_date] = identifier[pd] . identifier[to_datetime] ( identifier[date] ). identifier[strftime] ( literal[string] ) keyword[else] : identifier[str_date] = literal[string] identifier[fields] = literal[string] keyword[if] identifier[only_list] keyword[else] literal[string] identifier[query] = literal[string] + identifier[index_ticker] + identifier[str_date] + identifier[fields] identifier[raw] = identifier[self] . identifier[request] ( identifier[query] ) identifier[res] , identifier[metadata] = identifier[self] . identifier[parse_record_static] ( identifier[raw] ) keyword[return] identifier[res]
def get_constituents(self, index_ticker, date=None, only_list=False): """ Get a list of all constituents of a given index. index_ticker - Datastream ticker for index date - date for which list should be retrieved (if None then list of present constituents is retrieved) only_list - request only list of symbols. By default the method retrieves many extra fields with information (various mnemonics and codes). This might pose some problems for large indices like Russel-3000. If only_list=True, then only the list of symbols and names are retrieved. """ if date is not None: str_date = pd.to_datetime(date).strftime('%m%y') # depends on [control=['if'], data=['date']] else: str_date = '' # Note: ~XREF is equal to the following large request # ~REP~=DSCD,EXMNEM,GEOG,GEOGC,IBTKR,INDC,INDG,INDM,INDX,INDXEG,INDXFS,INDXL, # INDXS,ISIN,ISINID,LOC,MNEM,NAME,SECD,TYPE fields = '~REP~=NAME' if only_list else '~XREF' query = 'L' + index_ticker + str_date + fields raw = self.request(query) (res, metadata) = self.parse_record_static(raw) return res
def rouge_2_fscore(predictions, labels, **unused_kwargs): """ROUGE-2 F1 score computation between labels and predictions. This is an approximate ROUGE scoring method since we do not glue word pieces or decode the ids and tokenize the output. Args: predictions: tensor, model predictions labels: tensor, gold output. Returns: rouge2_fscore: approx rouge-2 f1 score. """ outputs = tf.to_int32(tf.argmax(predictions, axis=-1)) # Convert the outputs and labels to a [batch_size, input_length] tensor. outputs = tf.squeeze(outputs, axis=[-1, -2]) labels = tf.squeeze(labels, axis=[-1, -2]) rouge_2_f_score = tf.py_func(rouge_n, (outputs, labels), tf.float32) return rouge_2_f_score, tf.constant(1.0)
def function[rouge_2_fscore, parameter[predictions, labels]]: constant[ROUGE-2 F1 score computation between labels and predictions. This is an approximate ROUGE scoring method since we do not glue word pieces or decode the ids and tokenize the output. Args: predictions: tensor, model predictions labels: tensor, gold output. Returns: rouge2_fscore: approx rouge-2 f1 score. ] variable[outputs] assign[=] call[name[tf].to_int32, parameter[call[name[tf].argmax, parameter[name[predictions]]]]] variable[outputs] assign[=] call[name[tf].squeeze, parameter[name[outputs]]] variable[labels] assign[=] call[name[tf].squeeze, parameter[name[labels]]] variable[rouge_2_f_score] assign[=] call[name[tf].py_func, parameter[name[rouge_n], tuple[[<ast.Name object at 0x7da20c6e52a0>, <ast.Name object at 0x7da20c6e7df0>]], name[tf].float32]] return[tuple[[<ast.Name object at 0x7da20c6e6860>, <ast.Call object at 0x7da20c6e46a0>]]]
keyword[def] identifier[rouge_2_fscore] ( identifier[predictions] , identifier[labels] ,** identifier[unused_kwargs] ): literal[string] identifier[outputs] = identifier[tf] . identifier[to_int32] ( identifier[tf] . identifier[argmax] ( identifier[predictions] , identifier[axis] =- literal[int] )) identifier[outputs] = identifier[tf] . identifier[squeeze] ( identifier[outputs] , identifier[axis] =[- literal[int] ,- literal[int] ]) identifier[labels] = identifier[tf] . identifier[squeeze] ( identifier[labels] , identifier[axis] =[- literal[int] ,- literal[int] ]) identifier[rouge_2_f_score] = identifier[tf] . identifier[py_func] ( identifier[rouge_n] ,( identifier[outputs] , identifier[labels] ), identifier[tf] . identifier[float32] ) keyword[return] identifier[rouge_2_f_score] , identifier[tf] . identifier[constant] ( literal[int] )
def rouge_2_fscore(predictions, labels, **unused_kwargs): """ROUGE-2 F1 score computation between labels and predictions. This is an approximate ROUGE scoring method since we do not glue word pieces or decode the ids and tokenize the output. Args: predictions: tensor, model predictions labels: tensor, gold output. Returns: rouge2_fscore: approx rouge-2 f1 score. """ outputs = tf.to_int32(tf.argmax(predictions, axis=-1)) # Convert the outputs and labels to a [batch_size, input_length] tensor. outputs = tf.squeeze(outputs, axis=[-1, -2]) labels = tf.squeeze(labels, axis=[-1, -2]) rouge_2_f_score = tf.py_func(rouge_n, (outputs, labels), tf.float32) return (rouge_2_f_score, tf.constant(1.0))
def asyncWrite(self): """ A function to write files asynchronously to the job store such that subsequent jobs are not delayed by a long write operation. """ try: while True: try: # Block for up to two seconds waiting for a file args = self.queue.get(timeout=2) except Empty: # Check if termination event is signaled # (set in the event of an exception in the worker) if self._terminateEvent.isSet(): raise RuntimeError("The termination flag is set, exiting") continue # Normal termination condition is getting None from queue if args is None: break inputFileHandle, jobStoreFileID = args cachedFileName = self.encodedFileID(jobStoreFileID) # Ensure that the harbinger exists in the cache directory and that the PID # matches that of this writing thread. # If asyncWrite is ported to subprocesses instead of threads in the future, # insert logic here to securely overwrite the harbinger file. harbingerFile = self.HarbingerFile(self, cachedFileName=cachedFileName) assert harbingerFile.exists() assert harbingerFile.read() == int(os.getpid()) # We pass in a fileHandle, rather than the file-name, in case # the file itself is deleted. The fileHandle itself should persist # while we maintain the open file handle with self.jobStore.updateFileStream(jobStoreFileID) as outputFileHandle: shutil.copyfileobj(inputFileHandle, outputFileHandle) inputFileHandle.close() # Remove the file from the lock files with self._pendingFileWritesLock: self._pendingFileWrites.remove(jobStoreFileID) # Remove the harbinger file harbingerFile.delete() except: self._terminateEvent.set() raise
def function[asyncWrite, parameter[self]]: constant[ A function to write files asynchronously to the job store such that subsequent jobs are not delayed by a long write operation. ] <ast.Try object at 0x7da20c6a9090>
keyword[def] identifier[asyncWrite] ( identifier[self] ): literal[string] keyword[try] : keyword[while] keyword[True] : keyword[try] : identifier[args] = identifier[self] . identifier[queue] . identifier[get] ( identifier[timeout] = literal[int] ) keyword[except] identifier[Empty] : keyword[if] identifier[self] . identifier[_terminateEvent] . identifier[isSet] (): keyword[raise] identifier[RuntimeError] ( literal[string] ) keyword[continue] keyword[if] identifier[args] keyword[is] keyword[None] : keyword[break] identifier[inputFileHandle] , identifier[jobStoreFileID] = identifier[args] identifier[cachedFileName] = identifier[self] . identifier[encodedFileID] ( identifier[jobStoreFileID] ) identifier[harbingerFile] = identifier[self] . identifier[HarbingerFile] ( identifier[self] , identifier[cachedFileName] = identifier[cachedFileName] ) keyword[assert] identifier[harbingerFile] . identifier[exists] () keyword[assert] identifier[harbingerFile] . identifier[read] ()== identifier[int] ( identifier[os] . identifier[getpid] ()) keyword[with] identifier[self] . identifier[jobStore] . identifier[updateFileStream] ( identifier[jobStoreFileID] ) keyword[as] identifier[outputFileHandle] : identifier[shutil] . identifier[copyfileobj] ( identifier[inputFileHandle] , identifier[outputFileHandle] ) identifier[inputFileHandle] . identifier[close] () keyword[with] identifier[self] . identifier[_pendingFileWritesLock] : identifier[self] . identifier[_pendingFileWrites] . identifier[remove] ( identifier[jobStoreFileID] ) identifier[harbingerFile] . identifier[delete] () keyword[except] : identifier[self] . identifier[_terminateEvent] . identifier[set] () keyword[raise]
def asyncWrite(self): """ A function to write files asynchronously to the job store such that subsequent jobs are not delayed by a long write operation. """ try: while True: try: # Block for up to two seconds waiting for a file args = self.queue.get(timeout=2) # depends on [control=['try'], data=[]] except Empty: # Check if termination event is signaled # (set in the event of an exception in the worker) if self._terminateEvent.isSet(): raise RuntimeError('The termination flag is set, exiting') # depends on [control=['if'], data=[]] continue # depends on [control=['except'], data=[]] # Normal termination condition is getting None from queue if args is None: break # depends on [control=['if'], data=[]] (inputFileHandle, jobStoreFileID) = args cachedFileName = self.encodedFileID(jobStoreFileID) # Ensure that the harbinger exists in the cache directory and that the PID # matches that of this writing thread. # If asyncWrite is ported to subprocesses instead of threads in the future, # insert logic here to securely overwrite the harbinger file. harbingerFile = self.HarbingerFile(self, cachedFileName=cachedFileName) assert harbingerFile.exists() assert harbingerFile.read() == int(os.getpid()) # We pass in a fileHandle, rather than the file-name, in case # the file itself is deleted. The fileHandle itself should persist # while we maintain the open file handle with self.jobStore.updateFileStream(jobStoreFileID) as outputFileHandle: shutil.copyfileobj(inputFileHandle, outputFileHandle) # depends on [control=['with'], data=['outputFileHandle']] inputFileHandle.close() # Remove the file from the lock files with self._pendingFileWritesLock: self._pendingFileWrites.remove(jobStoreFileID) # depends on [control=['with'], data=[]] # Remove the harbinger file harbingerFile.delete() # depends on [control=['while'], data=[]] # depends on [control=['try'], data=[]] except: self._terminateEvent.set() raise # depends on [control=['except'], data=[]]
def gru(name, input, state, kernel_r, kernel_u, kernel_c, bias_r, bias_u, bias_c, new_state, number_of_gates = 2): ''' - zt = f(Xt*Wz + Ht_1*Rz + Wbz + Rbz) - rt = f(Xt*Wr + Ht_1*Rr + Wbr + Rbr) - ht = g(Xt*Wh + (rt . Ht_1)*Rh + Rbh + Wbh) - Ht = (1-zt).ht + zt.Ht_1 ''' nn = Build(name) inputs = nn.concat(input, state) u = nn.sigmoid(nn.mad(inputs, kernel_u, bias_u)) r = nn.sigmoid(nn.mad(inputs, kernel_r, bias_r)) r_state = nn.mul(r, state) c = nn.tanh(nn.mad(kernel=kernel_c, bias=bias_c, x=nn.concat(input, r_state))) # new_h = u' * state + (1 - u') * c' # = u' * state + c' - u' * c' # u' * state + c' nn.add(nn.mul(u, state), c) # - u' * c' nn.sub(nn._, nn.mul(u, c), out=new_state) return nn.layers;
def function[gru, parameter[name, input, state, kernel_r, kernel_u, kernel_c, bias_r, bias_u, bias_c, new_state, number_of_gates]]: constant[ - zt = f(Xt*Wz + Ht_1*Rz + Wbz + Rbz) - rt = f(Xt*Wr + Ht_1*Rr + Wbr + Rbr) - ht = g(Xt*Wh + (rt . Ht_1)*Rh + Rbh + Wbh) - Ht = (1-zt).ht + zt.Ht_1 ] variable[nn] assign[=] call[name[Build], parameter[name[name]]] variable[inputs] assign[=] call[name[nn].concat, parameter[name[input], name[state]]] variable[u] assign[=] call[name[nn].sigmoid, parameter[call[name[nn].mad, parameter[name[inputs], name[kernel_u], name[bias_u]]]]] variable[r] assign[=] call[name[nn].sigmoid, parameter[call[name[nn].mad, parameter[name[inputs], name[kernel_r], name[bias_r]]]]] variable[r_state] assign[=] call[name[nn].mul, parameter[name[r], name[state]]] variable[c] assign[=] call[name[nn].tanh, parameter[call[name[nn].mad, parameter[]]]] call[name[nn].add, parameter[call[name[nn].mul, parameter[name[u], name[state]]], name[c]]] call[name[nn].sub, parameter[name[nn]._, call[name[nn].mul, parameter[name[u], name[c]]]]] return[name[nn].layers]
keyword[def] identifier[gru] ( identifier[name] , identifier[input] , identifier[state] , identifier[kernel_r] , identifier[kernel_u] , identifier[kernel_c] , identifier[bias_r] , identifier[bias_u] , identifier[bias_c] , identifier[new_state] , identifier[number_of_gates] = literal[int] ): literal[string] identifier[nn] = identifier[Build] ( identifier[name] ) identifier[inputs] = identifier[nn] . identifier[concat] ( identifier[input] , identifier[state] ) identifier[u] = identifier[nn] . identifier[sigmoid] ( identifier[nn] . identifier[mad] ( identifier[inputs] , identifier[kernel_u] , identifier[bias_u] )) identifier[r] = identifier[nn] . identifier[sigmoid] ( identifier[nn] . identifier[mad] ( identifier[inputs] , identifier[kernel_r] , identifier[bias_r] )) identifier[r_state] = identifier[nn] . identifier[mul] ( identifier[r] , identifier[state] ) identifier[c] = identifier[nn] . identifier[tanh] ( identifier[nn] . identifier[mad] ( identifier[kernel] = identifier[kernel_c] , identifier[bias] = identifier[bias_c] , identifier[x] = identifier[nn] . identifier[concat] ( identifier[input] , identifier[r_state] ))) identifier[nn] . identifier[add] ( identifier[nn] . identifier[mul] ( identifier[u] , identifier[state] ), identifier[c] ) identifier[nn] . identifier[sub] ( identifier[nn] . identifier[_] , identifier[nn] . identifier[mul] ( identifier[u] , identifier[c] ), identifier[out] = identifier[new_state] ) keyword[return] identifier[nn] . identifier[layers] ;
def gru(name, input, state, kernel_r, kernel_u, kernel_c, bias_r, bias_u, bias_c, new_state, number_of_gates=2): """ - zt = f(Xt*Wz + Ht_1*Rz + Wbz + Rbz) - rt = f(Xt*Wr + Ht_1*Rr + Wbr + Rbr) - ht = g(Xt*Wh + (rt . Ht_1)*Rh + Rbh + Wbh) - Ht = (1-zt).ht + zt.Ht_1 """ nn = Build(name) inputs = nn.concat(input, state) u = nn.sigmoid(nn.mad(inputs, kernel_u, bias_u)) r = nn.sigmoid(nn.mad(inputs, kernel_r, bias_r)) r_state = nn.mul(r, state) c = nn.tanh(nn.mad(kernel=kernel_c, bias=bias_c, x=nn.concat(input, r_state))) # new_h = u' * state + (1 - u') * c' # = u' * state + c' - u' * c' # u' * state + c' nn.add(nn.mul(u, state), c) # - u' * c' nn.sub(nn._, nn.mul(u, c), out=new_state) return nn.layers
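As a reading aid for the gate equations quoted in the docstring above, here is a minimal NumPy sketch of one GRU step; the weight names mirror the docstring symbols and are purely illustrative, since the builder above emits graph layers (with fused kernels over the concatenated input and state) rather than computing arrays.

import numpy as np

def sigmoid(v):
    return 1.0 / (1.0 + np.exp(-v))

def gru_step(x, h_prev, Wz, Rz, bz, Wr, Rr, br, Wh, Rh, bh):
    z = sigmoid(x @ Wz + h_prev @ Rz + bz)              # zt: update gate
    r = sigmoid(x @ Wr + h_prev @ Rr + br)              # rt: reset gate
    h_cand = np.tanh(x @ Wh + (r * h_prev) @ Rh + bh)   # ht: candidate state
    return (1.0 - z) * h_cand + z * h_prev              # Ht = (1-zt).ht + zt.Ht_1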
def configure_app(**kwargs): """Builds up the settings using the same method as logan""" sys_args = sys.argv args, command, command_args = parse_args(sys_args[1:]) parser = OptionParser() parser.add_option('--config', metavar='CONFIG') (options, logan_args) = parser.parse_args(args) config_path = options.config logan_configure(config_path=config_path, **kwargs)
def function[configure_app, parameter[]]: constant[Builds up the settings using the same method as logan] variable[sys_args] assign[=] name[sys].argv <ast.Tuple object at 0x7da20c993460> assign[=] call[name[parse_args], parameter[call[name[sys_args]][<ast.Slice object at 0x7da20c990e50>]]] variable[parser] assign[=] call[name[OptionParser], parameter[]] call[name[parser].add_option, parameter[constant[--config]]] <ast.Tuple object at 0x7da20c991750> assign[=] call[name[parser].parse_args, parameter[name[args]]] variable[config_path] assign[=] name[options].config call[name[logan_configure], parameter[]]
keyword[def] identifier[configure_app] (** identifier[kwargs] ): literal[string] identifier[sys_args] = identifier[sys] . identifier[argv] identifier[args] , identifier[command] , identifier[command_args] = identifier[parse_args] ( identifier[sys_args] [ literal[int] :]) identifier[parser] = identifier[OptionParser] () identifier[parser] . identifier[add_option] ( literal[string] , identifier[metavar] = literal[string] ) ( identifier[options] , identifier[logan_args] )= identifier[parser] . identifier[parse_args] ( identifier[args] ) identifier[config_path] = identifier[options] . identifier[config] identifier[logan_configure] ( identifier[config_path] = identifier[config_path] ,** identifier[kwargs] )
def configure_app(**kwargs): """Builds up the settings using the same method as logan""" sys_args = sys.argv (args, command, command_args) = parse_args(sys_args[1:]) parser = OptionParser() parser.add_option('--config', metavar='CONFIG') (options, logan_args) = parser.parse_args(args) config_path = options.config logan_configure(config_path=config_path, **kwargs)
def isInstalledBuild(self): """ Determines if the Engine is an Installed Build """ sentinelFile = os.path.join(self.getEngineRoot(), 'Engine', 'Build', 'InstalledBuild.txt') return os.path.exists(sentinelFile)
def function[isInstalledBuild, parameter[self]]: constant[ Determines if the Engine is an Installed Build ] variable[sentinelFile] assign[=] call[name[os].path.join, parameter[call[name[self].getEngineRoot, parameter[]], constant[Engine], constant[Build], constant[InstalledBuild.txt]]] return[call[name[os].path.exists, parameter[name[sentinelFile]]]]
keyword[def] identifier[isInstalledBuild] ( identifier[self] ): literal[string] identifier[sentinelFile] = identifier[os] . identifier[path] . identifier[join] ( identifier[self] . identifier[getEngineRoot] (), literal[string] , literal[string] , literal[string] ) keyword[return] identifier[os] . identifier[path] . identifier[exists] ( identifier[sentinelFile] )
def isInstalledBuild(self): """ Determines if the Engine is an Installed Build """ sentinelFile = os.path.join(self.getEngineRoot(), 'Engine', 'Build', 'InstalledBuild.txt') return os.path.exists(sentinelFile)
def deploy_file(file_path, bucket): """ Uploads a file to an S3 bucket, as a public file. """ # Paths look like: # index.html # css/bootstrap.min.css logger.info("Deploying {0}".format(file_path)) # Upload the actual file to file_path k = Key(bucket) k.key = file_path try: k.set_contents_from_filename(file_path) k.set_acl('public-read') except socket.error: logger.warning("Caught socket.error while trying to upload {0}".format( file_path)) msg = "Please file an issue with alotofeffort if you see this," logger.warning(msg) logger.warning("providing as much info as you can.")
def function[deploy_file, parameter[file_path, bucket]]: constant[ Uploads a file to an S3 bucket, as a public file. ] call[name[logger].info, parameter[call[constant[Deploying {0}].format, parameter[name[file_path]]]]] variable[k] assign[=] call[name[Key], parameter[name[bucket]]] name[k].key assign[=] name[file_path] <ast.Try object at 0x7da18bcca8f0>
keyword[def] identifier[deploy_file] ( identifier[file_path] , identifier[bucket] ): literal[string] identifier[logger] . identifier[info] ( literal[string] . identifier[format] ( identifier[file_path] )) identifier[k] = identifier[Key] ( identifier[bucket] ) identifier[k] . identifier[key] = identifier[file_path] keyword[try] : identifier[k] . identifier[set_contents_from_filename] ( identifier[file_path] ) identifier[k] . identifier[set_acl] ( literal[string] ) keyword[except] identifier[socket] . identifier[error] : identifier[logger] . identifier[warning] ( literal[string] . identifier[format] ( identifier[file_path] )) identifier[msg] = literal[string] identifier[logger] . identifier[warning] ( identifier[msg] ) identifier[logger] . identifier[warning] ( literal[string] )
def deploy_file(file_path, bucket): """ Uploads a file to an S3 bucket, as a public file. """ # Paths look like: # index.html # css/bootstrap.min.css logger.info('Deploying {0}'.format(file_path)) # Upload the actual file to file_path k = Key(bucket) k.key = file_path try: k.set_contents_from_filename(file_path) k.set_acl('public-read') # depends on [control=['try'], data=[]] except socket.error: logger.warning('Caught socket.error while trying to upload {0}'.format(file_path)) msg = 'Please file an issue with alotofeffort if you see this,' logger.warning(msg) logger.warning('providing as much info as you can.') # depends on [control=['except'], data=[]]
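A minimal usage sketch, assuming boto 2.x credentials are already configured and the bucket exists (the bucket name and file are hypothetical):

import boto  # deploy_file() also expects boto's Key and a module-level logger to be in scope

conn = boto.connect_s3()                    # picks up credentials from the environment/boto config
bucket = conn.get_bucket('my-static-site')  # hypothetical bucket name
deploy_file('index.html', bucket)           # uploads ./index.html as a public-read object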
def approveproposal(self, proposal_ids, account=None, approver=None, **kwargs): """ Approve Proposal :param list proposal_id: Ids of the proposals :param str appprover: The account or key to use for approval (defaults to ``account``) :param str account: (optional) the account to allow access to (defaults to ``default_account``) """ from .proposal import Proposal if not account: if "default_account" in self.config: account = self.config["default_account"] if not account: raise ValueError("You need to provide an account") account = Account(account, blockchain_instance=self) is_key = approver and approver[:3] == self.prefix if not approver and not is_key: approver = account elif approver and not is_key: approver = Account(approver, blockchain_instance=self) else: approver = PublicKey(approver) if not isinstance(proposal_ids, (list, set, tuple)): proposal_ids = {proposal_ids} op = [] for proposal_id in proposal_ids: proposal = Proposal(proposal_id, blockchain_instance=self) update_dict = { "fee": {"amount": 0, "asset_id": "1.3.0"}, "fee_paying_account": account["id"], "proposal": proposal["id"], "prefix": self.prefix, } if is_key: update_dict.update({"key_approvals_to_add": [str(approver)]}) else: update_dict.update({"active_approvals_to_add": [approver["id"]]}) op.append(operations.Proposal_update(**update_dict)) if is_key: self.txbuffer.appendSigner(approver, "active") return self.finalizeOp(op, account["name"], "active", **kwargs) return self.finalizeOp(op, approver, "active", **kwargs)
def function[approveproposal, parameter[self, proposal_ids, account, approver]]: constant[ Approve Proposal :param list proposal_id: Ids of the proposals :param str appprover: The account or key to use for approval (defaults to ``account``) :param str account: (optional) the account to allow access to (defaults to ``default_account``) ] from relative_module[proposal] import module[Proposal] if <ast.UnaryOp object at 0x7da20c6c48b0> begin[:] if compare[constant[default_account] in name[self].config] begin[:] variable[account] assign[=] call[name[self].config][constant[default_account]] if <ast.UnaryOp object at 0x7da20c6c53f0> begin[:] <ast.Raise object at 0x7da20c6c6a70> variable[account] assign[=] call[name[Account], parameter[name[account]]] variable[is_key] assign[=] <ast.BoolOp object at 0x7da1b08a3b50> if <ast.BoolOp object at 0x7da1b08a1180> begin[:] variable[approver] assign[=] name[account] if <ast.UnaryOp object at 0x7da1b08a0c10> begin[:] variable[proposal_ids] assign[=] <ast.Set object at 0x7da1b08a23b0> variable[op] assign[=] list[[]] for taget[name[proposal_id]] in starred[name[proposal_ids]] begin[:] variable[proposal] assign[=] call[name[Proposal], parameter[name[proposal_id]]] variable[update_dict] assign[=] dictionary[[<ast.Constant object at 0x7da1b086fb20>, <ast.Constant object at 0x7da1b086ded0>, <ast.Constant object at 0x7da1b086e020>, <ast.Constant object at 0x7da1b086c220>], [<ast.Dict object at 0x7da1b086d450>, <ast.Subscript object at 0x7da1b086f790>, <ast.Subscript object at 0x7da1b086dcc0>, <ast.Attribute object at 0x7da1b086fd90>]] if name[is_key] begin[:] call[name[update_dict].update, parameter[dictionary[[<ast.Constant object at 0x7da1b086c7f0>], [<ast.List object at 0x7da1b086ebc0>]]]] call[name[op].append, parameter[call[name[operations].Proposal_update, parameter[]]]] if name[is_key] begin[:] call[name[self].txbuffer.appendSigner, parameter[name[approver], constant[active]]] return[call[name[self].finalizeOp, parameter[name[op], call[name[account]][constant[name]], constant[active]]]] return[call[name[self].finalizeOp, parameter[name[op], name[approver], constant[active]]]]
keyword[def] identifier[approveproposal] ( identifier[self] , identifier[proposal_ids] , identifier[account] = keyword[None] , identifier[approver] = keyword[None] ,** identifier[kwargs] ): literal[string] keyword[from] . identifier[proposal] keyword[import] identifier[Proposal] keyword[if] keyword[not] identifier[account] : keyword[if] literal[string] keyword[in] identifier[self] . identifier[config] : identifier[account] = identifier[self] . identifier[config] [ literal[string] ] keyword[if] keyword[not] identifier[account] : keyword[raise] identifier[ValueError] ( literal[string] ) identifier[account] = identifier[Account] ( identifier[account] , identifier[blockchain_instance] = identifier[self] ) identifier[is_key] = identifier[approver] keyword[and] identifier[approver] [: literal[int] ]== identifier[self] . identifier[prefix] keyword[if] keyword[not] identifier[approver] keyword[and] keyword[not] identifier[is_key] : identifier[approver] = identifier[account] keyword[elif] identifier[approver] keyword[and] keyword[not] identifier[is_key] : identifier[approver] = identifier[Account] ( identifier[approver] , identifier[blockchain_instance] = identifier[self] ) keyword[else] : identifier[approver] = identifier[PublicKey] ( identifier[approver] ) keyword[if] keyword[not] identifier[isinstance] ( identifier[proposal_ids] ,( identifier[list] , identifier[set] , identifier[tuple] )): identifier[proposal_ids] ={ identifier[proposal_ids] } identifier[op] =[] keyword[for] identifier[proposal_id] keyword[in] identifier[proposal_ids] : identifier[proposal] = identifier[Proposal] ( identifier[proposal_id] , identifier[blockchain_instance] = identifier[self] ) identifier[update_dict] ={ literal[string] :{ literal[string] : literal[int] , literal[string] : literal[string] }, literal[string] : identifier[account] [ literal[string] ], literal[string] : identifier[proposal] [ literal[string] ], literal[string] : identifier[self] . identifier[prefix] , } keyword[if] identifier[is_key] : identifier[update_dict] . identifier[update] ({ literal[string] :[ identifier[str] ( identifier[approver] )]}) keyword[else] : identifier[update_dict] . identifier[update] ({ literal[string] :[ identifier[approver] [ literal[string] ]]}) identifier[op] . identifier[append] ( identifier[operations] . identifier[Proposal_update] (** identifier[update_dict] )) keyword[if] identifier[is_key] : identifier[self] . identifier[txbuffer] . identifier[appendSigner] ( identifier[approver] , literal[string] ) keyword[return] identifier[self] . identifier[finalizeOp] ( identifier[op] , identifier[account] [ literal[string] ], literal[string] ,** identifier[kwargs] ) keyword[return] identifier[self] . identifier[finalizeOp] ( identifier[op] , identifier[approver] , literal[string] ,** identifier[kwargs] )
def approveproposal(self, proposal_ids, account=None, approver=None, **kwargs): """ Approve Proposal :param list proposal_id: Ids of the proposals :param str appprover: The account or key to use for approval (defaults to ``account``) :param str account: (optional) the account to allow access to (defaults to ``default_account``) """ from .proposal import Proposal if not account: if 'default_account' in self.config: account = self.config['default_account'] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] if not account: raise ValueError('You need to provide an account') # depends on [control=['if'], data=[]] account = Account(account, blockchain_instance=self) is_key = approver and approver[:3] == self.prefix if not approver and (not is_key): approver = account # depends on [control=['if'], data=[]] elif approver and (not is_key): approver = Account(approver, blockchain_instance=self) # depends on [control=['if'], data=[]] else: approver = PublicKey(approver) if not isinstance(proposal_ids, (list, set, tuple)): proposal_ids = {proposal_ids} # depends on [control=['if'], data=[]] op = [] for proposal_id in proposal_ids: proposal = Proposal(proposal_id, blockchain_instance=self) update_dict = {'fee': {'amount': 0, 'asset_id': '1.3.0'}, 'fee_paying_account': account['id'], 'proposal': proposal['id'], 'prefix': self.prefix} if is_key: update_dict.update({'key_approvals_to_add': [str(approver)]}) # depends on [control=['if'], data=[]] else: update_dict.update({'active_approvals_to_add': [approver['id']]}) op.append(operations.Proposal_update(**update_dict)) # depends on [control=['for'], data=['proposal_id']] if is_key: self.txbuffer.appendSigner(approver, 'active') return self.finalizeOp(op, account['name'], 'active', **kwargs) # depends on [control=['if'], data=[]] return self.finalizeOp(op, approver, 'active', **kwargs)
def table(self): """Return a large string of the entire table ready to be printed to the terminal.""" ascii_table = super(UnixTable, self).table optimized = ascii_table.replace('\033(B\033(0', '') return optimized
def function[table, parameter[self]]: constant[Return a large string of the entire table ready to be printed to the terminal.] variable[ascii_table] assign[=] call[name[super], parameter[name[UnixTable], name[self]]].table variable[optimized] assign[=] call[name[ascii_table].replace, parameter[constant[(B(0], constant[]]] return[name[optimized]]
keyword[def] identifier[table] ( identifier[self] ): literal[string] identifier[ascii_table] = identifier[super] ( identifier[UnixTable] , identifier[self] ). identifier[table] identifier[optimized] = identifier[ascii_table] . identifier[replace] ( literal[string] , literal[string] ) keyword[return] identifier[optimized]
def table(self): """Return a large string of the entire table ready to be printed to the terminal.""" ascii_table = super(UnixTable, self).table optimized = ascii_table.replace('\x1b(B\x1b(0', '') return optimized
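For context on the replace above: '\x1b(B' switches the terminal back to ASCII and '\x1b(0' immediately re-enters the DEC line-drawing charset, so the adjacent pair is a no-op between two runs of box-drawing characters and stripping it only shortens the string. A small illustration (the sample row is made up):

# 'q' renders as a horizontal line while the DEC special-graphics charset is active.
row = '\x1b(0qqq\x1b(B\x1b(0qqq\x1b(B'
print(repr(row.replace('\x1b(B\x1b(0', '')))   # '\x1b(0qqqqqq\x1b(B' (same glyphs, fewer bytes)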
def LEVINSON(r, order=None, allow_singularity=False): r"""Levinson-Durbin recursion. Find the coefficients of a length(r)-1 order autoregressive linear process :param r: autocorrelation sequence of length N + 1 (first element being the zero-lag autocorrelation) :param order: requested order of the autoregressive coefficients. default is N. :param allow_singularity: false by default. Other implementations may be True (e.g., octave) :return: * the `N+1` autoregressive coefficients :math:`A=(1, a_1...a_N)` * the prediction errors * the `N` reflections coefficients values This algorithm solves the set of complex linear simultaneous equations using Levinson algorithm. .. math:: \bold{T}_M \left( \begin{array}{c} 1 \\ \bold{a}_M \end{array} \right) = \left( \begin{array}{c} \rho_M \\ \bold{0}_M \end{array} \right) where :math:`\bold{T}_M` is a Hermitian Toeplitz matrix with elements :math:`T_0, T_1, \dots ,T_M`. .. note:: Solving this equations by Gaussian elimination would require :math:`M^3` operations whereas the levinson algorithm requires :math:`M^2+M` additions and :math:`M^2+M` multiplications. This is equivalent to solve the following symmetric Toeplitz system of linear equations .. math:: \left( \begin{array}{cccc} r_1 & r_2^* & \dots & r_{n}^*\\ r_2 & r_1^* & \dots & r_{n-1}^*\\ \dots & \dots & \dots & \dots\\ r_n & \dots & r_2 & r_1 \end{array} \right) \left( \begin{array}{cccc} a_2\\ a_3 \\ \dots \\ a_{N+1} \end{array} \right) = \left( \begin{array}{cccc} -r_2\\ -r_3 \\ \dots \\ -r_{N+1} \end{array} \right) where :math:`r = (r_1 ... r_{N+1})` is the input autocorrelation vector, and :math:`r_i^*` denotes the complex conjugate of :math:`r_i`. The input r is typically a vector of autocorrelation coefficients where lag 0 is the first element :math:`r_1`. .. doctest:: >>> import numpy; from spectrum import LEVINSON >>> T = numpy.array([3., -2+0.5j, .7-1j]) >>> a, e, k = LEVINSON(T) """ #from numpy import isrealobj T0 = numpy.real(r[0]) T = r[1:] M = len(T) if order is None: M = len(T) else: assert order <= M, 'order must be less than size of the input data' M = order realdata = numpy.isrealobj(r) if realdata is True: A = numpy.zeros(M, dtype=float) ref = numpy.zeros(M, dtype=float) else: A = numpy.zeros(M, dtype=complex) ref = numpy.zeros(M, dtype=complex) P = T0 for k in range(0, M): save = T[k] if k == 0: temp = -save / P else: #save += sum([A[j]*T[k-j-1] for j in range(0,k)]) for j in range(0, k): save = save + A[j] * T[k-j-1] temp = -save / P if realdata: P = P * (1. - temp**2.) else: P = P * (1. - (temp.real**2+temp.imag**2)) if P <= 0 and allow_singularity==False: raise ValueError("singular matrix") A[k] = temp ref[k] = temp # save reflection coeff at each step if k == 0: continue khalf = (k+1)//2 if realdata is True: for j in range(0, khalf): kj = k-j-1 save = A[j] A[j] = save + temp * A[kj] if j != kj: A[kj] += temp*save else: for j in range(0, khalf): kj = k-j-1 save = A[j] A[j] = save + temp * A[kj].conjugate() if j != kj: A[kj] = A[kj] + temp * save.conjugate() return A, P, ref
def function[LEVINSON, parameter[r, order, allow_singularity]]: constant[Levinson-Durbin recursion. Find the coefficients of a length(r)-1 order autoregressive linear process :param r: autocorrelation sequence of length N + 1 (first element being the zero-lag autocorrelation) :param order: requested order of the autoregressive coefficients. default is N. :param allow_singularity: false by default. Other implementations may be True (e.g., octave) :return: * the `N+1` autoregressive coefficients :math:`A=(1, a_1...a_N)` * the prediction errors * the `N` reflections coefficients values This algorithm solves the set of complex linear simultaneous equations using Levinson algorithm. .. math:: \bold{T}_M \left( \begin{array}{c} 1 \\ \bold{a}_M \end{array} \right) = \left( \begin{array}{c} \rho_M \\ \bold{0}_M \end{array} \right) where :math:`\bold{T}_M` is a Hermitian Toeplitz matrix with elements :math:`T_0, T_1, \dots ,T_M`. .. note:: Solving this equations by Gaussian elimination would require :math:`M^3` operations whereas the levinson algorithm requires :math:`M^2+M` additions and :math:`M^2+M` multiplications. This is equivalent to solve the following symmetric Toeplitz system of linear equations .. math:: \left( \begin{array}{cccc} r_1 & r_2^* & \dots & r_{n}^*\\ r_2 & r_1^* & \dots & r_{n-1}^*\\ \dots & \dots & \dots & \dots\\ r_n & \dots & r_2 & r_1 \end{array} \right) \left( \begin{array}{cccc} a_2\\ a_3 \\ \dots \\ a_{N+1} \end{array} \right) = \left( \begin{array}{cccc} -r_2\\ -r_3 \\ \dots \\ -r_{N+1} \end{array} \right) where :math:`r = (r_1 ... r_{N+1})` is the input autocorrelation vector, and :math:`r_i^*` denotes the complex conjugate of :math:`r_i`. The input r is typically a vector of autocorrelation coefficients where lag 0 is the first element :math:`r_1`. .. 
doctest:: >>> import numpy; from spectrum import LEVINSON >>> T = numpy.array([3., -2+0.5j, .7-1j]) >>> a, e, k = LEVINSON(T) ] variable[T0] assign[=] call[name[numpy].real, parameter[call[name[r]][constant[0]]]] variable[T] assign[=] call[name[r]][<ast.Slice object at 0x7da1b015b250>] variable[M] assign[=] call[name[len], parameter[name[T]]] if compare[name[order] is constant[None]] begin[:] variable[M] assign[=] call[name[len], parameter[name[T]]] variable[realdata] assign[=] call[name[numpy].isrealobj, parameter[name[r]]] if compare[name[realdata] is constant[True]] begin[:] variable[A] assign[=] call[name[numpy].zeros, parameter[name[M]]] variable[ref] assign[=] call[name[numpy].zeros, parameter[name[M]]] variable[P] assign[=] name[T0] for taget[name[k]] in starred[call[name[range], parameter[constant[0], name[M]]]] begin[:] variable[save] assign[=] call[name[T]][name[k]] if compare[name[k] equal[==] constant[0]] begin[:] variable[temp] assign[=] binary_operation[<ast.UnaryOp object at 0x7da1b015a200> / name[P]] if name[realdata] begin[:] variable[P] assign[=] binary_operation[name[P] * binary_operation[constant[1.0] - binary_operation[name[temp] ** constant[2.0]]]] if <ast.BoolOp object at 0x7da1b0159750> begin[:] <ast.Raise object at 0x7da1b0159600> call[name[A]][name[k]] assign[=] name[temp] call[name[ref]][name[k]] assign[=] name[temp] if compare[name[k] equal[==] constant[0]] begin[:] continue variable[khalf] assign[=] binary_operation[binary_operation[name[k] + constant[1]] <ast.FloorDiv object at 0x7da2590d6bc0> constant[2]] if compare[name[realdata] is constant[True]] begin[:] for taget[name[j]] in starred[call[name[range], parameter[constant[0], name[khalf]]]] begin[:] variable[kj] assign[=] binary_operation[binary_operation[name[k] - name[j]] - constant[1]] variable[save] assign[=] call[name[A]][name[j]] call[name[A]][name[j]] assign[=] binary_operation[name[save] + binary_operation[name[temp] * call[name[A]][name[kj]]]] if compare[name[j] not_equal[!=] name[kj]] begin[:] <ast.AugAssign object at 0x7da1b01c2710> return[tuple[[<ast.Name object at 0x7da1b01c2f80>, <ast.Name object at 0x7da1b01c0dc0>, <ast.Name object at 0x7da1b01c1630>]]]
keyword[def] identifier[LEVINSON] ( identifier[r] , identifier[order] = keyword[None] , identifier[allow_singularity] = keyword[False] ): literal[string] identifier[T0] = identifier[numpy] . identifier[real] ( identifier[r] [ literal[int] ]) identifier[T] = identifier[r] [ literal[int] :] identifier[M] = identifier[len] ( identifier[T] ) keyword[if] identifier[order] keyword[is] keyword[None] : identifier[M] = identifier[len] ( identifier[T] ) keyword[else] : keyword[assert] identifier[order] <= identifier[M] , literal[string] identifier[M] = identifier[order] identifier[realdata] = identifier[numpy] . identifier[isrealobj] ( identifier[r] ) keyword[if] identifier[realdata] keyword[is] keyword[True] : identifier[A] = identifier[numpy] . identifier[zeros] ( identifier[M] , identifier[dtype] = identifier[float] ) identifier[ref] = identifier[numpy] . identifier[zeros] ( identifier[M] , identifier[dtype] = identifier[float] ) keyword[else] : identifier[A] = identifier[numpy] . identifier[zeros] ( identifier[M] , identifier[dtype] = identifier[complex] ) identifier[ref] = identifier[numpy] . identifier[zeros] ( identifier[M] , identifier[dtype] = identifier[complex] ) identifier[P] = identifier[T0] keyword[for] identifier[k] keyword[in] identifier[range] ( literal[int] , identifier[M] ): identifier[save] = identifier[T] [ identifier[k] ] keyword[if] identifier[k] == literal[int] : identifier[temp] =- identifier[save] / identifier[P] keyword[else] : keyword[for] identifier[j] keyword[in] identifier[range] ( literal[int] , identifier[k] ): identifier[save] = identifier[save] + identifier[A] [ identifier[j] ]* identifier[T] [ identifier[k] - identifier[j] - literal[int] ] identifier[temp] =- identifier[save] / identifier[P] keyword[if] identifier[realdata] : identifier[P] = identifier[P] *( literal[int] - identifier[temp] ** literal[int] ) keyword[else] : identifier[P] = identifier[P] *( literal[int] -( identifier[temp] . identifier[real] ** literal[int] + identifier[temp] . identifier[imag] ** literal[int] )) keyword[if] identifier[P] <= literal[int] keyword[and] identifier[allow_singularity] == keyword[False] : keyword[raise] identifier[ValueError] ( literal[string] ) identifier[A] [ identifier[k] ]= identifier[temp] identifier[ref] [ identifier[k] ]= identifier[temp] keyword[if] identifier[k] == literal[int] : keyword[continue] identifier[khalf] =( identifier[k] + literal[int] )// literal[int] keyword[if] identifier[realdata] keyword[is] keyword[True] : keyword[for] identifier[j] keyword[in] identifier[range] ( literal[int] , identifier[khalf] ): identifier[kj] = identifier[k] - identifier[j] - literal[int] identifier[save] = identifier[A] [ identifier[j] ] identifier[A] [ identifier[j] ]= identifier[save] + identifier[temp] * identifier[A] [ identifier[kj] ] keyword[if] identifier[j] != identifier[kj] : identifier[A] [ identifier[kj] ]+= identifier[temp] * identifier[save] keyword[else] : keyword[for] identifier[j] keyword[in] identifier[range] ( literal[int] , identifier[khalf] ): identifier[kj] = identifier[k] - identifier[j] - literal[int] identifier[save] = identifier[A] [ identifier[j] ] identifier[A] [ identifier[j] ]= identifier[save] + identifier[temp] * identifier[A] [ identifier[kj] ]. identifier[conjugate] () keyword[if] identifier[j] != identifier[kj] : identifier[A] [ identifier[kj] ]= identifier[A] [ identifier[kj] ]+ identifier[temp] * identifier[save] . identifier[conjugate] () keyword[return] identifier[A] , identifier[P] , identifier[ref]
def LEVINSON(r, order=None, allow_singularity=False): """Levinson-Durbin recursion. Find the coefficients of a length(r)-1 order autoregressive linear process :param r: autocorrelation sequence of length N + 1 (first element being the zero-lag autocorrelation) :param order: requested order of the autoregressive coefficients. default is N. :param allow_singularity: false by default. Other implementations may be True (e.g., octave) :return: * the `N+1` autoregressive coefficients :math:`A=(1, a_1...a_N)` * the prediction errors * the `N` reflections coefficients values This algorithm solves the set of complex linear simultaneous equations using Levinson algorithm. .. math:: \\bold{T}_M \\left( \\begin{array}{c} 1 \\\\ \\bold{a}_M \\end{array} \\right) = \\left( \\begin{array}{c} \\rho_M \\\\ \\bold{0}_M \\end{array} \\right) where :math:`\\bold{T}_M` is a Hermitian Toeplitz matrix with elements :math:`T_0, T_1, \\dots ,T_M`. .. note:: Solving this equations by Gaussian elimination would require :math:`M^3` operations whereas the levinson algorithm requires :math:`M^2+M` additions and :math:`M^2+M` multiplications. This is equivalent to solve the following symmetric Toeplitz system of linear equations .. math:: \\left( \\begin{array}{cccc} r_1 & r_2^* & \\dots & r_{n}^*\\\\ r_2 & r_1^* & \\dots & r_{n-1}^*\\\\ \\dots & \\dots & \\dots & \\dots\\\\ r_n & \\dots & r_2 & r_1 \\end{array} \\right) \\left( \\begin{array}{cccc} a_2\\\\ a_3 \\\\ \\dots \\\\ a_{N+1} \\end{array} \\right) = \\left( \\begin{array}{cccc} -r_2\\\\ -r_3 \\\\ \\dots \\\\ -r_{N+1} \\end{array} \\right) where :math:`r = (r_1 ... r_{N+1})` is the input autocorrelation vector, and :math:`r_i^*` denotes the complex conjugate of :math:`r_i`. The input r is typically a vector of autocorrelation coefficients where lag 0 is the first element :math:`r_1`. .. 
doctest:: >>> import numpy; from spectrum import LEVINSON >>> T = numpy.array([3., -2+0.5j, .7-1j]) >>> a, e, k = LEVINSON(T) """ #from numpy import isrealobj T0 = numpy.real(r[0]) T = r[1:] M = len(T) if order is None: M = len(T) # depends on [control=['if'], data=[]] else: assert order <= M, 'order must be less than size of the input data' M = order realdata = numpy.isrealobj(r) if realdata is True: A = numpy.zeros(M, dtype=float) ref = numpy.zeros(M, dtype=float) # depends on [control=['if'], data=[]] else: A = numpy.zeros(M, dtype=complex) ref = numpy.zeros(M, dtype=complex) P = T0 for k in range(0, M): save = T[k] if k == 0: temp = -save / P # depends on [control=['if'], data=[]] else: #save += sum([A[j]*T[k-j-1] for j in range(0,k)]) for j in range(0, k): save = save + A[j] * T[k - j - 1] # depends on [control=['for'], data=['j']] temp = -save / P if realdata: P = P * (1.0 - temp ** 2.0) # depends on [control=['if'], data=[]] else: P = P * (1.0 - (temp.real ** 2 + temp.imag ** 2)) if P <= 0 and allow_singularity == False: raise ValueError('singular matrix') # depends on [control=['if'], data=[]] A[k] = temp ref[k] = temp # save reflection coeff at each step if k == 0: continue # depends on [control=['if'], data=[]] khalf = (k + 1) // 2 if realdata is True: for j in range(0, khalf): kj = k - j - 1 save = A[j] A[j] = save + temp * A[kj] if j != kj: A[kj] += temp * save # depends on [control=['if'], data=['kj']] # depends on [control=['for'], data=['j']] # depends on [control=['if'], data=[]] else: for j in range(0, khalf): kj = k - j - 1 save = A[j] A[j] = save + temp * A[kj].conjugate() if j != kj: A[kj] = A[kj] + temp * save.conjugate() # depends on [control=['if'], data=['kj']] # depends on [control=['for'], data=['j']] # depends on [control=['for'], data=['k']] return (A, P, ref)
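A quick sanity check for the real-valued case, assuming numpy/scipy are available and LEVINSON is importable as in the doctest: the returned coefficients should match a direct solve of the Toeplitz system written out in the docstring, and the returned P should equal the final prediction error.

import numpy as np
from scipy.linalg import toeplitz
from spectrum import LEVINSON   # as in the doctest above

r = np.array([3.0, -2.0, 0.7])                      # autocorrelation, zero lag first
A, P, ref = LEVINSON(r)                             # order defaults to len(r) - 1 = 2
direct = np.linalg.solve(toeplitz(r[:-1]), -r[1:])  # solve the Toeplitz system directly
assert np.allclose(A, direct)                       # A == [0.92, 0.38]
assert np.isclose(P, r[0] + np.dot(A, r[1:]))       # prediction error, about 1.426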
def p_new_expr_nobf(self, p): """new_expr_nobf : member_expr_nobf | NEW new_expr """ if len(p) == 2: p[0] = p[1] else: p[0] = self.asttypes.NewExpr(p[2]) p[0].setpos(p)
def function[p_new_expr_nobf, parameter[self, p]]: constant[new_expr_nobf : member_expr_nobf | NEW new_expr ] if compare[call[name[len], parameter[name[p]]] equal[==] constant[2]] begin[:] call[name[p]][constant[0]] assign[=] call[name[p]][constant[1]]
keyword[def] identifier[p_new_expr_nobf] ( identifier[self] , identifier[p] ): literal[string] keyword[if] identifier[len] ( identifier[p] )== literal[int] : identifier[p] [ literal[int] ]= identifier[p] [ literal[int] ] keyword[else] : identifier[p] [ literal[int] ]= identifier[self] . identifier[asttypes] . identifier[NewExpr] ( identifier[p] [ literal[int] ]) identifier[p] [ literal[int] ]. identifier[setpos] ( identifier[p] )
def p_new_expr_nobf(self, p): """new_expr_nobf : member_expr_nobf | NEW new_expr """ if len(p) == 2: p[0] = p[1] # depends on [control=['if'], data=[]] else: p[0] = self.asttypes.NewExpr(p[2]) p[0].setpos(p)
def get_initial_data(self, request, user, profile, client): """ Return initial data for the setup form. The function can be controlled with ``SOCIALREGISTRATION_INITIAL_DATA_FUNCTION``. :param request: The current request object :param user: The unsaved user object :param profile: The unsaved profile object :param client: The API client """ if INITAL_DATA_FUNCTION: func = self.import_attribute(INITAL_DATA_FUNCTION) return func(request, user, profile, client) return {}
def function[get_initial_data, parameter[self, request, user, profile, client]]: constant[ Return initial data for the setup form. The function can be controlled with ``SOCIALREGISTRATION_INITIAL_DATA_FUNCTION``. :param request: The current request object :param user: The unsaved user object :param profile: The unsaved profile object :param client: The API client ] if name[INITAL_DATA_FUNCTION] begin[:] variable[func] assign[=] call[name[self].import_attribute, parameter[name[INITAL_DATA_FUNCTION]]] return[call[name[func], parameter[name[request], name[user], name[profile], name[client]]]] return[dictionary[[], []]]
keyword[def] identifier[get_initial_data] ( identifier[self] , identifier[request] , identifier[user] , identifier[profile] , identifier[client] ): literal[string] keyword[if] identifier[INITAL_DATA_FUNCTION] : identifier[func] = identifier[self] . identifier[import_attribute] ( identifier[INITAL_DATA_FUNCTION] ) keyword[return] identifier[func] ( identifier[request] , identifier[user] , identifier[profile] , identifier[client] ) keyword[return] {}
def get_initial_data(self, request, user, profile, client): """ Return initial data for the setup form. The function can be controlled with ``SOCIALREGISTRATION_INITIAL_DATA_FUNCTION``. :param request: The current request object :param user: The unsaved user object :param profile: The unsaved profile object :param client: The API client """ if INITAL_DATA_FUNCTION: func = self.import_attribute(INITAL_DATA_FUNCTION) return func(request, user, profile, client) # depends on [control=['if'], data=[]] return {}
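A hedged sketch of the kind of callable the SOCIALREGISTRATION_INITIAL_DATA_FUNCTION setting could point at (the dotted path and the returned form fields are hypothetical); it receives the same four arguments and returns the dict used as the setup form's initial data:

# e.g. SOCIALREGISTRATION_INITIAL_DATA_FUNCTION = 'myapp.utils.social_initial_data'
def social_initial_data(request, user, profile, client):
    # Pre-fill the setup form from the (still unsaved) user object.
    return {'username': user.username, 'email': user.email}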
def _logspace_mean(log_values): """Evaluate `Log[E[values]]` in a stable manner. Args: log_values: `Tensor` holding `Log[values]`. Returns: `Tensor` of same `dtype` as `log_values`, reduced across dim 0. `Log[Mean[values]]`. """ # center = Max[Log[values]], with stop-gradient # The center hopefully keep the exponentiated term small. It is canceled # from the final result, so putting stop gradient on it will not change the # final result. We put stop gradient on to eliminate unnecessary computation. center = tf.stop_gradient(_sample_max(log_values)) # centered_values = exp{Log[values] - E[Log[values]]} centered_values = tf.math.exp(log_values - center) # log_mean_of_values = Log[ E[centered_values] ] + center # = Log[ E[exp{log_values - E[log_values]}] ] + center # = Log[E[values]] - E[log_values] + center # = Log[E[values]] log_mean_of_values = tf.math.log(_sample_mean(centered_values)) + center return log_mean_of_values
def function[_logspace_mean, parameter[log_values]]: constant[Evaluate `Log[E[values]]` in a stable manner. Args: log_values: `Tensor` holding `Log[values]`. Returns: `Tensor` of same `dtype` as `log_values`, reduced across dim 0. `Log[Mean[values]]`. ] variable[center] assign[=] call[name[tf].stop_gradient, parameter[call[name[_sample_max], parameter[name[log_values]]]]] variable[centered_values] assign[=] call[name[tf].math.exp, parameter[binary_operation[name[log_values] - name[center]]]] variable[log_mean_of_values] assign[=] binary_operation[call[name[tf].math.log, parameter[call[name[_sample_mean], parameter[name[centered_values]]]]] + name[center]] return[name[log_mean_of_values]]
keyword[def] identifier[_logspace_mean] ( identifier[log_values] ): literal[string] identifier[center] = identifier[tf] . identifier[stop_gradient] ( identifier[_sample_max] ( identifier[log_values] )) identifier[centered_values] = identifier[tf] . identifier[math] . identifier[exp] ( identifier[log_values] - identifier[center] ) identifier[log_mean_of_values] = identifier[tf] . identifier[math] . identifier[log] ( identifier[_sample_mean] ( identifier[centered_values] ))+ identifier[center] keyword[return] identifier[log_mean_of_values]
def _logspace_mean(log_values): """Evaluate `Log[E[values]]` in a stable manner. Args: log_values: `Tensor` holding `Log[values]`. Returns: `Tensor` of same `dtype` as `log_values`, reduced across dim 0. `Log[Mean[values]]`. """ # center = Max[Log[values]], with stop-gradient # The center hopefully keep the exponentiated term small. It is canceled # from the final result, so putting stop gradient on it will not change the # final result. We put stop gradient on to eliminate unnecessary computation. center = tf.stop_gradient(_sample_max(log_values)) # centered_values = exp{Log[values] - E[Log[values]]} centered_values = tf.math.exp(log_values - center) # log_mean_of_values = Log[ E[centered_values] ] + center # = Log[ E[exp{log_values - E[log_values]}] ] + center # = Log[E[values]] - E[log_values] + center # = Log[E[values]] log_mean_of_values = tf.math.log(_sample_mean(centered_values)) + center return log_mean_of_values
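The comments above describe the usual max-shift trick; here is the same idea in plain NumPy as an illustration (not the TF code path), showing why centering keeps the exponentials from overflowing:

import numpy as np

def logspace_mean_np(log_values, axis=0):
    center = np.max(log_values, axis=axis, keepdims=True)   # the stop-gradient concern does not apply here
    centered = np.exp(log_values - center)                   # every entry <= 1, so no overflow
    return np.log(np.mean(centered, axis=axis)) + np.squeeze(center, axis=axis)

x = np.array([1000.0, 1000.0])
# np.log(np.mean(np.exp(x))) overflows to inf, but the centered version recovers 1000.0
print(logspace_mean_np(x))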
def get_report_hook(self): """ Return a callback function suitable for using reporthook argument of urllib(.request).urlretrieve :return: function object """ def report_hook(chunkNumber, chunkSize, totalSize): if totalSize != -1 and not self._callback.range_initialized(): log.debug('Initializing range: [{},{}]'.format(0, totalSize)) self._callback.set_range(0, totalSize) self._chunkNumber = chunkNumber self._total += chunkSize if self._total > totalSize: # The chunk size can be bigger than the file self._total = totalSize self._callback.update(self._total) return report_hook
def function[get_report_hook, parameter[self]]: constant[ Return a callback function suitable for using reporthook argument of urllib(.request).urlretrieve :return: function object ] def function[report_hook, parameter[chunkNumber, chunkSize, totalSize]]: if <ast.BoolOp object at 0x7da1b109b250> begin[:] call[name[log].debug, parameter[call[constant[Initializing range: [{},{}]].format, parameter[constant[0], name[totalSize]]]]] call[name[self]._callback.set_range, parameter[constant[0], name[totalSize]]] name[self]._chunkNumber assign[=] name[chunkNumber] <ast.AugAssign object at 0x7da1b1098820> if compare[name[self]._total greater[>] name[totalSize]] begin[:] name[self]._total assign[=] name[totalSize] call[name[self]._callback.update, parameter[name[self]._total]] return[name[report_hook]]
keyword[def] identifier[get_report_hook] ( identifier[self] ): literal[string] keyword[def] identifier[report_hook] ( identifier[chunkNumber] , identifier[chunkSize] , identifier[totalSize] ): keyword[if] identifier[totalSize] !=- literal[int] keyword[and] keyword[not] identifier[self] . identifier[_callback] . identifier[range_initialized] (): identifier[log] . identifier[debug] ( literal[string] . identifier[format] ( literal[int] , identifier[totalSize] )) identifier[self] . identifier[_callback] . identifier[set_range] ( literal[int] , identifier[totalSize] ) identifier[self] . identifier[_chunkNumber] = identifier[chunkNumber] identifier[self] . identifier[_total] += identifier[chunkSize] keyword[if] identifier[self] . identifier[_total] > identifier[totalSize] : identifier[self] . identifier[_total] = identifier[totalSize] identifier[self] . identifier[_callback] . identifier[update] ( identifier[self] . identifier[_total] ) keyword[return] identifier[report_hook]
def get_report_hook(self): """ Return a callback function suitable for using reporthook argument of urllib(.request).urlretrieve :return: function object """ def report_hook(chunkNumber, chunkSize, totalSize): if totalSize != -1 and (not self._callback.range_initialized()): log.debug('Initializing range: [{},{}]'.format(0, totalSize)) self._callback.set_range(0, totalSize) # depends on [control=['if'], data=[]] self._chunkNumber = chunkNumber self._total += chunkSize if self._total > totalSize: # The chunk size can be bigger than the file self._total = totalSize # depends on [control=['if'], data=['totalSize']] self._callback.update(self._total) return report_hook
def define_residues_for_plotting_topology(self,cutoff):
        """
        This function defines the residues for plotting in case only a topology file has been submitted.
        In this case the residence time analysis is not necessary and it is enough just to find all
        residues within a cutoff distance.
        Takes:
            * cutoff * - cutoff distance in angstroms that defines native contacts
        Output:
            * """
        #self.protein_selection = self.universe.select_atoms('all and around '+str(cutoff)+' (segid '+str(self.universe.ligand.segids[0])+' and resid '+str(self.universe.ligand.resids[0])+')')
        #The previous line was not working on some examples for some reason - switch to more efficient Neighbour Search
        n = AtomNeighborSearch(self.universe.select_atoms('protein and not name H* or (segid '+str(self.universe.ligand.segids[0])+' and resid '+str(self.universe.ligand.resids[0])+')'), bucket_size=10)
        self.protein_selection = n.search(self.universe.ligand,cutoff,level="A")
        for atom in self.protein_selection.atoms:
            #for non-analysis plots
            residue = (atom.resname, str(atom.resid), atom.segid)
            if residue not in self.dict_of_plotted_res.keys() and atom not in self.universe.ligand.atoms:
                self.dict_of_plotted_res[residue]=[1]
        assert len(self.dict_of_plotted_res)!=0, "Nothing to draw for this ligand (residue number: "+ str(self.universe.ligand.resids[0]) +" on the chain "+ str(self.universe.ligand.segids[0]) +") - check the position of your ligand within the topology file."
def function[define_residues_for_plotting_topology, parameter[self, cutoff]]: constant[ This function defines the residues for plotting in case only a topology file has been submitted. In this case the residence time analysis in not necessary and it is enough just to find all residues within a cutoff distance. Takes: * cutoff * - cutoff distance in angstroms that defines native contacts Output: * ] variable[n] assign[=] call[name[AtomNeighborSearch], parameter[call[name[self].universe.select_atoms, parameter[binary_operation[binary_operation[binary_operation[binary_operation[constant[protein and not name H* or (segid ] + call[name[str], parameter[call[name[self].universe.ligand.segids][constant[0]]]]] + constant[ and resid ]] + call[name[str], parameter[call[name[self].universe.ligand.resids][constant[0]]]]] + constant[)]]]]]] name[self].protein_selection assign[=] call[name[n].search, parameter[name[self].universe.ligand, name[cutoff]]] for taget[name[atom]] in starred[name[self].protein_selection.atoms] begin[:] variable[residue] assign[=] tuple[[<ast.Attribute object at 0x7da18f09c640>, <ast.Call object at 0x7da18f09d330>, <ast.Attribute object at 0x7da18f09ecb0>]] if <ast.BoolOp object at 0x7da18dc04400> begin[:] call[name[self].dict_of_plotted_res][name[residue]] assign[=] list[[<ast.Constant object at 0x7da18dc06440>]] assert[compare[call[name[len], parameter[name[self].dict_of_plotted_res]] not_equal[!=] constant[0]]]
keyword[def] identifier[define_residues_for_plotting_topology] ( identifier[self] , identifier[cutoff] ): literal[string] identifier[n] = identifier[AtomNeighborSearch] ( identifier[self] . identifier[universe] . identifier[select_atoms] ( literal[string] + identifier[str] ( identifier[self] . identifier[universe] . identifier[ligand] . identifier[segids] [ literal[int] ])+ literal[string] + identifier[str] ( identifier[self] . identifier[universe] . identifier[ligand] . identifier[resids] [ literal[int] ])+ literal[string] ), identifier[bucket_size] = literal[int] ) identifier[self] . identifier[protein_selection] = identifier[n] . identifier[search] ( identifier[self] . identifier[universe] . identifier[ligand] , identifier[cutoff] , identifier[level] = literal[string] ) keyword[for] identifier[atom] keyword[in] identifier[self] . identifier[protein_selection] . identifier[atoms] : identifier[residue] =( identifier[atom] . identifier[resname] , identifier[str] ( identifier[atom] . identifier[resid] ), identifier[atom] . identifier[segid] ) keyword[if] identifier[residue] keyword[not] keyword[in] identifier[self] . identifier[dict_of_plotted_res] . identifier[keys] () keyword[and] identifier[atom] keyword[not] keyword[in] identifier[self] . identifier[universe] . identifier[ligand] . identifier[atoms] : identifier[self] . identifier[dict_of_plotted_res] [ identifier[residue] ]=[ literal[int] ] keyword[assert] identifier[len] ( identifier[self] . identifier[dict_of_plotted_res] )!= literal[int] , literal[string] + identifier[self] . identifier[universe] . identifier[ligand] . identifier[resids] [ literal[int] ]+ literal[string] + identifier[self] . identifier[universe] . identifier[ligand] . identifier[segids] [ literal[int] ]+ literal[string]
def define_residues_for_plotting_topology(self, cutoff): """ This function defines the residues for plotting in case only a topology file has been submitted. In this case the residence time analysis in not necessary and it is enough just to find all residues within a cutoff distance. Takes: * cutoff * - cutoff distance in angstroms that defines native contacts Output: * """ #self.protein_selection = self.universe.select_atoms('all and around '+str(cutoff)+' (segid '+str(self.universe.ligand.segids[0])+' and resid '+str(self.universe.ligand.resids[0])+')') #The previous line was not working on some examples for some reason - switch to more efficient Neighbour Search n = AtomNeighborSearch(self.universe.select_atoms('protein and not name H* or (segid ' + str(self.universe.ligand.segids[0]) + ' and resid ' + str(self.universe.ligand.resids[0]) + ')'), bucket_size=10) self.protein_selection = n.search(self.universe.ligand, cutoff, level='A') for atom in self.protein_selection.atoms: #for non-analysis plots residue = (atom.resname, str(atom.resid), atom.segid) if residue not in self.dict_of_plotted_res.keys() and atom not in self.universe.ligand.atoms: self.dict_of_plotted_res[residue] = [1] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['atom']] assert len(self.dict_of_plotted_res) != 0, 'Nothing to draw for this ligand (residue number: ' + self.universe.ligand.resids[0] + ' on the chain ' + self.universe.ligand.segids[0] + ') - check the position of your ligand within the topology file.'
def dependency_status(data): """Return abstracted status of dependencies. - ``STATUS_ERROR`` .. one dependency has error status or was deleted - ``STATUS_DONE`` .. all dependencies have done status - ``None`` .. other """ parents_statuses = set( DataDependency.objects.filter( child=data, kind=DataDependency.KIND_IO ).distinct('parent__status').values_list('parent__status', flat=True) ) if not parents_statuses: return Data.STATUS_DONE if None in parents_statuses: # Some parents have been deleted. return Data.STATUS_ERROR if Data.STATUS_ERROR in parents_statuses: return Data.STATUS_ERROR if len(parents_statuses) == 1 and Data.STATUS_DONE in parents_statuses: return Data.STATUS_DONE return None
def function[dependency_status, parameter[data]]: constant[Return abstracted status of dependencies. - ``STATUS_ERROR`` .. one dependency has error status or was deleted - ``STATUS_DONE`` .. all dependencies have done status - ``None`` .. other ] variable[parents_statuses] assign[=] call[name[set], parameter[call[call[call[name[DataDependency].objects.filter, parameter[]].distinct, parameter[constant[parent__status]]].values_list, parameter[constant[parent__status]]]]] if <ast.UnaryOp object at 0x7da1b1a9f040> begin[:] return[name[Data].STATUS_DONE] if compare[constant[None] in name[parents_statuses]] begin[:] return[name[Data].STATUS_ERROR] if compare[name[Data].STATUS_ERROR in name[parents_statuses]] begin[:] return[name[Data].STATUS_ERROR] if <ast.BoolOp object at 0x7da1b1a9ea70> begin[:] return[name[Data].STATUS_DONE] return[constant[None]]
keyword[def] identifier[dependency_status] ( identifier[data] ): literal[string] identifier[parents_statuses] = identifier[set] ( identifier[DataDependency] . identifier[objects] . identifier[filter] ( identifier[child] = identifier[data] , identifier[kind] = identifier[DataDependency] . identifier[KIND_IO] ). identifier[distinct] ( literal[string] ). identifier[values_list] ( literal[string] , identifier[flat] = keyword[True] ) ) keyword[if] keyword[not] identifier[parents_statuses] : keyword[return] identifier[Data] . identifier[STATUS_DONE] keyword[if] keyword[None] keyword[in] identifier[parents_statuses] : keyword[return] identifier[Data] . identifier[STATUS_ERROR] keyword[if] identifier[Data] . identifier[STATUS_ERROR] keyword[in] identifier[parents_statuses] : keyword[return] identifier[Data] . identifier[STATUS_ERROR] keyword[if] identifier[len] ( identifier[parents_statuses] )== literal[int] keyword[and] identifier[Data] . identifier[STATUS_DONE] keyword[in] identifier[parents_statuses] : keyword[return] identifier[Data] . identifier[STATUS_DONE] keyword[return] keyword[None]
def dependency_status(data): """Return abstracted status of dependencies. - ``STATUS_ERROR`` .. one dependency has error status or was deleted - ``STATUS_DONE`` .. all dependencies have done status - ``None`` .. other """ parents_statuses = set(DataDependency.objects.filter(child=data, kind=DataDependency.KIND_IO).distinct('parent__status').values_list('parent__status', flat=True)) if not parents_statuses: return Data.STATUS_DONE # depends on [control=['if'], data=[]] if None in parents_statuses: # Some parents have been deleted. return Data.STATUS_ERROR # depends on [control=['if'], data=[]] if Data.STATUS_ERROR in parents_statuses: return Data.STATUS_ERROR # depends on [control=['if'], data=[]] if len(parents_statuses) == 1 and Data.STATUS_DONE in parents_statuses: return Data.STATUS_DONE # depends on [control=['if'], data=[]] return None
def div(computation: BaseComputation) -> None: """ Division """ numerator, denominator = computation.stack_pop(num_items=2, type_hint=constants.UINT256) if denominator == 0: result = 0 else: result = (numerator // denominator) & constants.UINT_256_MAX computation.stack_push(result)
def function[div, parameter[computation]]: constant[ Division ] <ast.Tuple object at 0x7da1b1602350> assign[=] call[name[computation].stack_pop, parameter[]] if compare[name[denominator] equal[==] constant[0]] begin[:] variable[result] assign[=] constant[0] call[name[computation].stack_push, parameter[name[result]]]
keyword[def] identifier[div] ( identifier[computation] : identifier[BaseComputation] )-> keyword[None] : literal[string] identifier[numerator] , identifier[denominator] = identifier[computation] . identifier[stack_pop] ( identifier[num_items] = literal[int] , identifier[type_hint] = identifier[constants] . identifier[UINT256] ) keyword[if] identifier[denominator] == literal[int] : identifier[result] = literal[int] keyword[else] : identifier[result] =( identifier[numerator] // identifier[denominator] )& identifier[constants] . identifier[UINT_256_MAX] identifier[computation] . identifier[stack_push] ( identifier[result] )
def div(computation: BaseComputation) -> None: """ Division """ (numerator, denominator) = computation.stack_pop(num_items=2, type_hint=constants.UINT256) if denominator == 0: result = 0 # depends on [control=['if'], data=[]] else: result = numerator // denominator & constants.UINT_256_MAX computation.stack_push(result)
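For the `div` computation above, the convention is that division by zero yields 0 rather than raising, and the quotient is masked to an unsigned 256-bit word. A small self-contained sketch of that arithmetic (the constant is defined locally here for illustration; the snippet itself takes it from its `constants` module):

UINT_256_MAX = 2**256 - 1  # unsigned 256-bit bound

def evm_div(numerator: int, denominator: int) -> int:
    """Unsigned integer division with the divide-by-zero-is-zero rule."""
    if denominator == 0:
        return 0
    return (numerator // denominator) & UINT_256_MAX

print(evm_div(10, 3))   # 3 (floor division)
print(evm_div(10, 0))   # 0 (no exception raised)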
def warn_deprecated(since, message='', name='', alternative='', pending=False, obj_type='attribute', addendum=''): """Display deprecation warning in a standard way. Parameters ---------- since : str The release at which this API became deprecated. message : str, optional Override the default deprecation message. The format specifier `%(name)s` may be used for the name of the function, and `%(alternative)s` may be used in the deprecation message to insert the name of an alternative to the deprecated function. `%(obj_type)s` may be used to insert a friendly name for the type of object being deprecated. name : str, optional The name of the deprecated object. alternative : str, optional An alternative function that the user may use in place of the deprecated function. The deprecation warning will tell the user about this alternative if provided. pending : bool, optional If True, uses a PendingDeprecationWarning instead of a DeprecationWarning. obj_type : str, optional The object type being deprecated. addendum : str, optional Additional text appended directly to the final message. Examples -------- Basic example:: # To warn of the deprecation of "metpy.name_of_module" warn_deprecated('0.6.0', name='metpy.name_of_module', obj_type='module') """ message = _generate_deprecation_message(since, message, name, alternative, pending, obj_type) warnings.warn(message, metpyDeprecation, stacklevel=1)
def function[warn_deprecated, parameter[since, message, name, alternative, pending, obj_type, addendum]]: constant[Display deprecation warning in a standard way. Parameters ---------- since : str The release at which this API became deprecated. message : str, optional Override the default deprecation message. The format specifier `%(name)s` may be used for the name of the function, and `%(alternative)s` may be used in the deprecation message to insert the name of an alternative to the deprecated function. `%(obj_type)s` may be used to insert a friendly name for the type of object being deprecated. name : str, optional The name of the deprecated object. alternative : str, optional An alternative function that the user may use in place of the deprecated function. The deprecation warning will tell the user about this alternative if provided. pending : bool, optional If True, uses a PendingDeprecationWarning instead of a DeprecationWarning. obj_type : str, optional The object type being deprecated. addendum : str, optional Additional text appended directly to the final message. Examples -------- Basic example:: # To warn of the deprecation of "metpy.name_of_module" warn_deprecated('0.6.0', name='metpy.name_of_module', obj_type='module') ] variable[message] assign[=] call[name[_generate_deprecation_message], parameter[name[since], name[message], name[name], name[alternative], name[pending], name[obj_type]]] call[name[warnings].warn, parameter[name[message], name[metpyDeprecation]]]
keyword[def] identifier[warn_deprecated] ( identifier[since] , identifier[message] = literal[string] , identifier[name] = literal[string] , identifier[alternative] = literal[string] , identifier[pending] = keyword[False] , identifier[obj_type] = literal[string] , identifier[addendum] = literal[string] ): literal[string] identifier[message] = identifier[_generate_deprecation_message] ( identifier[since] , identifier[message] , identifier[name] , identifier[alternative] , identifier[pending] , identifier[obj_type] ) identifier[warnings] . identifier[warn] ( identifier[message] , identifier[metpyDeprecation] , identifier[stacklevel] = literal[int] )
def warn_deprecated(since, message='', name='', alternative='', pending=False, obj_type='attribute', addendum=''): """Display deprecation warning in a standard way. Parameters ---------- since : str The release at which this API became deprecated. message : str, optional Override the default deprecation message. The format specifier `%(name)s` may be used for the name of the function, and `%(alternative)s` may be used in the deprecation message to insert the name of an alternative to the deprecated function. `%(obj_type)s` may be used to insert a friendly name for the type of object being deprecated. name : str, optional The name of the deprecated object. alternative : str, optional An alternative function that the user may use in place of the deprecated function. The deprecation warning will tell the user about this alternative if provided. pending : bool, optional If True, uses a PendingDeprecationWarning instead of a DeprecationWarning. obj_type : str, optional The object type being deprecated. addendum : str, optional Additional text appended directly to the final message. Examples -------- Basic example:: # To warn of the deprecation of "metpy.name_of_module" warn_deprecated('0.6.0', name='metpy.name_of_module', obj_type='module') """ message = _generate_deprecation_message(since, message, name, alternative, pending, obj_type) warnings.warn(message, metpyDeprecation, stacklevel=1)
def save_to_store(self): """Save index to store. :raise AttributeError: If no datastore is defined """ if not self._store: raise AttributeError('No datastore defined!') saved_data = self.save_to_data(in_place=True) data = Serializer.serialize(saved_data) self._store.store_blob(data, 'all_keys_with_undefined')
def function[save_to_store, parameter[self]]: constant[Save index to store. :raise AttributeError: If no datastore is defined ] if <ast.UnaryOp object at 0x7da1b190ee00> begin[:] <ast.Raise object at 0x7da1b190e6b0> variable[saved_data] assign[=] call[name[self].save_to_data, parameter[]] variable[data] assign[=] call[name[Serializer].serialize, parameter[name[saved_data]]] call[name[self]._store.store_blob, parameter[name[data], constant[all_keys_with_undefined]]]
keyword[def] identifier[save_to_store] ( identifier[self] ): literal[string] keyword[if] keyword[not] identifier[self] . identifier[_store] : keyword[raise] identifier[AttributeError] ( literal[string] ) identifier[saved_data] = identifier[self] . identifier[save_to_data] ( identifier[in_place] = keyword[True] ) identifier[data] = identifier[Serializer] . identifier[serialize] ( identifier[saved_data] ) identifier[self] . identifier[_store] . identifier[store_blob] ( identifier[data] , literal[string] )
def save_to_store(self): """Save index to store. :raise AttributeError: If no datastore is defined """ if not self._store: raise AttributeError('No datastore defined!') # depends on [control=['if'], data=[]] saved_data = self.save_to_data(in_place=True) data = Serializer.serialize(saved_data) self._store.store_blob(data, 'all_keys_with_undefined')
def auto_override_class(cls, force = False, force_recursive = False):
    """Works like auto_override, but is only applicable to classes.
    """
    if not pytypes.checking_enabled:
        return cls
    assert(isclass(cls))
    if not force and is_no_type_check(cls):
        return cls
    # To play it safe we avoid modifying the dict while iterating over it,
    # so we cache the keys beforehand.
    # We don't use keys() directly because of Python 3.
    # Todo: Better to use inspect.getmembers here
    keys = [key for key in cls.__dict__]
    for key in keys:
        memb = cls.__dict__[key]
        if force_recursive or not is_no_type_check(memb):
            if isfunction(memb) or ismethod(memb) or ismethoddescriptor(memb):
                if util._has_base_method(memb, cls):
                    setattr(cls, key, override(memb))
            elif isclass(memb):
                auto_override_class(memb, force_recursive, force_recursive)
    return cls
def function[auto_override_class, parameter[cls, force, force_recursive]]: constant[Works like auto_override, but is only applicable to classes. ] if <ast.UnaryOp object at 0x7da18ede4a60> begin[:] return[name[cls]] assert[call[name[isclass], parameter[name[cls]]]] if <ast.BoolOp object at 0x7da18ede5870> begin[:] return[name[cls]] variable[keys] assign[=] <ast.ListComp object at 0x7da18ede4ee0> for taget[name[key]] in starred[name[keys]] begin[:] variable[memb] assign[=] call[name[cls].__dict__][name[key]] if <ast.BoolOp object at 0x7da18ede6710> begin[:] if <ast.BoolOp object at 0x7da18ede4f70> begin[:] if call[name[util]._has_base_method, parameter[name[memb], name[cls]]] begin[:] call[name[setattr], parameter[name[cls], name[key], call[name[override], parameter[name[memb]]]]] return[name[cls]]
keyword[def] identifier[auto_override_class] ( identifier[cls] , identifier[force] = keyword[False] , identifier[force_recursive] = keyword[False] ): literal[string] keyword[if] keyword[not] identifier[pytypes] . identifier[checking_enabled] : keyword[return] identifier[cls] keyword[assert] ( identifier[isclass] ( identifier[cls] )) keyword[if] keyword[not] identifier[force] keyword[and] identifier[is_no_type_check] ( identifier[cls] ): keyword[return] identifier[cls] identifier[keys] =[ identifier[key] keyword[for] identifier[key] keyword[in] identifier[cls] . identifier[__dict__] ] keyword[for] identifier[key] keyword[in] identifier[keys] : identifier[memb] = identifier[cls] . identifier[__dict__] [ identifier[key] ] keyword[if] identifier[force_recursive] keyword[or] keyword[not] identifier[is_no_type_check] ( identifier[memb] ): keyword[if] identifier[isfunction] ( identifier[memb] ) keyword[or] identifier[ismethod] ( identifier[memb] ) keyword[or] identifier[ismethoddescriptor] ( identifier[memb] ): keyword[if] identifier[util] . identifier[_has_base_method] ( identifier[memb] , identifier[cls] ): identifier[setattr] ( identifier[cls] , identifier[key] , identifier[override] ( identifier[memb] )) keyword[elif] identifier[isclass] ( identifier[memb] ): identifier[auto_override_class] ( identifier[memb] , identifier[force_recursive] , identifier[force_recursive] ) keyword[return] identifier[cls]
def auto_override_class(cls, force=False, force_recursive=False): """Works like auto_override, but is only applicable to classes. """ if not pytypes.checking_enabled: return cls # depends on [control=['if'], data=[]] assert isclass(cls) if not force and is_no_type_check(cls): return cls # depends on [control=['if'], data=[]] # To play it safe we avoid to modify the dict while iterating over it, # so we previously cache keys. # For this we don't use keys() because of Python 3. # Todo: Better use inspect.getmembers here keys = [key for key in cls.__dict__] for key in keys: memb = cls.__dict__[key] if force_recursive or not is_no_type_check(memb): if isfunction(memb) or ismethod(memb) or ismethoddescriptor(memb): if util._has_base_method(memb, cls): setattr(cls, key, override(memb)) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] elif isclass(memb): auto_override_class(memb, force_recursive, force_recursive) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['key']] return cls
def p_l_expression(self, p):
        '''
        l : expression
        '''
        _LOGGER.debug("l -> expression")
        l = TypedList( [ p[1] ] )
        p[0] = l
def function[p_l_expression, parameter[self, p]]: constant[ l : expression ] call[name[_LOGGER].debug, parameter[constant[l -> expresion]]] variable[l] assign[=] call[name[TypedList], parameter[list[[<ast.Subscript object at 0x7da20c7cb610>]]]] call[name[p]][constant[0]] assign[=] name[l]
keyword[def] identifier[p_l_expression] ( identifier[self] , identifier[p] ): literal[string] identifier[_LOGGER] . identifier[debug] ( literal[string] ) identifier[l] = identifier[TypedList] ([ identifier[p] [ literal[int] ]]) identifier[p] [ literal[int] ]= identifier[l]
def p_l_expression(self, p): """ l : expression """ _LOGGER.debug('l -> expresion') l = TypedList([p[1]]) p[0] = l
def init(scope, app, settings): """Plugin for serving static files in development mode""" cfg = settings.get('rw.static', {}) static = Static() scope['static'] = static scope['template_env'].globals['static'] = static for base_uri, sources in cfg.items(): full_paths = [] for source in sources: if isinstance(source, dict): full_path = source['path'] full_paths.append(full_path.format(**os.environ)) continue elif ',' in source: module_name, path = [part.strip() for part in source.split(',')] else: module_name = source path = 'static' full_path = pkg_resources.resource_filename(module_name, path) full_paths.append(full_path) app.root.mount('/' + base_uri + '/<h>/<path:path>', StaticHandler, {'path': full_paths}, name='static_' + base_uri.replace('.', '_')) static.handlers.append((base_uri, StaticHandler, full_paths)) static.setup()
def function[init, parameter[scope, app, settings]]: constant[Plugin for serving static files in development mode] variable[cfg] assign[=] call[name[settings].get, parameter[constant[rw.static], dictionary[[], []]]] variable[static] assign[=] call[name[Static], parameter[]] call[name[scope]][constant[static]] assign[=] name[static] call[call[name[scope]][constant[template_env]].globals][constant[static]] assign[=] name[static] for taget[tuple[[<ast.Name object at 0x7da20c6a90c0>, <ast.Name object at 0x7da20c6aa9e0>]]] in starred[call[name[cfg].items, parameter[]]] begin[:] variable[full_paths] assign[=] list[[]] for taget[name[source]] in starred[name[sources]] begin[:] if call[name[isinstance], parameter[name[source], name[dict]]] begin[:] variable[full_path] assign[=] call[name[source]][constant[path]] call[name[full_paths].append, parameter[call[name[full_path].format, parameter[]]]] continue variable[full_path] assign[=] call[name[pkg_resources].resource_filename, parameter[name[module_name], name[path]]] call[name[full_paths].append, parameter[name[full_path]]] call[name[app].root.mount, parameter[binary_operation[binary_operation[constant[/] + name[base_uri]] + constant[/<h>/<path:path>]], name[StaticHandler], dictionary[[<ast.Constant object at 0x7da204623070>], [<ast.Name object at 0x7da204622e90>]]]] call[name[static].handlers.append, parameter[tuple[[<ast.Name object at 0x7da204621cf0>, <ast.Name object at 0x7da204623460>, <ast.Name object at 0x7da204621480>]]]] call[name[static].setup, parameter[]]
keyword[def] identifier[init] ( identifier[scope] , identifier[app] , identifier[settings] ): literal[string] identifier[cfg] = identifier[settings] . identifier[get] ( literal[string] ,{}) identifier[static] = identifier[Static] () identifier[scope] [ literal[string] ]= identifier[static] identifier[scope] [ literal[string] ]. identifier[globals] [ literal[string] ]= identifier[static] keyword[for] identifier[base_uri] , identifier[sources] keyword[in] identifier[cfg] . identifier[items] (): identifier[full_paths] =[] keyword[for] identifier[source] keyword[in] identifier[sources] : keyword[if] identifier[isinstance] ( identifier[source] , identifier[dict] ): identifier[full_path] = identifier[source] [ literal[string] ] identifier[full_paths] . identifier[append] ( identifier[full_path] . identifier[format] (** identifier[os] . identifier[environ] )) keyword[continue] keyword[elif] literal[string] keyword[in] identifier[source] : identifier[module_name] , identifier[path] =[ identifier[part] . identifier[strip] () keyword[for] identifier[part] keyword[in] identifier[source] . identifier[split] ( literal[string] )] keyword[else] : identifier[module_name] = identifier[source] identifier[path] = literal[string] identifier[full_path] = identifier[pkg_resources] . identifier[resource_filename] ( identifier[module_name] , identifier[path] ) identifier[full_paths] . identifier[append] ( identifier[full_path] ) identifier[app] . identifier[root] . identifier[mount] ( literal[string] + identifier[base_uri] + literal[string] , identifier[StaticHandler] ,{ literal[string] : identifier[full_paths] }, identifier[name] = literal[string] + identifier[base_uri] . identifier[replace] ( literal[string] , literal[string] )) identifier[static] . identifier[handlers] . identifier[append] (( identifier[base_uri] , identifier[StaticHandler] , identifier[full_paths] )) identifier[static] . identifier[setup] ()
def init(scope, app, settings): """Plugin for serving static files in development mode""" cfg = settings.get('rw.static', {}) static = Static() scope['static'] = static scope['template_env'].globals['static'] = static for (base_uri, sources) in cfg.items(): full_paths = [] for source in sources: if isinstance(source, dict): full_path = source['path'] full_paths.append(full_path.format(**os.environ)) continue # depends on [control=['if'], data=[]] elif ',' in source: (module_name, path) = [part.strip() for part in source.split(',')] # depends on [control=['if'], data=['source']] else: module_name = source path = 'static' full_path = pkg_resources.resource_filename(module_name, path) full_paths.append(full_path) # depends on [control=['for'], data=['source']] app.root.mount('/' + base_uri + '/<h>/<path:path>', StaticHandler, {'path': full_paths}, name='static_' + base_uri.replace('.', '_')) static.handlers.append((base_uri, StaticHandler, full_paths)) # depends on [control=['for'], data=[]] static.setup()
def parse_member(
            cls, obj: dict, collection: "DtsCollection",
            direction: str, **additional_parameters) -> List["DtsCollection"]:
        """ Parse the member value of a Collection response and return the list of objects while setting the graph
        relationship based on `direction`

        :param obj: PyLD parsed JSON+LD
        :param collection: Collection attached to the member property
        :param direction: Direction of the member (children, parent)
        """
        members = []

        for member in obj.get(str(_hyd.member), []):
            subcollection = cls.parse(member, **additional_parameters)
            if direction == "children":
                subcollection.parents.update({collection})
            members.append(subcollection)

        return members
def function[parse_member, parameter[cls, obj, collection, direction]]: constant[ Parse the member value of a Collection response and returns the list of object while setting the graph relationship based on `direction` :param obj: PyLD parsed JSON+LD :param collection: Collection attached to the member property :param direction: Direction of the member (children, parent) ] variable[members] assign[=] list[[]] for taget[name[member]] in starred[call[name[obj].get, parameter[call[name[str], parameter[name[_hyd].member]], list[[]]]]] begin[:] variable[subcollection] assign[=] call[name[cls].parse, parameter[name[member]]] if compare[name[direction] equal[==] constant[children]] begin[:] call[name[subcollection].parents.update, parameter[<ast.Set object at 0x7da20e9547f0>]] call[name[members].append, parameter[name[subcollection]]] return[name[members]]
keyword[def] identifier[parse_member] ( identifier[cls] , identifier[obj] : identifier[dict] , identifier[collection] : literal[string] , identifier[direction] : identifier[str] , ** identifier[additional_parameters] )-> identifier[List] [ literal[string] ]: literal[string] identifier[members] =[] keyword[for] identifier[member] keyword[in] identifier[obj] . identifier[get] ( identifier[str] ( identifier[_hyd] . identifier[member] ),[]): identifier[subcollection] = identifier[cls] . identifier[parse] ( identifier[member] ,** identifier[additional_parameters] ) keyword[if] identifier[direction] == literal[string] : identifier[subcollection] . identifier[parents] . identifier[update] ({ identifier[collection] }) identifier[members] . identifier[append] ( identifier[subcollection] ) keyword[return] identifier[members]
def parse_member(cls, obj: dict, collection: 'DtsCollection', direction: str, **additional_parameters) -> List['DtsCollection']: """ Parse the member value of a Collection response and returns the list of object while setting the graph relationship based on `direction` :param obj: PyLD parsed JSON+LD :param collection: Collection attached to the member property :param direction: Direction of the member (children, parent) """ members = [] for member in obj.get(str(_hyd.member), []): subcollection = cls.parse(member, **additional_parameters) if direction == 'children': subcollection.parents.update({collection}) # depends on [control=['if'], data=[]] members.append(subcollection) # depends on [control=['for'], data=['member']] return members
def _escape_identifiers(self, item): """ This function escapes column and table names @param item: """ if self._escape_char == '': return item for field in self._reserved_identifiers: if item.find('.%s' % field) != -1: _str = "%s%s" % (self._escape_char, item.replace('.', '%s.' % self._escape_char)) # remove duplicates if the user already included the escape return re.sub(r'[%s]+'%self._escape_char, self._escape_char, _str) if item.find('.') != -1: _str = "%s%s%s" % (self._escape_char, item.replace('.', '%s.%s'%(self._escape_char, self._escape_char)), self._escape_char) else: _str = self._escape_char+item+self._escape_char # remove duplicates if the user already included the escape return re.sub(r'[%s]+'%self._escape_char, self._escape_char, _str)
def function[_escape_identifiers, parameter[self, item]]: constant[ This function escapes column and table names @param item: ] if compare[name[self]._escape_char equal[==] constant[]] begin[:] return[name[item]] for taget[name[field]] in starred[name[self]._reserved_identifiers] begin[:] if compare[call[name[item].find, parameter[binary_operation[constant[.%s] <ast.Mod object at 0x7da2590d6920> name[field]]]] not_equal[!=] <ast.UnaryOp object at 0x7da18ede63e0>] begin[:] variable[_str] assign[=] binary_operation[constant[%s%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da18ede6410>, <ast.Call object at 0x7da18ede4ee0>]]] return[call[name[re].sub, parameter[binary_operation[constant[[%s]+] <ast.Mod object at 0x7da2590d6920> name[self]._escape_char], name[self]._escape_char, name[_str]]]] if compare[call[name[item].find, parameter[constant[.]]] not_equal[!=] <ast.UnaryOp object at 0x7da18ede4370>] begin[:] variable[_str] assign[=] binary_operation[constant[%s%s%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da18ede69b0>, <ast.Call object at 0x7da18ede79a0>, <ast.Attribute object at 0x7da18ede4070>]]] return[call[name[re].sub, parameter[binary_operation[constant[[%s]+] <ast.Mod object at 0x7da2590d6920> name[self]._escape_char], name[self]._escape_char, name[_str]]]]
keyword[def] identifier[_escape_identifiers] ( identifier[self] , identifier[item] ): literal[string] keyword[if] identifier[self] . identifier[_escape_char] == literal[string] : keyword[return] identifier[item] keyword[for] identifier[field] keyword[in] identifier[self] . identifier[_reserved_identifiers] : keyword[if] identifier[item] . identifier[find] ( literal[string] % identifier[field] )!=- literal[int] : identifier[_str] = literal[string] %( identifier[self] . identifier[_escape_char] , identifier[item] . identifier[replace] ( literal[string] , literal[string] % identifier[self] . identifier[_escape_char] )) keyword[return] identifier[re] . identifier[sub] ( literal[string] % identifier[self] . identifier[_escape_char] , identifier[self] . identifier[_escape_char] , identifier[_str] ) keyword[if] identifier[item] . identifier[find] ( literal[string] )!=- literal[int] : identifier[_str] = literal[string] %( identifier[self] . identifier[_escape_char] , identifier[item] . identifier[replace] ( literal[string] , literal[string] %( identifier[self] . identifier[_escape_char] , identifier[self] . identifier[_escape_char] )), identifier[self] . identifier[_escape_char] ) keyword[else] : identifier[_str] = identifier[self] . identifier[_escape_char] + identifier[item] + identifier[self] . identifier[_escape_char] keyword[return] identifier[re] . identifier[sub] ( literal[string] % identifier[self] . identifier[_escape_char] , identifier[self] . identifier[_escape_char] , identifier[_str] )
def _escape_identifiers(self, item): """ This function escapes column and table names @param item: """ if self._escape_char == '': return item # depends on [control=['if'], data=[]] for field in self._reserved_identifiers: if item.find('.%s' % field) != -1: _str = '%s%s' % (self._escape_char, item.replace('.', '%s.' % self._escape_char)) # remove duplicates if the user already included the escape return re.sub('[%s]+' % self._escape_char, self._escape_char, _str) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['field']] if item.find('.') != -1: _str = '%s%s%s' % (self._escape_char, item.replace('.', '%s.%s' % (self._escape_char, self._escape_char)), self._escape_char) # depends on [control=['if'], data=[]] else: _str = self._escape_char + item + self._escape_char # remove duplicates if the user already included the escape return re.sub('[%s]+' % self._escape_char, self._escape_char, _str)
def unpack_systemtime(self, offset): """ Returns a datetime from the QWORD Windows SYSTEMTIME timestamp starting at the relative offset. See http://msdn.microsoft.com/en-us/library/ms724950%28VS.85%29.aspx Arguments: - `offset`: The relative offset from the start of the block. Throws: - `OverrunBufferException` """ o = self._offset + offset try: parts = struct.unpack_from("<HHHHHHHH", self._buf, o) except struct.error: raise OverrunBufferException(o, len(self._buf)) return datetime(parts[0], parts[1], parts[3], # skip part 2 (day of week) parts[4], parts[5], parts[6], parts[7])
def function[unpack_systemtime, parameter[self, offset]]: constant[ Returns a datetime from the QWORD Windows SYSTEMTIME timestamp starting at the relative offset. See http://msdn.microsoft.com/en-us/library/ms724950%28VS.85%29.aspx Arguments: - `offset`: The relative offset from the start of the block. Throws: - `OverrunBufferException` ] variable[o] assign[=] binary_operation[name[self]._offset + name[offset]] <ast.Try object at 0x7da1b20a9c90> return[call[name[datetime], parameter[call[name[parts]][constant[0]], call[name[parts]][constant[1]], call[name[parts]][constant[3]], call[name[parts]][constant[4]], call[name[parts]][constant[5]], call[name[parts]][constant[6]], call[name[parts]][constant[7]]]]]
keyword[def] identifier[unpack_systemtime] ( identifier[self] , identifier[offset] ): literal[string] identifier[o] = identifier[self] . identifier[_offset] + identifier[offset] keyword[try] : identifier[parts] = identifier[struct] . identifier[unpack_from] ( literal[string] , identifier[self] . identifier[_buf] , identifier[o] ) keyword[except] identifier[struct] . identifier[error] : keyword[raise] identifier[OverrunBufferException] ( identifier[o] , identifier[len] ( identifier[self] . identifier[_buf] )) keyword[return] identifier[datetime] ( identifier[parts] [ literal[int] ], identifier[parts] [ literal[int] ], identifier[parts] [ literal[int] ], identifier[parts] [ literal[int] ], identifier[parts] [ literal[int] ], identifier[parts] [ literal[int] ], identifier[parts] [ literal[int] ])
def unpack_systemtime(self, offset): """ Returns a datetime from the QWORD Windows SYSTEMTIME timestamp starting at the relative offset. See http://msdn.microsoft.com/en-us/library/ms724950%28VS.85%29.aspx Arguments: - `offset`: The relative offset from the start of the block. Throws: - `OverrunBufferException` """ o = self._offset + offset try: parts = struct.unpack_from('<HHHHHHHH', self._buf, o) # depends on [control=['try'], data=[]] except struct.error: raise OverrunBufferException(o, len(self._buf)) # depends on [control=['except'], data=[]] # skip part 2 (day of week) return datetime(parts[0], parts[1], parts[3], parts[4], parts[5], parts[6], parts[7])
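The `unpack_systemtime` method above reads eight little-endian 16-bit fields (year, month, day-of-week, day, hour, minute, second, millisecond) and deliberately skips the day-of-week when building the `datetime`. A self-contained sketch of the same layout, using a hand-packed buffer as stand-in data (the field values are made up):

import struct
from datetime import datetime

# year, month, day-of-week, day, hour, minute, second, millisecond
buf = struct.pack("<HHHHHHHH", 2007, 2, 1, 26, 13, 45, 30, 500)

parts = struct.unpack_from("<HHHHHHHH", buf, 0)
ts = datetime(parts[0], parts[1], parts[3],   # parts[2], the day of week, is skipped
              parts[4], parts[5], parts[6], parts[7])
print(ts)  # 2007-02-26 13:45:30.000500 -- the last field lands in datetime's microsecond slot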
def collections(cloud=None, api_key=None, version=None, **kwargs):
    """
    This is a status report endpoint. It is used to get the status on all of the
    collections currently trained, as well as some basic statistics on their accuracies.

    Inputs
    api_key (optional) - String: Your API key, required only if the key has not been declared
    elsewhere. This allows the API to recognize a request as yours and automatically route it
    to the appropriate destination.

    cloud (optional) - String: Your private cloud domain, required only if the key has not been declared
    elsewhere. This allows the API to recognize a request as yours and automatically route it
    to the appropriate destination.

    Example usage:

    .. code-block:: python

        >>> collections = indicoio.collections()
        {
          "tag_predictor": {
             "input_type": "text",
             "model_type": "classification",
             "number_of_samples": 224,
             "status": "ready"
          },
          "popularity_predictor": {
             "input_type": "text",
             "model_type": "regression",
             "number_of_samples": 231,
             "status": "training"
          }
        }
    """
    url_params = {"batch": False, "api_key": api_key, "version": version, "method": "collections"}
    return api_handler(None, cloud=cloud, api="custom", url_params=url_params, **kwargs)
def function[collections, parameter[cloud, api_key, version]]: constant[ This is a status report endpoint. It is used to get the status on all of the collections currently trained, as well as some basic statistics on their accuracies. Inputs api_key (optional) - String: Your API key, required only if the key has not been declared elsewhere. This allows the API to recognize a request as yours and automatically route it to the appropriate destination. cloud (optional) - String: Your private cloud domain, required only if the key has not been declared elsewhere. This allows the API to recognize a request as yours and automatically route it to the appropriate destination. Example usage: .. code-block:: python >>> collections = indicoio.collections() { "tag_predictor": { "input_type": "text", "model_type": "classification", "number_of_samples": 224 'status': 'ready' }, "popularity_predictor": { "input_type": "text", "model_type": "regression", "number_of_samples": 231 'status': 'training' } } } ] variable[url_params] assign[=] dictionary[[<ast.Constant object at 0x7da207f03c10>, <ast.Constant object at 0x7da207f01b70>, <ast.Constant object at 0x7da207f02200>, <ast.Constant object at 0x7da207f02ce0>], [<ast.Constant object at 0x7da207f02650>, <ast.Name object at 0x7da207f010f0>, <ast.Name object at 0x7da207f03ac0>, <ast.Constant object at 0x7da207f00c70>]] return[call[name[api_handler], parameter[constant[None]]]]
keyword[def] identifier[collections] ( identifier[cloud] = keyword[None] , identifier[api_key] = keyword[None] , identifier[version] = keyword[None] ,** identifier[kwargs] ): literal[string] identifier[url_params] ={ literal[string] : keyword[False] , literal[string] : identifier[api_key] , literal[string] : identifier[version] , literal[string] : literal[string] } keyword[return] identifier[api_handler] ( keyword[None] , identifier[cloud] = identifier[cloud] , identifier[api] = literal[string] , identifier[url_params] = identifier[url_params] ,** identifier[kwargs] )
def collections(cloud=None, api_key=None, version=None, **kwargs): """ This is a status report endpoint. It is used to get the status on all of the collections currently trained, as well as some basic statistics on their accuracies. Inputs api_key (optional) - String: Your API key, required only if the key has not been declared elsewhere. This allows the API to recognize a request as yours and automatically route it to the appropriate destination. cloud (optional) - String: Your private cloud domain, required only if the key has not been declared elsewhere. This allows the API to recognize a request as yours and automatically route it to the appropriate destination. Example usage: .. code-block:: python >>> collections = indicoio.collections() { "tag_predictor": { "input_type": "text", "model_type": "classification", "number_of_samples": 224 'status': 'ready' }, "popularity_predictor": { "input_type": "text", "model_type": "regression", "number_of_samples": 231 'status': 'training' } } } """ url_params = {'batch': False, 'api_key': api_key, 'version': version, 'method': 'collections'} return api_handler(None, cloud=cloud, api='custom', url_params=url_params, **kwargs)
def jbcorrelation(sites_or_distances, imt, vs30_clustering=False):
    """
    Returns the Jayaram-Baker correlation model.

    :param sites_or_distances: SiteCollection instance or distance matrix
    :param imt: Intensity Measure Type (PGA or SA)
    :param vs30_clustering: flag, default False
    """
    if hasattr(sites_or_distances, 'mesh'):
        distances = sites_or_distances.mesh.get_distance_matrix()
    else:
        distances = sites_or_distances

    # formulae are from page 1700
    if imt.period < 1:
        if not vs30_clustering:
            # case 1, eq. (17)
            b = 8.5 + 17.2 * imt.period
        else:
            # case 2, eq. (18)
            b = 40.7 - 15.0 * imt.period
    else:
        # both cases, eq. (19)
        b = 22.0 + 3.7 * imt.period

    # eq. (20)
    return numpy.exp((- 3.0 / b) * distances)
def function[jbcorrelation, parameter[sites_or_distances, imt, vs30_clustering]]: constant[ Returns the Jayaram-Baker correlation model. :param sites_or_distances: SiteCollection instance o ristance matrix :param imt: Intensity Measure Type (PGA or SA) :param vs30_clustering: flag, defalt false ] if call[name[hasattr], parameter[name[sites_or_distances], constant[mesh]]] begin[:] variable[distances] assign[=] call[name[sites_or_distances].mesh.get_distance_matrix, parameter[]] if compare[name[imt].period less[<] constant[1]] begin[:] if <ast.UnaryOp object at 0x7da20e956980> begin[:] variable[b] assign[=] binary_operation[constant[8.5] + binary_operation[constant[17.2] * name[imt].period]] return[call[name[numpy].exp, parameter[binary_operation[binary_operation[<ast.UnaryOp object at 0x7da20e955690> / name[b]] * name[distances]]]]]
keyword[def] identifier[jbcorrelation] ( identifier[sites_or_distances] , identifier[imt] , identifier[vs30_clustering] = keyword[False] ): literal[string] keyword[if] identifier[hasattr] ( identifier[sites_or_distances] , literal[string] ): identifier[distances] = identifier[sites_or_distances] . identifier[mesh] . identifier[get_distance_matrix] () keyword[else] : identifier[distances] = identifier[sites_or_distances] keyword[if] identifier[imt] . identifier[period] < literal[int] : keyword[if] keyword[not] identifier[vs30_clustering] : identifier[b] = literal[int] + literal[int] * identifier[imt] . identifier[period] keyword[else] : identifier[b] = literal[int] - literal[int] * identifier[imt] . identifier[period] keyword[else] : identifier[b] = literal[int] + literal[int] * identifier[imt] . identifier[period] keyword[return] identifier[numpy] . identifier[exp] ((- literal[int] / identifier[b] )* identifier[distances] )
def jbcorrelation(sites_or_distances, imt, vs30_clustering=False): """ Returns the Jayaram-Baker correlation model. :param sites_or_distances: SiteCollection instance o ristance matrix :param imt: Intensity Measure Type (PGA or SA) :param vs30_clustering: flag, defalt false """ if hasattr(sites_or_distances, 'mesh'): distances = sites_or_distances.mesh.get_distance_matrix() # depends on [control=['if'], data=[]] else: distances = sites_or_distances # formulae are from page 1700 if imt.period < 1: if not vs30_clustering: # case 1, eq. (17) b = 8.5 + 17.2 * imt.period # depends on [control=['if'], data=[]] else: # case 2, eq. (18) b = 40.7 - 15.0 * imt.period # depends on [control=['if'], data=[]] else: # both cases, eq. (19) b = 22.0 + 3.7 * imt.period # eq. (20) return numpy.exp(-3.0 / b * distances)
def delete(ctx, opts, owner_repo_package, yes): """ Delete a package from a repository. - OWNER/REPO/PACKAGE: Specify the OWNER namespace (i.e. user or org), the REPO name where the package is stored, and the PACKAGE name (slug) of the package itself. All separated by a slash. Example: 'your-org/awesome-repo/better-pkg'. """ owner, repo, slug = owner_repo_package delete_args = { "owner": click.style(owner, bold=True), "repo": click.style(repo, bold=True), "package": click.style(slug, bold=True), } prompt = "delete the %(package)s from %(owner)s/%(repo)s" % delete_args if not utils.confirm_operation(prompt, assume_yes=yes): return click.echo( "Deleting %(package)s from %(owner)s/%(repo)s ... " % delete_args, nl=False ) context_msg = "Failed to delete the package!" with handle_api_exceptions(ctx, opts=opts, context_msg=context_msg): with maybe_spinner(opts): delete_package(owner=owner, repo=repo, identifier=slug) click.secho("OK", fg="green")
def function[delete, parameter[ctx, opts, owner_repo_package, yes]]: constant[ Delete a package from a repository. - OWNER/REPO/PACKAGE: Specify the OWNER namespace (i.e. user or org), the REPO name where the package is stored, and the PACKAGE name (slug) of the package itself. All separated by a slash. Example: 'your-org/awesome-repo/better-pkg'. ] <ast.Tuple object at 0x7da1b191de70> assign[=] name[owner_repo_package] variable[delete_args] assign[=] dictionary[[<ast.Constant object at 0x7da1b1802770>, <ast.Constant object at 0x7da1b1802740>, <ast.Constant object at 0x7da1b1802710>], [<ast.Call object at 0x7da1b18026e0>, <ast.Call object at 0x7da1b196b250>, <ast.Call object at 0x7da1b196b5b0>]] variable[prompt] assign[=] binary_operation[constant[delete the %(package)s from %(owner)s/%(repo)s] <ast.Mod object at 0x7da2590d6920> name[delete_args]] if <ast.UnaryOp object at 0x7da1b19681c0> begin[:] return[None] call[name[click].echo, parameter[binary_operation[constant[Deleting %(package)s from %(owner)s/%(repo)s ... ] <ast.Mod object at 0x7da2590d6920> name[delete_args]]]] variable[context_msg] assign[=] constant[Failed to delete the package!] with call[name[handle_api_exceptions], parameter[name[ctx]]] begin[:] with call[name[maybe_spinner], parameter[name[opts]]] begin[:] call[name[delete_package], parameter[]] call[name[click].secho, parameter[constant[OK]]]
keyword[def] identifier[delete] ( identifier[ctx] , identifier[opts] , identifier[owner_repo_package] , identifier[yes] ): literal[string] identifier[owner] , identifier[repo] , identifier[slug] = identifier[owner_repo_package] identifier[delete_args] ={ literal[string] : identifier[click] . identifier[style] ( identifier[owner] , identifier[bold] = keyword[True] ), literal[string] : identifier[click] . identifier[style] ( identifier[repo] , identifier[bold] = keyword[True] ), literal[string] : identifier[click] . identifier[style] ( identifier[slug] , identifier[bold] = keyword[True] ), } identifier[prompt] = literal[string] % identifier[delete_args] keyword[if] keyword[not] identifier[utils] . identifier[confirm_operation] ( identifier[prompt] , identifier[assume_yes] = identifier[yes] ): keyword[return] identifier[click] . identifier[echo] ( literal[string] % identifier[delete_args] , identifier[nl] = keyword[False] ) identifier[context_msg] = literal[string] keyword[with] identifier[handle_api_exceptions] ( identifier[ctx] , identifier[opts] = identifier[opts] , identifier[context_msg] = identifier[context_msg] ): keyword[with] identifier[maybe_spinner] ( identifier[opts] ): identifier[delete_package] ( identifier[owner] = identifier[owner] , identifier[repo] = identifier[repo] , identifier[identifier] = identifier[slug] ) identifier[click] . identifier[secho] ( literal[string] , identifier[fg] = literal[string] )
def delete(ctx, opts, owner_repo_package, yes): """ Delete a package from a repository. - OWNER/REPO/PACKAGE: Specify the OWNER namespace (i.e. user or org), the REPO name where the package is stored, and the PACKAGE name (slug) of the package itself. All separated by a slash. Example: 'your-org/awesome-repo/better-pkg'. """ (owner, repo, slug) = owner_repo_package delete_args = {'owner': click.style(owner, bold=True), 'repo': click.style(repo, bold=True), 'package': click.style(slug, bold=True)} prompt = 'delete the %(package)s from %(owner)s/%(repo)s' % delete_args if not utils.confirm_operation(prompt, assume_yes=yes): return # depends on [control=['if'], data=[]] click.echo('Deleting %(package)s from %(owner)s/%(repo)s ... ' % delete_args, nl=False) context_msg = 'Failed to delete the package!' with handle_api_exceptions(ctx, opts=opts, context_msg=context_msg): with maybe_spinner(opts): delete_package(owner=owner, repo=repo, identifier=slug) # depends on [control=['with'], data=[]] # depends on [control=['with'], data=[]] click.secho('OK', fg='green')
def open_hist(self):
        """
        Open the HIST file located in self.outdir.
        Returns :class:`HistFile` object, None if file could not be found or file is not readable.
        """
        if not self.hist_path:
            if self.status == self.S_OK:
                logger.critical("%s reached S_OK but didn't produce a HIST file in %s" % (self, self.outdir))
            return None

        # Open the HIST file
        from abipy.dynamics.hist import HistFile
        try:
            return HistFile(self.hist_path)
        except Exception as exc:
            logger.critical("Exception while reading HIST file at %s:\n%s" % (self.hist_path, str(exc)))
            return None
def function[open_hist, parameter[self]]: constant[ Open the HIST file located in the in self.outdir. Returns :class:`HistFile` object, None if file could not be found or file is not readable. ] if <ast.UnaryOp object at 0x7da18dc98760> begin[:] if compare[name[self].status equal[==] name[self].S_OK] begin[:] call[name[logger].critical, parameter[binary_operation[constant[%s reached S_OK but didn't produce a HIST file in %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da18dc99d50>, <ast.Attribute object at 0x7da18dc98310>]]]]] return[constant[None]] from relative_module[abipy.dynamics.hist] import module[HistFile] <ast.Try object at 0x7da18dc9a8c0>
keyword[def] identifier[open_hist] ( identifier[self] ): literal[string] keyword[if] keyword[not] identifier[self] . identifier[hist_path] : keyword[if] identifier[self] . identifier[status] == identifier[self] . identifier[S_OK] : identifier[logger] . identifier[critical] ( literal[string] %( identifier[self] , identifier[self] . identifier[outdir] )) keyword[return] keyword[None] keyword[from] identifier[abipy] . identifier[dynamics] . identifier[hist] keyword[import] identifier[HistFile] keyword[try] : keyword[return] identifier[HistFile] ( identifier[self] . identifier[hist_path] ) keyword[except] identifier[Exception] keyword[as] identifier[exc] : identifier[logger] . identifier[critical] ( literal[string] %( identifier[self] . identifier[hist_path] , identifier[str] ( identifier[exc] ))) keyword[return] keyword[None]
def open_hist(self): """ Open the HIST file located in the in self.outdir. Returns :class:`HistFile` object, None if file could not be found or file is not readable. """ if not self.hist_path: if self.status == self.S_OK: logger.critical("%s reached S_OK but didn't produce a HIST file in %s" % (self, self.outdir)) # depends on [control=['if'], data=[]] return None # depends on [control=['if'], data=[]] # Open the HIST file from abipy.dynamics.hist import HistFile try: return HistFile(self.hist_path) # depends on [control=['try'], data=[]] except Exception as exc: logger.critical('Exception while reading HIST file at %s:\n%s' % (self.hist_path, str(exc))) return None # depends on [control=['except'], data=['exc']]
def convert_date(obj):
    """Returns a DATE column as a date object:

    >>> convert_date('2007-02-26')
    datetime.date(2007, 2, 26)

    Illegal values are returned as None:

    >>> convert_date('2007-02-31') is None
    True
    >>> convert_date('0000-00-00') is None
    True

    """
    try:
        return datetime.date(*[ int(x) for x in obj.split('-', 2) ])
    except ValueError:
        return None
def function[convert_date, parameter[obj]]: constant[Returns a DATE column as a date object: >>> date_or_None('2007-02-26') datetime.date(2007, 2, 26) Illegal values are returned as None: >>> date_or_None('2007-02-31') is None True >>> date_or_None('0000-00-00') is None True ] <ast.Try object at 0x7da18f09c370>
keyword[def] identifier[convert_date] ( identifier[obj] ): literal[string] keyword[try] : keyword[return] identifier[datetime] . identifier[date] (*[ identifier[int] ( identifier[x] ) keyword[for] identifier[x] keyword[in] identifier[obj] . identifier[split] ( literal[string] , literal[int] )]) keyword[except] identifier[ValueError] : keyword[return] keyword[None]
def convert_date(obj): """Returns a DATE column as a date object: >>> date_or_None('2007-02-26') datetime.date(2007, 2, 26) Illegal values are returned as None: >>> date_or_None('2007-02-31') is None True >>> date_or_None('0000-00-00') is None True """ try: return datetime.date(*[int(x) for x in obj.split('-', 2)]) # depends on [control=['try'], data=[]] except ValueError: return None # depends on [control=['except'], data=[]]
def health(): """Check the health of this service.""" up_time = time.time() - START_TIME response = dict(service=__service_id__, uptime='{:.2f}s'.format(up_time)) return response, HTTPStatus.OK
def function[health, parameter[]]: constant[Check the health of this service.] variable[up_time] assign[=] binary_operation[call[name[time].time, parameter[]] - name[START_TIME]] variable[response] assign[=] call[name[dict], parameter[]] return[tuple[[<ast.Name object at 0x7da1b05fb640>, <ast.Attribute object at 0x7da1b05f96f0>]]]
keyword[def] identifier[health] (): literal[string] identifier[up_time] = identifier[time] . identifier[time] ()- identifier[START_TIME] identifier[response] = identifier[dict] ( identifier[service] = identifier[__service_id__] , identifier[uptime] = literal[string] . identifier[format] ( identifier[up_time] )) keyword[return] identifier[response] , identifier[HTTPStatus] . identifier[OK]
def health(): """Check the health of this service.""" up_time = time.time() - START_TIME response = dict(service=__service_id__, uptime='{:.2f}s'.format(up_time)) return (response, HTTPStatus.OK)
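To run the health check above outside its service, the module-level names it reads (START_TIME, __service_id__, HTTPStatus) must be supplied; the values below are assumptions for illustration, not taken from the original module.

import time
from http import HTTPStatus

START_TIME = time.time()            # module start timestamp assumed by health()
__service_id__ = 'example-service'  # hypothetical service identifier

def health():
    """Check the health of this service."""
    up_time = time.time() - START_TIME
    response = dict(service=__service_id__, uptime='{:.2f}s'.format(up_time))
    return response, HTTPStatus.OK

body, status = health()
print(body)    # e.g. {'service': 'example-service', 'uptime': '0.00s'}
print(status)  # HTTPStatus.OK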
def project_create_notif(self, tenant_id, tenant_name): """Tenant Create notification. """ if not self.fw_init: return self.os_helper.create_router('_'.join([fw_constants.TENANT_EDGE_RTR, tenant_name]), tenant_id, [])
def function[project_create_notif, parameter[self, tenant_id, tenant_name]]: constant[Tenant Create notification. ] if <ast.UnaryOp object at 0x7da1b1be5e40> begin[:] return[None] call[name[self].os_helper.create_router, parameter[call[constant[_].join, parameter[list[[<ast.Attribute object at 0x7da1b1be5d50>, <ast.Name object at 0x7da1b1be50c0>]]]], name[tenant_id], list[[]]]]
keyword[def] identifier[project_create_notif] ( identifier[self] , identifier[tenant_id] , identifier[tenant_name] ): literal[string] keyword[if] keyword[not] identifier[self] . identifier[fw_init] : keyword[return] identifier[self] . identifier[os_helper] . identifier[create_router] ( literal[string] . identifier[join] ([ identifier[fw_constants] . identifier[TENANT_EDGE_RTR] , identifier[tenant_name] ]), identifier[tenant_id] ,[])
def project_create_notif(self, tenant_id, tenant_name): """Tenant Create notification. """ if not self.fw_init: return # depends on [control=['if'], data=[]] self.os_helper.create_router('_'.join([fw_constants.TENANT_EDGE_RTR, tenant_name]), tenant_id, [])
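A hedged sketch of the flow in project_create_notif: skip everything until the firewall subsystem is initialised, then ask the OpenStack helper to create a tenant edge router. The helper class, the constant value, and the class name below are stubs invented for this sketch; only the guard-plus-delegation logic mirrors the source.

class _StubOsHelper:
    def create_router(self, name, tenant_id, subnets):
        print('would create router %r for tenant %s' % (name, tenant_id))

class _StubFwConstants:
    TENANT_EDGE_RTR = 'TenantEdgeRtr'  # assumed prefix, not from the source

fw_constants = _StubFwConstants

class _StubFwService:
    def __init__(self, fw_init):
        self.fw_init = fw_init
        self.os_helper = _StubOsHelper()

    def project_create_notif(self, tenant_id, tenant_name):
        """Tenant Create notification."""
        if not self.fw_init:
            return
        self.os_helper.create_router(
            '_'.join([fw_constants.TENANT_EDGE_RTR, tenant_name]),
            tenant_id, [])

_StubFwService(fw_init=True).project_create_notif('tenant-1', 'demo')
# -> would create router 'TenantEdgeRtr_demo' for tenant tenant-1
_StubFwService(fw_init=False).project_create_notif('tenant-2', 'demo2')  # no-op before init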
def pem_as_string(cert): """ Only return False if the certificate is a file path. Otherwise it is a file object or raw string and will need to be fed to the file open context. """ if hasattr(cert, 'read'): # File object - return as is return cert cert = cert.encode('utf-8') if isinstance(cert, unicode) else cert if re.match(_PEM_RE, cert): return True return False
def function[pem_as_string, parameter[cert]]: constant[ Only return False if the certificate is a file path. Otherwise it is a file object or raw string and will need to be fed to the file open context. ] if call[name[hasattr], parameter[name[cert], constant[read]]] begin[:] return[name[cert]] variable[cert] assign[=] <ast.IfExp object at 0x7da1b1bc2e30> if call[name[re].match, parameter[name[_PEM_RE], name[cert]]] begin[:] return[constant[True]] return[constant[False]]
keyword[def] identifier[pem_as_string] ( identifier[cert] ): literal[string] keyword[if] identifier[hasattr] ( identifier[cert] , literal[string] ): keyword[return] identifier[cert] identifier[cert] = identifier[cert] . identifier[encode] ( literal[string] ) keyword[if] identifier[isinstance] ( identifier[cert] , identifier[unicode] ) keyword[else] identifier[cert] keyword[if] identifier[re] . identifier[match] ( identifier[_PEM_RE] , identifier[cert] ): keyword[return] keyword[True] keyword[return] keyword[False]
def pem_as_string(cert): """ Only return False if the certificate is a file path. Otherwise it is a file object or raw string and will need to be fed to the file open context. """ if hasattr(cert, 'read'): # File object - return as is return cert # depends on [control=['if'], data=[]] cert = cert.encode('utf-8') if isinstance(cert, unicode) else cert if re.match(_PEM_RE, cert): return True # depends on [control=['if'], data=[]] return False
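To exercise pem_as_string outside its module, the two module-level names it leans on (_PEM_RE and, on Python 2, unicode) have to be filled in. The regex and the shim below are assumed stand-ins, not the library's actual definitions; the function body is restated unchanged apart from the dropped docstring.

import re

# Assumed stand-ins for the module-level names used by pem_as_string.
_PEM_RE = re.compile(
    b'-----BEGIN CERTIFICATE-----.+?-----END CERTIFICATE-----', re.DOTALL)
unicode = str  # Python 3 shim for the original Python 2 `unicode` check

def pem_as_string(cert):
    if hasattr(cert, 'read'):
        return cert  # file object: returned unchanged for the caller to read
    cert = cert.encode('utf-8') if isinstance(cert, unicode) else cert
    if re.match(_PEM_RE, cert):
        return True
    return False

pem_text = ('-----BEGIN CERTIFICATE-----\n'
            'MIIB...truncated...\n'
            '-----END CERTIFICATE-----')
print(pem_as_string(pem_text))           # True: raw PEM contents
print(pem_as_string('/etc/ssl/my.pem'))  # False: looks like a file path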