[Dataset columns and string lengths: code (75–104k), code_sememe (47–309k), token_type (215–214k), code_dependency (75–155k). Each row pairs Python source (code) with an AST-style sememe dump (code_sememe), a token-class rewrite (token_type), and the source annotated with control-dependency comments (code_dependency).]
def set_baseline(self, version):
    """Set the baseline into the creation information table

    version: str
        The version of the current database to set in the information
        table. The baseline must be in the format x.x.x where x are
        numbers.
    """
    pattern = re.compile(r"^\d+\.\d+\.\d+$")
    if not re.match(pattern, version):
        raise ValueError('Wrong version format')
    query = """
        INSERT INTO {} (
            version,
            description,
            type,
            script,
            checksum,
            installed_by,
            execution_time,
            success
        ) VALUES(
            '{}',
            '{}',
            {},
            '{}',
            '{}',
            '{}',
            1,
            TRUE
        ) """.format(self.upgrades_table, version, 'baseline', 0, '', '',
                     self.__get_dbuser())
    self.cursor.execute(query)
    self.connection.commit()
def function[set_baseline, parameter[self, version]]: constant[Set the baseline into the creation information table version: str The version of the current database to set in the information table. The baseline must be in the format x.x.x where x are numbers. ] variable[pattern] assign[=] call[name[re].compile, parameter[constant[^\d+\.\d+\.\d+$]]] if <ast.UnaryOp object at 0x7da204344430> begin[:] <ast.Raise object at 0x7da204346260> variable[query] assign[=] call[constant[ INSERT INTO {} ( version, description, type, script, checksum, installed_by, execution_time, success ) VALUES( '{}', '{}', {}, '{}', '{}', '{}', 1, TRUE ) ].format, parameter[name[self].upgrades_table, name[version], constant[baseline], constant[0], constant[], constant[], call[name[self].__get_dbuser, parameter[]]]] call[name[self].cursor.execute, parameter[name[query]]] call[name[self].connection.commit, parameter[]]
keyword[def] identifier[set_baseline] ( identifier[self] , identifier[version] ): literal[string] identifier[pattern] = identifier[re] . identifier[compile] ( literal[string] ) keyword[if] keyword[not] identifier[re] . identifier[match] ( identifier[pattern] , identifier[version] ): keyword[raise] identifier[ValueError] ( literal[string] ) identifier[query] = literal[string] . identifier[format] ( identifier[self] . identifier[upgrades_table] , identifier[version] , literal[string] , literal[int] , literal[string] , literal[string] , identifier[self] . identifier[__get_dbuser] ()) identifier[self] . identifier[cursor] . identifier[execute] ( identifier[query] ) identifier[self] . identifier[connection] . identifier[commit] ()
def set_baseline(self, version): """Set the baseline into the creation information table version: str The version of the current database to set in the information table. The baseline must be in the format x.x.x where x are numbers. """ pattern = re.compile('^\\d+\\.\\d+\\.\\d+$') if not re.match(pattern, version): raise ValueError('Wrong version format') # depends on [control=['if'], data=[]] query = "\n INSERT INTO {} (\n version,\n description,\n type,\n script,\n checksum,\n installed_by,\n execution_time,\n success\n ) VALUES(\n '{}',\n '{}',\n {},\n '{}',\n '{}',\n '{}',\n 1,\n TRUE\n ) ".format(self.upgrades_table, version, 'baseline', 0, '', '', self.__get_dbuser()) self.cursor.execute(query) self.connection.commit()
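
Aside on the `set_baseline` source above: it splices values into the INSERT with `str.format`, which is fragile against quoting and injection. Below is a minimal parameterized sketch of the same insert; it assumes a DB-API driver with `%s` placeholders (psycopg2-style) and takes as plain arguments the attributes the method reads from `self`, so treat the names as illustrative rather than the project's API.

import re

def set_baseline_parameterized(cursor, connection, upgrades_table, version, dbuser):
    # Validate the x.x.x version format before touching the database.
    if not re.match(r"^\d+\.\d+\.\d+$", version):
        raise ValueError('Wrong version format')
    # The table name still has to be formatted in (identifiers cannot be
    # bound parameters), but every value travels as a bound parameter.
    query = (
        "INSERT INTO {} (version, description, type, script, checksum, "
        "installed_by, execution_time, success) "
        "VALUES (%s, %s, %s, %s, %s, %s, %s, %s)"
    ).format(upgrades_table)
    cursor.execute(query, (version, 'baseline', 0, '', '', dbuser, 1, True))
    connection.commit()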
def _get_join_indexers(self):
    """ return the join indexers """
    return _get_join_indexers(self.left_join_keys, self.right_join_keys,
                              sort=self.sort, how=self.how)
def function[_get_join_indexers, parameter[self]]: constant[ return the join indexers ] return[call[name[_get_join_indexers], parameter[name[self].left_join_keys, name[self].right_join_keys]]]
keyword[def] identifier[_get_join_indexers] ( identifier[self] ): literal[string] keyword[return] identifier[_get_join_indexers] ( identifier[self] . identifier[left_join_keys] , identifier[self] . identifier[right_join_keys] , identifier[sort] = identifier[self] . identifier[sort] , identifier[how] = identifier[self] . identifier[how] )
def _get_join_indexers(self): """ return the join indexers """ return _get_join_indexers(self.left_join_keys, self.right_join_keys, sort=self.sort, how=self.how)
def load_pa11y_ignore_rules(file=None, url=None):  # pylint: disable=redefined-builtin
    """
    Load the pa11y ignore rules from the given file or URL.
    """
    if not file and not url:
        return None
    if file:
        file = Path(file)
        if not file.isfile():
            msg = (
                u"pa11y_ignore_rules_file specified, but file does not exist! {file}"
            ).format(file=file)
            raise ValueError(msg)
        return yaml.safe_load(file.text())
    # must be URL
    resp = requests.get(url)
    if not resp.ok:
        msg = (
            u"pa11y_ignore_rules_url specified, but failed to fetch URL. status={status}"
        ).format(status=resp.status_code)
        err = RuntimeError(msg)
        err.response = resp
        raise err
    return yaml.safe_load(resp.text)
def function[load_pa11y_ignore_rules, parameter[file, url]]: constant[ Load the pa11y ignore rules from the given file or URL. ] if <ast.BoolOp object at 0x7da20c6aa5c0> begin[:] return[constant[None]] if name[file] begin[:] variable[file] assign[=] call[name[Path], parameter[name[file]]] if <ast.UnaryOp object at 0x7da20c6aba00> begin[:] variable[msg] assign[=] call[constant[pa11y_ignore_rules_file specified, but file does not exist! {file}].format, parameter[]] <ast.Raise object at 0x7da20c6a95a0> return[call[name[yaml].safe_load, parameter[call[name[file].text, parameter[]]]]] variable[resp] assign[=] call[name[requests].get, parameter[name[url]]] if <ast.UnaryOp object at 0x7da20c6abb20> begin[:] variable[msg] assign[=] call[constant[pa11y_ignore_rules_url specified, but failed to fetch URL. status={status}].format, parameter[]] variable[err] assign[=] call[name[RuntimeError], parameter[name[msg]]] name[err].response assign[=] name[resp] <ast.Raise object at 0x7da20c6a8730> return[call[name[yaml].safe_load, parameter[name[resp].text]]]
keyword[def] identifier[load_pa11y_ignore_rules] ( identifier[file] = keyword[None] , identifier[url] = keyword[None] ): literal[string] keyword[if] keyword[not] identifier[file] keyword[and] keyword[not] identifier[url] : keyword[return] keyword[None] keyword[if] identifier[file] : identifier[file] = identifier[Path] ( identifier[file] ) keyword[if] keyword[not] identifier[file] . identifier[isfile] (): identifier[msg] =( literal[string] ). identifier[format] ( identifier[file] = identifier[file] ) keyword[raise] identifier[ValueError] ( identifier[msg] ) keyword[return] identifier[yaml] . identifier[safe_load] ( identifier[file] . identifier[text] ()) identifier[resp] = identifier[requests] . identifier[get] ( identifier[url] ) keyword[if] keyword[not] identifier[resp] . identifier[ok] : identifier[msg] =( literal[string] ). identifier[format] ( identifier[status] = identifier[resp] . identifier[status_code] ) identifier[err] = identifier[RuntimeError] ( identifier[msg] ) identifier[err] . identifier[response] = identifier[resp] keyword[raise] identifier[err] keyword[return] identifier[yaml] . identifier[safe_load] ( identifier[resp] . identifier[text] )
def load_pa11y_ignore_rules(file=None, url=None): # pylint: disable=redefined-builtin '\n Load the pa11y ignore rules from the given file or URL.\n ' if not file and (not url): return None # depends on [control=['if'], data=[]] if file: file = Path(file) if not file.isfile(): msg = u'pa11y_ignore_rules_file specified, but file does not exist! {file}'.format(file=file) raise ValueError(msg) # depends on [control=['if'], data=[]] return yaml.safe_load(file.text()) # depends on [control=['if'], data=[]] # must be URL resp = requests.get(url) if not resp.ok: msg = u'pa11y_ignore_rules_url specified, but failed to fetch URL. status={status}'.format(status=resp.status_code) err = RuntimeError(msg) err.response = resp raise err # depends on [control=['if'], data=[]] return yaml.safe_load(resp.text)
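
For illustration, a hedged call pattern for `load_pa11y_ignore_rules` (the file name and URL are invented placeholders; note that a missing file raises `ValueError` rather than falling through to the URL):

try:
    rules = load_pa11y_ignore_rules(file='pa11y_ignore.yaml')
except ValueError:
    # Local copy missing: fall back to a remote copy instead.
    rules = load_pa11y_ignore_rules(url='https://example.com/pa11y_ignore.yaml')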
def transformer_tpu_range(rhp):
    """Small range of hyperparameters."""
    # After starting from base, set intervals for some parameters.
    rhp.set_float("learning_rate", 0.3, 3.0, scale=rhp.LOG_SCALE)
    rhp.set_discrete("learning_rate_warmup_steps",
                     [1000, 2000, 4000, 8000, 16000])
    rhp.set_float("initializer_gain", 0.5, 2.0)
    rhp.set_float("optimizer_adam_beta1", 0.85, 0.95)
    rhp.set_float("optimizer_adam_beta2", 0.97, 0.99)
    rhp.set_float("weight_decay", 0.0, 2.0)
def function[transformer_tpu_range, parameter[rhp]]: constant[Small range of hyperparameters.] call[name[rhp].set_float, parameter[constant[learning_rate], constant[0.3], constant[3.0]]] call[name[rhp].set_discrete, parameter[constant[learning_rate_warmup_steps], list[[<ast.Constant object at 0x7da1b201f9d0>, <ast.Constant object at 0x7da1b201caf0>, <ast.Constant object at 0x7da1b201da20>, <ast.Constant object at 0x7da1b201d3c0>, <ast.Constant object at 0x7da1b201e110>]]]] call[name[rhp].set_float, parameter[constant[initializer_gain], constant[0.5], constant[2.0]]] call[name[rhp].set_float, parameter[constant[optimizer_adam_beta1], constant[0.85], constant[0.95]]] call[name[rhp].set_float, parameter[constant[optimizer_adam_beta2], constant[0.97], constant[0.99]]] call[name[rhp].set_float, parameter[constant[weight_decay], constant[0.0], constant[2.0]]]
keyword[def] identifier[transformer_tpu_range] ( identifier[rhp] ): literal[string] identifier[rhp] . identifier[set_float] ( literal[string] , literal[int] , literal[int] , identifier[scale] = identifier[rhp] . identifier[LOG_SCALE] ) identifier[rhp] . identifier[set_discrete] ( literal[string] , [ literal[int] , literal[int] , literal[int] , literal[int] , literal[int] ]) identifier[rhp] . identifier[set_float] ( literal[string] , literal[int] , literal[int] ) identifier[rhp] . identifier[set_float] ( literal[string] , literal[int] , literal[int] ) identifier[rhp] . identifier[set_float] ( literal[string] , literal[int] , literal[int] ) identifier[rhp] . identifier[set_float] ( literal[string] , literal[int] , literal[int] )
def transformer_tpu_range(rhp): """Small range of hyperparameters.""" # After starting from base, set intervals for some parameters. rhp.set_float('learning_rate', 0.3, 3.0, scale=rhp.LOG_SCALE) rhp.set_discrete('learning_rate_warmup_steps', [1000, 2000, 4000, 8000, 16000]) rhp.set_float('initializer_gain', 0.5, 2.0) rhp.set_float('optimizer_adam_beta1', 0.85, 0.95) rhp.set_float('optimizer_adam_beta2', 0.97, 0.99) rhp.set_float('weight_decay', 0.0, 2.0)
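
Here `rhp` is a ranged-hyperparameter object in the tensor2tensor style. To make the contract concrete, a toy stand-in that records the ranges and samples one configuration uniformly; the method names mirror the usage above, but the real library's API and LOG_SCALE handling may differ.

import math
import random

class ToyRangedHParams:
    LOG_SCALE = 'log'

    def __init__(self):
        self.space = {}

    def set_float(self, name, lo, hi, scale=None):
        self.space[name] = ('float', lo, hi, scale)

    def set_discrete(self, name, values):
        self.space[name] = ('discrete', values)

    def sample(self):
        out = {}
        for name, spec in self.space.items():
            if spec[0] == 'discrete':
                out[name] = random.choice(spec[1])
            elif spec[3] == self.LOG_SCALE:
                # Sample uniformly in log space, then map back.
                out[name] = math.exp(random.uniform(math.log(spec[1]),
                                                    math.log(spec[2])))
            else:
                out[name] = random.uniform(spec[1], spec[2])
        return out

rhp = ToyRangedHParams()
transformer_tpu_range(rhp)
print(rhp.sample())  # one random configuration drawn from the ranges above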
def execute_edit(args, root_dir=None):
    """Edit an existing queue command in the daemon.

    Args:
        args['key'] (int): The key of the queue entry to be edited.
        root_dir (string): The path to the root directory the daemon is running in.
    """
    # Get editor
    EDITOR = os.environ.get('EDITOR', 'vim')

    # Get command from server
    key = args['key']
    status = command_factory('status')({}, root_dir=root_dir)

    # Check if the queue is not empty, and the entry exists and is queued or stashed
    if not isinstance(status['data'], str) and key in status['data']:
        if status['data'][key]['status'] in ['queued', 'stashed']:
            command = status['data'][key]['command']
        else:
            print("Entry is not 'queued' or 'stashed'")
            sys.exit(1)
    else:
        print('No entry with this key')
        sys.exit(1)

    with tempfile.NamedTemporaryFile(suffix=".tmp") as tf:
        tf.write(command.encode('utf-8'))
        tf.flush()
        call([EDITOR, tf.name])
        # do the parsing with `tf` using regular File operations.
        # for instance:
        tf.seek(0)
        edited_command = tf.read().decode('utf-8')

    print_command_factory('edit')({
        'key': key,
        'command': edited_command,
    }, root_dir=root_dir)
def function[execute_edit, parameter[args, root_dir]]: constant[Edit a existing queue command in the daemon. Args: args['key'] int: The key of the queue entry to be edited root_dir (string): The path to the root directory the daemon is running in. ] variable[EDITOR] assign[=] call[name[os].environ.get, parameter[constant[EDITOR], constant[vim]]] variable[key] assign[=] call[name[args]][constant[key]] variable[status] assign[=] call[call[name[command_factory], parameter[constant[status]]], parameter[dictionary[[], []]]] if <ast.BoolOp object at 0x7da1b0ebe140> begin[:] if compare[call[call[call[name[status]][constant[data]]][name[key]]][constant[status]] in list[[<ast.Constant object at 0x7da1b0ebf040>, <ast.Constant object at 0x7da1b0ebce80>]]] begin[:] variable[command] assign[=] call[call[call[name[status]][constant[data]]][name[key]]][constant[command]] with call[name[tempfile].NamedTemporaryFile, parameter[]] begin[:] call[name[tf].write, parameter[call[name[command].encode, parameter[constant[utf-8]]]]] call[name[tf].flush, parameter[]] call[name[call], parameter[list[[<ast.Name object at 0x7da1b0ebf820>, <ast.Attribute object at 0x7da1b0ebd840>]]]] call[name[tf].seek, parameter[constant[0]]] variable[edited_command] assign[=] call[call[name[tf].read, parameter[]].decode, parameter[constant[utf-8]]] call[call[name[print_command_factory], parameter[constant[edit]]], parameter[dictionary[[<ast.Constant object at 0x7da1b0e4c820>, <ast.Constant object at 0x7da1b0e4ded0>], [<ast.Name object at 0x7da1b0e4e350>, <ast.Name object at 0x7da1b0e4fc40>]]]]
keyword[def] identifier[execute_edit] ( identifier[args] , identifier[root_dir] = keyword[None] ): literal[string] identifier[EDITOR] = identifier[os] . identifier[environ] . identifier[get] ( literal[string] , literal[string] ) identifier[key] = identifier[args] [ literal[string] ] identifier[status] = identifier[command_factory] ( literal[string] )({}, identifier[root_dir] = identifier[root_dir] ) keyword[if] keyword[not] identifier[isinstance] ( identifier[status] [ literal[string] ], identifier[str] ) keyword[and] identifier[key] keyword[in] identifier[status] [ literal[string] ]: keyword[if] identifier[status] [ literal[string] ][ identifier[key] ][ literal[string] ] keyword[in] [ literal[string] , literal[string] ]: identifier[command] = identifier[status] [ literal[string] ][ identifier[key] ][ literal[string] ] keyword[else] : identifier[print] ( literal[string] ) identifier[sys] . identifier[exit] ( literal[int] ) keyword[else] : identifier[print] ( literal[string] ) identifier[sys] . identifier[exit] ( literal[int] ) keyword[with] identifier[tempfile] . identifier[NamedTemporaryFile] ( identifier[suffix] = literal[string] ) keyword[as] identifier[tf] : identifier[tf] . identifier[write] ( identifier[command] . identifier[encode] ( literal[string] )) identifier[tf] . identifier[flush] () identifier[call] ([ identifier[EDITOR] , identifier[tf] . identifier[name] ]) identifier[tf] . identifier[seek] ( literal[int] ) identifier[edited_command] = identifier[tf] . identifier[read] (). identifier[decode] ( literal[string] ) identifier[print_command_factory] ( literal[string] )({ literal[string] : identifier[key] , literal[string] : identifier[edited_command] , }, identifier[root_dir] = identifier[root_dir] )
def execute_edit(args, root_dir=None): """Edit a existing queue command in the daemon. Args: args['key'] int: The key of the queue entry to be edited root_dir (string): The path to the root directory the daemon is running in. """ # Get editor EDITOR = os.environ.get('EDITOR', 'vim') # Get command from server key = args['key'] status = command_factory('status')({}, root_dir=root_dir) # Check if queue is not empty, the entry exists and is queued or stashed if not isinstance(status['data'], str) and key in status['data']: if status['data'][key]['status'] in ['queued', 'stashed']: command = status['data'][key]['command'] # depends on [control=['if'], data=[]] else: print("Entry is not 'queued' or 'stashed'") sys.exit(1) # depends on [control=['if'], data=[]] else: print('No entry with this key') sys.exit(1) with tempfile.NamedTemporaryFile(suffix='.tmp') as tf: tf.write(command.encode('utf-8')) tf.flush() call([EDITOR, tf.name]) # do the parsing with `tf` using regular File operations. # for instance: tf.seek(0) edited_command = tf.read().decode('utf-8') # depends on [control=['with'], data=['tf']] print_command_factory('edit')({'key': key, 'command': edited_command}, root_dir=root_dir)
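
The temp-file round-trip in `execute_edit` is a reusable pattern on its own. A self-contained sketch follows, with `call` imported explicitly from `subprocess` (the original presumably does the same); it makes the same assumption as the source, namely that the editor writes the file in place.

import os
import tempfile
from subprocess import call

def edit_in_editor(text):
    """Open `text` in $EDITOR (vim by default) and return the edited result."""
    editor = os.environ.get('EDITOR', 'vim')
    with tempfile.NamedTemporaryFile(suffix='.tmp') as tf:
        tf.write(text.encode('utf-8'))
        tf.flush()
        call([editor, tf.name])
        tf.seek(0)  # rewind before re-reading the edited content
        return tf.read().decode('utf-8')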
def get_string(ea):
    """Read the string at the given ea.

    This function uses IDA's string APIs and does not implement any
    special logic.
    """
    # We get the item-head because the `GetStringType` function only works
    # on the head of an item.
    string_type = idc.GetStringType(idaapi.get_item_head(ea))
    if string_type is None:
        raise exceptions.SarkNoString("No string at 0x{:08X}".format(ea))

    string = idc.GetString(ea, strtype=string_type)

    if not string:
        raise exceptions.SarkNoString("No string at 0x{:08X}".format(ea))

    return string
def function[get_string, parameter[ea]]: constant[Read the string at the given ea. This function uses IDA's string APIs and does not implement any special logic. ] variable[string_type] assign[=] call[name[idc].GetStringType, parameter[call[name[idaapi].get_item_head, parameter[name[ea]]]]] if compare[name[string_type] is constant[None]] begin[:] <ast.Raise object at 0x7da1b12cc4f0> variable[string] assign[=] call[name[idc].GetString, parameter[name[ea]]] if <ast.UnaryOp object at 0x7da1b12cdff0> begin[:] <ast.Raise object at 0x7da1b12ce110> return[name[string]]
keyword[def] identifier[get_string] ( identifier[ea] ): literal[string] identifier[string_type] = identifier[idc] . identifier[GetStringType] ( identifier[idaapi] . identifier[get_item_head] ( identifier[ea] )) keyword[if] identifier[string_type] keyword[is] keyword[None] : keyword[raise] identifier[exceptions] . identifier[SarkNoString] ( literal[string] . identifier[format] ( identifier[ea] )) identifier[string] = identifier[idc] . identifier[GetString] ( identifier[ea] , identifier[strtype] = identifier[string_type] ) keyword[if] keyword[not] identifier[string] : keyword[raise] identifier[exceptions] . identifier[SarkNoString] ( literal[string] . identifier[format] ( identifier[ea] )) keyword[return] identifier[string]
def get_string(ea): """Read the string at the given ea. This function uses IDA's string APIs and does not implement any special logic. """ # We get the item-head because the `GetStringType` function only works on the head of an item. string_type = idc.GetStringType(idaapi.get_item_head(ea)) if string_type is None: raise exceptions.SarkNoString('No string at 0x{:08X}'.format(ea)) # depends on [control=['if'], data=[]] string = idc.GetString(ea, strtype=string_type) if not string: raise exceptions.SarkNoString('No string at 0x{:08X}'.format(ea)) # depends on [control=['if'], data=[]] return string
def register_exception(self, task, raised_exception, exception_details, event_details=None):
    """ :meth:`.WSimpleTrackerStorage.register_exception` method implementation """
    if self.record_exception() is True:
        record = WSimpleTrackerStorage.ExceptionRecord(
            task, raised_exception, exception_details, event_details=event_details
        )
        self.__store_record(record)
def function[register_exception, parameter[self, task, raised_exception, exception_details, event_details]]: constant[ :meth:`.WSimpleTrackerStorage.register_exception` method implementation ] if compare[call[name[self].record_exception, parameter[]] is constant[True]] begin[:] variable[record] assign[=] call[name[WSimpleTrackerStorage].ExceptionRecord, parameter[name[task], name[raised_exception], name[exception_details]]] call[name[self].__store_record, parameter[name[record]]]
keyword[def] identifier[register_exception] ( identifier[self] , identifier[task] , identifier[raised_exception] , identifier[exception_details] , identifier[event_details] = keyword[None] ): literal[string] keyword[if] identifier[self] . identifier[record_exception] () keyword[is] keyword[True] : identifier[record] = identifier[WSimpleTrackerStorage] . identifier[ExceptionRecord] ( identifier[task] , identifier[raised_exception] , identifier[exception_details] , identifier[event_details] = identifier[event_details] ) identifier[self] . identifier[__store_record] ( identifier[record] )
def register_exception(self, task, raised_exception, exception_details, event_details=None): """ :meth:`.WSimpleTrackerStorage.register_exception` method implementation """ if self.record_exception() is True: record = WSimpleTrackerStorage.ExceptionRecord(task, raised_exception, exception_details, event_details=event_details) self.__store_record(record) # depends on [control=['if'], data=[]]
def draw_sample(num_samples, num_classes, logits, num_trials, dtype, seed):
    """Sample a multinomial.

    The batch shape is given by broadcasting num_trials with
    remove_last_dimension(logits).

    Args:
        num_samples: Python int or singleton integer Tensor: number of
            multinomial samples to draw.
        num_classes: Python int or singleton integer Tensor: number of classes.
        logits: Floating Tensor with last dimension k, of (unnormalized) logit
            probabilities per class.
        num_trials: Tensor of number of categorical trials each multinomial
            consists of.  num_trials[..., tf.newaxis] must broadcast with logits.
        dtype: dtype at which to emit samples.
        seed: Random seed.

    Returns:
        samples: Tensor of given dtype and shape [n] + batch_shape + [k].
    """
    with tf.name_scope("multinomial.draw_sample"):
        # broadcast the num_trials and logits to same shape
        num_trials = tf.ones_like(
            logits[..., 0], dtype=num_trials.dtype) * num_trials
        logits = tf.ones_like(
            num_trials[..., tf.newaxis], dtype=logits.dtype) * logits

        # flatten the total_count and logits
        # flat_logits has shape [B1B2...Bm, num_classes]
        flat_logits = tf.reshape(logits, [-1, num_classes])
        flat_num_trials = num_samples * tf.reshape(num_trials, [-1])  # [B1B2...Bm]

        # Computes each logits and num_trials situation by map_fn.

        # Using just one batch tf.random.categorical call doesn't work because
        # that requires num_trials to be the same across all members of the
        # batch of logits. This restriction makes sense for
        # tf.random.categorical because for it, num_trials is part of the
        # returned shape. However, the multinomial sampler does not need that
        # restriction, because it sums out exactly that dimension.

        # One possibility would be to draw a batch categorical whose sample
        # count is max(num_trials) and mask out the excess ones. However, if
        # the elements of num_trials vary widely, this can be wasteful of
        # memory.

        # TODO(b/123763054, b/112152209): Revisit the possibility of writing
        # this with a batch categorical followed by batch unsorted_segment_sum,
        # once both of those work and are memory-efficient enough.
        def _sample_one_batch_member(args):
            logits, num_cat_samples = args[0], args[1]  # [K], []
            # x has shape [1, num_cat_samples = num_samples * num_trials]
            x = tf.random.categorical(
                logits[tf.newaxis, ...], num_cat_samples, seed=seed)
            x = tf.reshape(x, shape=[num_samples, -1])  # [num_samples, num_trials]
            x = tf.one_hot(
                x, depth=num_classes)  # [num_samples, num_trials, num_classes]
            x = tf.reduce_sum(input_tensor=x, axis=-2)  # [num_samples, num_classes]
            return tf.cast(x, dtype=dtype)

        x = tf.map_fn(
            _sample_one_batch_member, [flat_logits, flat_num_trials],
            dtype=dtype)  # [B1B2...Bm, num_samples, num_classes]

        # reshape the results to proper shape
        x = tf.transpose(a=x, perm=[1, 0, 2])
        final_shape = tf.concat([[num_samples], tf.shape(input=num_trials),
                                 [num_classes]], axis=0)
        x = tf.reshape(x, final_shape)

        return x
def function[draw_sample, parameter[num_samples, num_classes, logits, num_trials, dtype, seed]]: constant[Sample a multinomial. The batch shape is given by broadcasting num_trials with remove_last_dimension(logits). Args: num_samples: Python int or singleton integer Tensor: number of multinomial samples to draw. num_classes: Python int or singleton integer Tensor: number of classes. logits: Floating Tensor with last dimension k, of (unnormalized) logit probabilities per class. num_trials: Tensor of number of categorical trials each multinomial consists of. num_trials[..., tf.newaxis] must broadcast with logits. dtype: dtype at which to emit samples. seed: Random seed. Returns: samples: Tensor of given dtype and shape [n] + batch_shape + [k]. ] with call[name[tf].name_scope, parameter[constant[multinomial.draw_sample]]] begin[:] variable[num_trials] assign[=] binary_operation[call[name[tf].ones_like, parameter[call[name[logits]][tuple[[<ast.Constant object at 0x7da20e961450>, <ast.Constant object at 0x7da20e960af0>]]]]] * name[num_trials]] variable[logits] assign[=] binary_operation[call[name[tf].ones_like, parameter[call[name[num_trials]][tuple[[<ast.Constant object at 0x7da20e960fa0>, <ast.Attribute object at 0x7da20e963280>]]]]] * name[logits]] variable[flat_logits] assign[=] call[name[tf].reshape, parameter[name[logits], list[[<ast.UnaryOp object at 0x7da20e963f70>, <ast.Name object at 0x7da20e960070>]]]] variable[flat_num_trials] assign[=] binary_operation[name[num_samples] * call[name[tf].reshape, parameter[name[num_trials], list[[<ast.UnaryOp object at 0x7da20e9601c0>]]]]] def function[_sample_one_batch_member, parameter[args]]: <ast.Tuple object at 0x7da20e9626e0> assign[=] tuple[[<ast.Subscript object at 0x7da20e960b50>, <ast.Subscript object at 0x7da20e961f00>]] variable[x] assign[=] call[name[tf].random.categorical, parameter[call[name[logits]][tuple[[<ast.Attribute object at 0x7da20e960c10>, <ast.Constant object at 0x7da20e9619f0>]]], name[num_cat_samples]]] variable[x] assign[=] call[name[tf].reshape, parameter[name[x]]] variable[x] assign[=] call[name[tf].one_hot, parameter[name[x]]] variable[x] assign[=] call[name[tf].reduce_sum, parameter[]] return[call[name[tf].cast, parameter[name[x]]]] variable[x] assign[=] call[name[tf].map_fn, parameter[name[_sample_one_batch_member], list[[<ast.Name object at 0x7da1b05bd3f0>, <ast.Name object at 0x7da1b05be3e0>]]]] variable[x] assign[=] call[name[tf].transpose, parameter[]] variable[final_shape] assign[=] call[name[tf].concat, parameter[list[[<ast.List object at 0x7da1b05bf160>, <ast.Call object at 0x7da1b05be860>, <ast.List object at 0x7da1b05bf9d0>]]]] variable[x] assign[=] call[name[tf].reshape, parameter[name[x], name[final_shape]]] return[name[x]]
keyword[def] identifier[draw_sample] ( identifier[num_samples] , identifier[num_classes] , identifier[logits] , identifier[num_trials] , identifier[dtype] , identifier[seed] ): literal[string] keyword[with] identifier[tf] . identifier[name_scope] ( literal[string] ): identifier[num_trials] = identifier[tf] . identifier[ones_like] ( identifier[logits] [..., literal[int] ], identifier[dtype] = identifier[num_trials] . identifier[dtype] )* identifier[num_trials] identifier[logits] = identifier[tf] . identifier[ones_like] ( identifier[num_trials] [..., identifier[tf] . identifier[newaxis] ], identifier[dtype] = identifier[logits] . identifier[dtype] )* identifier[logits] identifier[flat_logits] = identifier[tf] . identifier[reshape] ( identifier[logits] ,[- literal[int] , identifier[num_classes] ]) identifier[flat_num_trials] = identifier[num_samples] * identifier[tf] . identifier[reshape] ( identifier[num_trials] ,[- literal[int] ]) keyword[def] identifier[_sample_one_batch_member] ( identifier[args] ): identifier[logits] , identifier[num_cat_samples] = identifier[args] [ literal[int] ], identifier[args] [ literal[int] ] identifier[x] = identifier[tf] . identifier[random] . identifier[categorical] ( identifier[logits] [ identifier[tf] . identifier[newaxis] ,...], identifier[num_cat_samples] , identifier[seed] = identifier[seed] ) identifier[x] = identifier[tf] . identifier[reshape] ( identifier[x] , identifier[shape] =[ identifier[num_samples] ,- literal[int] ]) identifier[x] = identifier[tf] . identifier[one_hot] ( identifier[x] , identifier[depth] = identifier[num_classes] ) identifier[x] = identifier[tf] . identifier[reduce_sum] ( identifier[input_tensor] = identifier[x] , identifier[axis] =- literal[int] ) keyword[return] identifier[tf] . identifier[cast] ( identifier[x] , identifier[dtype] = identifier[dtype] ) identifier[x] = identifier[tf] . identifier[map_fn] ( identifier[_sample_one_batch_member] ,[ identifier[flat_logits] , identifier[flat_num_trials] ], identifier[dtype] = identifier[dtype] ) identifier[x] = identifier[tf] . identifier[transpose] ( identifier[a] = identifier[x] , identifier[perm] =[ literal[int] , literal[int] , literal[int] ]) identifier[final_shape] = identifier[tf] . identifier[concat] ([[ identifier[num_samples] ], identifier[tf] . identifier[shape] ( identifier[input] = identifier[num_trials] ),[ identifier[num_classes] ]], identifier[axis] = literal[int] ) identifier[x] = identifier[tf] . identifier[reshape] ( identifier[x] , identifier[final_shape] ) keyword[return] identifier[x]
def draw_sample(num_samples, num_classes, logits, num_trials, dtype, seed): """Sample a multinomial. The batch shape is given by broadcasting num_trials with remove_last_dimension(logits). Args: num_samples: Python int or singleton integer Tensor: number of multinomial samples to draw. num_classes: Python int or singleton integer Tensor: number of classes. logits: Floating Tensor with last dimension k, of (unnormalized) logit probabilities per class. num_trials: Tensor of number of categorical trials each multinomial consists of. num_trials[..., tf.newaxis] must broadcast with logits. dtype: dtype at which to emit samples. seed: Random seed. Returns: samples: Tensor of given dtype and shape [n] + batch_shape + [k]. """ with tf.name_scope('multinomial.draw_sample'): # broadcast the num_trials and logits to same shape num_trials = tf.ones_like(logits[..., 0], dtype=num_trials.dtype) * num_trials logits = tf.ones_like(num_trials[..., tf.newaxis], dtype=logits.dtype) * logits # flatten the total_count and logits # flat_logits has shape [B1B2...Bm, num_classes] flat_logits = tf.reshape(logits, [-1, num_classes]) flat_num_trials = num_samples * tf.reshape(num_trials, [-1]) # [B1B2...Bm] # Computes each logits and num_trials situation by map_fn. # Using just one batch tf.random.categorical call doesn't work because that # requires num_trials to be the same across all members of the batch of # logits. This restriction makes sense for tf.random.categorical because # for it, num_trials is part of the returned shape. However, the # multinomial sampler does not need that restriction, because it sums out # exactly that dimension. # One possibility would be to draw a batch categorical whose sample count is # max(num_trials) and mask out the excess ones. However, if the elements of # num_trials vary widely, this can be wasteful of memory. # TODO(b/123763054, b/112152209): Revisit the possibility of writing this # with a batch categorical followed by batch unsorted_segment_sum, once both # of those work and are memory-efficient enough. def _sample_one_batch_member(args): (logits, num_cat_samples) = (args[0], args[1]) # [K], [] # x has shape [1, num_cat_samples = num_samples * num_trials] x = tf.random.categorical(logits[tf.newaxis, ...], num_cat_samples, seed=seed) x = tf.reshape(x, shape=[num_samples, -1]) # [num_samples, num_trials] x = tf.one_hot(x, depth=num_classes) # [num_samples, num_trials, num_classes] x = tf.reduce_sum(input_tensor=x, axis=-2) # [num_samples, num_classes] return tf.cast(x, dtype=dtype) x = tf.map_fn(_sample_one_batch_member, [flat_logits, flat_num_trials], dtype=dtype) # [B1B2...Bm, num_samples, num_classes] # reshape the results to proper shape x = tf.transpose(a=x, perm=[1, 0, 2]) final_shape = tf.concat([[num_samples], tf.shape(input=num_trials), [num_classes]], axis=0) x = tf.reshape(x, final_shape) return x # depends on [control=['with'], data=[]]
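
The core trick in `draw_sample` — drawing categorical samples, one-hot encoding them, and summing over the trial axis to get multinomial counts — is easy to see in plain NumPy. A sketch for a single batch member (1-D `logits`), for intuition only; it is not the TensorFlow implementation above.

import numpy as np

def draw_sample_np(num_samples, num_classes, logits, num_trials, rng=None):
    rng = np.random.default_rng() if rng is None else rng
    # Softmax the (unnormalized) logits into class probabilities.
    probs = np.exp(logits - logits.max())
    probs /= probs.sum()
    # Categorical draws: one class index per (sample, trial).
    draws = rng.choice(num_classes, size=(num_samples, num_trials), p=probs)
    # One-hot encode, then sum out the trial axis to get per-class counts.
    one_hot = np.eye(num_classes)[draws]  # [num_samples, num_trials, k]
    return one_hot.sum(axis=-2)           # [num_samples, k]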
def get_data(self, cache=True, as_text=False, parse_form_data=False):
    """This reads the buffered incoming data from the client into one
    bytestring.  By default this is cached but that behavior can be
    changed by setting `cache` to `False`.

    Usually it's a bad idea to call this method without checking the
    content length first as a client could send dozens of megabytes or more
    to cause memory problems on the server.

    Note that if the form data was already parsed this method will not
    return anything as form data parsing does not cache the data like
    this method does.  To implicitly invoke the form data parsing function
    set `parse_form_data` to `True`.  When this is done the return value
    of this method will be an empty string if the form parser handles
    the data.  This generally is not necessary as if the whole data is
    cached (which is the default) the form parser will use the cached
    data to parse the form data.  Please be generally aware of checking
    the content length first in any case before calling this method
    to avoid exhausting server memory.

    If `as_text` is set to `True` the return value will be a decoded
    unicode string.

    .. versionadded:: 0.9
    """
    rv = getattr(self, '_cached_data', None)
    if rv is None:
        if parse_form_data:
            self._load_form_data()
        rv = self.stream.read()
        if cache:
            self._cached_data = rv
    if as_text:
        rv = rv.decode(self.charset, self.encoding_errors)
    return rv
def function[get_data, parameter[self, cache, as_text, parse_form_data]]: constant[This reads the buffered incoming data from the client into one bytestring. By default this is cached but that behavior can be changed by setting `cache` to `False`. Usually it's a bad idea to call this method without checking the content length first as a client could send dozens of megabytes or more to cause memory problems on the server. Note that if the form data was already parsed this method will not return anything as form data parsing does not cache the data like this method does. To implicitly invoke form data parsing function set `parse_form_data` to `True`. When this is done the return value of this method will be an empty string if the form parser handles the data. This generally is not necessary as if the whole data is cached (which is the default) the form parser will used the cached data to parse the form data. Please be generally aware of checking the content length first in any case before calling this method to avoid exhausting server memory. If `as_text` is set to `True` the return value will be a decoded unicode string. .. versionadded:: 0.9 ] variable[rv] assign[=] call[name[getattr], parameter[name[self], constant[_cached_data], constant[None]]] if compare[name[rv] is constant[None]] begin[:] if name[parse_form_data] begin[:] call[name[self]._load_form_data, parameter[]] variable[rv] assign[=] call[name[self].stream.read, parameter[]] if name[cache] begin[:] name[self]._cached_data assign[=] name[rv] if name[as_text] begin[:] variable[rv] assign[=] call[name[rv].decode, parameter[name[self].charset, name[self].encoding_errors]] return[name[rv]]
keyword[def] identifier[get_data] ( identifier[self] , identifier[cache] = keyword[True] , identifier[as_text] = keyword[False] , identifier[parse_form_data] = keyword[False] ): literal[string] identifier[rv] = identifier[getattr] ( identifier[self] , literal[string] , keyword[None] ) keyword[if] identifier[rv] keyword[is] keyword[None] : keyword[if] identifier[parse_form_data] : identifier[self] . identifier[_load_form_data] () identifier[rv] = identifier[self] . identifier[stream] . identifier[read] () keyword[if] identifier[cache] : identifier[self] . identifier[_cached_data] = identifier[rv] keyword[if] identifier[as_text] : identifier[rv] = identifier[rv] . identifier[decode] ( identifier[self] . identifier[charset] , identifier[self] . identifier[encoding_errors] ) keyword[return] identifier[rv]
def get_data(self, cache=True, as_text=False, parse_form_data=False): """This reads the buffered incoming data from the client into one bytestring. By default this is cached but that behavior can be changed by setting `cache` to `False`. Usually it's a bad idea to call this method without checking the content length first as a client could send dozens of megabytes or more to cause memory problems on the server. Note that if the form data was already parsed this method will not return anything as form data parsing does not cache the data like this method does. To implicitly invoke form data parsing function set `parse_form_data` to `True`. When this is done the return value of this method will be an empty string if the form parser handles the data. This generally is not necessary as if the whole data is cached (which is the default) the form parser will used the cached data to parse the form data. Please be generally aware of checking the content length first in any case before calling this method to avoid exhausting server memory. If `as_text` is set to `True` the return value will be a decoded unicode string. .. versionadded:: 0.9 """ rv = getattr(self, '_cached_data', None) if rv is None: if parse_form_data: self._load_form_data() # depends on [control=['if'], data=[]] rv = self.stream.read() if cache: self._cached_data = rv # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['rv']] if as_text: rv = rv.decode(self.charset, self.encoding_errors) # depends on [control=['if'], data=[]] return rv
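
Because `get_data` buffers the entire body, callers typically gate it on the declared content length first, as the docstring advises. A hedged usage sketch (the `request` object and `MAX_BYTES` limit are illustrative):

MAX_BYTES = 1024 * 1024
if request.content_length is not None and request.content_length <= MAX_BYTES:
    body = request.get_data(as_text=True)  # decoded and cached for later reuse
else:
    body = None  # reject or stream instead of buffering an oversized payload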
def extract_angular(fileobj, keywords, comment_tags, options):
    """Extract messages from angular template (HTML) files that use the
    angular-gettext translate directive as per
    https://angular-gettext.rocketeer.be/ .

    :param fileobj: the file-like object the messages should be extracted
                    from
    :param keywords: This is a standard parameter so it is accepted but
                     ignored.
    :param comment_tags: This is a standard parameter so it is accepted but
                         ignored.
    :param options: Another standard parameter that is accepted but ignored.
    :return: an iterator over ``(lineno, funcname, message, comments)``
             tuples
    :rtype: ``iterator``

    This particular extractor is quite simple because it is intended to only
    deal with angular templates which do not need comments, or the more
    complicated forms of translations.

    A later version will address pluralization.
    """
    if keywords:
        logging.debug('Parameter keywords ignored.')
    if comment_tags:
        logging.debug('Parameter comment_tags ignored.')
    if options:
        logging.debug('Parameter options ignored.')

    parser = AngularGettextHTMLParser()

    for line in fileobj:
        if not isinstance(line, str):
            line = line.decode(locale.getpreferredencoding())
        parser.feed(line)

    for string in parser.strings:
        yield string
def function[extract_angular, parameter[fileobj, keywords, comment_tags, options]]: constant[Extract messages from angular template (HTML) files that use the angular-gettext translate directive as per https://angular-gettext.rocketeer.be/ . :param fileobj: the file-like object the messages should be extracted from :param keywords: This is a standard parameter so it is accepted but ignored. :param comment_tags: This is a standard parameter so it is accepted but ignored. :param options: Another standard parameter that is accepted but ignored. :return: an iterator over ``(lineno, funcname, message, comments)`` tuples :rtype: ``iterator`` This particular extractor is quite simple because it is intended to only deal with angular templates which do not need comments, or the more complicated forms of translations. A later version will address pluralization. ] if name[keywords] begin[:] call[name[logging].debug, parameter[constant[Parameter keywords ignored.]]] if name[comment_tags] begin[:] call[name[logging].debug, parameter[constant[Parameter comment_tags ignored.]]] if name[options] begin[:] call[name[logging].debug, parameter[constant[Parameter options ignored.]]] variable[parser] assign[=] call[name[AngularGettextHTMLParser], parameter[]] for taget[name[line]] in starred[name[fileobj]] begin[:] if <ast.UnaryOp object at 0x7da18fe90070> begin[:] variable[line] assign[=] call[name[line].decode, parameter[call[name[locale].getpreferredencoding, parameter[]]]] call[name[parser].feed, parameter[name[line]]] for taget[name[string]] in starred[name[parser].strings] begin[:] <ast.Yield object at 0x7da18fe90760>
keyword[def] identifier[extract_angular] ( identifier[fileobj] , identifier[keywords] , identifier[comment_tags] , identifier[options] ): literal[string] keyword[if] identifier[keywords] : identifier[logging] . identifier[debug] ( literal[string] ) keyword[if] identifier[comment_tags] : identifier[logging] . identifier[debug] ( literal[string] ) keyword[if] identifier[options] : identifier[logging] . identifier[debug] ( literal[string] ) identifier[parser] = identifier[AngularGettextHTMLParser] () keyword[for] identifier[line] keyword[in] identifier[fileobj] : keyword[if] keyword[not] identifier[isinstance] ( identifier[line] , identifier[str] ): identifier[line] = identifier[line] . identifier[decode] ( identifier[locale] . identifier[getpreferredencoding] ()) identifier[parser] . identifier[feed] ( identifier[line] ) keyword[for] identifier[string] keyword[in] identifier[parser] . identifier[strings] : keyword[yield] ( identifier[string] )
def extract_angular(fileobj, keywords, comment_tags, options): """Extract messages from angular template (HTML) files that use the angular-gettext translate directive as per https://angular-gettext.rocketeer.be/ . :param fileobj: the file-like object the messages should be extracted from :param keywords: This is a standard parameter so it is accepted but ignored. :param comment_tags: This is a standard parameter so it is accepted but ignored. :param options: Another standard parameter that is accepted but ignored. :return: an iterator over ``(lineno, funcname, message, comments)`` tuples :rtype: ``iterator`` This particular extractor is quite simple because it is intended to only deal with angular templates which do not need comments, or the more complicated forms of translations. A later version will address pluralization. """ if keywords: logging.debug('Parameter keywords ignored.') # depends on [control=['if'], data=[]] if comment_tags: logging.debug('Parameter comment_tags ignored.') # depends on [control=['if'], data=[]] if options: logging.debug('Parameter options ignored.') # depends on [control=['if'], data=[]] parser = AngularGettextHTMLParser() for line in fileobj: if not isinstance(line, str): line = line.decode(locale.getpreferredencoding()) # depends on [control=['if'], data=[]] parser.feed(line) # depends on [control=['for'], data=['line']] for string in parser.strings: yield string # depends on [control=['for'], data=['string']]
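
Since the extractor only needs a file-like object, it can be exercised against an in-memory template. A hedged sketch (the template text is invented; the `translate` directive follows angular-gettext):

import io

template = io.StringIO(u'<div translate>Hello, world!</div>')
for message in extract_angular(template, keywords=None,
                               comment_tags=None, options=None):
    print(message)  # each item is a (lineno, funcname, message, comments) tuple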
def apps(self):
    """
    Dictionary with loaded applications.
    """
    logger.debug("initialize applications ...")
    enabled = None
    apps = self.args.apps or self._config_apps.keys()
    unknown = set(apps) - set(self._config_apps.keys())
    if unknown:
        raise LogRaptorArgumentError("--apps", "not found apps %r" % list(unknown))
    if apps or enabled is None:
        return {k: v for k, v in self._config_apps.items() if k in apps}
    else:
        return {k: v for k, v in self._config_apps.items()
                if k in apps and v.enabled == enabled}
def function[apps, parameter[self]]: constant[ Dictionary with loaded applications. ] call[name[logger].debug, parameter[constant[initialize applications ...]]] variable[enabled] assign[=] constant[None] variable[apps] assign[=] <ast.BoolOp object at 0x7da20c6a9ba0> variable[unknown] assign[=] binary_operation[call[name[set], parameter[name[apps]]] - call[name[set], parameter[call[name[self]._config_apps.keys, parameter[]]]]] if name[unknown] begin[:] <ast.Raise object at 0x7da20c6a8a90> if <ast.BoolOp object at 0x7da20c6a8df0> begin[:] return[<ast.DictComp object at 0x7da20c6a8e20>]
keyword[def] identifier[apps] ( identifier[self] ): literal[string] identifier[logger] . identifier[debug] ( literal[string] ) identifier[enabled] = keyword[None] identifier[apps] = identifier[self] . identifier[args] . identifier[apps] keyword[or] identifier[self] . identifier[_config_apps] . identifier[keys] () identifier[unknown] = identifier[set] ( identifier[apps] )- identifier[set] ( identifier[self] . identifier[_config_apps] . identifier[keys] ()) keyword[if] identifier[unknown] : keyword[raise] identifier[LogRaptorArgumentError] ( literal[string] , literal[string] % identifier[list] ( identifier[unknown] )) keyword[if] identifier[apps] keyword[or] identifier[enabled] keyword[is] keyword[None] : keyword[return] { identifier[k] : identifier[v] keyword[for] identifier[k] , identifier[v] keyword[in] identifier[self] . identifier[_config_apps] . identifier[items] () keyword[if] identifier[k] keyword[in] identifier[apps] } keyword[else] : keyword[return] { identifier[k] : identifier[v] keyword[for] identifier[k] , identifier[v] keyword[in] identifier[self] . identifier[_config_apps] . identifier[items] () keyword[if] identifier[k] keyword[in] identifier[apps] keyword[and] identifier[v] . identifier[enabled] == identifier[enabled] }
def apps(self): """ Dictionary with loaded applications. """ logger.debug('initialize applications ...') enabled = None apps = self.args.apps or self._config_apps.keys() unknown = set(apps) - set(self._config_apps.keys()) if unknown: raise LogRaptorArgumentError('--apps', 'not found apps %r' % list(unknown)) # depends on [control=['if'], data=[]] if apps or enabled is None: return {k: v for (k, v) in self._config_apps.items() if k in apps} # depends on [control=['if'], data=[]] else: return {k: v for (k, v) in self._config_apps.items() if k in apps and v.enabled == enabled}
def cmd_fw_manifest_purge(self):
    '''remove all downloaded manifests'''
    for filepath in self.find_manifests():
        os.unlink(filepath)
    self.manifests_parse()
def function[cmd_fw_manifest_purge, parameter[self]]: constant[remove all downloaded manifests] for taget[name[filepath]] in starred[call[name[self].find_manifests, parameter[]]] begin[:] call[name[os].unlink, parameter[name[filepath]]] call[name[self].manifests_parse, parameter[]]
keyword[def] identifier[cmd_fw_manifest_purge] ( identifier[self] ): literal[string] keyword[for] identifier[filepath] keyword[in] identifier[self] . identifier[find_manifests] (): identifier[os] . identifier[unlink] ( identifier[filepath] ) identifier[self] . identifier[manifests_parse] ()
def cmd_fw_manifest_purge(self): """remove all downloaded manifests""" for filepath in self.find_manifests(): os.unlink(filepath) # depends on [control=['for'], data=['filepath']] self.manifests_parse()
def bdd_common_after_scenario(context_or_world, scenario, status):
    """Clean method that will be executed after each scenario in behave or lettuce

    :param context_or_world: behave context or lettuce world
    :param scenario: running scenario
    :param status: scenario status (passed, failed or skipped)
    """
    if status == 'skipped':
        return
    elif status == 'passed':
        test_status = 'Pass'
        test_comment = None
        context_or_world.logger.info("The scenario '%s' has passed", scenario.name)
    else:
        test_status = 'Fail'
        test_comment = "The scenario '%s' has failed" % scenario.name
        context_or_world.logger.error("The scenario '%s' has failed", scenario.name)
        context_or_world.global_status['test_passed'] = False

    # Close drivers
    DriverWrappersPool.close_drivers(scope='function', test_name=scenario.name,
                                     test_passed=status == 'passed',
                                     context=context_or_world)

    # Save test status to be updated later
    add_jira_status(get_jira_key_from_scenario(scenario), test_status, test_comment)
def function[bdd_common_after_scenario, parameter[context_or_world, scenario, status]]: constant[Clean method that will be executed after each scenario in behave or lettuce :param context_or_world: behave context or lettuce world :param scenario: running scenario :param status: scenario status (passed, failed or skipped) ] if compare[name[status] equal[==] constant[skipped]] begin[:] return[None] call[name[DriverWrappersPool].close_drivers, parameter[]] call[name[add_jira_status], parameter[call[name[get_jira_key_from_scenario], parameter[name[scenario]]], name[test_status], name[test_comment]]]
keyword[def] identifier[bdd_common_after_scenario] ( identifier[context_or_world] , identifier[scenario] , identifier[status] ): literal[string] keyword[if] identifier[status] == literal[string] : keyword[return] keyword[elif] identifier[status] == literal[string] : identifier[test_status] = literal[string] identifier[test_comment] = keyword[None] identifier[context_or_world] . identifier[logger] . identifier[info] ( literal[string] , identifier[scenario] . identifier[name] ) keyword[else] : identifier[test_status] = literal[string] identifier[test_comment] = literal[string] % identifier[scenario] . identifier[name] identifier[context_or_world] . identifier[logger] . identifier[error] ( literal[string] , identifier[scenario] . identifier[name] ) identifier[context_or_world] . identifier[global_status] [ literal[string] ]= keyword[False] identifier[DriverWrappersPool] . identifier[close_drivers] ( identifier[scope] = literal[string] , identifier[test_name] = identifier[scenario] . identifier[name] , identifier[test_passed] = identifier[status] == literal[string] , identifier[context] = identifier[context_or_world] ) identifier[add_jira_status] ( identifier[get_jira_key_from_scenario] ( identifier[scenario] ), identifier[test_status] , identifier[test_comment] )
def bdd_common_after_scenario(context_or_world, scenario, status): """Clean method that will be executed after each scenario in behave or lettuce :param context_or_world: behave context or lettuce world :param scenario: running scenario :param status: scenario status (passed, failed or skipped) """ if status == 'skipped': return # depends on [control=['if'], data=[]] elif status == 'passed': test_status = 'Pass' test_comment = None context_or_world.logger.info("The scenario '%s' has passed", scenario.name) # depends on [control=['if'], data=[]] else: test_status = 'Fail' test_comment = "The scenario '%s' has failed" % scenario.name context_or_world.logger.error("The scenario '%s' has failed", scenario.name) context_or_world.global_status['test_passed'] = False # Close drivers DriverWrappersPool.close_drivers(scope='function', test_name=scenario.name, test_passed=status == 'passed', context=context_or_world) # Save test status to be updated later add_jira_status(get_jira_key_from_scenario(scenario), test_status, test_comment)
def _stroke_simplification(self, pointlist):
    """The Douglas-Peucker line simplification takes a list of points as an
    argument. It tries to simplify this list by removing as many points
    as possible while still maintaining the overall shape of the stroke.
    It does so by taking the first and the last point, connecting them
    by a straight line and searching for the point with the highest
    distance. If that distance is bigger than 'epsilon', the point is
    important and the algorithm continues recursively."""

    # Find the point with the biggest distance
    dmax = 0
    index = 0
    for i in range(1, len(pointlist)):
        d = geometry.perpendicular_distance(pointlist[i], pointlist[0],
                                            pointlist[-1])
        if d > dmax:
            index = i
            dmax = d

    # If the maximum distance is bigger than the threshold 'epsilon', then
    # simplify the pointlist recursively
    if dmax >= self.epsilon:
        # Recursive call
        rec_results1 = self._stroke_simplification(pointlist[0:index])
        rec_results2 = self._stroke_simplification(pointlist[index:])
        result_list = rec_results1[:-1] + rec_results2
    else:
        result_list = [pointlist[0], pointlist[-1]]

    return result_list
def function[_stroke_simplification, parameter[self, pointlist]]: constant[The Douglas-Peucker line simplification takes a list of points as an argument. It tries to simplifiy this list by removing as many points as possible while still maintaining the overall shape of the stroke. It does so by taking the first and the last point, connecting them by a straight line and searchin for the point with the highest distance. If that distance is bigger than 'epsilon', the point is important and the algorithm continues recursively.] variable[dmax] assign[=] constant[0] variable[index] assign[=] constant[0] for taget[name[i]] in starred[call[name[range], parameter[constant[1], call[name[len], parameter[name[pointlist]]]]]] begin[:] variable[d] assign[=] call[name[geometry].perpendicular_distance, parameter[call[name[pointlist]][name[i]], call[name[pointlist]][constant[0]], call[name[pointlist]][<ast.UnaryOp object at 0x7da1b287db10>]]] if compare[name[d] greater[>] name[dmax]] begin[:] variable[index] assign[=] name[i] variable[dmax] assign[=] name[d] if compare[name[dmax] greater_or_equal[>=] name[self].epsilon] begin[:] variable[rec_results1] assign[=] call[name[self]._stroke_simplification, parameter[call[name[pointlist]][<ast.Slice object at 0x7da1b287ec20>]]] variable[rec_results2] assign[=] call[name[self]._stroke_simplification, parameter[call[name[pointlist]][<ast.Slice object at 0x7da1b287f1f0>]]] variable[result_list] assign[=] binary_operation[call[name[rec_results1]][<ast.Slice object at 0x7da1b287df30>] + name[rec_results2]] return[name[result_list]]
keyword[def] identifier[_stroke_simplification] ( identifier[self] , identifier[pointlist] ): literal[string] identifier[dmax] = literal[int] identifier[index] = literal[int] keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , identifier[len] ( identifier[pointlist] )): identifier[d] = identifier[geometry] . identifier[perpendicular_distance] ( identifier[pointlist] [ identifier[i] ], identifier[pointlist] [ literal[int] ], identifier[pointlist] [- literal[int] ]) keyword[if] identifier[d] > identifier[dmax] : identifier[index] = identifier[i] identifier[dmax] = identifier[d] keyword[if] identifier[dmax] >= identifier[self] . identifier[epsilon] : identifier[rec_results1] = identifier[self] . identifier[_stroke_simplification] ( identifier[pointlist] [ literal[int] : identifier[index] ]) identifier[rec_results2] = identifier[self] . identifier[_stroke_simplification] ( identifier[pointlist] [ identifier[index] :]) identifier[result_list] = identifier[rec_results1] [:- literal[int] ]+ identifier[rec_results2] keyword[else] : identifier[result_list] =[ identifier[pointlist] [ literal[int] ], identifier[pointlist] [- literal[int] ]] keyword[return] identifier[result_list]
def _stroke_simplification(self, pointlist): """The Douglas-Peucker line simplification takes a list of points as an argument. It tries to simplifiy this list by removing as many points as possible while still maintaining the overall shape of the stroke. It does so by taking the first and the last point, connecting them by a straight line and searchin for the point with the highest distance. If that distance is bigger than 'epsilon', the point is important and the algorithm continues recursively.""" # Find the point with the biggest distance dmax = 0 index = 0 for i in range(1, len(pointlist)): d = geometry.perpendicular_distance(pointlist[i], pointlist[0], pointlist[-1]) if d > dmax: index = i dmax = d # depends on [control=['if'], data=['d', 'dmax']] # depends on [control=['for'], data=['i']] # If the maximum distance is bigger than the threshold 'epsilon', then # simplify the pointlist recursively if dmax >= self.epsilon: # Recursive call rec_results1 = self._stroke_simplification(pointlist[0:index]) rec_results2 = self._stroke_simplification(pointlist[index:]) result_list = rec_results1[:-1] + rec_results2 # depends on [control=['if'], data=[]] else: result_list = [pointlist[0], pointlist[-1]] return result_list
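
The method above leans on an external `geometry.perpendicular_distance` helper. For reference, here is a self-contained sketch of the same algorithm with the point-to-line distance computed inline; the helper's exact signature in the source library is assumed, and points are taken to be 2-D tuples.

import math

def perpendicular_distance(p, start, end):
    """Distance from point p to the line through start and end."""
    (x, y), (x1, y1), (x2, y2) = p, start, end
    dx, dy = x2 - x1, y2 - y1
    norm = math.hypot(dx, dy)
    if norm == 0:  # degenerate segment: fall back to point-to-point distance
        return math.hypot(x - x1, y - y1)
    return abs(dy * x - dx * y + x2 * y1 - y2 * x1) / norm

def douglas_peucker(points, epsilon):
    # Find the interior point farthest from the end-to-end line.
    dmax, index = 0, 0
    for i in range(1, len(points) - 1):
        d = perpendicular_distance(points[i], points[0], points[-1])
        if d > dmax:
            index, dmax = i, d
    if dmax >= epsilon:
        # Keep the farthest point and simplify both halves recursively.
        left = douglas_peucker(points[:index + 1], epsilon)
        right = douglas_peucker(points[index:], epsilon)
        return left[:-1] + right
    return [points[0], points[-1]]

# douglas_peucker([(0, 0), (1, 0.05), (2, 0), (3, 0), (4, 0)], 0.5)
# -> [(0, 0), (4, 0)]: the near-collinear interior points are dropped.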
def setSectionCount(self, count):
    """
    Sets the number of editors that the serial widget should have.

    :param      count | <int>
    """
    # cap the sections at 10
    count = max(1, min(count, 10))

    # create additional editors
    while self.layout().count() < count:
        editor = XLineEdit(self)
        editor.setFont(self.font())
        editor.setReadOnly(self.isReadOnly())
        editor.setHint(self.hint())
        editor.setAlignment(QtCore.Qt.AlignCenter)
        editor.installEventFilter(self)
        editor.setSizePolicy(QtGui.QSizePolicy.Expanding,
                             QtGui.QSizePolicy.Expanding)
        editor.setMaxLength(self.sectionLength())
        editor.returnPressed.connect(self.returnPressed)
        self.layout().addWidget(editor)

    # remove unnecessary editors
    while count < self.layout().count():
        widget = self.layout().itemAt(0).widget()
        widget.close()
        widget.setParent(None)
        widget.deleteLater()
def function[setSectionCount, parameter[self, count]]: constant[ Sets the number of editors that the serial widget should have. :param count | <int> ] variable[count] assign[=] call[name[max], parameter[constant[1], call[name[min], parameter[name[count], constant[10]]]]] while compare[call[call[name[self].layout, parameter[]].count, parameter[]] less[<] name[count]] begin[:] variable[editor] assign[=] call[name[XLineEdit], parameter[name[self]]] call[name[editor].setFont, parameter[call[name[self].font, parameter[]]]] call[name[editor].setReadOnly, parameter[call[name[self].isReadOnly, parameter[]]]] call[name[editor].setHint, parameter[call[name[self].hint, parameter[]]]] call[name[editor].setAlignment, parameter[name[QtCore].Qt.AlignCenter]] call[name[editor].installEventFilter, parameter[name[self]]] call[name[editor].setSizePolicy, parameter[name[QtGui].QSizePolicy.Expanding, name[QtGui].QSizePolicy.Expanding]] call[name[editor].setMaxLength, parameter[call[name[self].sectionLength, parameter[]]]] call[name[editor].returnPressed.connect, parameter[name[self].returnPressed]] call[call[name[self].layout, parameter[]].addWidget, parameter[name[editor]]] while compare[name[count] less[<] call[call[name[self].layout, parameter[]].count, parameter[]]] begin[:] variable[widget] assign[=] call[call[call[name[self].layout, parameter[]].itemAt, parameter[constant[0]]].widget, parameter[]] call[name[widget].close, parameter[]] call[name[widget].setParent, parameter[constant[None]]] call[name[widget].deleteLater, parameter[]]
keyword[def] identifier[setSectionCount] ( identifier[self] , identifier[count] ): literal[string] identifier[count] = identifier[max] ( literal[int] , identifier[min] ( identifier[count] , literal[int] )) keyword[while] identifier[self] . identifier[layout] (). identifier[count] ()< identifier[count] : identifier[editor] = identifier[XLineEdit] ( identifier[self] ) identifier[editor] . identifier[setFont] ( identifier[self] . identifier[font] ()) identifier[editor] . identifier[setReadOnly] ( identifier[self] . identifier[isReadOnly] ()) identifier[editor] . identifier[setHint] ( identifier[self] . identifier[hint] ()) identifier[editor] . identifier[setAlignment] ( identifier[QtCore] . identifier[Qt] . identifier[AlignCenter] ) identifier[editor] . identifier[installEventFilter] ( identifier[self] ) identifier[editor] . identifier[setSizePolicy] ( identifier[QtGui] . identifier[QSizePolicy] . identifier[Expanding] , identifier[QtGui] . identifier[QSizePolicy] . identifier[Expanding] ) identifier[editor] . identifier[setMaxLength] ( identifier[self] . identifier[sectionLength] ()) identifier[editor] . identifier[returnPressed] . identifier[connect] ( identifier[self] . identifier[returnPressed] ) identifier[self] . identifier[layout] (). identifier[addWidget] ( identifier[editor] ) keyword[while] identifier[count] < identifier[self] . identifier[layout] (). identifier[count] (): identifier[widget] = identifier[self] . identifier[layout] (). identifier[itemAt] ( literal[int] ). identifier[widget] () identifier[widget] . identifier[close] () identifier[widget] . identifier[setParent] ( keyword[None] ) identifier[widget] . identifier[deleteLater] ()
def setSectionCount(self, count): """ Sets the number of editors that the serial widget should have. :param count | <int> """ # cap the sections at 10 count = max(1, min(count, 10)) # create additional editors while self.layout().count() < count: editor = XLineEdit(self) editor.setFont(self.font()) editor.setReadOnly(self.isReadOnly()) editor.setHint(self.hint()) editor.setAlignment(QtCore.Qt.AlignCenter) editor.installEventFilter(self) editor.setSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding) editor.setMaxLength(self.sectionLength()) editor.returnPressed.connect(self.returnPressed) self.layout().addWidget(editor) # depends on [control=['while'], data=[]] # remove unnecessary editors while count < self.layout().count(): widget = self.layout().itemAt(0).widget() widget.close() widget.setParent(None) widget.deleteLater() # depends on [control=['while'], data=[]]
def makedir(dir_name): """ "Strong" directory maker. "Strong" version of `os.mkdir`. If `dir_name` already exists, this deletes it first. **Parameters** **dir_name** : string Path to a file directory that may or may not already exist. **See Also:** :func:`tabular.io.delete`, `os <http://docs.python.org/library/os.html>`_ """ if os.path.exists(dir_name): delete(dir_name) os.mkdir(dir_name)
def function[makedir, parameter[dir_name]]: constant[ "Strong" directory maker. "Strong" version of `os.mkdir`. If `dir_name` already exists, this deletes it first. **Parameters** **dir_name** : string Path to a file directory that may or may not already exist. **See Also:** :func:`tabular.io.delete`, `os <http://docs.python.org/library/os.html>`_ ] if call[name[os].path.exists, parameter[name[dir_name]]] begin[:] call[name[delete], parameter[name[dir_name]]] call[name[os].mkdir, parameter[name[dir_name]]]
keyword[def] identifier[makedir] ( identifier[dir_name] ): literal[string] keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[dir_name] ): identifier[delete] ( identifier[dir_name] ) identifier[os] . identifier[mkdir] ( identifier[dir_name] )
def makedir(dir_name): """ "Strong" directory maker. "Strong" version of `os.mkdir`. If `dir_name` already exists, this deletes it first. **Parameters** **dir_name** : string Path to a file directory that may or may not already exist. **See Also:** :func:`tabular.io.delete`, `os <http://docs.python.org/library/os.html>`_ """ if os.path.exists(dir_name): delete(dir_name) # depends on [control=['if'], data=[]] os.mkdir(dir_name)
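A quick usage sketch of the "strong" behavior: the directory is recreated from scratch, so stale contents disappear. The library's own delete() helper is approximated here with shutil.rmtree.

import os, shutil

def makedir(dir_name):
    # Recreate dir_name from scratch, deleting any existing copy first.
    if os.path.exists(dir_name):
        shutil.rmtree(dir_name)
    os.mkdir(dir_name)

makedir('scratch')
open(os.path.join('scratch', 'stale.txt'), 'w').close()
makedir('scratch')            # wipes and recreates the directory
print(os.listdir('scratch'))  # [] -- the stale file is gone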
def get_arguments(self): """ Extracts the specific arguments of this CLI """ AlarmModify.get_arguments(self) self._alarm_id = self.args.alarm_id if self.args.alarm_id is not None else None self.get_api_parameters()
def function[get_arguments, parameter[self]]: constant[ Extracts the specific arguments of this CLI ] call[name[AlarmModify].get_arguments, parameter[name[self]]] name[self]._alarm_id assign[=] <ast.IfExp object at 0x7da1b0146470> call[name[self].get_api_parameters, parameter[]]
keyword[def] identifier[get_arguments] ( identifier[self] ): literal[string] identifier[AlarmModify] . identifier[get_arguments] ( identifier[self] ) identifier[self] . identifier[_alarm_id] = identifier[self] . identifier[args] . identifier[alarm_id] keyword[if] identifier[self] . identifier[args] . identifier[alarm_id] keyword[is] keyword[not] keyword[None] keyword[else] keyword[None] identifier[self] . identifier[get_api_parameters] ()
def get_arguments(self): """ Extracts the specific arguments of this CLI """ AlarmModify.get_arguments(self) self._alarm_id = self.args.alarm_id if self.args.alarm_id is not None else None self.get_api_parameters()
def _dict_to_obj(self, d): """Converts a dictionary of json object to a Python object.""" if JsonEncoder.TYPE_ID not in d: return d type_name = d.pop(JsonEncoder.TYPE_ID) if type_name in _TYPE_NAME_TO_DECODER: decoder = _TYPE_NAME_TO_DECODER[type_name] return decoder(d) else: raise TypeError("Invalid type %s." % type_name)
def function[_dict_to_obj, parameter[self, d]]: constant[Converts a dictionary of json object to a Python object.] if compare[name[JsonEncoder].TYPE_ID <ast.NotIn object at 0x7da2590d7190> name[d]] begin[:] return[name[d]] variable[type_name] assign[=] call[name[d].pop, parameter[name[JsonEncoder].TYPE_ID]] if compare[name[type_name] in name[_TYPE_NAME_TO_DECODER]] begin[:] variable[decoder] assign[=] call[name[_TYPE_NAME_TO_DECODER]][name[type_name]] return[call[name[decoder], parameter[name[d]]]]
keyword[def] identifier[_dict_to_obj] ( identifier[self] , identifier[d] ): literal[string] keyword[if] identifier[JsonEncoder] . identifier[TYPE_ID] keyword[not] keyword[in] identifier[d] : keyword[return] identifier[d] identifier[type_name] = identifier[d] . identifier[pop] ( identifier[JsonEncoder] . identifier[TYPE_ID] ) keyword[if] identifier[type_name] keyword[in] identifier[_TYPE_NAME_TO_DECODER] : identifier[decoder] = identifier[_TYPE_NAME_TO_DECODER] [ identifier[type_name] ] keyword[return] identifier[decoder] ( identifier[d] ) keyword[else] : keyword[raise] identifier[TypeError] ( literal[string] % identifier[type_name] )
def _dict_to_obj(self, d): """Converts a dictionary of json object to a Python object.""" if JsonEncoder.TYPE_ID not in d: return d # depends on [control=['if'], data=['d']] type_name = d.pop(JsonEncoder.TYPE_ID) if type_name in _TYPE_NAME_TO_DECODER: decoder = _TYPE_NAME_TO_DECODER[type_name] return decoder(d) # depends on [control=['if'], data=['type_name', '_TYPE_NAME_TO_DECODER']] else: raise TypeError('Invalid type %s.' % type_name)
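The decoder table and TYPE_ID key live elsewhere in that module; the sketch below re-creates the whole round trip under assumed names (a '__type__' tag and a complex-number codec) to show how such a hook plugs into json.loads.

import json

TYPE_ID = '__type__'
_TYPE_NAME_TO_DECODER = {
    # Illustrative codec: {"__type__": "complex", "real": r, "imag": i}
    'complex': lambda d: complex(d['real'], d['imag']),
}

def dict_to_obj(d):
    if TYPE_ID not in d:
        return d
    type_name = d.pop(TYPE_ID)
    if type_name in _TYPE_NAME_TO_DECODER:
        return _TYPE_NAME_TO_DECODER[type_name](d)
    raise TypeError('Invalid type %s.' % type_name)

# object_hook runs on every decoded dict, innermost first.
payload = '{"z": {"__type__": "complex", "real": 1.0, "imag": 2.0}}'
obj = json.loads(payload, object_hook=dict_to_obj)
print(obj['z'])  # (1+2j)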
def getHighOrderSequenceChunk(it, switchover=1000, w=40, n=2048): """ Given an iteration index, returns a list of vectors to be appended to the input stream, as well as a string label identifying the sequence. This version generates a bunch of high order sequences. The first element always provides sufficient context to predict the rest of the elements. After switchover iterations, it will generate a different set of sequences. """ if it%10==3: s = numpy.random.randint(5) if it <= switchover: if s==0: label="XABCDE" elif s==1: label="YCBEAF" elif s==2: label="GHIJKL" elif s==3: label="WABCMN" else: label="ZDBCAE" else: if s==0: label="XCBEAF" elif s==1: label="YABCDE" elif s==2: label="GABCMN" elif s==3: label="WHIJKL" else: label="ZDHICF" vecs = letterSequence(label) else: vecs= [getRandomVector(w, n)] label="." return vecs,label
def function[getHighOrderSequenceChunk, parameter[it, switchover, w, n]]: constant[ Given an iteration index, returns a list of vectors to be appended to the input stream, as well as a string label identifying the sequence. This version generates a bunch of high order sequences. The first element always provides sufficient context to predict the rest of the elements. After switchover iterations, it will generate a different set of sequences. ] if compare[binary_operation[name[it] <ast.Mod object at 0x7da2590d6920> constant[10]] equal[==] constant[3]] begin[:] variable[s] assign[=] call[name[numpy].random.randint, parameter[constant[5]]] if compare[name[it] less_or_equal[<=] name[switchover]] begin[:] if compare[name[s] equal[==] constant[0]] begin[:] variable[label] assign[=] constant[XABCDE] variable[vecs] assign[=] call[name[letterSequence], parameter[name[label]]] return[tuple[[<ast.Name object at 0x7da1b0925540>, <ast.Name object at 0x7da1b09251e0>]]]
keyword[def] identifier[getHighOrderSequenceChunk] ( identifier[it] , identifier[switchover] = literal[int] , identifier[w] = literal[int] , identifier[n] = literal[int] ): literal[string] keyword[if] identifier[it] % literal[int] == literal[int] : identifier[s] = identifier[numpy] . identifier[random] . identifier[randint] ( literal[int] ) keyword[if] identifier[it] <= identifier[switchover] : keyword[if] identifier[s] == literal[int] : identifier[label] = literal[string] keyword[elif] identifier[s] == literal[int] : identifier[label] = literal[string] keyword[elif] identifier[s] == literal[int] : identifier[label] = literal[string] keyword[elif] identifier[s] == literal[int] : identifier[label] = literal[string] keyword[else] : identifier[label] = literal[string] keyword[else] : keyword[if] identifier[s] == literal[int] : identifier[label] = literal[string] keyword[elif] identifier[s] == literal[int] : identifier[label] = literal[string] keyword[elif] identifier[s] == literal[int] : identifier[label] = literal[string] keyword[elif] identifier[s] == literal[int] : identifier[label] = literal[string] keyword[else] : identifier[label] = literal[string] identifier[vecs] = identifier[letterSequence] ( identifier[label] ) keyword[else] : identifier[vecs] =[ identifier[getRandomVector] ( identifier[w] , identifier[n] )] identifier[label] = literal[string] keyword[return] identifier[vecs] , identifier[label]
def getHighOrderSequenceChunk(it, switchover=1000, w=40, n=2048): """ Given an iteration index, returns a list of vectors to be appended to the input stream, as well as a string label identifying the sequence. This version generates a bunch of high order sequences. The first element always provides sufficient context to predict the rest of the elements. After switchover iterations, it will generate a different set of sequences. """ if it % 10 == 3: s = numpy.random.randint(5) if it <= switchover: if s == 0: label = 'XABCDE' # depends on [control=['if'], data=[]] elif s == 1: label = 'YCBEAF' # depends on [control=['if'], data=[]] elif s == 2: label = 'GHIJKL' # depends on [control=['if'], data=[]] elif s == 3: label = 'WABCMN' # depends on [control=['if'], data=[]] else: label = 'ZDBCAE' # depends on [control=['if'], data=[]] elif s == 0: label = 'XCBEAF' # depends on [control=['if'], data=[]] elif s == 1: label = 'YABCDE' # depends on [control=['if'], data=[]] elif s == 2: label = 'GABCMN' # depends on [control=['if'], data=[]] elif s == 3: label = 'WHIJKL' # depends on [control=['if'], data=[]] else: label = 'ZDHICF' vecs = letterSequence(label) # depends on [control=['if'], data=[]] else: vecs = [getRandomVector(w, n)] label = '.' return (vecs, label)
def generate_py_abilities(data): """Generate the list of functions in actions.py.""" def print_action(func_id, name, func, ab_id, general_id): args = [func_id, '"%s"' % name, func, ab_id] if general_id: args.append(general_id) print(" Function.ability(%s)," % ", ".join(str(v) for v in args)) func_ids = itertools.count(12) # Leave room for the ui funcs. for ability in sorted(six.itervalues(data.abilities), key=lambda a: sort_key(data, a)): ab_id = ability.ability_id if ab_id in skip_abilities or (ab_id not in data.general_abilities and ab_id not in used_abilities): continue name = generate_name(ability).replace(" ", "_") if ability.target in (sc_data.AbilityData.Target.Value("None"), sc_data.AbilityData.PointOrNone): print_action(next(func_ids), name + "_quick", "cmd_quick", ab_id, ability.remaps_to_ability_id) if ability.target != sc_data.AbilityData.Target.Value("None"): print_action(next(func_ids), name+ "_screen", "cmd_screen", ab_id, ability.remaps_to_ability_id) if ability.allow_minimap: print_action(next(func_ids), name + "_minimap", "cmd_minimap", ab_id, ability.remaps_to_ability_id) if ability.allow_autocast: print_action(next(func_ids), name + "_autocast", "autocast", ab_id, ability.remaps_to_ability_id)
def function[generate_py_abilities, parameter[data]]: constant[Generate the list of functions in actions.py.] def function[print_action, parameter[func_id, name, func, ab_id, general_id]]: variable[args] assign[=] list[[<ast.Name object at 0x7da2041d8f10>, <ast.BinOp object at 0x7da2041d90f0>, <ast.Name object at 0x7da2041db100>, <ast.Name object at 0x7da2041d8d30>]] if name[general_id] begin[:] call[name[args].append, parameter[name[general_id]]] call[name[print], parameter[binary_operation[constant[ Function.ability(%s),] <ast.Mod object at 0x7da2590d6920> call[constant[, ].join, parameter[<ast.GeneratorExp object at 0x7da2041dac80>]]]]] variable[func_ids] assign[=] call[name[itertools].count, parameter[constant[12]]] for taget[name[ability]] in starred[call[name[sorted], parameter[call[name[six].itervalues, parameter[name[data].abilities]]]]] begin[:] variable[ab_id] assign[=] name[ability].ability_id if <ast.BoolOp object at 0x7da2041db1c0> begin[:] continue variable[name] assign[=] call[call[name[generate_name], parameter[name[ability]]].replace, parameter[constant[ ], constant[_]]] if compare[name[ability].target in tuple[[<ast.Call object at 0x7da2041d9930>, <ast.Attribute object at 0x7da2041d8df0>]]] begin[:] call[name[print_action], parameter[call[name[next], parameter[name[func_ids]]], binary_operation[name[name] + constant[_quick]], constant[cmd_quick], name[ab_id], name[ability].remaps_to_ability_id]] if compare[name[ability].target not_equal[!=] call[name[sc_data].AbilityData.Target.Value, parameter[constant[None]]]] begin[:] call[name[print_action], parameter[call[name[next], parameter[name[func_ids]]], binary_operation[name[name] + constant[_screen]], constant[cmd_screen], name[ab_id], name[ability].remaps_to_ability_id]] if name[ability].allow_minimap begin[:] call[name[print_action], parameter[call[name[next], parameter[name[func_ids]]], binary_operation[name[name] + constant[_minimap]], constant[cmd_minimap], name[ab_id], name[ability].remaps_to_ability_id]] if name[ability].allow_autocast begin[:] call[name[print_action], parameter[call[name[next], parameter[name[func_ids]]], binary_operation[name[name] + constant[_autocast]], constant[autocast], name[ab_id], name[ability].remaps_to_ability_id]]
keyword[def] identifier[generate_py_abilities] ( identifier[data] ): literal[string] keyword[def] identifier[print_action] ( identifier[func_id] , identifier[name] , identifier[func] , identifier[ab_id] , identifier[general_id] ): identifier[args] =[ identifier[func_id] , literal[string] % identifier[name] , identifier[func] , identifier[ab_id] ] keyword[if] identifier[general_id] : identifier[args] . identifier[append] ( identifier[general_id] ) identifier[print] ( literal[string] % literal[string] . identifier[join] ( identifier[str] ( identifier[v] ) keyword[for] identifier[v] keyword[in] identifier[args] )) identifier[func_ids] = identifier[itertools] . identifier[count] ( literal[int] ) keyword[for] identifier[ability] keyword[in] identifier[sorted] ( identifier[six] . identifier[itervalues] ( identifier[data] . identifier[abilities] ), identifier[key] = keyword[lambda] identifier[a] : identifier[sort_key] ( identifier[data] , identifier[a] )): identifier[ab_id] = identifier[ability] . identifier[ability_id] keyword[if] identifier[ab_id] keyword[in] identifier[skip_abilities] keyword[or] ( identifier[ab_id] keyword[not] keyword[in] identifier[data] . identifier[general_abilities] keyword[and] identifier[ab_id] keyword[not] keyword[in] identifier[used_abilities] ): keyword[continue] identifier[name] = identifier[generate_name] ( identifier[ability] ). identifier[replace] ( literal[string] , literal[string] ) keyword[if] identifier[ability] . identifier[target] keyword[in] ( identifier[sc_data] . identifier[AbilityData] . identifier[Target] . identifier[Value] ( literal[string] ), identifier[sc_data] . identifier[AbilityData] . identifier[PointOrNone] ): identifier[print_action] ( identifier[next] ( identifier[func_ids] ), identifier[name] + literal[string] , literal[string] , identifier[ab_id] , identifier[ability] . identifier[remaps_to_ability_id] ) keyword[if] identifier[ability] . identifier[target] != identifier[sc_data] . identifier[AbilityData] . identifier[Target] . identifier[Value] ( literal[string] ): identifier[print_action] ( identifier[next] ( identifier[func_ids] ), identifier[name] + literal[string] , literal[string] , identifier[ab_id] , identifier[ability] . identifier[remaps_to_ability_id] ) keyword[if] identifier[ability] . identifier[allow_minimap] : identifier[print_action] ( identifier[next] ( identifier[func_ids] ), identifier[name] + literal[string] , literal[string] , identifier[ab_id] , identifier[ability] . identifier[remaps_to_ability_id] ) keyword[if] identifier[ability] . identifier[allow_autocast] : identifier[print_action] ( identifier[next] ( identifier[func_ids] ), identifier[name] + literal[string] , literal[string] , identifier[ab_id] , identifier[ability] . identifier[remaps_to_ability_id] )
def generate_py_abilities(data): """Generate the list of functions in actions.py.""" def print_action(func_id, name, func, ab_id, general_id): args = [func_id, '"%s"' % name, func, ab_id] if general_id: args.append(general_id) # depends on [control=['if'], data=[]] print(' Function.ability(%s),' % ', '.join((str(v) for v in args))) func_ids = itertools.count(12) # Leave room for the ui funcs. for ability in sorted(six.itervalues(data.abilities), key=lambda a: sort_key(data, a)): ab_id = ability.ability_id if ab_id in skip_abilities or (ab_id not in data.general_abilities and ab_id not in used_abilities): continue # depends on [control=['if'], data=[]] name = generate_name(ability).replace(' ', '_') if ability.target in (sc_data.AbilityData.Target.Value('None'), sc_data.AbilityData.PointOrNone): print_action(next(func_ids), name + '_quick', 'cmd_quick', ab_id, ability.remaps_to_ability_id) # depends on [control=['if'], data=[]] if ability.target != sc_data.AbilityData.Target.Value('None'): print_action(next(func_ids), name + '_screen', 'cmd_screen', ab_id, ability.remaps_to_ability_id) if ability.allow_minimap: print_action(next(func_ids), name + '_minimap', 'cmd_minimap', ab_id, ability.remaps_to_ability_id) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] if ability.allow_autocast: print_action(next(func_ids), name + '_autocast', 'autocast', ab_id, ability.remaps_to_ability_id) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['ability']]
def _build_google_client(service, api_version, http_auth): """ Google build client helper. :param service: service to build client for :type service: ``str`` :param api_version: API version to use. :type api_version: ``str`` :param http_auth: Initialized HTTP client to use. :type http_auth: ``object`` :return: google-python-api client initialized to use 'service' :rtype: ``object`` """ client = build(service, api_version, http=http_auth) return client
def function[_build_google_client, parameter[service, api_version, http_auth]]: constant[ Google build client helper. :param service: service to build client for :type service: ``str`` :param api_version: API version to use. :type api_version: ``str`` :param http_auth: Initialized HTTP client to use. :type http_auth: ``object`` :return: google-python-api client initialized to use 'service' :rtype: ``object`` ] variable[client] assign[=] call[name[build], parameter[name[service], name[api_version]]] return[name[client]]
keyword[def] identifier[_build_google_client] ( identifier[service] , identifier[api_version] , identifier[http_auth] ): literal[string] identifier[client] = identifier[build] ( identifier[service] , identifier[api_version] , identifier[http] = identifier[http_auth] ) keyword[return] identifier[client]
def _build_google_client(service, api_version, http_auth): """ Google build client helper. :param service: service to build client for :type service: ``str`` :param api_version: API version to use. :type api_version: ``str`` :param http_auth: Initialized HTTP client to use. :type http_auth: ``object`` :return: google-python-api client initialized to use 'service' :rtype: ``object`` """ client = build(service, api_version, http=http_auth) return client
def sum(a, axis=-1): """Sum TT-vector over specified axes""" d = a.d crs = _vector.vector.to_list(a.tt if isinstance(a, _matrix.matrix) else a) if axis < 0: axis = range(a.d) elif isinstance(axis, int): axis = [axis] axis = list(axis)[::-1] for ax in axis: crs[ax] = _np.sum(crs[ax], axis=1) rleft, rright = crs[ax].shape if (rleft >= rright or rleft < rright and ax + 1 >= d) and ax > 0: crs[ax - 1] = _np.tensordot(crs[ax - 1], crs[ax], axes=(2, 0)) elif ax + 1 < d: crs[ax + 1] = _np.tensordot(crs[ax], crs[ax + 1], axes=(1, 0)) else: return _np.sum(crs[ax]) crs.pop(ax) d -= 1 return _vector.vector.from_list(crs)
def function[sum, parameter[a, axis]]: constant[Sum TT-vector over specified axes] variable[d] assign[=] name[a].d variable[crs] assign[=] call[name[_vector].vector.to_list, parameter[<ast.IfExp object at 0x7da20c6c6b60>]] if compare[name[axis] less[<] constant[0]] begin[:] variable[axis] assign[=] call[name[range], parameter[name[a].d]] variable[axis] assign[=] call[call[name[list], parameter[name[axis]]]][<ast.Slice object at 0x7da1b06741c0>] for taget[name[ax]] in starred[name[axis]] begin[:] call[name[crs]][name[ax]] assign[=] call[name[_np].sum, parameter[call[name[crs]][name[ax]]]] <ast.Tuple object at 0x7da1b0674fd0> assign[=] call[name[crs]][name[ax]].shape if <ast.BoolOp object at 0x7da1b0675c90> begin[:] call[name[crs]][binary_operation[name[ax] - constant[1]]] assign[=] call[name[_np].tensordot, parameter[call[name[crs]][binary_operation[name[ax] - constant[1]]], call[name[crs]][name[ax]]]] call[name[crs].pop, parameter[name[ax]]] <ast.AugAssign object at 0x7da1b0659540> return[call[name[_vector].vector.from_list, parameter[name[crs]]]]
keyword[def] identifier[sum] ( identifier[a] , identifier[axis] =- literal[int] ): literal[string] identifier[d] = identifier[a] . identifier[d] identifier[crs] = identifier[_vector] . identifier[vector] . identifier[to_list] ( identifier[a] . identifier[tt] keyword[if] identifier[isinstance] ( identifier[a] , identifier[_matrix] . identifier[matrix] ) keyword[else] identifier[a] ) keyword[if] identifier[axis] < literal[int] : identifier[axis] = identifier[range] ( identifier[a] . identifier[d] ) keyword[elif] identifier[isinstance] ( identifier[axis] , identifier[int] ): identifier[axis] =[ identifier[axis] ] identifier[axis] = identifier[list] ( identifier[axis] )[::- literal[int] ] keyword[for] identifier[ax] keyword[in] identifier[axis] : identifier[crs] [ identifier[ax] ]= identifier[_np] . identifier[sum] ( identifier[crs] [ identifier[ax] ], identifier[axis] = literal[int] ) identifier[rleft] , identifier[rright] = identifier[crs] [ identifier[ax] ]. identifier[shape] keyword[if] ( identifier[rleft] >= identifier[rright] keyword[or] identifier[rleft] < identifier[rright] keyword[and] identifier[ax] + literal[int] >= identifier[d] ) keyword[and] identifier[ax] > literal[int] : identifier[crs] [ identifier[ax] - literal[int] ]= identifier[_np] . identifier[tensordot] ( identifier[crs] [ identifier[ax] - literal[int] ], identifier[crs] [ identifier[ax] ], identifier[axes] =( literal[int] , literal[int] )) keyword[elif] identifier[ax] + literal[int] < identifier[d] : identifier[crs] [ identifier[ax] + literal[int] ]= identifier[_np] . identifier[tensordot] ( identifier[crs] [ identifier[ax] ], identifier[crs] [ identifier[ax] + literal[int] ], identifier[axes] =( literal[int] , literal[int] )) keyword[else] : keyword[return] identifier[_np] . identifier[sum] ( identifier[crs] [ identifier[ax] ]) identifier[crs] . identifier[pop] ( identifier[ax] ) identifier[d] -= literal[int] keyword[return] identifier[_vector] . identifier[vector] . identifier[from_list] ( identifier[crs] )
def sum(a, axis=-1): """Sum TT-vector over specified axes""" d = a.d crs = _vector.vector.to_list(a.tt if isinstance(a, _matrix.matrix) else a) if axis < 0: axis = range(a.d) # depends on [control=['if'], data=['axis']] elif isinstance(axis, int): axis = [axis] # depends on [control=['if'], data=[]] axis = list(axis)[::-1] for ax in axis: crs[ax] = _np.sum(crs[ax], axis=1) (rleft, rright) = crs[ax].shape if (rleft >= rright or (rleft < rright and ax + 1 >= d)) and ax > 0: crs[ax - 1] = _np.tensordot(crs[ax - 1], crs[ax], axes=(2, 0)) # depends on [control=['if'], data=[]] elif ax + 1 < d: crs[ax + 1] = _np.tensordot(crs[ax], crs[ax + 1], axes=(1, 0)) # depends on [control=['if'], data=[]] else: return _np.sum(crs[ax]) crs.pop(ax) d -= 1 # depends on [control=['for'], data=['ax']] return _vector.vector.from_list(crs)
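A plain-NumPy sanity check of the identity this sum() exploits: summing a tensor-train over every mode equals chaining the mode-summed cores. No ttpy objects are involved, and the ranks and mode sizes below are arbitrary.

import numpy as np

rng = np.random.default_rng(0)
# Three TT-cores with ranks 1-2-3-1 and mode sizes 4, 5, 6.
cores = [rng.standard_normal((1, 4, 2)),
         rng.standard_normal((2, 5, 3)),
         rng.standard_normal((3, 6, 1))]

# Reconstruct the full tensor and sum it directly.
full = np.einsum('aib,bjc,ckd->ijk', *cores)
direct = full.sum()

# Sum each core over its mode axis, then multiply the small matrices.
acc = np.eye(1)
for core in cores:
    acc = acc @ core.sum(axis=1)

print(np.allclose(direct, acc[0, 0]))  # True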
def load_feedback(): """ Open existing feedback file """ result = {} if os.path.exists(_feedback_file): f = open(_feedback_file, 'r') cont = f.read() f.close() else: cont = '{}' try: result = json.loads(cont) if cont else {} except ValueError as e: result = {"result":"crash", "text":"Feedback file has been modified by user !"} return result
def function[load_feedback, parameter[]]: constant[ Open existing feedback file ] variable[result] assign[=] dictionary[[], []] if call[name[os].path.exists, parameter[name[_feedback_file]]] begin[:] variable[f] assign[=] call[name[open], parameter[name[_feedback_file], constant[r]]] variable[cont] assign[=] call[name[f].read, parameter[]] call[name[f].close, parameter[]] <ast.Try object at 0x7da20c6e4dc0> return[name[result]]
keyword[def] identifier[load_feedback] (): literal[string] identifier[result] ={} keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[_feedback_file] ): identifier[f] = identifier[open] ( identifier[_feedback_file] , literal[string] ) identifier[cont] = identifier[f] . identifier[read] () identifier[f] . identifier[close] () keyword[else] : identifier[cont] = literal[string] keyword[try] : identifier[result] = identifier[json] . identifier[loads] ( identifier[cont] ) keyword[if] identifier[cont] keyword[else] {} keyword[except] identifier[ValueError] keyword[as] identifier[e] : identifier[result] ={ literal[string] : literal[string] , literal[string] : literal[string] } keyword[return] identifier[result]
def load_feedback(): """ Open existing feedback file """ result = {} if os.path.exists(_feedback_file): f = open(_feedback_file, 'r') cont = f.read() f.close() # depends on [control=['if'], data=[]] else: cont = '{}' try: result = json.loads(cont) if cont else {} # depends on [control=['try'], data=[]] except ValueError as e: result = {'result': 'crash', 'text': 'Feedback file has been modified by user !'} # depends on [control=['except'], data=[]] return result
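The same fail-safe pattern in a standalone sketch: a missing file reads as '{}', and unparseable (hand-edited) content degrades to a crash record instead of raising. The file name is illustrative.

import json, os

def load_json_or_crash(path):
    cont = open(path).read() if os.path.exists(path) else '{}'
    try:
        return json.loads(cont) if cont else {}
    except ValueError:
        # Corrupt JSON: report it as data rather than crashing the caller.
        return {"result": "crash",
                "text": "Feedback file has been modified by user!"}

print(load_json_or_crash('missing.json'))  # {}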
def parse(cls, resource, direction="children", **additional_parameters) -> "DtsCollection": """ Given a dict representation of a json object, generate a DTS Collection :param resource: :type resource: dict :param direction: Direction of the hydra:members value :return: DTSCollection parsed :rtype: DtsCollection """ data = jsonld.expand(resource) if len(data) == 0: raise JsonLdCollectionMissing("Missing collection in JSON") data = data[0] obj = cls( identifier=resource["@id"], **additional_parameters ) obj._parse_metadata(data) obj._parse_members(data, direction=direction, **additional_parameters) return obj
def function[parse, parameter[cls, resource, direction]]: constant[ Given a dict representation of a json object, generate a DTS Collection :param resource: :type resource: dict :param direction: Direction of the hydra:members value :return: DTSCollection parsed :rtype: DtsCollection ] variable[data] assign[=] call[name[jsonld].expand, parameter[name[resource]]] if compare[call[name[len], parameter[name[data]]] equal[==] constant[0]] begin[:] <ast.Raise object at 0x7da18bccba00> variable[data] assign[=] call[name[data]][constant[0]] variable[obj] assign[=] call[name[cls], parameter[]] call[name[obj]._parse_metadata, parameter[name[data]]] call[name[obj]._parse_members, parameter[name[data]]] return[name[obj]]
keyword[def] identifier[parse] ( identifier[cls] , identifier[resource] , identifier[direction] = literal[string] ,** identifier[additional_parameters] )-> literal[string] : literal[string] identifier[data] = identifier[jsonld] . identifier[expand] ( identifier[resource] ) keyword[if] identifier[len] ( identifier[data] )== literal[int] : keyword[raise] identifier[JsonLdCollectionMissing] ( literal[string] ) identifier[data] = identifier[data] [ literal[int] ] identifier[obj] = identifier[cls] ( identifier[identifier] = identifier[resource] [ literal[string] ], ** identifier[additional_parameters] ) identifier[obj] . identifier[_parse_metadata] ( identifier[data] ) identifier[obj] . identifier[_parse_members] ( identifier[data] , identifier[direction] = identifier[direction] ,** identifier[additional_parameters] ) keyword[return] identifier[obj]
def parse(cls, resource, direction='children', **additional_parameters) -> 'DtsCollection': """ Given a dict representation of a json object, generate a DTS Collection :param resource: :type resource: dict :param direction: Direction of the hydra:members value :return: DTSCollection parsed :rtype: DtsCollection """ data = jsonld.expand(resource) if len(data) == 0: raise JsonLdCollectionMissing('Missing collection in JSON') # depends on [control=['if'], data=[]] data = data[0] obj = cls(identifier=resource['@id'], **additional_parameters) obj._parse_metadata(data) obj._parse_members(data, direction=direction, **additional_parameters) return obj
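For reference, this is what jsonld.expand does to a compacted document, shown with the pyld library (assumed to be what this code wraps); the vocabulary below is illustrative, not the DTS one.

from pyld import jsonld

doc = {
    "@context": {"title": "http://purl.org/dc/terms/title"},
    "@id": "urn:collection:1",
    "title": "Some collection",
}

# Expansion inlines the context: every key becomes a full IRI and every
# value a list of value objects, which is why parse() reads data[0].
expanded = jsonld.expand(doc)
print(expanded[0]["@id"])                                  # urn:collection:1
print(expanded[0]["http://purl.org/dc/terms/title"])       # [{'@value': 'Some collection'}]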
def login(self, user, password, token=None, callback=None): """Login with a username and password Arguments: user - username or email address password - the password for the account Keyword Arguments: token - meteor resume token callback - callback function containing error as first argument and login data""" # TODO: keep the tokenExpires around so we know the next time # we need to authenticate # hash the password hashed = hashlib.sha256(password).hexdigest() # handle username or email address if '@' in user: user_object = { 'email': user } else: user_object = { 'username': user } password_object = { 'algorithm': 'sha-256', 'digest': hashed } self._login_token = token self._login_data = {'user': user_object, 'password': password_object} if token: self._resume(token, callback=callback) else: self._login(self._login_data, callback=callback)
def function[login, parameter[self, user, password, token, callback]]: constant[Login with a username and password Arguments: user - username or email address password - the password for the account Keyword Arguments: token - meteor resume token callback - callback function containing error as first argument and login data] variable[hashed] assign[=] call[call[name[hashlib].sha256, parameter[name[password]]].hexdigest, parameter[]] if compare[constant[@] in name[user]] begin[:] variable[user_object] assign[=] dictionary[[<ast.Constant object at 0x7da20c990130>], [<ast.Name object at 0x7da20c992950>]] variable[password_object] assign[=] dictionary[[<ast.Constant object at 0x7da20c992a40>, <ast.Constant object at 0x7da20c990eb0>], [<ast.Constant object at 0x7da20c993760>, <ast.Name object at 0x7da20c993640>]] name[self]._login_token assign[=] name[token] name[self]._login_data assign[=] dictionary[[<ast.Constant object at 0x7da20c990670>, <ast.Constant object at 0x7da20c9913f0>], [<ast.Name object at 0x7da20c991330>, <ast.Name object at 0x7da20c993d90>]] if name[token] begin[:] call[name[self]._resume, parameter[name[token]]]
keyword[def] identifier[login] ( identifier[self] , identifier[user] , identifier[password] , identifier[token] = keyword[None] , identifier[callback] = keyword[None] ): literal[string] identifier[hashed] = identifier[hashlib] . identifier[sha256] ( identifier[password] ). identifier[hexdigest] () keyword[if] literal[string] keyword[in] identifier[user] : identifier[user_object] ={ literal[string] : identifier[user] } keyword[else] : identifier[user_object] ={ literal[string] : identifier[user] } identifier[password_object] ={ literal[string] : literal[string] , literal[string] : identifier[hashed] } identifier[self] . identifier[_login_token] = identifier[token] identifier[self] . identifier[_login_data] ={ literal[string] : identifier[user_object] , literal[string] : identifier[password_object] } keyword[if] identifier[token] : identifier[self] . identifier[_resume] ( identifier[token] , identifier[callback] = identifier[callback] ) keyword[else] : identifier[self] . identifier[_login] ( identifier[self] . identifier[_login_data] , identifier[callback] = identifier[callback] )
def login(self, user, password, token=None, callback=None): """Login with a username and password Arguments: user - username or email address password - the password for the account Keyword Arguments: token - meteor resume token callback - callback function containing error as first argument and login data""" # TODO: keep the tokenExpires around so we know the next time # we need to authenticate # hash the password hashed = hashlib.sha256(password).hexdigest() # handle username or email address if '@' in user: user_object = {'email': user} # depends on [control=['if'], data=['user']] else: user_object = {'username': user} password_object = {'algorithm': 'sha-256', 'digest': hashed} self._login_token = token self._login_data = {'user': user_object, 'password': password_object} if token: self._resume(token, callback=callback) # depends on [control=['if'], data=[]] else: self._login(self._login_data, callback=callback)
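The digest side of login() in isolation: the password object is a plain SHA-256 hex digest. The code above is Python-2 era; on Python 3 hashlib.sha256 needs bytes, so this sketch encodes first. The sample password is made up.

import hashlib

def password_object(password):
    # SHA-256 over the UTF-8 bytes of the password, hex-encoded.
    return {
        'algorithm': 'sha-256',
        'digest': hashlib.sha256(password.encode('utf-8')).hexdigest(),
    }

obj = password_object('hunter2')
print(obj['algorithm'], len(obj['digest']))  # sha-256 64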
def DbDeleteDevice(self, argin): """ Delete a device from the database :param argin: device name :type: tango.DevString :return: :rtype: tango.DevVoid """ self._log.debug("In DbDeleteDevice()") ret, dev_name, dfm = check_device_name(argin) if not ret: self.warn_stream("DataBase::db_delete_device(): device name " + argin + " incorrect ") th_exc(DB_IncorrectDeviceName, "failed to delete device, device name incorrect", "DataBase::DeleteDevice()") self.db.delete_device(dev_name)
def function[DbDeleteDevice, parameter[self, argin]]: constant[ Delete a device from the database :param argin: device name :type: tango.DevString :return: :rtype: tango.DevVoid ] call[name[self]._log.debug, parameter[constant[In DbDeleteDevice()]]] <ast.Tuple object at 0x7da18dc06140> assign[=] call[name[check_device_name], parameter[name[argin]]] if <ast.UnaryOp object at 0x7da18dc061a0> begin[:] call[name[self].warn_stream, parameter[binary_operation[binary_operation[constant[DataBase::db_delete_device(): device name ] + name[argin]] + constant[ incorrect ]]]] call[name[th_exc], parameter[name[DB_IncorrectDeviceName], constant[failed to delete device, device name incorrect], constant[DataBase::DeleteDevice()]]] call[name[self].db.delete_device, parameter[name[dev_name]]]
keyword[def] identifier[DbDeleteDevice] ( identifier[self] , identifier[argin] ): literal[string] identifier[self] . identifier[_log] . identifier[debug] ( literal[string] ) identifier[ret] , identifier[dev_name] , identifier[dfm] = identifier[check_device_name] ( identifier[argin] ) keyword[if] keyword[not] identifier[ret] : identifier[self] . identifier[warn_stream] ( literal[string] + identifier[argin] + literal[string] ) identifier[th_exc] ( identifier[DB_IncorrectDeviceName] , literal[string] , literal[string] ) identifier[self] . identifier[db] . identifier[delete_device] ( identifier[dev_name] )
def DbDeleteDevice(self, argin): """ Delete a device from the database :param argin: device name :type: tango.DevString :return: :rtype: tango.DevVoid """ self._log.debug('In DbDeleteDevice()') (ret, dev_name, dfm) = check_device_name(argin) if not ret: self.warn_stream('DataBase::db_delete_device(): device name ' + argin + ' incorrect ') th_exc(DB_IncorrectDeviceName, 'failed to delete device, device name incorrect', 'DataBase::DeleteDevice()') # depends on [control=['if'], data=[]] self.db.delete_device(dev_name)
def get_requests(self): """ Creates product structure and returns the files to download and the folders to create :return: list of download requests and list of folders :rtype: (list(download.DownloadRequest), list) """ safe = self.get_safe_struct() self.download_list = [] self.structure_recursion(safe, self.parent_folder) self.sort_download_list() return self.download_list, self.folder_list
def function[get_requests, parameter[self]]: constant[ Creates product structure and returns the files to download and the folders to create :return: list of download requests and list of folders :rtype: (list(download.DownloadRequest), list) ] variable[safe] assign[=] call[name[self].get_safe_struct, parameter[]] name[self].download_list assign[=] list[[]] call[name[self].structure_recursion, parameter[name[safe], name[self].parent_folder]] call[name[self].sort_download_list, parameter[]] return[tuple[[<ast.Attribute object at 0x7da1b18b5180>, <ast.Attribute object at 0x7da1b18b46a0>]]]
keyword[def] identifier[get_requests] ( identifier[self] ): literal[string] identifier[safe] = identifier[self] . identifier[get_safe_struct] () identifier[self] . identifier[download_list] =[] identifier[self] . identifier[structure_recursion] ( identifier[safe] , identifier[self] . identifier[parent_folder] ) identifier[self] . identifier[sort_download_list] () keyword[return] identifier[self] . identifier[download_list] , identifier[self] . identifier[folder_list]
def get_requests(self): """ Creates product structure and returns the files to download and the folders to create :return: list of download requests and list of folders :rtype: (list(download.DownloadRequest), list) """ safe = self.get_safe_struct() self.download_list = [] self.structure_recursion(safe, self.parent_folder) self.sort_download_list() return (self.download_list, self.folder_list)
def saveWeights(sim): ''' Save the weights for each plastic synapse ''' with open(sim.weightsfilename,'w') as fid: for weightdata in sim.allWeights: fid.write('%0.0f' % weightdata[0]) # Time for i in range(1,len(weightdata)): fid.write('\t%0.8f' % weightdata[i]) fid.write('\n') print(('Saved weights as %s' % sim.weightsfilename))
def function[saveWeights, parameter[sim]]: constant[ Save the weights for each plastic synapse ] with call[name[open], parameter[name[sim].weightsfilename, constant[w]]] begin[:] for taget[name[weightdata]] in starred[name[sim].allWeights] begin[:] call[name[fid].write, parameter[binary_operation[constant[%0.0f] <ast.Mod object at 0x7da2590d6920> call[name[weightdata]][constant[0]]]]] for taget[name[i]] in starred[call[name[range], parameter[constant[1], call[name[len], parameter[name[weightdata]]]]]] begin[:] call[name[fid].write, parameter[binary_operation[constant[ %0.8f] <ast.Mod object at 0x7da2590d6920> call[name[weightdata]][name[i]]]]] call[name[fid].write, parameter[constant[ ]]] call[name[print], parameter[binary_operation[constant[Saved weights as %s] <ast.Mod object at 0x7da2590d6920> name[sim].weightsfilename]]]
keyword[def] identifier[saveWeights] ( identifier[sim] ): literal[string] keyword[with] identifier[open] ( identifier[sim] . identifier[weightsfilename] , literal[string] ) keyword[as] identifier[fid] : keyword[for] identifier[weightdata] keyword[in] identifier[sim] . identifier[allWeights] : identifier[fid] . identifier[write] ( literal[string] % identifier[weightdata] [ literal[int] ]) keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , identifier[len] ( identifier[weightdata] )): identifier[fid] . identifier[write] ( literal[string] % identifier[weightdata] [ identifier[i] ]) identifier[fid] . identifier[write] ( literal[string] ) identifier[print] (( literal[string] % identifier[sim] . identifier[weightsfilename] ))
def saveWeights(sim): """ Save the weights for each plastic synapse """ with open(sim.weightsfilename, 'w') as fid: for weightdata in sim.allWeights: fid.write('%0.0f' % weightdata[0]) # Time for i in range(1, len(weightdata)): fid.write('\t%0.8f' % weightdata[i]) # depends on [control=['for'], data=['i']] fid.write('\n') # depends on [control=['for'], data=['weightdata']] # depends on [control=['with'], data=['fid']] print('Saved weights as %s' % sim.weightsfilename)
def suspend_all(self): """ Suspend all nodes """ pool = Pool(concurrency=3) for node in self.nodes.values(): pool.append(node.suspend) yield from pool.join()
def function[suspend_all, parameter[self]]: constant[ Suspend all nodes ] variable[pool] assign[=] call[name[Pool], parameter[]] for taget[name[node]] in starred[call[name[self].nodes.values, parameter[]]] begin[:] call[name[pool].append, parameter[name[node].suspend]] <ast.YieldFrom object at 0x7da204962a10>
keyword[def] identifier[suspend_all] ( identifier[self] ): literal[string] identifier[pool] = identifier[Pool] ( identifier[concurrency] = literal[int] ) keyword[for] identifier[node] keyword[in] identifier[self] . identifier[nodes] . identifier[values] (): identifier[pool] . identifier[append] ( identifier[node] . identifier[suspend] ) keyword[yield] keyword[from] identifier[pool] . identifier[join] ()
def suspend_all(self): """ Suspend all nodes """ pool = Pool(concurrency=3) for node in self.nodes.values(): pool.append(node.suspend) # depends on [control=['for'], data=['node']] yield from pool.join()
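A rough equivalent with stock asyncio, since the Pool class used above is project-specific: a Semaphore caps concurrency at three, matching Pool(concurrency=3). Node names and the sleep stand-in are illustrative.

import asyncio

async def suspend(node, sem):
    async with sem:                       # at most 3 run concurrently
        await asyncio.sleep(0.1)          # stand-in for node.suspend()
        print('suspended', node)

async def suspend_all(nodes):
    sem = asyncio.Semaphore(3)
    await asyncio.gather(*(suspend(n, sem) for n in nodes))

asyncio.run(suspend_all(['n1', 'n2', 'n3', 'n4', 'n5']))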
def execute_cmdLine_instructions(instructions, m, l): """ Applies the instructions given via <instructions> on the manager <m> """ opt_lut = dict() inst_lut = dict() for k, v in six.iteritems(instructions): bits = k.split('-', 1) if len(bits) == 1: if v not in m.modules: raise KeyError("No such module: %s" % v) inst_lut[bits[0]] = v else: if not bits[0] in opt_lut: opt_lut[bits[0]] = list() opt_lut[bits[0]].append((bits[1], v)) inst_list = sort_by_successors( six.viewkeys(inst_lut), lambda inst: [v for (k, v) in opt_lut.get(inst, ()) if k in m.modules[inst_lut[inst]].deps] ) for k in reversed(tuple(inst_list)): if k in m.insts: raise NotImplementedError( "Overwriting instances not yet supported") settings = dict() if k in opt_lut: for k2, v2 in opt_lut[k]: settings[k2] = v2 m.create_instance(k, inst_lut[k], settings) for k in opt_lut: if k in inst_lut: continue for k2, v2 in opt_lut[k]: if k not in m.insts: raise ValueError("No such instance %s" % k) m.change_setting(k, k2, v2)
def function[execute_cmdLine_instructions, parameter[instructions, m, l]]: constant[ Applies the instructions given via <instructions> on the manager <m> ] variable[opt_lut] assign[=] call[name[dict], parameter[]] variable[inst_lut] assign[=] call[name[dict], parameter[]] for taget[tuple[[<ast.Name object at 0x7da1b1348670>, <ast.Name object at 0x7da1b13486a0>]]] in starred[call[name[six].iteritems, parameter[name[instructions]]]] begin[:] variable[bits] assign[=] call[name[k].split, parameter[constant[-], constant[1]]] if compare[call[name[len], parameter[name[bits]]] equal[==] constant[1]] begin[:] if compare[name[v] <ast.NotIn object at 0x7da2590d7190> name[m].modules] begin[:] <ast.Raise object at 0x7da1b1348af0> call[name[inst_lut]][call[name[bits]][constant[0]]] assign[=] name[v] variable[inst_list] assign[=] call[name[sort_by_successors], parameter[call[name[six].viewkeys, parameter[name[inst_lut]]], <ast.Lambda object at 0x7da1b1349930>]] for taget[name[k]] in starred[call[name[reversed], parameter[call[name[tuple], parameter[name[inst_list]]]]]] begin[:] if compare[name[k] in name[m].insts] begin[:] <ast.Raise object at 0x7da1b1349ff0> variable[settings] assign[=] call[name[dict], parameter[]] if compare[name[k] in name[opt_lut]] begin[:] for taget[tuple[[<ast.Name object at 0x7da1b134a290>, <ast.Name object at 0x7da1b134a2c0>]]] in starred[call[name[opt_lut]][name[k]]] begin[:] call[name[settings]][name[k2]] assign[=] name[v2] call[name[m].create_instance, parameter[name[k], call[name[inst_lut]][name[k]], name[settings]]] for taget[name[k]] in starred[name[opt_lut]] begin[:] if compare[name[k] in name[inst_lut]] begin[:] continue for taget[tuple[[<ast.Name object at 0x7da1b134a860>, <ast.Name object at 0x7da1b134a890>]]] in starred[call[name[opt_lut]][name[k]]] begin[:] if compare[name[k] <ast.NotIn object at 0x7da2590d7190> name[m].insts] begin[:] <ast.Raise object at 0x7da1b134aa40> call[name[m].change_setting, parameter[name[k], name[k2], name[v2]]]
keyword[def] identifier[execute_cmdLine_instructions] ( identifier[instructions] , identifier[m] , identifier[l] ): literal[string] identifier[opt_lut] = identifier[dict] () identifier[inst_lut] = identifier[dict] () keyword[for] identifier[k] , identifier[v] keyword[in] identifier[six] . identifier[iteritems] ( identifier[instructions] ): identifier[bits] = identifier[k] . identifier[split] ( literal[string] , literal[int] ) keyword[if] identifier[len] ( identifier[bits] )== literal[int] : keyword[if] identifier[v] keyword[not] keyword[in] identifier[m] . identifier[modules] : keyword[raise] identifier[KeyError] ( literal[string] % identifier[v] ) identifier[inst_lut] [ identifier[bits] [ literal[int] ]]= identifier[v] keyword[else] : keyword[if] keyword[not] identifier[bits] [ literal[int] ] keyword[in] identifier[opt_lut] : identifier[opt_lut] [ identifier[bits] [ literal[int] ]]= identifier[list] () identifier[opt_lut] [ identifier[bits] [ literal[int] ]]. identifier[append] (( identifier[bits] [ literal[int] ], identifier[v] )) identifier[inst_list] = identifier[sort_by_successors] ( identifier[six] . identifier[viewkeys] ( identifier[inst_lut] ), keyword[lambda] identifier[inst] :[ identifier[v] keyword[for] ( identifier[k] , identifier[v] ) keyword[in] identifier[opt_lut] . identifier[get] ( identifier[inst] ,()) keyword[if] identifier[k] keyword[in] identifier[m] . identifier[modules] [ identifier[inst_lut] [ identifier[inst] ]]. identifier[deps] ] ) keyword[for] identifier[k] keyword[in] identifier[reversed] ( identifier[tuple] ( identifier[inst_list] )): keyword[if] identifier[k] keyword[in] identifier[m] . identifier[insts] : keyword[raise] identifier[NotImplementedError] ( literal[string] ) identifier[settings] = identifier[dict] () keyword[if] identifier[k] keyword[in] identifier[opt_lut] : keyword[for] identifier[k2] , identifier[v2] keyword[in] identifier[opt_lut] [ identifier[k] ]: identifier[settings] [ identifier[k2] ]= identifier[v2] identifier[m] . identifier[create_instance] ( identifier[k] , identifier[inst_lut] [ identifier[k] ], identifier[settings] ) keyword[for] identifier[k] keyword[in] identifier[opt_lut] : keyword[if] identifier[k] keyword[in] identifier[inst_lut] : keyword[continue] keyword[for] identifier[k2] , identifier[v2] keyword[in] identifier[opt_lut] [ identifier[k] ]: keyword[if] identifier[k] keyword[not] keyword[in] identifier[m] . identifier[insts] : keyword[raise] identifier[ValueError] ( literal[string] % identifier[k] ) identifier[m] . identifier[change_setting] ( identifier[k] , identifier[k2] , identifier[v2] )
def execute_cmdLine_instructions(instructions, m, l): """ Applies the instructions given via <instructions> on the manager <m> """ opt_lut = dict() inst_lut = dict() for (k, v) in six.iteritems(instructions): bits = k.split('-', 1) if len(bits) == 1: if v not in m.modules: raise KeyError('No such module: %s' % v) # depends on [control=['if'], data=['v']] inst_lut[bits[0]] = v # depends on [control=['if'], data=[]] else: if not bits[0] in opt_lut: opt_lut[bits[0]] = list() # depends on [control=['if'], data=[]] opt_lut[bits[0]].append((bits[1], v)) # depends on [control=['for'], data=[]] inst_list = sort_by_successors(six.viewkeys(inst_lut), lambda inst: [v for (k, v) in opt_lut.get(inst, ()) if k in m.modules[inst_lut[inst]].deps]) for k in reversed(tuple(inst_list)): if k in m.insts: raise NotImplementedError('Overwriting instances not yet supported') # depends on [control=['if'], data=[]] settings = dict() if k in opt_lut: for (k2, v2) in opt_lut[k]: settings[k2] = v2 # depends on [control=['for'], data=[]] # depends on [control=['if'], data=['k', 'opt_lut']] m.create_instance(k, inst_lut[k], settings) # depends on [control=['for'], data=['k']] for k in opt_lut: if k in inst_lut: continue # depends on [control=['if'], data=[]] for (k2, v2) in opt_lut[k]: if k not in m.insts: raise ValueError('No such instance %s' % k) # depends on [control=['if'], data=['k']] m.change_setting(k, k2, v2) # depends on [control=['for'], data=[]] # depends on [control=['for'], data=['k']]
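The helper sort_by_successors is not shown in this record. A plausible reading, sketched below under that assumption, is a depth-first topological sort that yields each node before its successors, so the manager's reversed(...) walk instantiates dependencies first. The deps mapping is made up, and cycle detection is omitted.

def sort_by_successors(nodes, succ_of):
    # DFS post-order, reversed, lists every node before its successors.
    seen, order = set(), []
    def visit(n):
        if n in seen:
            return
        seen.add(n)
        for s in succ_of(n):
            visit(s)
        order.append(n)                 # successors first, node last
    for n in nodes:
        visit(n)
    return reversed(order)

# 'app' depends on 'db', so the reversed walk creates 'db' first.
deps = {'app': ['db'], 'db': []}
print(list(reversed(tuple(sort_by_successors(['app', 'db'], deps.get)))))
# ['db', 'app']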
def currency(money): """ Currency filter. Formats the price according to the configured number of decimal places and appends the currency symbol. :param money: :return: """ decimals = getattr(settings, 'MIDNIGHT_CATALOG_DECIMALS', 2) money = round(float(money), decimals) symbol = getattr(settings, 'MIDNIGHT_CATALOG_CURRENCY', 'руб') if decimals > 0: formatted = (str("%0."+str(decimals)+"f") % money)[-decimals-1:] else: formatted = "" return "%s%s %s" % (intcomma(int(money)), formatted, symbol)
def function[currency, parameter[money]]: constant[ Currency filter. Formats the price according to the configured number of decimal places and appends the currency symbol. :param money: :return: ] variable[decimals] assign[=] call[name[getattr], parameter[name[settings], constant[MIDNIGHT_CATALOG_DECIMALS], constant[2]]] variable[money] assign[=] call[name[round], parameter[call[name[float], parameter[name[money]]], name[decimals]]] variable[symbol] assign[=] call[name[getattr], parameter[name[settings], constant[MIDNIGHT_CATALOG_CURRENCY], constant[руб]]] if compare[name[decimals] greater[>] constant[0]] begin[:] variable[formatted] assign[=] call[binary_operation[call[name[str], parameter[binary_operation[binary_operation[constant[%0.] + call[name[str], parameter[name[decimals]]]] + constant[f]]]] <ast.Mod object at 0x7da2590d6920> name[money]]][<ast.Slice object at 0x7da2054a7af0>] return[binary_operation[constant[%s%s %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Call object at 0x7da18f813040>, <ast.Name object at 0x7da18f810e20>, <ast.Name object at 0x7da18f8110f0>]]]]
keyword[def] identifier[currency] ( identifier[money] ): literal[string] identifier[decimals] = identifier[getattr] ( identifier[settings] , literal[string] , literal[int] ) identifier[money] = identifier[round] ( identifier[float] ( identifier[money] ), identifier[decimals] ) identifier[symbol] = identifier[getattr] ( identifier[settings] , literal[string] , literal[string] ) keyword[if] identifier[decimals] > literal[int] : identifier[formatted] =( identifier[str] ( literal[string] + identifier[str] ( identifier[decimals] )+ literal[string] )% identifier[money] )[- identifier[decimals] - literal[int] :] keyword[else] : identifier[formatted] = literal[string] keyword[return] literal[string] %( identifier[intcomma] ( identifier[int] ( identifier[money] )), identifier[formatted] , identifier[symbol] )
def currency(money): """ Currency filter. Formats the price according to the configured number of decimal places and appends the currency symbol. :param money: :return: """ decimals = getattr(settings, 'MIDNIGHT_CATALOG_DECIMALS', 2) money = round(float(money), decimals) symbol = getattr(settings, 'MIDNIGHT_CATALOG_CURRENCY', 'руб') if decimals > 0: formatted = (str('%0.' + str(decimals) + 'f') % money)[-decimals - 1:] # depends on [control=['if'], data=['decimals']] else: formatted = '' return '%s%s %s' % (intcomma(int(money)), formatted, symbol)
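The same formatting logic standalone, with Django's intcomma replaced by format(..., ',') and the settings lookups by plain arguments; the defaults mirror the ones above. The slice keeps only the decimal tail (e.g. '.50'), which is then glued to the comma-grouped integer part.

def currency(money, decimals=2, symbol='руб'):
    money = round(float(money), decimals)
    if decimals > 0:
        tail = ('%0.*f' % (decimals, money))[-decimals - 1:]  # e.g. '.50'
    else:
        tail = ''
    return '%s%s %s' % (format(int(money), ','), tail, symbol)

print(currency(1234567.5))  # 1,234,567.50 руб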
def write(self, filename=None): """Write the PE file. This function will process all headers and components of the PE file and include all changes made (by just assigning to attributes in the PE objects) and write the changes back to a file whose name is provided as an argument. The filename is optional, if not provided the data will be returned as a 'str' object. """ file_data = list(self.__data__) for structure in self.__structures__: struct_data = list(structure.__pack__()) offset = structure.get_file_offset() file_data[offset:offset+len(struct_data)] = struct_data if hasattr(self, 'VS_VERSIONINFO'): if hasattr(self, 'FileInfo'): for entry in self.FileInfo: if hasattr(entry, 'StringTable'): for st_entry in entry.StringTable: for key, entry in st_entry.entries.items(): offsets = st_entry.entries_offsets[key] lengths = st_entry.entries_lengths[key] if len( entry ) > lengths[1]: l = list() for idx, c in enumerate(entry): if ord(c) > 256: l.extend( [ chr(ord(c) & 0xff), chr( (ord(c) & 0xff00) >>8) ] ) else: l.extend( [chr( ord(c) ), '\0'] ) file_data[ offsets[1] : offsets[1] + lengths[1]*2 ] = l else: l = list() for idx, c in enumerate(entry): if ord(c) > 256: l.extend( [ chr(ord(c) & 0xff), chr( (ord(c) & 0xff00) >>8) ] ) else: l.extend( [chr( ord(c) ), '\0'] ) file_data[ offsets[1] : offsets[1] + len(entry)*2 ] = l remainder = lengths[1] - len(entry) file_data[ offsets[1] + len(entry)*2 : offsets[1] + lengths[1]*2 ] = [ u'\0' ] * remainder*2 new_file_data = ''.join( [ chr(ord(c)) for c in file_data] ) if filename: f = file(filename, 'wb+') f.write(new_file_data) f.close() else: return new_file_data
def function[write, parameter[self, filename]]: constant[Write the PE file. This function will process all headers and components of the PE file and include all changes made (by just assigning to attributes in the PE objects) and write the changes back to a file whose name is provided as an argument. The filename is optional, if not provided the data will be returned as a 'str' object. ] variable[file_data] assign[=] call[name[list], parameter[name[self].__data__]] for taget[name[structure]] in starred[name[self].__structures__] begin[:] variable[struct_data] assign[=] call[name[list], parameter[call[name[structure].__pack__, parameter[]]]] variable[offset] assign[=] call[name[structure].get_file_offset, parameter[]] call[name[file_data]][<ast.Slice object at 0x7da20c76e440>] assign[=] name[struct_data] if call[name[hasattr], parameter[name[self], constant[VS_VERSIONINFO]]] begin[:] if call[name[hasattr], parameter[name[self], constant[FileInfo]]] begin[:] for taget[name[entry]] in starred[name[self].FileInfo] begin[:] if call[name[hasattr], parameter[name[entry], constant[StringTable]]] begin[:] for taget[name[st_entry]] in starred[name[entry].StringTable] begin[:] for taget[tuple[[<ast.Name object at 0x7da20c76cd00>, <ast.Name object at 0x7da20c76f370>]]] in starred[call[name[st_entry].entries.items, parameter[]]] begin[:] variable[offsets] assign[=] call[name[st_entry].entries_offsets][name[key]] variable[lengths] assign[=] call[name[st_entry].entries_lengths][name[key]] if compare[call[name[len], parameter[name[entry]]] greater[>] call[name[lengths]][constant[1]]] begin[:] variable[l] assign[=] call[name[list], parameter[]] for taget[tuple[[<ast.Name object at 0x7da20c7c9f00>, <ast.Name object at 0x7da18f7216f0>]]] in starred[call[name[enumerate], parameter[name[entry]]]] begin[:] if compare[call[name[ord], parameter[name[c]]] greater[>] constant[256]] begin[:] call[name[l].extend, parameter[list[[<ast.Call object at 0x7da18f723730>, <ast.Call object at 0x7da18f721d80>]]]] call[name[file_data]][<ast.Slice object at 0x7da18f7223e0>] assign[=] name[l] variable[new_file_data] assign[=] call[constant[].join, parameter[<ast.ListComp object at 0x7da1b0ed3ac0>]] if name[filename] begin[:] variable[f] assign[=] call[name[file], parameter[name[filename], constant[wb+]]] call[name[f].write, parameter[name[new_file_data]]] call[name[f].close, parameter[]]
keyword[def] identifier[write] ( identifier[self] , identifier[filename] = keyword[None] ): literal[string] identifier[file_data] = identifier[list] ( identifier[self] . identifier[__data__] ) keyword[for] identifier[structure] keyword[in] identifier[self] . identifier[__structures__] : identifier[struct_data] = identifier[list] ( identifier[structure] . identifier[__pack__] ()) identifier[offset] = identifier[structure] . identifier[get_file_offset] () identifier[file_data] [ identifier[offset] : identifier[offset] + identifier[len] ( identifier[struct_data] )]= identifier[struct_data] keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ): keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ): keyword[for] identifier[entry] keyword[in] identifier[self] . identifier[FileInfo] : keyword[if] identifier[hasattr] ( identifier[entry] , literal[string] ): keyword[for] identifier[st_entry] keyword[in] identifier[entry] . identifier[StringTable] : keyword[for] identifier[key] , identifier[entry] keyword[in] identifier[st_entry] . identifier[entries] . identifier[items] (): identifier[offsets] = identifier[st_entry] . identifier[entries_offsets] [ identifier[key] ] identifier[lengths] = identifier[st_entry] . identifier[entries_lengths] [ identifier[key] ] keyword[if] identifier[len] ( identifier[entry] )> identifier[lengths] [ literal[int] ]: identifier[l] = identifier[list] () keyword[for] identifier[idx] , identifier[c] keyword[in] identifier[enumerate] ( identifier[entry] ): keyword[if] identifier[ord] ( identifier[c] )> literal[int] : identifier[l] . identifier[extend] ([ identifier[chr] ( identifier[ord] ( identifier[c] )& literal[int] ), identifier[chr] (( identifier[ord] ( identifier[c] )& literal[int] )>> literal[int] )]) keyword[else] : identifier[l] . identifier[extend] ([ identifier[chr] ( identifier[ord] ( identifier[c] )), literal[string] ]) identifier[file_data] [ identifier[offsets] [ literal[int] ]: identifier[offsets] [ literal[int] ]+ identifier[lengths] [ literal[int] ]* literal[int] ]= identifier[l] keyword[else] : identifier[l] = identifier[list] () keyword[for] identifier[idx] , identifier[c] keyword[in] identifier[enumerate] ( identifier[entry] ): keyword[if] identifier[ord] ( identifier[c] )> literal[int] : identifier[l] . identifier[extend] ([ identifier[chr] ( identifier[ord] ( identifier[c] )& literal[int] ), identifier[chr] (( identifier[ord] ( identifier[c] )& literal[int] )>> literal[int] )]) keyword[else] : identifier[l] . identifier[extend] ([ identifier[chr] ( identifier[ord] ( identifier[c] )), literal[string] ]) identifier[file_data] [ identifier[offsets] [ literal[int] ]: identifier[offsets] [ literal[int] ]+ identifier[len] ( identifier[entry] )* literal[int] ]= identifier[l] identifier[remainder] = identifier[lengths] [ literal[int] ]- identifier[len] ( identifier[entry] ) identifier[file_data] [ identifier[offsets] [ literal[int] ]+ identifier[len] ( identifier[entry] )* literal[int] : identifier[offsets] [ literal[int] ]+ identifier[lengths] [ literal[int] ]* literal[int] ]=[ literal[string] ]* identifier[remainder] * literal[int] identifier[new_file_data] = literal[string] . identifier[join] ([ identifier[chr] ( identifier[ord] ( identifier[c] )) keyword[for] identifier[c] keyword[in] identifier[file_data] ]) keyword[if] identifier[filename] : identifier[f] = identifier[file] ( identifier[filename] , literal[string] ) identifier[f] . identifier[write] ( identifier[new_file_data] ) identifier[f] . identifier[close] () keyword[else] : keyword[return] identifier[new_file_data]
def write(self, filename=None): """Write the PE file. This function will process all headers and components of the PE file and include all changes made (by just assigning to attributes in the PE objects) and write the changes back to a file whose name is provided as an argument. The filename is optional, if not provided the data will be returned as a 'str' object. """ file_data = list(self.__data__) for structure in self.__structures__: struct_data = list(structure.__pack__()) offset = structure.get_file_offset() file_data[offset:offset + len(struct_data)] = struct_data # depends on [control=['for'], data=['structure']] if hasattr(self, 'VS_VERSIONINFO'): if hasattr(self, 'FileInfo'): for entry in self.FileInfo: if hasattr(entry, 'StringTable'): for st_entry in entry.StringTable: for (key, entry) in st_entry.entries.items(): offsets = st_entry.entries_offsets[key] lengths = st_entry.entries_lengths[key] if len(entry) > lengths[1]: l = list() for (idx, c) in enumerate(entry): if ord(c) > 256: l.extend([chr(ord(c) & 255), chr((ord(c) & 65280) >> 8)]) # depends on [control=['if'], data=[]] else: l.extend([chr(ord(c)), '\x00']) # depends on [control=['for'], data=[]] file_data[offsets[1]:offsets[1] + lengths[1] * 2] = l # depends on [control=['if'], data=[]] else: l = list() for (idx, c) in enumerate(entry): if ord(c) > 256: l.extend([chr(ord(c) & 255), chr((ord(c) & 65280) >> 8)]) # depends on [control=['if'], data=[]] else: l.extend([chr(ord(c)), '\x00']) # depends on [control=['for'], data=[]] file_data[offsets[1]:offsets[1] + len(entry) * 2] = l remainder = lengths[1] - len(entry) file_data[offsets[1] + len(entry) * 2:offsets[1] + lengths[1] * 2] = [u'\x00'] * remainder * 2 # depends on [control=['for'], data=[]] # depends on [control=['for'], data=['st_entry']] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['entry']] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] new_file_data = ''.join([chr(ord(c)) for c in file_data]) if filename: f = file(filename, 'wb+') f.write(new_file_data) f.close() # depends on [control=['if'], data=[]] else: return new_file_data
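A hedged usage sketch for write(): the method reads like pefile's PE.write, so the sketch assumes the pefile package; the file names and the header tweak are placeholders.

import pefile

pe = pefile.PE('app.exe')                      # parse an existing executable
pe.OPTIONAL_HEADER.AddressOfEntryPoint += 0    # illustrative in-place edit
pe.write('app_patched.exe')                    # persist the changes to disk
raw = pe.write()                               # or get the bytes back instead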
def fan_speed(self, speed: int = None) -> bool:
    """Adjust fan speed by passing 1, 2, or 3 as an argument, or cycle
    to the next speed when no argument is given."""
    body = helpers.req_body(self.manager, 'devicestatus')
    body['uuid'] = self.uuid
    head = helpers.req_headers(self.manager)

    if self.details.get('mode') != 'manual':
        # Speed can only be changed while the purifier is in manual mode
        self.mode_toggle('manual')
    else:
        # Read the current level up front; the cycling branch below needs it
        # too (referencing it only inside the first branch raised
        # UnboundLocalError when no argument was given)
        level = int(self.details.get('level'))
        if speed is not None:
            if speed == level:
                return False
            elif speed in [1, 2, 3]:
                body['level'] = speed
        else:
            # No argument given - cycle to the next speed, wrapping 3 -> 1
            if (level + 1) > 3:
                body['level'] = 1
            else:
                body['level'] = int(level + 1)

        r, _ = helpers.call_api('/131airPurifier/v1/device/updateSpeed',
                                'put', json=body, headers=head)

        if r is not None and helpers.check_response(r, 'airpur_status'):
            self.details['level'] = body['level']
            return True
        else:
            return False
def function[fan_speed, parameter[self, speed]]: constant[Adjust Fan Speed by Specifying 1,2,3 as argument or cycle through speeds increasing by one] variable[body] assign[=] call[name[helpers].req_body, parameter[name[self].manager, constant[devicestatus]]] call[name[body]][constant[uuid]] assign[=] name[self].uuid variable[head] assign[=] call[name[helpers].req_headers, parameter[name[self].manager]] if compare[call[name[self].details.get, parameter[constant[mode]]] not_equal[!=] constant[manual]] begin[:] call[name[self].mode_toggle, parameter[constant[manual]]]
keyword[def] identifier[fan_speed] ( identifier[self] , identifier[speed] : identifier[int] = keyword[None] )-> identifier[bool] : literal[string] identifier[body] = identifier[helpers] . identifier[req_body] ( identifier[self] . identifier[manager] , literal[string] ) identifier[body] [ literal[string] ]= identifier[self] . identifier[uuid] identifier[head] = identifier[helpers] . identifier[req_headers] ( identifier[self] . identifier[manager] ) keyword[if] identifier[self] . identifier[details] . identifier[get] ( literal[string] )!= literal[string] : identifier[self] . identifier[mode_toggle] ( literal[string] ) keyword[else] : keyword[if] identifier[speed] keyword[is] keyword[not] keyword[None] : identifier[level] = identifier[int] ( identifier[self] . identifier[details] . identifier[get] ( literal[string] )) keyword[if] identifier[speed] == identifier[level] : keyword[return] keyword[False] keyword[elif] identifier[speed] keyword[in] [ literal[int] , literal[int] , literal[int] ]: identifier[body] [ literal[string] ]= identifier[speed] keyword[else] : keyword[if] ( identifier[level] + literal[int] )> literal[int] : identifier[body] [ literal[string] ]= literal[int] keyword[else] : identifier[body] [ literal[string] ]= identifier[int] ( identifier[level] + literal[int] ) identifier[r] , identifier[_] = identifier[helpers] . identifier[call_api] ( literal[string] , literal[string] , identifier[json] = identifier[body] , identifier[headers] = identifier[head] ) keyword[if] identifier[r] keyword[is] keyword[not] keyword[None] keyword[and] identifier[helpers] . identifier[check_response] ( identifier[r] , literal[string] ): identifier[self] . identifier[details] [ literal[string] ]= identifier[body] [ literal[string] ] keyword[return] keyword[True] keyword[else] : keyword[return] keyword[False]
def fan_speed(self, speed: int=None) -> bool: """Adjust Fan Speed by Specifying 1,2,3 as argument or cycle through speeds increasing by one""" body = helpers.req_body(self.manager, 'devicestatus') body['uuid'] = self.uuid head = helpers.req_headers(self.manager) if self.details.get('mode') != 'manual': self.mode_toggle('manual') # depends on [control=['if'], data=[]] else: if speed is not None: level = int(self.details.get('level')) if speed == level: return False # depends on [control=['if'], data=[]] elif speed in [1, 2, 3]: body['level'] = speed # depends on [control=['if'], data=['speed']] # depends on [control=['if'], data=['speed']] elif level + 1 > 3: body['level'] = 1 # depends on [control=['if'], data=[]] else: body['level'] = int(level + 1) (r, _) = helpers.call_api('/131airPurifier/v1/device/updateSpeed', 'put', json=body, headers=head) if r is not None and helpers.check_response(r, 'airpur_status'): self.details['level'] = body['level'] return True # depends on [control=['if'], data=[]] else: return False
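A hedged usage sketch for fan_speed(); the manager and device discovery are hypothetical stand-ins for the surrounding pyvesync-style client.

manager = VeSyncManager('user@example.com', 'password')  # hypothetical class
manager.login()
manager.update()

purifier = manager.fans[0]   # assumed list of discovered air purifiers
purifier.fan_speed(2)        # set speed 2 explicitly (returns True on success)
purifier.fan_speed()         # no argument: cycle to the next speed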
def ColumnTypeParser(description): """Parses a single column description. Internal helper method. Args: description: a column description in the possible formats: 'id' ('id',) ('id', 'type') ('id', 'type', 'label') ('id', 'type', 'label', {'custom_prop1': 'custom_val1'}) Returns: Dictionary with the following keys: id, label, type, and custom_properties where: - If label not given, it equals the id. - If type not given, string is used by default. - If custom properties are not given, an empty dictionary is used by default. Raises: DataTableException: The column description did not match the RE, or unsupported type was passed. """ if not description: raise DataTableException("Description error: empty description given") if not isinstance(description, (six.string_types, tuple)): raise DataTableException("Description error: expected either string or " "tuple, got %s." % type(description)) if isinstance(description, six.string_types): description = (description,) # According to the tuple's length, we fill the keys # We verify everything is of type string for elem in description[:3]: if not isinstance(elem, six.string_types): raise DataTableException("Description error: expected tuple of " "strings, current element of type %s." % type(elem)) desc_dict = {"id": description[0], "label": description[0], "type": "string", "custom_properties": {}} if len(description) > 1: desc_dict["type"] = description[1].lower() if len(description) > 2: desc_dict["label"] = description[2] if len(description) > 3: if not isinstance(description[3], dict): raise DataTableException("Description error: expected custom " "properties of type dict, current element " "of type %s." % type(description[3])) desc_dict["custom_properties"] = description[3] if len(description) > 4: raise DataTableException("Description error: tuple of length > 4") if desc_dict["type"] not in ["string", "number", "boolean", "date", "datetime", "timeofday"]: raise DataTableException( "Description error: unsupported type '%s'" % desc_dict["type"]) return desc_dict
def function[ColumnTypeParser, parameter[description]]: constant[Parses a single column description. Internal helper method. Args: description: a column description in the possible formats: 'id' ('id',) ('id', 'type') ('id', 'type', 'label') ('id', 'type', 'label', {'custom_prop1': 'custom_val1'}) Returns: Dictionary with the following keys: id, label, type, and custom_properties where: - If label not given, it equals the id. - If type not given, string is used by default. - If custom properties are not given, an empty dictionary is used by default. Raises: DataTableException: The column description did not match the RE, or unsupported type was passed. ] if <ast.UnaryOp object at 0x7da2054a60e0> begin[:] <ast.Raise object at 0x7da2054a52d0> if <ast.UnaryOp object at 0x7da2054a5930> begin[:] <ast.Raise object at 0x7da2054a4670> if call[name[isinstance], parameter[name[description], name[six].string_types]] begin[:] variable[description] assign[=] tuple[[<ast.Name object at 0x7da18c4ce740>]] for taget[name[elem]] in starred[call[name[description]][<ast.Slice object at 0x7da18c4cf8e0>]] begin[:] if <ast.UnaryOp object at 0x7da18c4cefb0> begin[:] <ast.Raise object at 0x7da18c4cc5b0> variable[desc_dict] assign[=] dictionary[[<ast.Constant object at 0x7da18c4ce530>, <ast.Constant object at 0x7da18c4ce710>, <ast.Constant object at 0x7da18c4cfd90>, <ast.Constant object at 0x7da18c4cc430>], [<ast.Subscript object at 0x7da18c4ce440>, <ast.Subscript object at 0x7da18c4cedd0>, <ast.Constant object at 0x7da18c4cf9a0>, <ast.Dict object at 0x7da18c4cd180>]] if compare[call[name[len], parameter[name[description]]] greater[>] constant[1]] begin[:] call[name[desc_dict]][constant[type]] assign[=] call[call[name[description]][constant[1]].lower, parameter[]] if compare[call[name[len], parameter[name[description]]] greater[>] constant[2]] begin[:] call[name[desc_dict]][constant[label]] assign[=] call[name[description]][constant[2]] if compare[call[name[len], parameter[name[description]]] greater[>] constant[3]] begin[:] if <ast.UnaryOp object at 0x7da18c4ce620> begin[:] <ast.Raise object at 0x7da18c4cc130> call[name[desc_dict]][constant[custom_properties]] assign[=] call[name[description]][constant[3]] if compare[call[name[len], parameter[name[description]]] greater[>] constant[4]] begin[:] <ast.Raise object at 0x7da18c4cf2b0> if compare[call[name[desc_dict]][constant[type]] <ast.NotIn object at 0x7da2590d7190> list[[<ast.Constant object at 0x7da18c4ceb30>, <ast.Constant object at 0x7da18c4cfd00>, <ast.Constant object at 0x7da18c4cf700>, <ast.Constant object at 0x7da18c4cddb0>, <ast.Constant object at 0x7da18c4cf0d0>, <ast.Constant object at 0x7da18c4cf4f0>]]] begin[:] <ast.Raise object at 0x7da18c4ccbe0> return[name[desc_dict]]
keyword[def] identifier[ColumnTypeParser] ( identifier[description] ): literal[string] keyword[if] keyword[not] identifier[description] : keyword[raise] identifier[DataTableException] ( literal[string] ) keyword[if] keyword[not] identifier[isinstance] ( identifier[description] ,( identifier[six] . identifier[string_types] , identifier[tuple] )): keyword[raise] identifier[DataTableException] ( literal[string] literal[string] % identifier[type] ( identifier[description] )) keyword[if] identifier[isinstance] ( identifier[description] , identifier[six] . identifier[string_types] ): identifier[description] =( identifier[description] ,) keyword[for] identifier[elem] keyword[in] identifier[description] [: literal[int] ]: keyword[if] keyword[not] identifier[isinstance] ( identifier[elem] , identifier[six] . identifier[string_types] ): keyword[raise] identifier[DataTableException] ( literal[string] literal[string] % identifier[type] ( identifier[elem] )) identifier[desc_dict] ={ literal[string] : identifier[description] [ literal[int] ], literal[string] : identifier[description] [ literal[int] ], literal[string] : literal[string] , literal[string] :{}} keyword[if] identifier[len] ( identifier[description] )> literal[int] : identifier[desc_dict] [ literal[string] ]= identifier[description] [ literal[int] ]. identifier[lower] () keyword[if] identifier[len] ( identifier[description] )> literal[int] : identifier[desc_dict] [ literal[string] ]= identifier[description] [ literal[int] ] keyword[if] identifier[len] ( identifier[description] )> literal[int] : keyword[if] keyword[not] identifier[isinstance] ( identifier[description] [ literal[int] ], identifier[dict] ): keyword[raise] identifier[DataTableException] ( literal[string] literal[string] literal[string] % identifier[type] ( identifier[description] [ literal[int] ])) identifier[desc_dict] [ literal[string] ]= identifier[description] [ literal[int] ] keyword[if] identifier[len] ( identifier[description] )> literal[int] : keyword[raise] identifier[DataTableException] ( literal[string] ) keyword[if] identifier[desc_dict] [ literal[string] ] keyword[not] keyword[in] [ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ]: keyword[raise] identifier[DataTableException] ( literal[string] % identifier[desc_dict] [ literal[string] ]) keyword[return] identifier[desc_dict]
def ColumnTypeParser(description): """Parses a single column description. Internal helper method. Args: description: a column description in the possible formats: 'id' ('id',) ('id', 'type') ('id', 'type', 'label') ('id', 'type', 'label', {'custom_prop1': 'custom_val1'}) Returns: Dictionary with the following keys: id, label, type, and custom_properties where: - If label not given, it equals the id. - If type not given, string is used by default. - If custom properties are not given, an empty dictionary is used by default. Raises: DataTableException: The column description did not match the RE, or unsupported type was passed. """ if not description: raise DataTableException('Description error: empty description given') # depends on [control=['if'], data=[]] if not isinstance(description, (six.string_types, tuple)): raise DataTableException('Description error: expected either string or tuple, got %s.' % type(description)) # depends on [control=['if'], data=[]] if isinstance(description, six.string_types): description = (description,) # depends on [control=['if'], data=[]] # According to the tuple's length, we fill the keys # We verify everything is of type string for elem in description[:3]: if not isinstance(elem, six.string_types): raise DataTableException('Description error: expected tuple of strings, current element of type %s.' % type(elem)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['elem']] desc_dict = {'id': description[0], 'label': description[0], 'type': 'string', 'custom_properties': {}} if len(description) > 1: desc_dict['type'] = description[1].lower() if len(description) > 2: desc_dict['label'] = description[2] if len(description) > 3: if not isinstance(description[3], dict): raise DataTableException('Description error: expected custom properties of type dict, current element of type %s.' % type(description[3])) # depends on [control=['if'], data=[]] desc_dict['custom_properties'] = description[3] if len(description) > 4: raise DataTableException('Description error: tuple of length > 4') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] if desc_dict['type'] not in ['string', 'number', 'boolean', 'date', 'datetime', 'timeofday']: raise DataTableException("Description error: unsupported type '%s'" % desc_dict['type']) # depends on [control=['if'], data=[]] return desc_dict
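ColumnTypeParser is fully shown above, so its behavior on the documented description formats can be illustrated directly:

ColumnTypeParser('age')
# {'id': 'age', 'label': 'age', 'type': 'string', 'custom_properties': {}}

ColumnTypeParser(('age', 'number', 'Age', {'style': 'bold'}))
# {'id': 'age', 'label': 'Age', 'type': 'number',
#  'custom_properties': {'style': 'bold'}}

ColumnTypeParser(('age', 'float'))
# raises DataTableException: unsupported type 'float'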
def ignore_file_extension(self, extension): """ Configure a file extension to be ignored. :param extension: file extension to be ignored (ex. .less, .scss, etc) """ logger.info('Ignoring file extension: {}'.format(extension)) self.watcher.ignore_file_extension(extension)
def function[ignore_file_extension, parameter[self, extension]]: constant[ Configure a file extension to be ignored. :param extension: file extension to be ignored (ex. .less, .scss, etc) ] call[name[logger].info, parameter[call[constant[Ignoring file extension: {}].format, parameter[name[extension]]]]] call[name[self].watcher.ignore_file_extension, parameter[name[extension]]]
keyword[def] identifier[ignore_file_extension] ( identifier[self] , identifier[extension] ): literal[string] identifier[logger] . identifier[info] ( literal[string] . identifier[format] ( identifier[extension] )) identifier[self] . identifier[watcher] . identifier[ignore_file_extension] ( identifier[extension] )
def ignore_file_extension(self, extension): """ Configure a file extension to be ignored. :param extension: file extension to be ignored (ex. .less, .scss, etc) """ logger.info('Ignoring file extension: {}'.format(extension)) self.watcher.ignore_file_extension(extension)
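A minimal usage sketch; the watcher-backed server object is a hypothetical stand-in for whatever class exposes this method.

server = LiveReloadServer()            # hypothetical host object
server.ignore_file_extension('.swp')   # skip editor swap files
server.ignore_file_extension('.tmp')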
def boolean_sparse(a, b, operation=np.logical_and):
    """
    Find common rows between two arrays very quickly
    using 3D boolean sparse matrices.

    Parameters
    -----------
    a: (n, d)  int, coordinates in space
    b: (m, d)  int, coordinates in space
    operation: numpy operation function, ie:
                  np.logical_and
                  np.logical_or

    Returns
    -----------
    coords: (q, d)  int, coordinates in space
    """
    # 3D sparse arrays, using wrapped scipy.sparse
    # pip install sparse
    import sparse

    # find the bounding box of both arrays
    extrema = np.array([a.min(axis=0),
                        a.max(axis=0),
                        b.min(axis=0),
                        b.max(axis=0)])
    origin = extrema.min(axis=0) - 1
    size = tuple(extrema.ptp(axis=0) + 2)

    # put nearby voxel arrays into same shape sparse array
    # (np.bool was removed from modern numpy; use the builtin bool)
    sp_a = sparse.COO((a - origin).T,
                      data=np.ones(len(a), dtype=bool),
                      shape=size)
    sp_b = sparse.COO((b - origin).T,
                      data=np.ones(len(b), dtype=bool),
                      shape=size)

    # apply the logical operation
    # get a sparse matrix out
    applied = operation(sp_a, sp_b)

    # reconstruct the original coordinates
    coords = np.column_stack(applied.coords) + origin

    return coords
def function[boolean_sparse, parameter[a, b, operation]]: constant[ Find common rows between two arrays very quickly using 3D boolean sparse matrices. Parameters ----------- a: (n, d) int, coordinates in space b: (m, d) int, coordinates in space operation: numpy operation function, ie: np.logical_and np.logical_or Returns ----------- coords: (q, d) int, coordinates in space ] import module[sparse] variable[extrema] assign[=] call[name[np].array, parameter[list[[<ast.Call object at 0x7da18bc70b50>, <ast.Call object at 0x7da18bc725c0>, <ast.Call object at 0x7da18bc724d0>, <ast.Call object at 0x7da18bc71600>]]]] variable[origin] assign[=] binary_operation[call[name[extrema].min, parameter[]] - constant[1]] variable[size] assign[=] call[name[tuple], parameter[binary_operation[call[name[extrema].ptp, parameter[]] + constant[2]]]] variable[sp_a] assign[=] call[name[sparse].COO, parameter[binary_operation[name[a] - name[origin]].T]] variable[sp_b] assign[=] call[name[sparse].COO, parameter[binary_operation[name[b] - name[origin]].T]] variable[applied] assign[=] call[name[operation], parameter[name[sp_a], name[sp_b]]] variable[coords] assign[=] binary_operation[call[name[np].column_stack, parameter[name[applied].coords]] + name[origin]] return[name[coords]]
keyword[def] identifier[boolean_sparse] ( identifier[a] , identifier[b] , identifier[operation] = identifier[np] . identifier[logical_and] ): literal[string] keyword[import] identifier[sparse] identifier[extrema] = identifier[np] . identifier[array] ([ identifier[a] . identifier[min] ( identifier[axis] = literal[int] ), identifier[a] . identifier[max] ( identifier[axis] = literal[int] ), identifier[b] . identifier[min] ( identifier[axis] = literal[int] ), identifier[b] . identifier[max] ( identifier[axis] = literal[int] )]) identifier[origin] = identifier[extrema] . identifier[min] ( identifier[axis] = literal[int] )- literal[int] identifier[size] = identifier[tuple] ( identifier[extrema] . identifier[ptp] ( identifier[axis] = literal[int] )+ literal[int] ) identifier[sp_a] = identifier[sparse] . identifier[COO] (( identifier[a] - identifier[origin] ). identifier[T] , identifier[data] = identifier[np] . identifier[ones] ( identifier[len] ( identifier[a] ), identifier[dtype] = identifier[np] . identifier[bool] ), identifier[shape] = identifier[size] ) identifier[sp_b] = identifier[sparse] . identifier[COO] (( identifier[b] - identifier[origin] ). identifier[T] , identifier[data] = identifier[np] . identifier[ones] ( identifier[len] ( identifier[b] ), identifier[dtype] = identifier[np] . identifier[bool] ), identifier[shape] = identifier[size] ) identifier[applied] = identifier[operation] ( identifier[sp_a] , identifier[sp_b] ) identifier[coords] = identifier[np] . identifier[column_stack] ( identifier[applied] . identifier[coords] )+ identifier[origin] keyword[return] identifier[coords]
def boolean_sparse(a, b, operation=np.logical_and): """ Find common rows between two arrays very quickly using 3D boolean sparse matrices. Parameters ----------- a: (n, d) int, coordinates in space b: (m, d) int, coordinates in space operation: numpy operation function, ie: np.logical_and np.logical_or Returns ----------- coords: (q, d) int, coordinates in space """ # 3D sparse arrays, using wrapped scipy.sparse # pip install sparse import sparse # find the bounding box of both arrays extrema = np.array([a.min(axis=0), a.max(axis=0), b.min(axis=0), b.max(axis=0)]) origin = extrema.min(axis=0) - 1 size = tuple(extrema.ptp(axis=0) + 2) # put nearby voxel arrays into same shape sparse array sp_a = sparse.COO((a - origin).T, data=np.ones(len(a), dtype=np.bool), shape=size) sp_b = sparse.COO((b - origin).T, data=np.ones(len(b), dtype=np.bool), shape=size) # apply the logical operation # get a sparse matrix out applied = operation(sp_a, sp_b) # reconstruct the original coordinates coords = np.column_stack(applied.coords) + origin return coords
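A small self-contained example of boolean_sparse(), assuming numpy and the sparse package are installed; it intersects two integer coordinate sets (row order of the result is not guaranteed).

import numpy as np

a = np.array([[0, 0, 0], [1, 0, 0], [2, 0, 0]])
b = np.array([[1, 0, 0], [2, 0, 0], [3, 0, 0]])

shared = boolean_sparse(a, b, operation=np.logical_and)
# rows present in both arrays: [1, 0, 0] and [2, 0, 0]

either = boolean_sparse(a, b, operation=np.logical_or)
# union of the two coordinate sets: four distinct rows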
def symmetric_strength_of_connection(A, theta=0):
    """Symmetric Strength Measure.

    Compute strength of connection matrix using the standard symmetric measure

    An off-diagonal connection A[i,j] is strong iff::

        abs(A[i,j]) >= theta * sqrt( abs(A[i,i]) * abs(A[j,j]) )

    Parameters
    ----------
    A : csr_matrix
        Matrix graph defined in sparse format.  Entry A[i,j] describes the
        strength of edge [i,j]
    theta : float
        Threshold parameter (positive).

    Returns
    -------
    S : csr_matrix
        Matrix graph defining strong connections.  S[i,j]=1 if vertex i
        is strongly influenced by vertex j.

    See Also
    --------
    classical_strength_of_connection : classical strength measure
    evolution_strength_of_connection : relaxation based strength measure

    Notes
    -----
    - For vector problems, standard strength measures may produce
      undesirable aggregates.  A "block approach" from Vanek et al. is used
      to replace vertex comparisons with block-type comparisons.  A
      connection between nodes i and j in the block case is strong if::

          ||AB[i,j]|| >= theta * sqrt( ||AB[i,i]|| * ||AB[j,j]|| )

      where AB[k,l] is the matrix block (degrees of freedom) associated with
      nodes k and l and ||.|| is a matrix norm, such as the Frobenius norm.

    - See [1996bVaMaBr]_ for more details.

    References
    ----------
    .. [1996bVaMaBr] Vanek, P. and Mandel, J. and Brezina, M.,
       "Algebraic Multigrid by Smoothed Aggregation for
       Second and Fourth Order Elliptic Problems",
       Computing, vol. 56, no. 3, pp. 179--196, 1996.
       http://citeseer.ist.psu.edu/vanek96algebraic.html

    Examples
    --------
    >>> import numpy as np
    >>> from pyamg.gallery import stencil_grid
    >>> from pyamg.strength import symmetric_strength_of_connection
    >>> n=3
    >>> stencil = np.array([[-1.0,-1.0,-1.0],
    ...                     [-1.0, 8.0,-1.0],
    ...                     [-1.0,-1.0,-1.0]])
    >>> A = stencil_grid(stencil, (n,n), format='csr')
    >>> S = symmetric_strength_of_connection(A, 0.0)

    """
    if theta < 0:
        raise ValueError('expected a positive theta')

    if sparse.isspmatrix_csr(A):
        # if theta == 0:
        #     return A

        Sp = np.empty_like(A.indptr)
        Sj = np.empty_like(A.indices)
        Sx = np.empty_like(A.data)

        fn = amg_core.symmetric_strength_of_connection
        fn(A.shape[0], theta, A.indptr, A.indices, A.data, Sp, Sj, Sx)

        S = sparse.csr_matrix((Sx, Sj, Sp), shape=A.shape)

    elif sparse.isspmatrix_bsr(A):
        M, N = A.shape
        R, C = A.blocksize

        if R != C:
            raise ValueError('matrix must have square blocks')

        if theta == 0:
            data = np.ones(len(A.indices), dtype=A.dtype)
            S = sparse.csr_matrix((data, A.indices.copy(), A.indptr.copy()),
                                  shape=(int(M / R), int(N / C)))
        else:
            # the strength of connection matrix is based on the
            # Frobenius norms of the blocks
            data = (np.conjugate(A.data) * A.data).reshape(-1, R * C)
            data = data.sum(axis=1)
            A = sparse.csr_matrix((data, A.indices, A.indptr),
                                  shape=(int(M / R), int(N / C)))

            return symmetric_strength_of_connection(A, theta)
    else:
        raise TypeError('expected csr_matrix or bsr_matrix')

    # Strength represents "distance", so take the magnitude
    S.data = np.abs(S.data)

    # Scale S by the largest magnitude entry in each row
    S = scale_rows_by_largest_entry(S)

    return S
def function[symmetric_strength_of_connection, parameter[A, theta]]: constant[Symmetric Strength Measure. Compute strength of connection matrix using the standard symmetric measure An off-diagonal connection A[i,j] is strong iff:: abs(A[i,j]) >= theta * sqrt( abs(A[i,i]) * abs(A[j,j]) ) Parameters ---------- A : csr_matrix Matrix graph defined in sparse format. Entry A[i,j] describes the strength of edge [i,j] theta : float Threshold parameter (positive). Returns ------- S : csr_matrix Matrix graph defining strong connections. S[i,j]=1 if vertex i is strongly influenced by vertex j. See Also -------- symmetric_strength_of_connection : symmetric measure used in SA evolution_strength_of_connection : relaxation based strength measure Notes ----- - For vector problems, standard strength measures may produce undesirable aggregates. A "block approach" from Vanek et al. is used to replace vertex comparisons with block-type comparisons. A connection between nodes i and j in the block case is strong if:: ||AB[i,j]|| >= theta * sqrt( ||AB[i,i]||*||AB[j,j]|| ) where AB[k,l] is the matrix block (degrees of freedom) associated with nodes k and l and ||.|| is a matrix norm, such a Frobenius. - See [1996bVaMaBr]_ for more details. References ---------- .. [1996bVaMaBr] Vanek, P. and Mandel, J. and Brezina, M., "Algebraic Multigrid by Smoothed Aggregation for Second and Fourth Order Elliptic Problems", Computing, vol. 56, no. 3, pp. 179--196, 1996. http://citeseer.ist.psu.edu/vanek96algebraic.html Examples -------- >>> import numpy as np >>> from pyamg.gallery import stencil_grid >>> from pyamg.strength import symmetric_strength_of_connection >>> n=3 >>> stencil = np.array([[-1.0,-1.0,-1.0], ... [-1.0, 8.0,-1.0], ... [-1.0,-1.0,-1.0]]) >>> A = stencil_grid(stencil, (n,n), format='csr') >>> S = symmetric_strength_of_connection(A, 0.0) ] if compare[name[theta] less[<] constant[0]] begin[:] <ast.Raise object at 0x7da2046235e0> if call[name[sparse].isspmatrix_csr, parameter[name[A]]] begin[:] variable[Sp] assign[=] call[name[np].empty_like, parameter[name[A].indptr]] variable[Sj] assign[=] call[name[np].empty_like, parameter[name[A].indices]] variable[Sx] assign[=] call[name[np].empty_like, parameter[name[A].data]] variable[fn] assign[=] name[amg_core].symmetric_strength_of_connection call[name[fn], parameter[call[name[A].shape][constant[0]], name[theta], name[A].indptr, name[A].indices, name[A].data, name[Sp], name[Sj], name[Sx]]] variable[S] assign[=] call[name[sparse].csr_matrix, parameter[tuple[[<ast.Name object at 0x7da2046214e0>, <ast.Name object at 0x7da204623280>, <ast.Name object at 0x7da2046212d0>]]]] name[S].data assign[=] call[name[np].abs, parameter[name[S].data]] variable[S] assign[=] call[name[scale_rows_by_largest_entry], parameter[name[S]]] return[name[S]]
keyword[def] identifier[symmetric_strength_of_connection] ( identifier[A] , identifier[theta] = literal[int] ): literal[string] keyword[if] identifier[theta] < literal[int] : keyword[raise] identifier[ValueError] ( literal[string] ) keyword[if] identifier[sparse] . identifier[isspmatrix_csr] ( identifier[A] ): identifier[Sp] = identifier[np] . identifier[empty_like] ( identifier[A] . identifier[indptr] ) identifier[Sj] = identifier[np] . identifier[empty_like] ( identifier[A] . identifier[indices] ) identifier[Sx] = identifier[np] . identifier[empty_like] ( identifier[A] . identifier[data] ) identifier[fn] = identifier[amg_core] . identifier[symmetric_strength_of_connection] identifier[fn] ( identifier[A] . identifier[shape] [ literal[int] ], identifier[theta] , identifier[A] . identifier[indptr] , identifier[A] . identifier[indices] , identifier[A] . identifier[data] , identifier[Sp] , identifier[Sj] , identifier[Sx] ) identifier[S] = identifier[sparse] . identifier[csr_matrix] (( identifier[Sx] , identifier[Sj] , identifier[Sp] ), identifier[shape] = identifier[A] . identifier[shape] ) keyword[elif] identifier[sparse] . identifier[isspmatrix_bsr] ( identifier[A] ): identifier[M] , identifier[N] = identifier[A] . identifier[shape] identifier[R] , identifier[C] = identifier[A] . identifier[blocksize] keyword[if] identifier[R] != identifier[C] : keyword[raise] identifier[ValueError] ( literal[string] ) keyword[if] identifier[theta] == literal[int] : identifier[data] = identifier[np] . identifier[ones] ( identifier[len] ( identifier[A] . identifier[indices] ), identifier[dtype] = identifier[A] . identifier[dtype] ) identifier[S] = identifier[sparse] . identifier[csr_matrix] (( identifier[data] , identifier[A] . identifier[indices] . identifier[copy] (), identifier[A] . identifier[indptr] . identifier[copy] ()), identifier[shape] =( identifier[int] ( identifier[M] / identifier[R] ), identifier[int] ( identifier[N] / identifier[C] ))) keyword[else] : identifier[data] =( identifier[np] . identifier[conjugate] ( identifier[A] . identifier[data] )* identifier[A] . identifier[data] ). identifier[reshape] (- literal[int] , identifier[R] * identifier[C] ) identifier[data] = identifier[data] . identifier[sum] ( identifier[axis] = literal[int] ) identifier[A] = identifier[sparse] . identifier[csr_matrix] (( identifier[data] , identifier[A] . identifier[indices] , identifier[A] . identifier[indptr] ), identifier[shape] =( identifier[int] ( identifier[M] / identifier[R] ), identifier[int] ( identifier[N] / identifier[C] ))) keyword[return] identifier[symmetric_strength_of_connection] ( identifier[A] , identifier[theta] ) keyword[else] : keyword[raise] identifier[TypeError] ( literal[string] ) identifier[S] . identifier[data] = identifier[np] . identifier[abs] ( identifier[S] . identifier[data] ) identifier[S] = identifier[scale_rows_by_largest_entry] ( identifier[S] ) keyword[return] identifier[S]
def symmetric_strength_of_connection(A, theta=0): """Symmetric Strength Measure. Compute strength of connection matrix using the standard symmetric measure An off-diagonal connection A[i,j] is strong iff:: abs(A[i,j]) >= theta * sqrt( abs(A[i,i]) * abs(A[j,j]) ) Parameters ---------- A : csr_matrix Matrix graph defined in sparse format. Entry A[i,j] describes the strength of edge [i,j] theta : float Threshold parameter (positive). Returns ------- S : csr_matrix Matrix graph defining strong connections. S[i,j]=1 if vertex i is strongly influenced by vertex j. See Also -------- symmetric_strength_of_connection : symmetric measure used in SA evolution_strength_of_connection : relaxation based strength measure Notes ----- - For vector problems, standard strength measures may produce undesirable aggregates. A "block approach" from Vanek et al. is used to replace vertex comparisons with block-type comparisons. A connection between nodes i and j in the block case is strong if:: ||AB[i,j]|| >= theta * sqrt( ||AB[i,i]||*||AB[j,j]|| ) where AB[k,l] is the matrix block (degrees of freedom) associated with nodes k and l and ||.|| is a matrix norm, such a Frobenius. - See [1996bVaMaBr]_ for more details. References ---------- .. [1996bVaMaBr] Vanek, P. and Mandel, J. and Brezina, M., "Algebraic Multigrid by Smoothed Aggregation for Second and Fourth Order Elliptic Problems", Computing, vol. 56, no. 3, pp. 179--196, 1996. http://citeseer.ist.psu.edu/vanek96algebraic.html Examples -------- >>> import numpy as np >>> from pyamg.gallery import stencil_grid >>> from pyamg.strength import symmetric_strength_of_connection >>> n=3 >>> stencil = np.array([[-1.0,-1.0,-1.0], ... [-1.0, 8.0,-1.0], ... [-1.0,-1.0,-1.0]]) >>> A = stencil_grid(stencil, (n,n), format='csr') >>> S = symmetric_strength_of_connection(A, 0.0) """ if theta < 0: raise ValueError('expected a positive theta') # depends on [control=['if'], data=[]] if sparse.isspmatrix_csr(A): # if theta == 0: # return A Sp = np.empty_like(A.indptr) Sj = np.empty_like(A.indices) Sx = np.empty_like(A.data) fn = amg_core.symmetric_strength_of_connection fn(A.shape[0], theta, A.indptr, A.indices, A.data, Sp, Sj, Sx) S = sparse.csr_matrix((Sx, Sj, Sp), shape=A.shape) # depends on [control=['if'], data=[]] elif sparse.isspmatrix_bsr(A): (M, N) = A.shape (R, C) = A.blocksize if R != C: raise ValueError('matrix must have square blocks') # depends on [control=['if'], data=[]] if theta == 0: data = np.ones(len(A.indices), dtype=A.dtype) S = sparse.csr_matrix((data, A.indices.copy(), A.indptr.copy()), shape=(int(M / R), int(N / C))) # depends on [control=['if'], data=[]] else: # the strength of connection matrix is based on the # Frobenius norms of the blocks data = (np.conjugate(A.data) * A.data).reshape(-1, R * C) data = data.sum(axis=1) A = sparse.csr_matrix((data, A.indices, A.indptr), shape=(int(M / R), int(N / C))) return symmetric_strength_of_connection(A, theta) # depends on [control=['if'], data=[]] else: raise TypeError('expected csr_matrix or bsr_matrix') # Strength represents "distance", so take the magnitude S.data = np.abs(S.data) # Scale S by the largest magnitude entry in each row S = scale_rows_by_largest_entry(S) return S
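Extending the doctest above: raising theta prunes weak couplings, which can be checked by comparing nonzero counts (a sketch using only the calls already shown).

import numpy as np
from pyamg.gallery import stencil_grid

stencil = np.array([[-1.0, -1.0, -1.0],
                    [-1.0,  8.0, -1.0],
                    [-1.0, -1.0, -1.0]])
A = stencil_grid(stencil, (3, 3), format='csr')

S_all = symmetric_strength_of_connection(A, theta=0.0)
S_strong = symmetric_strength_of_connection(A, theta=0.5)
print(S_all.nnz, S_strong.nnz)  # the stricter threshold keeps fewer entries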
def StartFlowAndWait(client_id, token=None, timeout=DEFAULT_TIMEOUT, **flow_args): """Runs a flow and waits for it to finish. Args: client_id: The client id of the client to run on. token: The datastore access token. timeout: How long to wait for a flow to complete, maximum. **flow_args: Pass through to flow. Returns: The urn of the flow that was run. """ flow_urn = flow.StartAFF4Flow( client_id=client_id, token=token, sync=True, **flow_args) WaitForFlow(flow_urn, token=token, timeout=timeout) return flow_urn
def function[StartFlowAndWait, parameter[client_id, token, timeout]]: constant[Runs a flow and waits for it to finish. Args: client_id: The client id of the client to run on. token: The datastore access token. timeout: How long to wait for a flow to complete, maximum. **flow_args: Pass through to flow. Returns: The urn of the flow that was run. ] variable[flow_urn] assign[=] call[name[flow].StartAFF4Flow, parameter[]] call[name[WaitForFlow], parameter[name[flow_urn]]] return[name[flow_urn]]
keyword[def] identifier[StartFlowAndWait] ( identifier[client_id] , identifier[token] = keyword[None] , identifier[timeout] = identifier[DEFAULT_TIMEOUT] , ** identifier[flow_args] ): literal[string] identifier[flow_urn] = identifier[flow] . identifier[StartAFF4Flow] ( identifier[client_id] = identifier[client_id] , identifier[token] = identifier[token] , identifier[sync] = keyword[True] ,** identifier[flow_args] ) identifier[WaitForFlow] ( identifier[flow_urn] , identifier[token] = identifier[token] , identifier[timeout] = identifier[timeout] ) keyword[return] identifier[flow_urn]
def StartFlowAndWait(client_id, token=None, timeout=DEFAULT_TIMEOUT, **flow_args): """Runs a flow and waits for it to finish. Args: client_id: The client id of the client to run on. token: The datastore access token. timeout: How long to wait for a flow to complete, maximum. **flow_args: Pass through to flow. Returns: The urn of the flow that was run. """ flow_urn = flow.StartAFF4Flow(client_id=client_id, token=token, sync=True, **flow_args) WaitForFlow(flow_urn, token=token, timeout=timeout) return flow_urn
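A hedged GRR-style invocation; the client id is a placeholder, the token is assumed to be an existing ACL token, and the flow name is forwarded through **flow_args to flow.StartAFF4Flow.

flow_urn = StartFlowAndWait(
    client_id='C.1000000000000000',   # placeholder client id
    token=token,                      # pre-existing access token assumed
    flow_name='Interrogate')          # forwarded via **flow_args
print('flow finished:', flow_urn)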
def tokenize_asdl(buf): """Tokenize the given buffer. Yield Token objects.""" for lineno, line in enumerate(buf.splitlines(), 1): for m in re.finditer(r'\s*(\w+|--.*|.)', line.strip()): c = m.group(1) if c[0].isalpha(): # Some kind of identifier if c[0].isupper(): yield Token(TokenKind.ConstructorId, c, lineno) else: yield Token(TokenKind.TypeId, c, lineno) elif c[:2] == '--': # Comment break else: # Operators try: op_kind = TokenKind.operator_table[c] except KeyError: raise ASDLSyntaxError('Invalid operator %s' % c, lineno) yield Token(op_kind, c, lineno)
def function[tokenize_asdl, parameter[buf]]: constant[Tokenize the given buffer. Yield Token objects.] for taget[tuple[[<ast.Name object at 0x7da2043460b0>, <ast.Name object at 0x7da204346dd0>]]] in starred[call[name[enumerate], parameter[call[name[buf].splitlines, parameter[]], constant[1]]]] begin[:] for taget[name[m]] in starred[call[name[re].finditer, parameter[constant[\s*(\w+|--.*|.)], call[name[line].strip, parameter[]]]]] begin[:] variable[c] assign[=] call[name[m].group, parameter[constant[1]]] if call[call[name[c]][constant[0]].isalpha, parameter[]] begin[:] if call[call[name[c]][constant[0]].isupper, parameter[]] begin[:] <ast.Yield object at 0x7da204347c70>
keyword[def] identifier[tokenize_asdl] ( identifier[buf] ): literal[string] keyword[for] identifier[lineno] , identifier[line] keyword[in] identifier[enumerate] ( identifier[buf] . identifier[splitlines] (), literal[int] ): keyword[for] identifier[m] keyword[in] identifier[re] . identifier[finditer] ( literal[string] , identifier[line] . identifier[strip] ()): identifier[c] = identifier[m] . identifier[group] ( literal[int] ) keyword[if] identifier[c] [ literal[int] ]. identifier[isalpha] (): keyword[if] identifier[c] [ literal[int] ]. identifier[isupper] (): keyword[yield] identifier[Token] ( identifier[TokenKind] . identifier[ConstructorId] , identifier[c] , identifier[lineno] ) keyword[else] : keyword[yield] identifier[Token] ( identifier[TokenKind] . identifier[TypeId] , identifier[c] , identifier[lineno] ) keyword[elif] identifier[c] [: literal[int] ]== literal[string] : keyword[break] keyword[else] : keyword[try] : identifier[op_kind] = identifier[TokenKind] . identifier[operator_table] [ identifier[c] ] keyword[except] identifier[KeyError] : keyword[raise] identifier[ASDLSyntaxError] ( literal[string] % identifier[c] , identifier[lineno] ) keyword[yield] identifier[Token] ( identifier[op_kind] , identifier[c] , identifier[lineno] )
def tokenize_asdl(buf): """Tokenize the given buffer. Yield Token objects.""" for (lineno, line) in enumerate(buf.splitlines(), 1): for m in re.finditer('\\s*(\\w+|--.*|.)', line.strip()): c = m.group(1) if c[0].isalpha(): # Some kind of identifier if c[0].isupper(): yield Token(TokenKind.ConstructorId, c, lineno) # depends on [control=['if'], data=[]] else: yield Token(TokenKind.TypeId, c, lineno) # depends on [control=['if'], data=[]] elif c[:2] == '--': # Comment break # depends on [control=['if'], data=[]] else: # Operators try: op_kind = TokenKind.operator_table[c] # depends on [control=['try'], data=[]] except KeyError: raise ASDLSyntaxError('Invalid operator %s' % c, lineno) # depends on [control=['except'], data=[]] yield Token(op_kind, c, lineno) # depends on [control=['for'], data=['m']] # depends on [control=['for'], data=[]]
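A quick driver for tokenize_asdl(), assuming the Token namedtuple and TokenKind table defined alongside it (field names inferred from the constructor calls above):

buf = 'stmt = Pass -- the simplest statement'
for tok in tokenize_asdl(buf):
    print(tok.kind, tok.value, tok.lineno)
# yields a TypeId for 'stmt', an operator token for '=', and a
# ConstructorId for 'Pass'; the trailing '--' comment is skipped entirely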
def as_single_element(self):
    """
    Processes the response as a single-element response, like
    config_get or system_counters_get.

    If there is more than one element in the response or no elements
    this raises a ResponseError
    """
    if self.as_return_etree is None:
        return None
    if len(self.as_return_etree.getchildren()) == 1:
        return _populate_bunch_with_element(self.as_return_etree.
                                            getchildren()[0])
    return _populate_bunch_with_element(self.as_return_etree)
def function[as_single_element, parameter[self]]: constant[ Processes the response as a single-element response, like config_get or system_counters_get. If there is more then one element in the response or no elements this raises a ResponseError ] if compare[name[self].as_return_etree is constant[None]] begin[:] return[constant[None]] if compare[call[name[len], parameter[call[name[self].as_return_etree.getchildren, parameter[]]]] equal[==] constant[1]] begin[:] return[call[name[_populate_bunch_with_element], parameter[call[call[name[self].as_return_etree.getchildren, parameter[]]][constant[0]]]]] return[call[name[_populate_bunch_with_element], parameter[name[self].as_return_etree]]]
keyword[def] identifier[as_single_element] ( identifier[self] ): literal[string] keyword[if] identifier[self] . identifier[as_return_etree] keyword[is] keyword[None] : keyword[return] keyword[None] keyword[if] identifier[len] ( identifier[self] . identifier[as_return_etree] . identifier[getchildren] ())== literal[int] : keyword[return] identifier[_populate_bunch_with_element] ( identifier[self] . identifier[as_return_etree] . identifier[getchildren] ()[ literal[int] ]) keyword[return] identifier[_populate_bunch_with_element] ( identifier[self] . identifier[as_return_etree] )
def as_single_element(self): """ Processes the response as a single-element response, like config_get or system_counters_get. If there is more then one element in the response or no elements this raises a ResponseError """ if self.as_return_etree is None: return None # depends on [control=['if'], data=[]] if len(self.as_return_etree.getchildren()) == 1: return _populate_bunch_with_element(self.as_return_etree.getchildren()[0]) # depends on [control=['if'], data=[]] return _populate_bunch_with_element(self.as_return_etree)
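A hedged sketch for as_single_element(); the client and the call that produces the response object are hypothetical stand-ins:

resp = client.config_get()           # hypothetical single-element request
config = resp.as_single_element()    # Bunch-like object, or None if empty
if config is not None:
    print(config)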
def word_ngrams(s, n=3, token_fn=tokens.on_whitespace):
    """
    Word-level n-grams in a string

    By default, whitespace is assumed to be a word boundary.

    >>> ng.word_ngrams('This is not a test!')
    [('This', 'is', 'not'), ('is', 'not', 'a'), ('not', 'a', 'test!')]

    If the sequence's length is less than or equal to n, the result is a
    single n-gram spanning the whole sequence.

    >>> ng.word_ngrams('Test!')
    [('Test!',)]

    Args:
        s: a string

    Returns:
        list: tuples of word-level n-grams
    """
    tokens = token_fn(s)
    return __ngrams(tokens, n=min(len(tokens), n))
def function[word_ngrams, parameter[s, n, token_fn]]: constant[ Word-level n-grams in a string By default, whitespace is assumed to be a word boundary. >>> ng.word_ngrams('This is not a test!') [('This', 'is', 'not'), ('is', 'not', 'a'), ('not', 'a', 'test!')] If the sequence's length is less than or equal to n, the n-grams are simply the sequence itself. >>> ng.word_ngrams('Test!') [('Test!')] Args: s: a string Returns: list: tuples of word-level n-grams ] variable[tokens] assign[=] call[name[token_fn], parameter[name[s]]] return[call[name[__ngrams], parameter[name[tokens]]]]
keyword[def] identifier[word_ngrams] ( identifier[s] , identifier[n] = literal[int] , identifier[token_fn] = identifier[tokens] . identifier[on_whitespace] ): literal[string] identifier[tokens] = identifier[token_fn] ( identifier[s] ) keyword[return] identifier[__ngrams] ( identifier[tokens] , identifier[n] = identifier[min] ( identifier[len] ( identifier[tokens] ), identifier[n] ))
def word_ngrams(s, n=3, token_fn=tokens.on_whitespace): """ Word-level n-grams in a string By default, whitespace is assumed to be a word boundary. >>> ng.word_ngrams('This is not a test!') [('This', 'is', 'not'), ('is', 'not', 'a'), ('not', 'a', 'test!')] If the sequence's length is less than or equal to n, the n-grams are simply the sequence itself. >>> ng.word_ngrams('Test!') [('Test!')] Args: s: a string Returns: list: tuples of word-level n-grams """ tokens = token_fn(s) return __ngrams(tokens, n=min(len(tokens), n))
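Beyond the doctests, a bigram example that follows directly from the code above:

word_ngrams('the quick brown fox', n=2)
# [('the', 'quick'), ('quick', 'brown'), ('brown', 'fox')]

word_ngrams('one two', n=5)   # n is clamped to the token count
# [('one', 'two')]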
def parse_lit(self, lines):
    '''
    Parse a string line-by-line delineating comments and code

    :returns: A list of boolean/list-of-string pairs.  True designates a
              comment; False designates code.
    '''
    comment_char = '#'  # TODO: move this into a directive option
    comment = re.compile(r'^\s*{0}[ \n]'.format(comment_char))
    section_test = lambda val: bool(comment.match(val))

    sections = []
    for is_doc, group in itertools.groupby(lines, section_test):
        if is_doc:
            text = [comment.sub('', i).rstrip('\r\n') for i in group]
        else:
            text = [i.rstrip('\r\n') for i in group]

        sections.append((is_doc, text))

    return sections
def function[parse_lit, parameter[self, lines]]: constant[ Parse a string line-by-line delineating comments and code :returns: An tuple of boolean/list-of-string pairs. True designates a comment; False designates code. ] variable[comment_char] assign[=] constant[#] variable[comment] assign[=] call[name[re].compile, parameter[call[constant[^\s*{0}[ \n]].format, parameter[name[comment_char]]]]] variable[section_test] assign[=] <ast.Lambda object at 0x7da204622230> variable[sections] assign[=] list[[]] for taget[tuple[[<ast.Name object at 0x7da204623d60>, <ast.Name object at 0x7da2046221d0>]]] in starred[call[name[itertools].groupby, parameter[name[lines], name[section_test]]]] begin[:] if name[is_doc] begin[:] variable[text] assign[=] <ast.ListComp object at 0x7da204623580> call[name[sections].append, parameter[tuple[[<ast.Name object at 0x7da204623640>, <ast.Name object at 0x7da204622aa0>]]]] return[name[sections]]
keyword[def] identifier[parse_lit] ( identifier[self] , identifier[lines] ): literal[string] identifier[comment_char] = literal[string] identifier[comment] = identifier[re] . identifier[compile] ( literal[string] . identifier[format] ( identifier[comment_char] )) identifier[section_test] = keyword[lambda] identifier[val] : identifier[bool] ( identifier[comment] . identifier[match] ( identifier[val] )) identifier[sections] =[] keyword[for] identifier[is_doc] , identifier[group] keyword[in] identifier[itertools] . identifier[groupby] ( identifier[lines] , identifier[section_test] ): keyword[if] identifier[is_doc] : identifier[text] =[ identifier[comment] . identifier[sub] ( literal[string] , identifier[i] ). identifier[rstrip] ( literal[string] ) keyword[for] identifier[i] keyword[in] identifier[group] ] keyword[else] : identifier[text] =[ identifier[i] . identifier[rstrip] ( literal[string] ) keyword[for] identifier[i] keyword[in] identifier[group] ] identifier[sections] . identifier[append] (( identifier[is_doc] , identifier[text] )) keyword[return] identifier[sections]
def parse_lit(self, lines): """ Parse a string line-by-line delineating comments and code :returns: An tuple of boolean/list-of-string pairs. True designates a comment; False designates code. """ comment_char = '#' # TODO: move this into a directive option comment = re.compile('^\\s*{0}[ \\n]'.format(comment_char)) section_test = lambda val: bool(comment.match(val)) sections = [] for (is_doc, group) in itertools.groupby(lines, section_test): if is_doc: text = [comment.sub('', i).rstrip('\r\n') for i in group] # depends on [control=['if'], data=[]] else: text = [i.rstrip('\r\n') for i in group] sections.append((is_doc, text)) # depends on [control=['for'], data=[]] return sections
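parse_lit() only uses self as a receiver, so it can be exercised standalone; a small sketch:

src = ['# Compute a square\n',
       'def square(x):\n',
       '    return x * x\n']

parse_lit(None, src)
# [(True, ['Compute a square']),
#  (False, ['def square(x):', '    return x * x'])]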
def extract_script(embedded_hex):
    """
    Given a hex file containing the MicroPython runtime and an embedded Python
    script, extract the original Python script.

    Returns a string containing the original embedded script.
    """
    hex_lines = embedded_hex.split('\n')
    script_addr_high = hex((_SCRIPT_ADDR >> 16) & 0xffff)[2:].upper().zfill(4)
    script_addr_low = hex(_SCRIPT_ADDR & 0xffff)[2:].upper().zfill(4)
    start_script = None
    within_range = False
    # Look for the script start address
    for loc, val in enumerate(hex_lines):
        if val[0:9] == ':02000004':
            # Reached an extended address record, check if within script range
            within_range = val[9:13].upper() == script_addr_high
        elif within_range and val[0:3] == ':10' and \
                val[3:7].upper() == script_addr_low:
            start_script = loc
            break
    if start_script:
        # Find the end of the script
        end_script = None
        for loc, val in enumerate(hex_lines[start_script:]):
            if val[9:41] == 'FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF':
                end_script = loc + start_script
                break
        # Pass the extracted hex through unhexlify
        return unhexlify('\n'.join(
            hex_lines[start_script - 1:end_script if end_script else -6]))
    return ''
def function[extract_script, parameter[embedded_hex]]: constant[ Given a hex file containing the MicroPython runtime and an embedded Python script, will extract the original Python script. Returns a string containing the original embedded script. ] variable[hex_lines] assign[=] call[name[embedded_hex].split, parameter[constant[ ]]] variable[script_addr_high] assign[=] call[call[call[call[name[hex], parameter[binary_operation[binary_operation[name[_SCRIPT_ADDR] <ast.RShift object at 0x7da2590d6a40> constant[16]] <ast.BitAnd object at 0x7da2590d6b60> constant[65535]]]]][<ast.Slice object at 0x7da20c6e5360>].upper, parameter[]].zfill, parameter[constant[4]]] variable[script_addr_low] assign[=] call[call[call[call[name[hex], parameter[binary_operation[name[_SCRIPT_ADDR] <ast.BitAnd object at 0x7da2590d6b60> constant[65535]]]]][<ast.Slice object at 0x7da20c6e6710>].upper, parameter[]].zfill, parameter[constant[4]]] variable[start_script] assign[=] constant[None] variable[within_range] assign[=] constant[False] for taget[tuple[[<ast.Name object at 0x7da20c6e5630>, <ast.Name object at 0x7da20c6e7790>]]] in starred[call[name[enumerate], parameter[name[hex_lines]]]] begin[:] if compare[call[name[val]][<ast.Slice object at 0x7da18bc72b30>] equal[==] constant[:02000004]] begin[:] variable[within_range] assign[=] compare[call[call[name[val]][<ast.Slice object at 0x7da18bc73190>].upper, parameter[]] equal[==] name[script_addr_high]] if name[start_script] begin[:] variable[end_script] assign[=] constant[None] for taget[tuple[[<ast.Name object at 0x7da1b1b61f30>, <ast.Name object at 0x7da1b1b615d0>]]] in starred[call[name[enumerate], parameter[call[name[hex_lines]][<ast.Slice object at 0x7da1b1b638e0>]]]] begin[:] if compare[call[name[val]][<ast.Slice object at 0x7da18bc72cb0>] equal[==] constant[FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF]] begin[:] variable[end_script] assign[=] binary_operation[name[loc] + name[start_script]] break return[call[name[unhexlify], parameter[call[constant[ ].join, parameter[call[name[hex_lines]][<ast.Slice object at 0x7da18bc72bc0>]]]]]] return[constant[]]
keyword[def] identifier[extract_script] ( identifier[embedded_hex] ): literal[string] identifier[hex_lines] = identifier[embedded_hex] . identifier[split] ( literal[string] ) identifier[script_addr_high] = identifier[hex] (( identifier[_SCRIPT_ADDR] >> literal[int] )& literal[int] )[ literal[int] :]. identifier[upper] (). identifier[zfill] ( literal[int] ) identifier[script_addr_low] = identifier[hex] ( identifier[_SCRIPT_ADDR] & literal[int] )[ literal[int] :]. identifier[upper] (). identifier[zfill] ( literal[int] ) identifier[start_script] = keyword[None] identifier[within_range] = keyword[False] keyword[for] identifier[loc] , identifier[val] keyword[in] identifier[enumerate] ( identifier[hex_lines] ): keyword[if] identifier[val] [ literal[int] : literal[int] ]== literal[string] : identifier[within_range] = identifier[val] [ literal[int] : literal[int] ]. identifier[upper] ()== identifier[script_addr_high] keyword[elif] identifier[within_range] keyword[and] identifier[val] [ literal[int] : literal[int] ]== literal[string] keyword[and] identifier[val] [ literal[int] : literal[int] ]. identifier[upper] ()== identifier[script_addr_low] : identifier[start_script] = identifier[loc] keyword[break] keyword[if] identifier[start_script] : identifier[end_script] = keyword[None] keyword[for] identifier[loc] , identifier[val] keyword[in] identifier[enumerate] ( identifier[hex_lines] [ identifier[start_script] :]): keyword[if] identifier[val] [ literal[int] : literal[int] ]== literal[string] : identifier[end_script] = identifier[loc] + identifier[start_script] keyword[break] keyword[return] identifier[unhexlify] ( literal[string] . identifier[join] ( identifier[hex_lines] [ identifier[start_script] - literal[int] : identifier[end_script] keyword[if] identifier[end_script] keyword[else] - literal[int] ])) keyword[return] literal[string]
def extract_script(embedded_hex): """ Given a hex file containing the MicroPython runtime and an embedded Python script, will extract the original Python script. Returns a string containing the original embedded script. """ hex_lines = embedded_hex.split('\n') script_addr_high = hex(_SCRIPT_ADDR >> 16 & 65535)[2:].upper().zfill(4) script_addr_low = hex(_SCRIPT_ADDR & 65535)[2:].upper().zfill(4) start_script = None within_range = False # Look for the script start address for (loc, val) in enumerate(hex_lines): if val[0:9] == ':02000004': # Reached an extended address record, check if within script range within_range = val[9:13].upper() == script_addr_high # depends on [control=['if'], data=[]] elif within_range and val[0:3] == ':10' and (val[3:7].upper() == script_addr_low): start_script = loc break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] if start_script: # Find the end of the script end_script = None for (loc, val) in enumerate(hex_lines[start_script:]): if val[9:41] == 'FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF': end_script = loc + start_script break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # Pass the extracted hex through unhexlify return unhexlify('\n'.join(hex_lines[start_script - 1:end_script if end_script else -6])) # depends on [control=['if'], data=[]] return ''
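A hedged round trip for extract_script(); the hex file name is a placeholder for any MicroPython hex with an embedded script (uflash-style tooling assumed):

with open('micropython.hex') as f:   # placeholder path
    embedded = f.read()

script = extract_script(embedded)
print(script or '<no embedded script found>')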
def add_membership(self, member, role=github.GithubObject.NotSet): """ :calls: `PUT /teams/:id/memberships/:user <http://developer.github.com/v3/orgs/teams>`_ :param member: :class:`github.Nameduser.NamedUser` :param role: string :rtype: None """ assert isinstance(member, github.NamedUser.NamedUser), member assert role is github.GithubObject.NotSet or isinstance( role, (str, unicode)), role if role is not github.GithubObject.NotSet: assert role in ['member', 'maintainer'] put_parameters = { "role": role, } else: put_parameters = { "role": "member", } headers, data = self._requester.requestJsonAndCheck( "PUT", self.url + "/memberships/" + member._identity, input=put_parameters )
def function[add_membership, parameter[self, member, role]]: constant[ :calls: `PUT /teams/:id/memberships/:user <http://developer.github.com/v3/orgs/teams>`_ :param member: :class:`github.Nameduser.NamedUser` :param role: string :rtype: None ] assert[call[name[isinstance], parameter[name[member], name[github].NamedUser.NamedUser]]] assert[<ast.BoolOp object at 0x7da1b21a3bb0>] if compare[name[role] is_not name[github].GithubObject.NotSet] begin[:] assert[compare[name[role] in list[[<ast.Constant object at 0x7da1b21a3df0>, <ast.Constant object at 0x7da1b21a01f0>]]]] variable[put_parameters] assign[=] dictionary[[<ast.Constant object at 0x7da1b21a1660>], [<ast.Name object at 0x7da1b21a0b80>]] <ast.Tuple object at 0x7da1b21a1f30> assign[=] call[name[self]._requester.requestJsonAndCheck, parameter[constant[PUT], binary_operation[binary_operation[name[self].url + constant[/memberships/]] + name[member]._identity]]]
keyword[def] identifier[add_membership] ( identifier[self] , identifier[member] , identifier[role] = identifier[github] . identifier[GithubObject] . identifier[NotSet] ): literal[string] keyword[assert] identifier[isinstance] ( identifier[member] , identifier[github] . identifier[NamedUser] . identifier[NamedUser] ), identifier[member] keyword[assert] identifier[role] keyword[is] identifier[github] . identifier[GithubObject] . identifier[NotSet] keyword[or] identifier[isinstance] ( identifier[role] ,( identifier[str] , identifier[unicode] )), identifier[role] keyword[if] identifier[role] keyword[is] keyword[not] identifier[github] . identifier[GithubObject] . identifier[NotSet] : keyword[assert] identifier[role] keyword[in] [ literal[string] , literal[string] ] identifier[put_parameters] ={ literal[string] : identifier[role] , } keyword[else] : identifier[put_parameters] ={ literal[string] : literal[string] , } identifier[headers] , identifier[data] = identifier[self] . identifier[_requester] . identifier[requestJsonAndCheck] ( literal[string] , identifier[self] . identifier[url] + literal[string] + identifier[member] . identifier[_identity] , identifier[input] = identifier[put_parameters] )
def add_membership(self, member, role=github.GithubObject.NotSet): """ :calls: `PUT /teams/:id/memberships/:user <http://developer.github.com/v3/orgs/teams>`_ :param member: :class:`github.Nameduser.NamedUser` :param role: string :rtype: None """ assert isinstance(member, github.NamedUser.NamedUser), member assert role is github.GithubObject.NotSet or isinstance(role, (str, unicode)), role if role is not github.GithubObject.NotSet: assert role in ['member', 'maintainer'] put_parameters = {'role': role} # depends on [control=['if'], data=['role']] else: put_parameters = {'role': 'member'} (headers, data) = self._requester.requestJsonAndCheck('PUT', self.url + '/memberships/' + member._identity, input=put_parameters)
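A PyGithub-flavored sketch for add_membership(); the token, organization name, team id, and login are placeholders:

from github import Github

g = Github('<personal-access-token>')            # placeholder credentials
team = g.get_organization('acme').get_team(42)   # placeholder org / team id
user = g.get_user('octocat')                     # placeholder login

team.add_membership(user)                        # defaults to role="member"
team.add_membership(user, role='maintainer')     # promote to maintainer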
def put_job_into(self, tube_name, data, pri=65536, delay=0, ttr=120):
    """Insert a new job into a specific queue. Wrapper around
    :func:`put_job`.

    :param tube_name: Tube name
    :type tube_name: str
    :param data: Job body
    :type data: Text (either str which will be encoded as utf-8, or bytes
        which are already utf-8)
    :param pri: Priority for the job
    :type pri: int
    :param delay: Delay in seconds before the job should be placed on the
        ready queue
    :type delay: int
    :param ttr: Time to reserve (how long a worker may work on this job
        before we assume the worker is blocked and give the job to another
        worker)
    :type ttr: int

    .. seealso::

       :func:`put_job()`
          Put a job into whatever the current tube is

       :func:`using()`
          Insert a job using an external guard
    """
    with self.using(tube_name) as inserter:
        return inserter.put_job(data=data, pri=pri, delay=delay, ttr=ttr)
def function[put_job_into, parameter[self, tube_name, data, pri, delay, ttr]]: constant[Insert a new job into a specific queue. Wrapper around :func:`put_job`. :param tube_name: Tube name :type tube_name: str :param data: Job body :type data: Text (either str which will be encoded as utf-8, or bytes which are already utf-8 :param pri: Priority for the job :type pri: int :param delay: Delay in seconds before the job should be placed on the ready queue :type delay: int :param ttr: Time to reserve (how long a worker may work on this job before we assume the worker is blocked and give the job to another worker :type ttr: int .. seealso:: :func:`put_job()` Put a job into whatever the current tube is :func:`using()` Insert a job using an external guard ] with call[name[self].using, parameter[name[tube_name]]] begin[:] return[call[name[inserter].put_job, parameter[]]]
keyword[def] identifier[put_job_into] ( identifier[self] , identifier[tube_name] , identifier[data] , identifier[pri] = literal[int] , identifier[delay] = literal[int] , identifier[ttr] = literal[int] ): literal[string] keyword[with] identifier[self] . identifier[using] ( identifier[tube_name] ) keyword[as] identifier[inserter] : keyword[return] identifier[inserter] . identifier[put_job] ( identifier[data] = identifier[data] , identifier[pri] = identifier[pri] , identifier[delay] = identifier[delay] , identifier[ttr] = identifier[ttr] )
def put_job_into(self, tube_name, data, pri=65536, delay=0, ttr=120):
    """Insert a new job into a specific queue. Wrapper around
    :func:`put_job`.

    :param tube_name: Tube name
    :type tube_name: str
    :param data: Job body
    :type data: Text (either str which will be encoded as utf-8, or bytes
        which are already utf-8)
    :param pri: Priority for the job
    :type pri: int
    :param delay: Delay in seconds before the job should be placed on the
        ready queue
    :type delay: int
    :param ttr: Time to reserve (how long a worker may work on this job
        before we assume the worker is blocked and give the job to another
        worker)
    :type ttr: int

    .. seealso::

       :func:`put_job()`
          Put a job into whatever the current tube is

       :func:`using()`
          Insert a job using an external guard
    """
    with self.using(tube_name) as inserter:
        return inserter.put_job(data=data, pri=pri, delay=delay, ttr=ttr) # depends on [control=['with'], data=['inserter']]
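A short sketch of inserting a job via the wrapper above; `client` is a hypothetical stand-in for a connected client object exposing this method, and the tube name and payload are made up:

# `client` stands in for a connected client exposing the method above.
client.put_job_into('email-queue', b'{"to": "user@example.com"}',
                    pri=1024, delay=0, ttr=60)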
def session_rollback(self, session):
    """Send session_rollback signal in sqlalchemy ``after_rollback``.

    This marks the failure of the session so the session may enter the
    commit phase.
    """
    # this may happen when there's nothing to rollback
    if not hasattr(session, 'meepo_unique_id'):
        self.logger.debug("skipped - session_rollback")
        return

    # del session meepo id after rollback
    self.logger.debug("%s - after_rollback" % session.meepo_unique_id)
    signal("session_rollback").send(session)
    self._session_del(session)
def function[session_rollback, parameter[self, session]]: constant[Send session_rollback signal in sqlalchemy ``after_rollback``. This marks the failure of session so the session may enter commit phase. ] if <ast.UnaryOp object at 0x7da20cabc2e0> begin[:] call[name[self].logger.debug, parameter[constant[skipped - session_rollback]]] return[None] call[name[self].logger.debug, parameter[binary_operation[constant[%s - after_rollback] <ast.Mod object at 0x7da2590d6920> name[session].meepo_unique_id]]] call[call[name[signal], parameter[constant[session_rollback]]].send, parameter[name[session]]] call[name[self]._session_del, parameter[name[session]]]
keyword[def] identifier[session_rollback] ( identifier[self] , identifier[session] ): literal[string] keyword[if] keyword[not] identifier[hasattr] ( identifier[session] , literal[string] ): identifier[self] . identifier[logger] . identifier[debug] ( literal[string] ) keyword[return] identifier[self] . identifier[logger] . identifier[debug] ( literal[string] % identifier[session] . identifier[meepo_unique_id] ) identifier[signal] ( literal[string] ). identifier[send] ( identifier[session] ) identifier[self] . identifier[_session_del] ( identifier[session] )
def session_rollback(self, session): """Send session_rollback signal in sqlalchemy ``after_rollback``. This marks the failure of session so the session may enter commit phase. """ # this may happen when there's nothing to rollback if not hasattr(session, 'meepo_unique_id'): self.logger.debug('skipped - session_rollback') return # depends on [control=['if'], data=[]] # del session meepo id after rollback self.logger.debug('%s - after_rollback' % session.meepo_unique_id) signal('session_rollback').send(session) self._session_del(session)
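The hook above publishes through blinker-style named signals; a hedged sketch of subscribing to the event it emits (the handler name is made up):

from blinker import signal

def on_session_rollback(session):
    # `session` is the SQLAlchemy session the publisher sends.
    print('rollback observed for', getattr(session, 'meepo_unique_id', None))

signal('session_rollback').connect(on_session_rollback)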
def insert(self, index, *grids): """Return a copy with ``grids`` inserted before ``index``. The given grids are inserted (as a block) into ``self``, yielding a new grid whose number of dimensions is the sum of the numbers of dimensions of all involved grids. Note that no changes are made in-place. Parameters ---------- index : int The index of the dimension before which ``grids`` are to be inserted. Negative indices count backwards from ``self.ndim``. grid1, ..., gridN : `RectGrid` The grids to be inserted into ``self``. Returns ------- newgrid : `RectGrid` The enlarged grid. Examples -------- >>> g1 = RectGrid([0, 1], [-1, 0, 2]) >>> g2 = RectGrid([1], [-6, 15]) >>> g1.insert(1, g2) RectGrid( [ 0., 1.], [ 1.], [ -6., 15.], [-1., 0., 2.] ) >>> g1.insert(1, g2, g2) RectGrid( [ 0., 1.], [ 1.], [ -6., 15.], [ 1.], [ -6., 15.], [-1., 0., 2.] ) See Also -------- append """ index, index_in = safe_int_conv(index), index if not -self.ndim <= index <= self.ndim: raise IndexError('index {0} outside the valid range -{1} ... {1}' ''.format(index_in, self.ndim)) if index < 0: index += self.ndim if len(grids) == 0: # Copy of `self` return RectGrid(*self.coord_vectors) elif len(grids) == 1: # Insert single grid grid = grids[0] if not isinstance(grid, RectGrid): raise TypeError('{!r} is not a `RectGrid` instance' ''.format(grid)) new_vecs = (self.coord_vectors[:index] + grid.coord_vectors + self.coord_vectors[index:]) return RectGrid(*new_vecs) else: # Recursively insert first grid and the remaining into the result return self.insert(index, grids[0]).insert( index + grids[0].ndim, *(grids[1:]))
def function[insert, parameter[self, index]]: constant[Return a copy with ``grids`` inserted before ``index``. The given grids are inserted (as a block) into ``self``, yielding a new grid whose number of dimensions is the sum of the numbers of dimensions of all involved grids. Note that no changes are made in-place. Parameters ---------- index : int The index of the dimension before which ``grids`` are to be inserted. Negative indices count backwards from ``self.ndim``. grid1, ..., gridN : `RectGrid` The grids to be inserted into ``self``. Returns ------- newgrid : `RectGrid` The enlarged grid. Examples -------- >>> g1 = RectGrid([0, 1], [-1, 0, 2]) >>> g2 = RectGrid([1], [-6, 15]) >>> g1.insert(1, g2) RectGrid( [ 0., 1.], [ 1.], [ -6., 15.], [-1., 0., 2.] ) >>> g1.insert(1, g2, g2) RectGrid( [ 0., 1.], [ 1.], [ -6., 15.], [ 1.], [ -6., 15.], [-1., 0., 2.] ) See Also -------- append ] <ast.Tuple object at 0x7da1b1d0ef50> assign[=] tuple[[<ast.Call object at 0x7da1b1d0dea0>, <ast.Name object at 0x7da1b1d0c8e0>]] if <ast.UnaryOp object at 0x7da1b1d0f0d0> begin[:] <ast.Raise object at 0x7da1b1d0ead0> if compare[name[index] less[<] constant[0]] begin[:] <ast.AugAssign object at 0x7da1b1d0ec50> if compare[call[name[len], parameter[name[grids]]] equal[==] constant[0]] begin[:] return[call[name[RectGrid], parameter[<ast.Starred object at 0x7da18f58e830>]]]
keyword[def] identifier[insert] ( identifier[self] , identifier[index] ,* identifier[grids] ): literal[string] identifier[index] , identifier[index_in] = identifier[safe_int_conv] ( identifier[index] ), identifier[index] keyword[if] keyword[not] - identifier[self] . identifier[ndim] <= identifier[index] <= identifier[self] . identifier[ndim] : keyword[raise] identifier[IndexError] ( literal[string] literal[string] . identifier[format] ( identifier[index_in] , identifier[self] . identifier[ndim] )) keyword[if] identifier[index] < literal[int] : identifier[index] += identifier[self] . identifier[ndim] keyword[if] identifier[len] ( identifier[grids] )== literal[int] : keyword[return] identifier[RectGrid] (* identifier[self] . identifier[coord_vectors] ) keyword[elif] identifier[len] ( identifier[grids] )== literal[int] : identifier[grid] = identifier[grids] [ literal[int] ] keyword[if] keyword[not] identifier[isinstance] ( identifier[grid] , identifier[RectGrid] ): keyword[raise] identifier[TypeError] ( literal[string] literal[string] . identifier[format] ( identifier[grid] )) identifier[new_vecs] =( identifier[self] . identifier[coord_vectors] [: identifier[index] ]+ identifier[grid] . identifier[coord_vectors] + identifier[self] . identifier[coord_vectors] [ identifier[index] :]) keyword[return] identifier[RectGrid] (* identifier[new_vecs] ) keyword[else] : keyword[return] identifier[self] . identifier[insert] ( identifier[index] , identifier[grids] [ literal[int] ]). identifier[insert] ( identifier[index] + identifier[grids] [ literal[int] ]. identifier[ndim] ,*( identifier[grids] [ literal[int] :]))
def insert(self, index, *grids): """Return a copy with ``grids`` inserted before ``index``. The given grids are inserted (as a block) into ``self``, yielding a new grid whose number of dimensions is the sum of the numbers of dimensions of all involved grids. Note that no changes are made in-place. Parameters ---------- index : int The index of the dimension before which ``grids`` are to be inserted. Negative indices count backwards from ``self.ndim``. grid1, ..., gridN : `RectGrid` The grids to be inserted into ``self``. Returns ------- newgrid : `RectGrid` The enlarged grid. Examples -------- >>> g1 = RectGrid([0, 1], [-1, 0, 2]) >>> g2 = RectGrid([1], [-6, 15]) >>> g1.insert(1, g2) RectGrid( [ 0., 1.], [ 1.], [ -6., 15.], [-1., 0., 2.] ) >>> g1.insert(1, g2, g2) RectGrid( [ 0., 1.], [ 1.], [ -6., 15.], [ 1.], [ -6., 15.], [-1., 0., 2.] ) See Also -------- append """ (index, index_in) = (safe_int_conv(index), index) if not -self.ndim <= index <= self.ndim: raise IndexError('index {0} outside the valid range -{1} ... {1}'.format(index_in, self.ndim)) # depends on [control=['if'], data=[]] if index < 0: index += self.ndim # depends on [control=['if'], data=['index']] if len(grids) == 0: # Copy of `self` return RectGrid(*self.coord_vectors) # depends on [control=['if'], data=[]] elif len(grids) == 1: # Insert single grid grid = grids[0] if not isinstance(grid, RectGrid): raise TypeError('{!r} is not a `RectGrid` instance'.format(grid)) # depends on [control=['if'], data=[]] new_vecs = self.coord_vectors[:index] + grid.coord_vectors + self.coord_vectors[index:] return RectGrid(*new_vecs) # depends on [control=['if'], data=[]] else: # Recursively insert first grid and the remaining into the result return self.insert(index, grids[0]).insert(index + grids[0].ndim, *grids[1:])
def mfpt_sensitivity(T, target, i):
    r"""Sensitivity matrix of the mean first-passage time from specified state.

    Parameters
    ----------
    T : (M, M) ndarray
        Transition matrix
    target : int or list
        Target state or set for mfpt computation
    i : int
        Compute the sensitivity for state `i`

    Returns
    -------
    S : (M, M) ndarray
        Sensitivity matrix for specified state

    """
    # check input
    T = _types.ensure_ndarray_or_sparse(T, ndim=2, uniform=True, kind='numeric')
    target = _types.ensure_int_vector(target)
    # go
    if _issparse(T):
        _showSparseConversionWarning()
        # return the dense result; without `return` the sparse branch
        # would silently yield None
        return mfpt_sensitivity(T.todense(), target, i)
    else:
        return dense.sensitivity.mfpt_sensitivity(T, target, i)
def function[mfpt_sensitivity, parameter[T, target, i]]: constant[Sensitivity matrix of the mean first-passage time from specified state. Parameters ---------- T : (M, M) ndarray Transition matrix target : int or list Target state or set for mfpt computation i : int Compute the sensitivity for state `i` Returns ------- S : (M, M) ndarray Sensitivity matrix for specified state ] variable[T] assign[=] call[name[_types].ensure_ndarray_or_sparse, parameter[name[T]]] variable[target] assign[=] call[name[_types].ensure_int_vector, parameter[name[target]]] if call[name[_issparse], parameter[name[T]]] begin[:] call[name[_showSparseConversionWarning], parameter[]] call[name[mfpt_sensitivity], parameter[call[name[T].todense, parameter[]], name[target], name[i]]]
keyword[def] identifier[mfpt_sensitivity] ( identifier[T] , identifier[target] , identifier[i] ): literal[string] identifier[T] = identifier[_types] . identifier[ensure_ndarray_or_sparse] ( identifier[T] , identifier[ndim] = literal[int] , identifier[uniform] = keyword[True] , identifier[kind] = literal[string] ) identifier[target] = identifier[_types] . identifier[ensure_int_vector] ( identifier[target] ) keyword[if] identifier[_issparse] ( identifier[T] ): identifier[_showSparseConversionWarning] () identifier[mfpt_sensitivity] ( identifier[T] . identifier[todense] (), identifier[target] , identifier[i] ) keyword[else] : keyword[return] identifier[dense] . identifier[sensitivity] . identifier[mfpt_sensitivity] ( identifier[T] , identifier[target] , identifier[i] )
def mfpt_sensitivity(T, target, i):
    """Sensitivity matrix of the mean first-passage time from specified state.

    Parameters
    ----------
    T : (M, M) ndarray
        Transition matrix
    target : int or list
        Target state or set for mfpt computation
    i : int
        Compute the sensitivity for state `i`

    Returns
    -------
    S : (M, M) ndarray
        Sensitivity matrix for specified state

    """
    # check input
    T = _types.ensure_ndarray_or_sparse(T, ndim=2, uniform=True, kind='numeric')
    target = _types.ensure_int_vector(target)
    # go
    if _issparse(T):
        _showSparseConversionWarning()
        # return the dense result; without `return` the sparse branch
        # would silently yield None
        return mfpt_sensitivity(T.todense(), target, i) # depends on [control=['if'], data=[]]
    else:
        return dense.sensitivity.mfpt_sensitivity(T, target, i)
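A small example on a two-state chain, assuming the function above is in scope along with its module-level numpy import; dense input takes the fast path directly:

import numpy as np

# Row-stochastic transition matrix for a two-state chain.
T = np.array([[0.9, 0.1],
              [0.2, 0.8]])
# Sensitivity of the mean first-passage time into state 1, for state 0.
S = mfpt_sensitivity(T, [1], 0)
print(S.shape)  # (2, 2)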
def _add_event_in_element(self, element, event): """ Add a type of event in element. :param element: The element. :type element: hatemile.util.html.htmldomelement.HTMLDOMElement :param event: The type of event. :type event: str """ if not self.main_script_added: self._generate_main_scripts() if self.script_list is not None: self.id_generator.generate_id(element) self.script_list.append_text( event + "Elements.push('" + element.get_attribute('id') + "');" )
def function[_add_event_in_element, parameter[self, element, event]]: constant[ Add a type of event in element. :param element: The element. :type element: hatemile.util.html.htmldomelement.HTMLDOMElement :param event: The type of event. :type event: str ] if <ast.UnaryOp object at 0x7da1b27e0850> begin[:] call[name[self]._generate_main_scripts, parameter[]] if compare[name[self].script_list is_not constant[None]] begin[:] call[name[self].id_generator.generate_id, parameter[name[element]]] call[name[self].script_list.append_text, parameter[binary_operation[binary_operation[binary_operation[name[event] + constant[Elements.push(']] + call[name[element].get_attribute, parameter[constant[id]]]] + constant[');]]]]
keyword[def] identifier[_add_event_in_element] ( identifier[self] , identifier[element] , identifier[event] ): literal[string] keyword[if] keyword[not] identifier[self] . identifier[main_script_added] : identifier[self] . identifier[_generate_main_scripts] () keyword[if] identifier[self] . identifier[script_list] keyword[is] keyword[not] keyword[None] : identifier[self] . identifier[id_generator] . identifier[generate_id] ( identifier[element] ) identifier[self] . identifier[script_list] . identifier[append_text] ( identifier[event] + literal[string] + identifier[element] . identifier[get_attribute] ( literal[string] ) + literal[string] )
def _add_event_in_element(self, element, event): """ Add a type of event in element. :param element: The element. :type element: hatemile.util.html.htmldomelement.HTMLDOMElement :param event: The type of event. :type event: str """ if not self.main_script_added: self._generate_main_scripts() # depends on [control=['if'], data=[]] if self.script_list is not None: self.id_generator.generate_id(element) self.script_list.append_text(event + "Elements.push('" + element.get_attribute('id') + "');") # depends on [control=['if'], data=[]]
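For reference, a tiny standalone sketch of the script line the helper above appends, using hypothetical values for the event type and the generated element id:

event = 'click'            # hypothetical event type
element_id = 'hatemile-1'  # hypothetical generated id
script_line = event + "Elements.push('" + element_id + "');"
print(script_line)  # clickElements.push('hatemile-1');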
def enable_server(name, backend, socket=DEFAULT_SOCKET_URL): ''' Enable Server in haproxy name Server to enable backend haproxy backend, or all backends if "*" is supplied socket haproxy stats socket, default ``/var/run/haproxy.sock`` CLI Example: .. code-block:: bash salt '*' haproxy.enable_server web1.example.com www ''' if backend == '*': backends = show_backends(socket=socket).split('\n') else: backends = [backend] results = {} for backend in backends: ha_conn = _get_conn(socket) ha_cmd = haproxy.cmds.enableServer(server=name, backend=backend) ha_conn.sendCmd(ha_cmd) results[backend] = list_servers(backend, socket=socket) return results
def function[enable_server, parameter[name, backend, socket]]: constant[ Enable Server in haproxy name Server to enable backend haproxy backend, or all backends if "*" is supplied socket haproxy stats socket, default ``/var/run/haproxy.sock`` CLI Example: .. code-block:: bash salt '*' haproxy.enable_server web1.example.com www ] if compare[name[backend] equal[==] constant[*]] begin[:] variable[backends] assign[=] call[call[name[show_backends], parameter[]].split, parameter[constant[ ]]] variable[results] assign[=] dictionary[[], []] for taget[name[backend]] in starred[name[backends]] begin[:] variable[ha_conn] assign[=] call[name[_get_conn], parameter[name[socket]]] variable[ha_cmd] assign[=] call[name[haproxy].cmds.enableServer, parameter[]] call[name[ha_conn].sendCmd, parameter[name[ha_cmd]]] call[name[results]][name[backend]] assign[=] call[name[list_servers], parameter[name[backend]]] return[name[results]]
keyword[def] identifier[enable_server] ( identifier[name] , identifier[backend] , identifier[socket] = identifier[DEFAULT_SOCKET_URL] ): literal[string] keyword[if] identifier[backend] == literal[string] : identifier[backends] = identifier[show_backends] ( identifier[socket] = identifier[socket] ). identifier[split] ( literal[string] ) keyword[else] : identifier[backends] =[ identifier[backend] ] identifier[results] ={} keyword[for] identifier[backend] keyword[in] identifier[backends] : identifier[ha_conn] = identifier[_get_conn] ( identifier[socket] ) identifier[ha_cmd] = identifier[haproxy] . identifier[cmds] . identifier[enableServer] ( identifier[server] = identifier[name] , identifier[backend] = identifier[backend] ) identifier[ha_conn] . identifier[sendCmd] ( identifier[ha_cmd] ) identifier[results] [ identifier[backend] ]= identifier[list_servers] ( identifier[backend] , identifier[socket] = identifier[socket] ) keyword[return] identifier[results]
def enable_server(name, backend, socket=DEFAULT_SOCKET_URL): """ Enable Server in haproxy name Server to enable backend haproxy backend, or all backends if "*" is supplied socket haproxy stats socket, default ``/var/run/haproxy.sock`` CLI Example: .. code-block:: bash salt '*' haproxy.enable_server web1.example.com www """ if backend == '*': backends = show_backends(socket=socket).split('\n') # depends on [control=['if'], data=[]] else: backends = [backend] results = {} for backend in backends: ha_conn = _get_conn(socket) ha_cmd = haproxy.cmds.enableServer(server=name, backend=backend) ha_conn.sendCmd(ha_cmd) results[backend] = list_servers(backend, socket=socket) # depends on [control=['for'], data=['backend']] return results
def multibox(centers, pitch, colors=None): """ Return a Trimesh object with a box at every center. Doesn't do anything nice or fancy. Parameters ----------- centers: (n,3) float, center of boxes that are occupied pitch: float, the edge length of a voxel colors: (3,) or (4,) or (n,3) or (n, 4) float, color of boxes Returns --------- rough: Trimesh object representing inputs """ from . import primitives from .base import Trimesh b = primitives.Box(extents=[pitch, pitch, pitch]) v = np.tile(centers, (1, len(b.vertices))).reshape((-1, 3)) v += np.tile(b.vertices, (len(centers), 1)) f = np.tile(b.faces, (len(centers), 1)) f += np.tile(np.arange(len(centers)) * len(b.vertices), (len(b.faces), 1)).T.reshape((-1, 1)) face_colors = None if colors is not None: colors = np.asarray(colors) if colors.ndim == 1: colors = colors[None].repeat(len(centers), axis=0) if colors.ndim == 2 and len(colors) == len(centers): face_colors = colors.repeat(12, axis=0) mesh = Trimesh(vertices=v, faces=f, face_colors=face_colors) return mesh
def function[multibox, parameter[centers, pitch, colors]]: constant[ Return a Trimesh object with a box at every center. Doesn't do anything nice or fancy. Parameters ----------- centers: (n,3) float, center of boxes that are occupied pitch: float, the edge length of a voxel colors: (3,) or (4,) or (n,3) or (n, 4) float, color of boxes Returns --------- rough: Trimesh object representing inputs ] from relative_module[None] import module[primitives] from relative_module[base] import module[Trimesh] variable[b] assign[=] call[name[primitives].Box, parameter[]] variable[v] assign[=] call[call[name[np].tile, parameter[name[centers], tuple[[<ast.Constant object at 0x7da2044c1090>, <ast.Call object at 0x7da2044c2020>]]]].reshape, parameter[tuple[[<ast.UnaryOp object at 0x7da2044c17b0>, <ast.Constant object at 0x7da2044c0e20>]]]] <ast.AugAssign object at 0x7da2044c2680> variable[f] assign[=] call[name[np].tile, parameter[name[b].faces, tuple[[<ast.Call object at 0x7da2044c0ca0>, <ast.Constant object at 0x7da2044c0d30>]]]] <ast.AugAssign object at 0x7da2044c3cd0> variable[face_colors] assign[=] constant[None] if compare[name[colors] is_not constant[None]] begin[:] variable[colors] assign[=] call[name[np].asarray, parameter[name[colors]]] if compare[name[colors].ndim equal[==] constant[1]] begin[:] variable[colors] assign[=] call[call[name[colors]][constant[None]].repeat, parameter[call[name[len], parameter[name[centers]]]]] if <ast.BoolOp object at 0x7da2044c3a60> begin[:] variable[face_colors] assign[=] call[name[colors].repeat, parameter[constant[12]]] variable[mesh] assign[=] call[name[Trimesh], parameter[]] return[name[mesh]]
keyword[def] identifier[multibox] ( identifier[centers] , identifier[pitch] , identifier[colors] = keyword[None] ): literal[string] keyword[from] . keyword[import] identifier[primitives] keyword[from] . identifier[base] keyword[import] identifier[Trimesh] identifier[b] = identifier[primitives] . identifier[Box] ( identifier[extents] =[ identifier[pitch] , identifier[pitch] , identifier[pitch] ]) identifier[v] = identifier[np] . identifier[tile] ( identifier[centers] ,( literal[int] , identifier[len] ( identifier[b] . identifier[vertices] ))). identifier[reshape] ((- literal[int] , literal[int] )) identifier[v] += identifier[np] . identifier[tile] ( identifier[b] . identifier[vertices] ,( identifier[len] ( identifier[centers] ), literal[int] )) identifier[f] = identifier[np] . identifier[tile] ( identifier[b] . identifier[faces] ,( identifier[len] ( identifier[centers] ), literal[int] )) identifier[f] += identifier[np] . identifier[tile] ( identifier[np] . identifier[arange] ( identifier[len] ( identifier[centers] ))* identifier[len] ( identifier[b] . identifier[vertices] ), ( identifier[len] ( identifier[b] . identifier[faces] ), literal[int] )). identifier[T] . identifier[reshape] ((- literal[int] , literal[int] )) identifier[face_colors] = keyword[None] keyword[if] identifier[colors] keyword[is] keyword[not] keyword[None] : identifier[colors] = identifier[np] . identifier[asarray] ( identifier[colors] ) keyword[if] identifier[colors] . identifier[ndim] == literal[int] : identifier[colors] = identifier[colors] [ keyword[None] ]. identifier[repeat] ( identifier[len] ( identifier[centers] ), identifier[axis] = literal[int] ) keyword[if] identifier[colors] . identifier[ndim] == literal[int] keyword[and] identifier[len] ( identifier[colors] )== identifier[len] ( identifier[centers] ): identifier[face_colors] = identifier[colors] . identifier[repeat] ( literal[int] , identifier[axis] = literal[int] ) identifier[mesh] = identifier[Trimesh] ( identifier[vertices] = identifier[v] , identifier[faces] = identifier[f] , identifier[face_colors] = identifier[face_colors] ) keyword[return] identifier[mesh]
def multibox(centers, pitch, colors=None): """ Return a Trimesh object with a box at every center. Doesn't do anything nice or fancy. Parameters ----------- centers: (n,3) float, center of boxes that are occupied pitch: float, the edge length of a voxel colors: (3,) or (4,) or (n,3) or (n, 4) float, color of boxes Returns --------- rough: Trimesh object representing inputs """ from . import primitives from .base import Trimesh b = primitives.Box(extents=[pitch, pitch, pitch]) v = np.tile(centers, (1, len(b.vertices))).reshape((-1, 3)) v += np.tile(b.vertices, (len(centers), 1)) f = np.tile(b.faces, (len(centers), 1)) f += np.tile(np.arange(len(centers)) * len(b.vertices), (len(b.faces), 1)).T.reshape((-1, 1)) face_colors = None if colors is not None: colors = np.asarray(colors) if colors.ndim == 1: colors = colors[None].repeat(len(centers), axis=0) # depends on [control=['if'], data=[]] if colors.ndim == 2 and len(colors) == len(centers): face_colors = colors.repeat(12, axis=0) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['colors']] mesh = Trimesh(vertices=v, faces=f, face_colors=face_colors) return mesh
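A hedged usage sketch for the function above, assuming it is in scope (its import path varies across trimesh versions); two boxes give 16 vertices and 24 triangular faces:

import numpy as np

# Assumes `multibox` (defined above) is in scope.
centers = np.array([[0.0, 0.0, 0.0],
                    [1.0, 0.0, 0.0]])
mesh = multibox(centers, pitch=0.5, colors=[255, 0, 0, 255])
print(mesh.vertices.shape, mesh.faces.shape)  # (16, 3) (24, 3)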
def p_single_statement_delays(self, p): 'single_statement : DELAY expression SEMICOLON' p[0] = SingleStatement(DelayStatement( p[2], lineno=p.lineno(1)), lineno=p.lineno(1)) p.set_lineno(0, p.lineno(1))
def function[p_single_statement_delays, parameter[self, p]]: constant[single_statement : DELAY expression SEMICOLON] call[name[p]][constant[0]] assign[=] call[name[SingleStatement], parameter[call[name[DelayStatement], parameter[call[name[p]][constant[2]]]]]] call[name[p].set_lineno, parameter[constant[0], call[name[p].lineno, parameter[constant[1]]]]]
keyword[def] identifier[p_single_statement_delays] ( identifier[self] , identifier[p] ): literal[string] identifier[p] [ literal[int] ]= identifier[SingleStatement] ( identifier[DelayStatement] ( identifier[p] [ literal[int] ], identifier[lineno] = identifier[p] . identifier[lineno] ( literal[int] )), identifier[lineno] = identifier[p] . identifier[lineno] ( literal[int] )) identifier[p] . identifier[set_lineno] ( literal[int] , identifier[p] . identifier[lineno] ( literal[int] ))
def p_single_statement_delays(self, p): """single_statement : DELAY expression SEMICOLON""" p[0] = SingleStatement(DelayStatement(p[2], lineno=p.lineno(1)), lineno=p.lineno(1)) p.set_lineno(0, p.lineno(1))
async def _send_scan_event(self, device): """Send a scan event from a device.""" conn_string = str(device.iotile_id) info = { 'connection_string': conn_string, 'uuid': device.iotile_id, 'signal_strength': 100, 'validity_period': self.ExpirationTime } await self.notify_event(conn_string, 'device_seen', info)
<ast.AsyncFunctionDef object at 0x7da18f723730>
keyword[async] keyword[def] identifier[_send_scan_event] ( identifier[self] , identifier[device] ): literal[string] identifier[conn_string] = identifier[str] ( identifier[device] . identifier[iotile_id] ) identifier[info] ={ literal[string] : identifier[conn_string] , literal[string] : identifier[device] . identifier[iotile_id] , literal[string] : literal[int] , literal[string] : identifier[self] . identifier[ExpirationTime] } keyword[await] identifier[self] . identifier[notify_event] ( identifier[conn_string] , literal[string] , identifier[info] )
async def _send_scan_event(self, device): """Send a scan event from a device.""" conn_string = str(device.iotile_id) info = {'connection_string': conn_string, 'uuid': device.iotile_id, 'signal_strength': 100, 'validity_period': self.ExpirationTime} await self.notify_event(conn_string, 'device_seen', info)
def _fit(self, col): """Create a map of the empirical probability for each category. Args: col(pandas.DataFrame): Data to transform. """ column = col[self.col_name].replace({np.nan: np.inf}) frequencies = column.groupby(column).count().rename({np.inf: None}).to_dict() # next set probability ranges on interval [0,1] start = 0 end = 0 num_vals = len(col) for val in frequencies: prob = frequencies[val] / num_vals end = start + prob interval = (start, end) mean = np.mean(interval) std = prob / 6 self.probability_map[val] = (interval, mean, std) start = end
def function[_fit, parameter[self, col]]: constant[Create a map of the empirical probability for each category. Args: col(pandas.DataFrame): Data to transform. ] variable[column] assign[=] call[call[name[col]][name[self].col_name].replace, parameter[dictionary[[<ast.Attribute object at 0x7da204347160>], [<ast.Attribute object at 0x7da2043446d0>]]]] variable[frequencies] assign[=] call[call[call[call[name[column].groupby, parameter[name[column]]].count, parameter[]].rename, parameter[dictionary[[<ast.Attribute object at 0x7da204346410>], [<ast.Constant object at 0x7da204345210>]]]].to_dict, parameter[]] variable[start] assign[=] constant[0] variable[end] assign[=] constant[0] variable[num_vals] assign[=] call[name[len], parameter[name[col]]] for taget[name[val]] in starred[name[frequencies]] begin[:] variable[prob] assign[=] binary_operation[call[name[frequencies]][name[val]] / name[num_vals]] variable[end] assign[=] binary_operation[name[start] + name[prob]] variable[interval] assign[=] tuple[[<ast.Name object at 0x7da204346200>, <ast.Name object at 0x7da204344490>]] variable[mean] assign[=] call[name[np].mean, parameter[name[interval]]] variable[std] assign[=] binary_operation[name[prob] / constant[6]] call[name[self].probability_map][name[val]] assign[=] tuple[[<ast.Name object at 0x7da204347a90>, <ast.Name object at 0x7da204347c70>, <ast.Name object at 0x7da2043467a0>]] variable[start] assign[=] name[end]
keyword[def] identifier[_fit] ( identifier[self] , identifier[col] ): literal[string] identifier[column] = identifier[col] [ identifier[self] . identifier[col_name] ]. identifier[replace] ({ identifier[np] . identifier[nan] : identifier[np] . identifier[inf] }) identifier[frequencies] = identifier[column] . identifier[groupby] ( identifier[column] ). identifier[count] (). identifier[rename] ({ identifier[np] . identifier[inf] : keyword[None] }). identifier[to_dict] () identifier[start] = literal[int] identifier[end] = literal[int] identifier[num_vals] = identifier[len] ( identifier[col] ) keyword[for] identifier[val] keyword[in] identifier[frequencies] : identifier[prob] = identifier[frequencies] [ identifier[val] ]/ identifier[num_vals] identifier[end] = identifier[start] + identifier[prob] identifier[interval] =( identifier[start] , identifier[end] ) identifier[mean] = identifier[np] . identifier[mean] ( identifier[interval] ) identifier[std] = identifier[prob] / literal[int] identifier[self] . identifier[probability_map] [ identifier[val] ]=( identifier[interval] , identifier[mean] , identifier[std] ) identifier[start] = identifier[end]
def _fit(self, col): """Create a map of the empirical probability for each category. Args: col(pandas.DataFrame): Data to transform. """ column = col[self.col_name].replace({np.nan: np.inf}) frequencies = column.groupby(column).count().rename({np.inf: None}).to_dict() # next set probability ranges on interval [0,1] start = 0 end = 0 num_vals = len(col) for val in frequencies: prob = frequencies[val] / num_vals end = start + prob interval = (start, end) mean = np.mean(interval) std = prob / 6 self.probability_map[val] = (interval, mean, std) start = end # depends on [control=['for'], data=['val']]
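A standalone re-run of the interval bookkeeping the method above performs, on a toy column (the column name and values are made up for illustration):

import numpy as np
import pandas as pd

# Toy category column; 'color' is a hypothetical name.
col = pd.DataFrame({'color': ['red', 'red', 'blue', 'red']})
column = col['color'].replace({np.nan: np.inf})
frequencies = column.groupby(column).count().rename({np.inf: None}).to_dict()

probability_map, start = {}, 0
for val, count in frequencies.items():
    prob = count / len(col)
    interval = (start, start + prob)
    probability_map[val] = (interval, np.mean(interval), prob / 6)
    start = interval[1]

# 'red' covers 3 of 4 rows: interval (0.25, 1.0), mean 0.625, std 0.125
print(probability_map['red'])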
def _filter_deprecation_warnings():
    """Apply filters to deprecation warnings.

    Force the `DeprecationWarning` warnings to be displayed for the qiskit
    module, overriding the system configuration as they are ignored by
    default [1] for end-users. Additionally, silence the
    `ChangedInMarshmallow3Warning` messages.

    TODO: on Python 3.7, this might not be needed due to PEP-0565 [2].

    [1] https://docs.python.org/3/library/warnings.html#default-warning-filters
    [2] https://www.python.org/dev/peps/pep-0565/
    """
    deprecation_filter = ('always', None, DeprecationWarning,
                          re.compile(r'^qiskit\.*', re.UNICODE), 0)

    # Instead of using warnings.simple_filter() directly, the internal
    # _add_filter() function is used for being able to match against the
    # module.
    try:
        warnings._add_filter(*deprecation_filter, append=False)
    except AttributeError:
        # ._add_filter is internal and not available in some Python versions.
        pass

    # Add a filter for ignoring ChangedInMarshmallow3Warning, as we depend on
    # marshmallow 2 explicitly. 2.17.0 introduced new deprecation warnings that
    # are useful for eventually migrating, but too verbose for our purposes.
    warnings.simplefilter('ignore', category=ChangedInMarshmallow3Warning)
def function[_filter_deprecation_warnings, parameter[]]: constant[Apply filters to deprecation warnings. Force the `DeprecationWarning` warnings to be displayed for the qiskit module, overriding the system configuration as they are ignored by default [1] for end-users. Additionally, silence the `ChangedInMarshmallow3Warning` messages. TODO: on Python 3.7, this might not be needed due to PEP-0565 [2]. [1] https://docs.python.org/3/library/warnings.html#default-warning-filters [2] https://www.python.org/dev/peps/pep-0565/ ] variable[deprecation_filter] assign[=] tuple[[<ast.Constant object at 0x7da207f997b0>, <ast.Constant object at 0x7da207f98df0>, <ast.Name object at 0x7da207f997e0>, <ast.Call object at 0x7da207f98af0>, <ast.Constant object at 0x7da207f9ac80>]] <ast.Try object at 0x7da207f9b7c0> call[name[warnings].simplefilter, parameter[constant[ignore]]]
keyword[def] identifier[_filter_deprecation_warnings] (): literal[string] identifier[deprecation_filter] =( literal[string] , keyword[None] , identifier[DeprecationWarning] , identifier[re] . identifier[compile] ( literal[string] , identifier[re] . identifier[UNICODE] ), literal[int] ) keyword[try] : identifier[warnings] . identifier[_add_filter] (* identifier[deprecation_filter] , identifier[append] = keyword[False] ) keyword[except] identifier[AttributeError] : keyword[pass] identifier[warnings] . identifier[simplefilter] ( literal[string] , identifier[category] = identifier[ChangedInMarshmallow3Warning] )
def _filter_deprecation_warnings():
    """Apply filters to deprecation warnings.

    Force the `DeprecationWarning` warnings to be displayed for the qiskit
    module, overriding the system configuration as they are ignored by
    default [1] for end-users. Additionally, silence the
    `ChangedInMarshmallow3Warning` messages.

    TODO: on Python 3.7, this might not be needed due to PEP-0565 [2].

    [1] https://docs.python.org/3/library/warnings.html#default-warning-filters
    [2] https://www.python.org/dev/peps/pep-0565/
    """
    deprecation_filter = ('always', None, DeprecationWarning, re.compile('^qiskit\\.*', re.UNICODE), 0)
    # Instead of using warnings.simple_filter() directly, the internal
    # _add_filter() function is used for being able to match against the
    # module.
    try:
        warnings._add_filter(*deprecation_filter, append=False) # depends on [control=['try'], data=[]]
    except AttributeError:
        # ._add_filter is internal and not available in some Python versions.
        pass # depends on [control=['except'], data=[]]
    # Add a filter for ignoring ChangedInMarshmallow3Warning, as we depend on
    # marshmallow 2 explicitly. 2.17.0 introduced new deprecation warnings that
    # are useful for eventually migrating, but too verbose for our purposes.
    warnings.simplefilter('ignore', category=ChangedInMarshmallow3Warning)
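For comparison, a sketch of the closest public-API equivalent to the internal `_add_filter` call above; like `append=False`, `warnings.filterwarnings` inserts the new filter at the front of the filter list by default:

import re
import warnings

# Show DeprecationWarning for warnings raised from qiskit modules.
warnings.filterwarnings('always', category=DeprecationWarning,
                        module=r'^qiskit\.*')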
def reference_id(self, reference_id):
    """
    Sets the reference_id of this Order.
    A client specified identifier to associate an entity in another system with this order.

    :param reference_id: The reference_id of this Order.
    :type: str
    """

    if reference_id is None:
        raise ValueError("Invalid value for `reference_id`, must not be `None`")
    if len(reference_id) > 40:
        raise ValueError("Invalid value for `reference_id`, length must be at most `40`")

    self._reference_id = reference_id
def function[reference_id, parameter[self, reference_id]]: constant[ Sets the reference_id of this Order. A client specified identifier to associate an entity in another system with this order. :param reference_id: The reference_id of this Order. :type: str ] if compare[name[reference_id] is constant[None]] begin[:] <ast.Raise object at 0x7da1b2345c60> if compare[call[name[len], parameter[name[reference_id]]] greater[>] constant[40]] begin[:] <ast.Raise object at 0x7da1b1c1a8f0> name[self]._reference_id assign[=] name[reference_id]
keyword[def] identifier[reference_id] ( identifier[self] , identifier[reference_id] ): literal[string] keyword[if] identifier[reference_id] keyword[is] keyword[None] : keyword[raise] identifier[ValueError] ( literal[string] ) keyword[if] identifier[len] ( identifier[reference_id] )> literal[int] : keyword[raise] identifier[ValueError] ( literal[string] ) identifier[self] . identifier[_reference_id] = identifier[reference_id]
def reference_id(self, reference_id):
    """
    Sets the reference_id of this Order.
    A client specified identifier to associate an entity in another system with this order.

    :param reference_id: The reference_id of this Order.
    :type: str
    """
    if reference_id is None:
        raise ValueError('Invalid value for `reference_id`, must not be `None`') # depends on [control=['if'], data=[]]
    if len(reference_id) > 40:
        raise ValueError('Invalid value for `reference_id`, length must be at most `40`') # depends on [control=['if'], data=[]]
    self._reference_id = reference_id
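A self-contained illustration of the length rule the setter above enforces; the candidate values are made up:

# Stand-alone illustration of the validation enforced above.
for candidate in ['invoice-2024-0042', 'x' * 41]:
    try:
        if candidate is None:
            raise ValueError("Invalid value for `reference_id`, must not be `None`")
        if len(candidate) > 40:
            raise ValueError("Invalid value for `reference_id`, length must be at most `40`")
        print('accepted:', candidate)
    except ValueError as exc:
        print('rejected:', exc)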
def fetch_pkg_list(self):
    """Fetch and cache master list of package names from PyPI"""
    self.logger.debug("DEBUG: Fetching package name list from PyPI")
    package_list = self.list_packages()
    cPickle.dump(package_list, open(self.pkg_cache_file, "w"))
    self.pkg_list = package_list
def function[fetch_pkg_list, parameter[self]]: constant[Fetch and cache master list of package names from PYPI] call[name[self].logger.debug, parameter[constant[DEBUG: Fetching package name list from PyPI]]] variable[package_list] assign[=] call[name[self].list_packages, parameter[]] call[name[cPickle].dump, parameter[name[package_list], call[name[open], parameter[name[self].pkg_cache_file, constant[w]]]]] name[self].pkg_list assign[=] name[package_list]
keyword[def] identifier[fetch_pkg_list] ( identifier[self] ): literal[string] identifier[self] . identifier[logger] . identifier[debug] ( literal[string] ) identifier[package_list] = identifier[self] . identifier[list_packages] () identifier[cPickle] . identifier[dump] ( identifier[package_list] , identifier[open] ( identifier[self] . identifier[pkg_cache_file] , literal[string] )) identifier[self] . identifier[pkg_list] = identifier[package_list]
def fetch_pkg_list(self):
    """Fetch and cache master list of package names from PyPI"""
    self.logger.debug('DEBUG: Fetching package name list from PyPI')
    package_list = self.list_packages()
    cPickle.dump(package_list, open(self.pkg_cache_file, 'w'))
    self.pkg_list = package_list
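A hedged sketch of reading the cache this method writes back in, in the same Python 2 style the `cPickle` call implies; the cache path is a placeholder:

import cPickle  # Python 2, matching the module's evident dependency

pkg_cache_file = 'pkg_names.pkl'  # hypothetical cache path
with open(pkg_cache_file) as handle:
    cached_names = cPickle.load(handle)
print('%d package names cached' % len(cached_names))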
def down_by_time(*filters, remote_dir=DEFAULT_REMOTE_DIR, local_dir=".", count=1):
    """Sync most recent file by date, time attributes"""
    files = command.list_files(*filters, remote_dir=remote_dir)
    most_recent = sorted(files, key=lambda f: f.datetime)
    to_sync = most_recent[-count:]
    _notify_sync(Direction.down, to_sync)
    down_by_files(to_sync[::-1], local_dir=local_dir)
def function[down_by_time, parameter[]]: constant[Sync most recent file by date, time attribues] variable[files] assign[=] call[name[command].list_files, parameter[<ast.Starred object at 0x7da18f721300>]] variable[most_recent] assign[=] call[name[sorted], parameter[name[files]]] variable[to_sync] assign[=] call[name[most_recent]][<ast.Slice object at 0x7da18f720130>] call[name[_notify_sync], parameter[name[Direction].down, name[to_sync]]] call[name[down_by_files], parameter[call[name[to_sync]][<ast.Slice object at 0x7da18f720fa0>]]]
keyword[def] identifier[down_by_time] (* identifier[filters] , identifier[remote_dir] = identifier[DEFAULT_REMOTE_DIR] , identifier[local_dir] = literal[string] , identifier[count] = literal[int] ): literal[string] identifier[files] = identifier[command] . identifier[list_files] (* identifier[filters] , identifier[remote_dir] = identifier[remote_dir] ) identifier[most_recent] = identifier[sorted] ( identifier[files] , identifier[key] = keyword[lambda] identifier[f] : identifier[f] . identifier[datetime] ) identifier[to_sync] = identifier[most_recent] [- identifier[count] :] identifier[_notify_sync] ( identifier[Direction] . identifier[down] , identifier[to_sync] ) identifier[down_by_files] ( identifier[to_sync] [::- literal[int] ], identifier[local_dir] = identifier[local_dir] )
def down_by_time(*filters, remote_dir=DEFAULT_REMOTE_DIR, local_dir='.', count=1):
    """Sync most recent file by date, time attributes"""
    files = command.list_files(*filters, remote_dir=remote_dir)
    most_recent = sorted(files, key=lambda f: f.datetime)
    to_sync = most_recent[-count:]
    _notify_sync(Direction.down, to_sync)
    down_by_files(to_sync[::-1], local_dir=local_dir)
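A brief usage sketch for the function above, assuming it is in scope; the filter pattern and remote directory are placeholder values:

# Pull the three most recently modified matching files from the card
# into the current directory.
down_by_time('*.JPG', remote_dir='/DCIM', local_dir='.', count=3)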
def update_bucket(self, bucket_name, access_control='private', version_control=False, log_destination=None, lifecycle_rules=None, tag_list=None, notification_settings=None, region_replication=None, access_policy=None):
    ''' a method for updating the properties of a bucket in S3

    :param bucket_name: string with name of bucket
    :param access_control: string with type of access control policy
    :param version_control: [optional] boolean to enable versioning of records
    :param log_destination: [optional] dictionary with bucket name and prefix of log bucket
    :param lifecycle_rules: [optional] list of dictionaries with rules for aging data
    :param tag_list: [optional] list of dictionaries with key and value for tag
    :param notification_settings: [optional] list of dictionaries with notification details
    :param region_replication: [optional] dictionary with replication settings (WIP)
    :param access_policy: [optional] dictionary with policy for user access (WIP)
    :return: list of dictionaries with changes to bucket
    '''

    title = '%s.update_bucket' % self.__class__.__name__

    # validate inputs
    input_fields = {
        'bucket_name': bucket_name,
        'access_control': access_control,
        'version_control': version_control,
        'log_destination': log_destination,
        'lifecycle_rules': lifecycle_rules,
        'tag_list': tag_list,
        'notification_settings': notification_settings,
        'region_replication': region_replication,
        'access_policy': access_policy
    }
    for key, value in input_fields.items():
        if value:
            object_title = '%s(%s=%s)' % (title, key, str(value))
            self.fields.validate(value, '.%s' % key, object_title)
    if log_destination == None:
        input_fields['log_destination'] = {}
    if lifecycle_rules == None:
        input_fields['lifecycle_rules'] = []
    if tag_list == None:
        input_fields['tag_list'] = []
    if notification_settings == None:
        input_fields['notification_settings'] = []
    if region_replication == None:
        input_fields['region_replication'] = {}
    if access_policy == None:
        input_fields['access_policy'] = {}

    # verify requirements and limits
    self.list_buckets()
    if not bucket_name in self.bucket_list:
        raise ValueError('S3 bucket "%s" does not exist in aws region %s. Update not applicable.' % (bucket_name, self.iam.region_name))
    if log_destination:
        log_name = log_destination['name']
        if not log_name in self.bucket_list:
            raise ValueError('S3 Bucket "%s" for logging does not exist in aws region %s.' % (log_name, self.iam.region_name))
        else:
            log_details = self.read_bucket(log_name)
            if log_details['access_control'] != 'log-delivery-write':
                raise ValueError('S3 Bucket "%s" for logging does not have "log-delivery-write" access control.' % log_name)
        if not 'prefix' in log_destination.keys():
            input_fields['log_destination']['prefix'] = ''

    # TODO: check to see if required notification arns exist
    if notification_settings:
        for notification in notification_settings:
            arn_id = notification['arn']

    # retrieve existing bucket fields
    existing_fields = self.read_bucket(bucket_name)

    # alphabetize tag list
    if existing_fields['tag_list']:
        existing_fields['tag_list'] = sorted(existing_fields['tag_list'], key=lambda k: k['key'])
    if input_fields['tag_list']:
        input_fields['tag_list'] = sorted(input_fields['tag_list'], key=lambda k: k['key'])

    # determine difference between new and old versions
    from labpack.parsing.comparison import compare_records
    change_list = compare_records(input_fields, existing_fields)
    if not change_list:
        self.iam.printer('There are no changes to make to bucket "%s".' % bucket_name)
        return change_list

    # process changes
    self.iam.printer('Updating bucket "%s".' % bucket_name, flush=True)
    processed_list = []
    for change in change_list:

        # replace access control
        if change['path'][0] == 'access_control' and 'access_control' not in processed_list:
            kw_args = {
                'Bucket': bucket_name,
                'ACL': input_fields['access_control']
            }
            try:
                self.connection.put_bucket_acl(**kw_args)
                self.iam.printer('.', flush=True)
            except:
                raise AWSConnectionError(title)
            processed_list.append('access_control')

        # replace version control
        if change['path'][0] == 'version_control' and 'version_control' not in processed_list:
            if input_fields['version_control']:
                try:
                    self.connection.put_bucket_versioning(
                        Bucket=bucket_name,
                        VersioningConfiguration={ 'Status': 'Enabled' }
                    )
                    self.iam.printer('.', flush=True)
                except:
                    raise AWSConnectionError(title)
            else:
                try:
                    self.connection.put_bucket_versioning(
                        Bucket=bucket_name,
                        VersioningConfiguration={ 'Status': 'Suspended' }
                    )
                    self.iam.printer('.', flush=True)
                except:
                    raise AWSConnectionError(title)
            processed_list.append('version_control')

        # replace log destination
        if change['path'][0] == 'log_destination' and 'log_destination' not in processed_list:
            if input_fields['log_destination']:
                log_name = input_fields['log_destination']['name']
                log_prefix = input_fields['log_destination']['prefix']
                kw_args = {
                    'Bucket': bucket_name,
                    'BucketLoggingStatus': { 'LoggingEnabled': { 'TargetBucket': log_name } }
                }
                if log_prefix:
                    kw_args['BucketLoggingStatus']['LoggingEnabled']['TargetPrefix'] = log_prefix
            else:
                kw_args = {
                    'Bucket': bucket_name,
                    'BucketLoggingStatus': {}
                }
            try:
                self.connection.put_bucket_logging(**kw_args)
                self.iam.printer('.', flush=True)
            except:
                raise AWSConnectionError(title)
            processed_list.append('log_destination')

        # replace lifecycle rules
        if change['path'][0] == 'lifecycle_rules' and 'lifecycle_rules' not in processed_list:
            if input_fields['lifecycle_rules']:
                kw_args = {
                    'Bucket': bucket_name,
                    'LifecycleConfiguration': { 'Rules': [ ] }
                }
                for rule in input_fields['lifecycle_rules']:
                    details = {
                        'Prefix': rule['prefix'],
                        'Status': 'Enabled'
                    }
                    if rule['action'] == 'archive':
                        if rule['current_version']:
                            details['Transition'] = {
                                'Days': rule['longevity'],
                                'StorageClass': 'GLACIER'
                            }
                        else:
                            details['NoncurrentVersionTransition'] = {
                                'NoncurrentDays': rule['longevity'],
                                'StorageClass': 'GLACIER'
                            }
                    else:
                        if rule['current_version']:
                            details['Expiration'] = { 'Days': rule['longevity'] }
                        else:
                            details['NoncurrentVersionExpiration'] = { 'NoncurrentDays': rule['longevity'] }
                    kw_args['LifecycleConfiguration']['Rules'].append(details)
                try:
                    self.connection.put_bucket_lifecycle(**kw_args)
                    self.iam.printer('.', flush=True)
                except:
                    raise AWSConnectionError(title)
            else:
                try:
                    self.connection.delete_bucket_lifecycle(Bucket=bucket_name)
                    self.iam.printer('.', flush=True)
                except:
                    raise AWSConnectionError(title)
            processed_list.append('lifecycle_rules')

        # replace bucket tags
        if change['path'][0] == 'tag_list' and 'tag_list' not in processed_list:
            if input_fields['tag_list']:
                try:
                    self.connection.put_bucket_tagging(
                        Bucket=bucket_name,
                        Tagging={ 'TagSet': self.iam.prepare(input_fields['tag_list']) }
                    )
                    self.iam.printer('.', flush=True)
                except:
                    raise AWSConnectionError(title)
            else:
                try:
                    self.connection.delete_bucket_tagging(Bucket=bucket_name)
                    self.iam.printer('.', flush=True)
                except:
                    raise AWSConnectionError(title)
            processed_list.append('tag_list')

        # replace notification settings
        if change['path'][0] == 'notification_settings' and 'notification_settings' not in processed_list:
            kw_args = {
                'Bucket': bucket_name,
                'NotificationConfiguration': {}
            }
            if input_fields['notification_settings']:
                for notification in input_fields['notification_settings']:
                    details = {
                        'Events': [],
                        'Filter': { 'Key': { 'FilterRules': [] } }
                    }
                    details['Events'].append(notification['event'])
                    if notification['filters']:
                        for key, value in notification['filters'].items():
                            filter_details = {
                                'Name': key,
                                'Value': value
                            }
                            details['Filter']['Key']['FilterRules'].append(filter_details)
                    if notification['service'] == 'sns':
                        details['TopicArn'] = notification['arn']
                        if not 'TopicConfigurations' in kw_args['NotificationConfiguration']:
                            kw_args['NotificationConfiguration']['TopicConfigurations'] = []
                        kw_args['NotificationConfiguration']['TopicConfigurations'].append(details)
                    elif notification['service'] == 'sqs':
                        details['QueueArn'] = notification['arn']
                        if not 'QueueConfigurations' in kw_args['NotificationConfiguration']:
                            kw_args['NotificationConfiguration']['QueueConfigurations'] = []
                        kw_args['NotificationConfiguration']['QueueConfigurations'].append(details)
                    elif notification['service'] == 'lambda':
                        if not 'LambdaFunctionConfigurations' in kw_args['NotificationConfiguration']:
                            kw_args['NotificationConfiguration']['LambdaFunctionConfigurations'] = []
                        details['LambdaFunctionArn'] = notification['arn']
                        kw_args['NotificationConfiguration']['LambdaFunctionConfigurations'].append(details)
            try:
                # TODO: response = self.connection.put_bucket_notification_configuration(**kw_args)
                self.iam.printer('.', flush=True)
            except:
                raise AWSConnectionError(title)
            processed_list.append('notification_settings')

        # TODO: replace region replication
        if change['path'][0] == 'region_replication' and 'region_replication' not in processed_list:
            if input_fields['region_replication']:
                pass
            else:
                pass
            processed_list.append('region_replication')

        # TODO: replace access policy
        if change['path'][0] == 'access_policy' and 'access_policy' not in processed_list:
            if input_fields['access_policy']:
                pass
            else:
                pass
            processed_list.append('access_policy')

    # report and return change list
    self.iam.printer(' done.')
    return change_list
def function[update_bucket, parameter[self, bucket_name, access_control, version_control, log_destination, lifecycle_rules, tag_list, notification_settings, region_replication, access_policy]]: constant[ a method for updating the properties of a bucket in S3 :param bucket_name: string with name of bucket :param access_control: string with type of access control policy :param version_control: [optional] boolean to enable versioning of records :param log_destination: [optional] dictionary with bucket name and prefix of log bucket :param lifecycle_rules: [optional] list of dictionaries with rules for aging data :param tag_list: [optional] list of dictionaries with key and value for tag :param notification_settings: [optional] list of dictionaries with notification details :param region_replication: [optional] dictionary with replication settings (WIP) :param access_policy: [optional] dictionary with policy for user access (WIP) :return: list of dictionaries with changes to bucket ] variable[title] assign[=] binary_operation[constant[%s.update_bucket] <ast.Mod object at 0x7da2590d6920> name[self].__class__.__name__] variable[input_fields] assign[=] dictionary[[<ast.Constant object at 0x7da20e955b70>, <ast.Constant object at 0x7da20e954d90>, <ast.Constant object at 0x7da20e954b20>, <ast.Constant object at 0x7da20e955090>, <ast.Constant object at 0x7da20e9578b0>, <ast.Constant object at 0x7da20e956530>, <ast.Constant object at 0x7da20e954b50>, <ast.Constant object at 0x7da20e954cd0>, <ast.Constant object at 0x7da20e9576d0>], [<ast.Name object at 0x7da20e956500>, <ast.Name object at 0x7da20e954f40>, <ast.Name object at 0x7da20e9551e0>, <ast.Name object at 0x7da20e957160>, <ast.Name object at 0x7da20e955990>, <ast.Name object at 0x7da20e954c70>, <ast.Name object at 0x7da20e957490>, <ast.Name object at 0x7da20e955750>, <ast.Name object at 0x7da20e9544f0>]] for taget[tuple[[<ast.Name object at 0x7da20e954730>, <ast.Name object at 0x7da20e956b30>]]] in starred[call[name[input_fields].items, parameter[]]] begin[:] if name[value] begin[:] variable[object_title] assign[=] binary_operation[constant[%s(%s=%s)] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da20e956ce0>, <ast.Name object at 0x7da20e955540>, <ast.Call object at 0x7da20e957df0>]]] call[name[self].fields.validate, parameter[name[value], binary_operation[constant[.%s] <ast.Mod object at 0x7da2590d6920> name[key]], name[object_title]]] if compare[name[log_destination] equal[==] constant[None]] begin[:] call[name[input_fields]][constant[log_destination]] assign[=] dictionary[[], []] if compare[name[lifecycle_rules] equal[==] constant[None]] begin[:] call[name[input_fields]][constant[lifecycle_rules]] assign[=] list[[]] if compare[name[tag_list] equal[==] constant[None]] begin[:] call[name[input_fields]][constant[tag_list]] assign[=] list[[]] if compare[name[notification_settings] equal[==] constant[None]] begin[:] call[name[input_fields]][constant[notification_settings]] assign[=] list[[]] if compare[name[region_replication] equal[==] constant[None]] begin[:] call[name[input_fields]][constant[region_replication]] assign[=] dictionary[[], []] if compare[name[access_policy] equal[==] constant[None]] begin[:] call[name[input_fields]][constant[access_policy]] assign[=] dictionary[[], []] call[name[self].list_buckets, parameter[]] if <ast.UnaryOp object at 0x7da20e957220> begin[:] <ast.Raise object at 0x7da20e957d90> if name[log_destination] begin[:] variable[log_name] assign[=] call[name[log_destination]][constant[name]] if 
<ast.UnaryOp object at 0x7da20e9558d0> begin[:] <ast.Raise object at 0x7da20e956620> if <ast.UnaryOp object at 0x7da20cabcbe0> begin[:] call[call[name[input_fields]][constant[log_destination]]][constant[prefix]] assign[=] constant[] if name[notification_settings] begin[:] for taget[name[notification]] in starred[name[notification_settings]] begin[:] variable[arn_id] assign[=] call[name[notification]][constant[arn]] variable[existing_fields] assign[=] call[name[self].read_bucket, parameter[name[bucket_name]]] if call[name[existing_fields]][constant[tag_list]] begin[:] call[name[existing_fields]][constant[tag_list]] assign[=] call[name[sorted], parameter[call[name[existing_fields]][constant[tag_list]]]] if call[name[input_fields]][constant[tag_list]] begin[:] call[name[input_fields]][constant[tag_list]] assign[=] call[name[sorted], parameter[call[name[input_fields]][constant[tag_list]]]] from relative_module[labpack.parsing.comparison] import module[compare_records] variable[change_list] assign[=] call[name[compare_records], parameter[name[input_fields], name[existing_fields]]] if <ast.UnaryOp object at 0x7da20cabf2b0> begin[:] call[name[self].iam.printer, parameter[binary_operation[constant[There are no changes to make to bucket "%s".] <ast.Mod object at 0x7da2590d6920> name[bucket_name]]]] return[name[change_list]] call[name[self].iam.printer, parameter[binary_operation[constant[Updating bucket "%s".] <ast.Mod object at 0x7da2590d6920> name[bucket_name]]]] variable[processed_list] assign[=] list[[]] for taget[name[change]] in starred[name[change_list]] begin[:] if <ast.BoolOp object at 0x7da20cabf9a0> begin[:] variable[kw_args] assign[=] dictionary[[<ast.Constant object at 0x7da20cabf910>, <ast.Constant object at 0x7da20cabfe50>], [<ast.Name object at 0x7da20cabc5e0>, <ast.Subscript object at 0x7da20cabc820>]] <ast.Try object at 0x7da20cabcb20> call[name[processed_list].append, parameter[constant[access_control]]] if <ast.BoolOp object at 0x7da18f58ce80> begin[:] if call[name[input_fields]][constant[version_control]] begin[:] <ast.Try object at 0x7da18f58cc10> call[name[processed_list].append, parameter[constant[version_control]]] if <ast.BoolOp object at 0x7da18f58e5f0> begin[:] if call[name[input_fields]][constant[log_destination]] begin[:] variable[log_name] assign[=] call[call[name[input_fields]][constant[log_destination]]][constant[name]] variable[log_prefix] assign[=] call[call[name[input_fields]][constant[log_destination]]][constant[prefix]] variable[kw_args] assign[=] dictionary[[<ast.Constant object at 0x7da18f58d3c0>, <ast.Constant object at 0x7da18f58e140>], [<ast.Name object at 0x7da18f58dfc0>, <ast.Dict object at 0x7da18f58e200>]] if name[log_prefix] begin[:] call[call[call[name[kw_args]][constant[BucketLoggingStatus]]][constant[LoggingEnabled]]][constant[TargetPrefix]] assign[=] name[log_prefix] <ast.Try object at 0x7da18f58d960> call[name[processed_list].append, parameter[constant[log_destination]]] if <ast.BoolOp object at 0x7da18f58ec50> begin[:] if call[name[input_fields]][constant[lifecycle_rules]] begin[:] variable[kw_args] assign[=] dictionary[[<ast.Constant object at 0x7da18f58f1f0>, <ast.Constant object at 0x7da18f58e0e0>], [<ast.Name object at 0x7da18f58f040>, <ast.Dict object at 0x7da18f58e440>]] for taget[name[rule]] in starred[call[name[input_fields]][constant[lifecycle_rules]]] begin[:] variable[details] assign[=] dictionary[[<ast.Constant object at 0x7da18f58c5e0>, <ast.Constant object at 0x7da18f58c250>], [<ast.Subscript object at 0x7da18f58ebc0>, 
<ast.Constant object at 0x7da18f58ded0>]] if compare[call[name[rule]][constant[action]] equal[==] constant[archive]] begin[:] if call[name[rule]][constant[current_version]] begin[:] call[name[details]][constant[Transition]] assign[=] dictionary[[<ast.Constant object at 0x7da18f58f640>, <ast.Constant object at 0x7da18f58c610>], [<ast.Subscript object at 0x7da18f58fc10>, <ast.Constant object at 0x7da18f58fdc0>]] call[call[call[name[kw_args]][constant[LifecycleConfiguration]]][constant[Rules]].append, parameter[name[details]]] <ast.Try object at 0x7da18f58f670> call[name[processed_list].append, parameter[constant[lifecycle_rules]]] if <ast.BoolOp object at 0x7da204346aa0> begin[:] if call[name[input_fields]][constant[tag_list]] begin[:] <ast.Try object at 0x7da204346e30> call[name[processed_list].append, parameter[constant[tag_list]]] if <ast.BoolOp object at 0x7da204346680> begin[:] variable[kw_args] assign[=] dictionary[[<ast.Constant object at 0x7da204346f50>, <ast.Constant object at 0x7da204347880>], [<ast.Name object at 0x7da204345fc0>, <ast.Dict object at 0x7da2043450f0>]] if call[name[input_fields]][constant[notification_settings]] begin[:] for taget[name[notification]] in starred[call[name[input_fields]][constant[notification_settings]]] begin[:] variable[details] assign[=] dictionary[[<ast.Constant object at 0x7da204345300>, <ast.Constant object at 0x7da2043446d0>], [<ast.List object at 0x7da204345f30>, <ast.Dict object at 0x7da204346bf0>]] call[call[name[details]][constant[Events]].append, parameter[call[name[notification]][constant[event]]]] if call[name[notification]][constant[filters]] begin[:] for taget[tuple[[<ast.Name object at 0x7da204347af0>, <ast.Name object at 0x7da204346050>]]] in starred[call[call[name[notification]][constant[filters]].items, parameter[]]] begin[:] variable[filter_details] assign[=] dictionary[[<ast.Constant object at 0x7da1b15292a0>, <ast.Constant object at 0x7da1b1528820>], [<ast.Name object at 0x7da1b1529a20>, <ast.Name object at 0x7da1b1529540>]] call[call[call[call[name[details]][constant[Filter]]][constant[Key]]][constant[FilterRules]].append, parameter[name[filter_details]]] if compare[call[name[notification]][constant[service]] equal[==] constant[sns]] begin[:] call[name[details]][constant[TopicArn]] assign[=] call[name[notification]][constant[arn]] if <ast.UnaryOp object at 0x7da1b15295a0> begin[:] call[call[name[kw_args]][constant[NotificationConfiguration]]][constant[TopicConfigurations]] assign[=] list[[]] call[call[call[name[kw_args]][constant[NotificationConfiguration]]][constant[TopicConfigurations]].append, parameter[name[details]]] <ast.Try object at 0x7da18ede4df0> call[name[processed_list].append, parameter[constant[notification_settings]]] if <ast.BoolOp object at 0x7da18ede6e00> begin[:] if call[name[input_fields]][constant[region_replication]] begin[:] pass call[name[processed_list].append, parameter[constant[region_replication]]] if <ast.BoolOp object at 0x7da18ede7a60> begin[:] if call[name[input_fields]][constant[access_policy]] begin[:] pass call[name[processed_list].append, parameter[constant[access_policy]]] call[name[self].iam.printer, parameter[constant[ done.]]] return[name[change_list]]
keyword[def] identifier[update_bucket] ( identifier[self] , identifier[bucket_name] , identifier[access_control] = literal[string] , identifier[version_control] = keyword[False] , identifier[log_destination] = keyword[None] , identifier[lifecycle_rules] = keyword[None] , identifier[tag_list] = keyword[None] , identifier[notification_settings] = keyword[None] , identifier[region_replication] = keyword[None] , identifier[access_policy] = keyword[None] ): literal[string] identifier[title] = literal[string] % identifier[self] . identifier[__class__] . identifier[__name__] identifier[input_fields] ={ literal[string] : identifier[bucket_name] , literal[string] : identifier[access_control] , literal[string] : identifier[version_control] , literal[string] : identifier[log_destination] , literal[string] : identifier[lifecycle_rules] , literal[string] : identifier[tag_list] , literal[string] : identifier[notification_settings] , literal[string] : identifier[region_replication] , literal[string] : identifier[access_policy] } keyword[for] identifier[key] , identifier[value] keyword[in] identifier[input_fields] . identifier[items] (): keyword[if] identifier[value] : identifier[object_title] = literal[string] %( identifier[title] , identifier[key] , identifier[str] ( identifier[value] )) identifier[self] . identifier[fields] . identifier[validate] ( identifier[value] , literal[string] % identifier[key] , identifier[object_title] ) keyword[if] identifier[log_destination] == keyword[None] : identifier[input_fields] [ literal[string] ]={} keyword[if] identifier[lifecycle_rules] == keyword[None] : identifier[input_fields] [ literal[string] ]=[] keyword[if] identifier[tag_list] == keyword[None] : identifier[input_fields] [ literal[string] ]=[] keyword[if] identifier[notification_settings] == keyword[None] : identifier[input_fields] [ literal[string] ]=[] keyword[if] identifier[region_replication] == keyword[None] : identifier[input_fields] [ literal[string] ]={} keyword[if] identifier[access_policy] == keyword[None] : identifier[input_fields] [ literal[string] ]={} identifier[self] . identifier[list_buckets] () keyword[if] keyword[not] identifier[bucket_name] keyword[in] identifier[self] . identifier[bucket_list] : keyword[raise] identifier[ValueError] ( literal[string] %( identifier[bucket_name] , identifier[self] . identifier[iam] . identifier[region_name] )) keyword[if] identifier[log_destination] : identifier[log_name] = identifier[log_destination] [ literal[string] ] keyword[if] keyword[not] identifier[log_name] keyword[in] identifier[self] . identifier[bucket_list] : keyword[raise] identifier[ValueError] ( literal[string] %( identifier[log_name] , identifier[self] . identifier[iam] . identifier[region_name] )) keyword[else] : identifier[log_details] = identifier[self] . identifier[read_bucket] ( identifier[log_name] ) keyword[if] identifier[log_details] [ literal[string] ]!= literal[string] : keyword[raise] identifier[ValueError] ( literal[string] % identifier[log_name] ) keyword[if] keyword[not] literal[string] keyword[in] identifier[log_destination] . identifier[keys] (): identifier[input_fields] [ literal[string] ][ literal[string] ]= literal[string] keyword[if] identifier[notification_settings] : keyword[for] identifier[notification] keyword[in] identifier[notification_settings] : identifier[arn_id] = identifier[notification] [ literal[string] ] identifier[existing_fields] = identifier[self] . 
identifier[read_bucket] ( identifier[bucket_name] ) keyword[if] identifier[existing_fields] [ literal[string] ]: identifier[existing_fields] [ literal[string] ]= identifier[sorted] ( identifier[existing_fields] [ literal[string] ], identifier[key] = keyword[lambda] identifier[k] : identifier[k] [ literal[string] ]) keyword[if] identifier[input_fields] [ literal[string] ]: identifier[input_fields] [ literal[string] ]= identifier[sorted] ( identifier[input_fields] [ literal[string] ], identifier[key] = keyword[lambda] identifier[k] : identifier[k] [ literal[string] ]) keyword[from] identifier[labpack] . identifier[parsing] . identifier[comparison] keyword[import] identifier[compare_records] identifier[change_list] = identifier[compare_records] ( identifier[input_fields] , identifier[existing_fields] ) keyword[if] keyword[not] identifier[change_list] : identifier[self] . identifier[iam] . identifier[printer] ( literal[string] % identifier[bucket_name] ) keyword[return] identifier[change_list] identifier[self] . identifier[iam] . identifier[printer] ( literal[string] % identifier[bucket_name] , identifier[flush] = keyword[True] ) identifier[processed_list] =[] keyword[for] identifier[change] keyword[in] identifier[change_list] : keyword[if] identifier[change] [ literal[string] ][ literal[int] ]== literal[string] keyword[and] literal[string] keyword[not] keyword[in] identifier[processed_list] : identifier[kw_args] ={ literal[string] : identifier[bucket_name] , literal[string] : identifier[input_fields] [ literal[string] ] } keyword[try] : identifier[self] . identifier[connection] . identifier[put_bucket_acl] (** identifier[kw_args] ) identifier[self] . identifier[iam] . identifier[printer] ( literal[string] , identifier[flush] = keyword[True] ) keyword[except] : keyword[raise] identifier[AWSConnectionError] ( identifier[title] ) identifier[processed_list] . identifier[append] ( literal[string] ) keyword[if] identifier[change] [ literal[string] ][ literal[int] ]== literal[string] keyword[and] literal[string] keyword[not] keyword[in] identifier[processed_list] : keyword[if] identifier[input_fields] [ literal[string] ]: keyword[try] : identifier[self] . identifier[connection] . identifier[put_bucket_versioning] ( identifier[Bucket] = identifier[bucket_name] , identifier[VersioningConfiguration] ={ literal[string] : literal[string] } ) identifier[self] . identifier[iam] . identifier[printer] ( literal[string] , identifier[flush] = keyword[True] ) keyword[except] : keyword[raise] identifier[AWSConnectionError] ( identifier[title] ) keyword[else] : keyword[try] : identifier[self] . identifier[connection] . identifier[put_bucket_versioning] ( identifier[Bucket] = identifier[bucket_name] , identifier[VersioningConfiguration] ={ literal[string] : literal[string] } ) identifier[self] . identifier[iam] . identifier[printer] ( literal[string] , identifier[flush] = keyword[True] ) keyword[except] : keyword[raise] identifier[AWSConnectionError] ( identifier[title] ) identifier[processed_list] . 
identifier[append] ( literal[string] ) keyword[if] identifier[change] [ literal[string] ][ literal[int] ]== literal[string] keyword[and] literal[string] keyword[not] keyword[in] identifier[processed_list] : keyword[if] identifier[input_fields] [ literal[string] ]: identifier[log_name] = identifier[input_fields] [ literal[string] ][ literal[string] ] identifier[log_prefix] = identifier[input_fields] [ literal[string] ][ literal[string] ] identifier[kw_args] ={ literal[string] : identifier[bucket_name] , literal[string] :{ literal[string] :{ literal[string] : identifier[log_name] } } } keyword[if] identifier[log_prefix] : identifier[kw_args] [ literal[string] ][ literal[string] ][ literal[string] ]= identifier[log_prefix] keyword[else] : identifier[kw_args] ={ literal[string] : identifier[bucket_name] , literal[string] :{} } keyword[try] : identifier[self] . identifier[connection] . identifier[put_bucket_logging] (** identifier[kw_args] ) identifier[self] . identifier[iam] . identifier[printer] ( literal[string] , identifier[flush] = keyword[True] ) keyword[except] : keyword[raise] identifier[AWSConnectionError] ( identifier[title] ) identifier[processed_list] . identifier[append] ( literal[string] ) keyword[if] identifier[change] [ literal[string] ][ literal[int] ]== literal[string] keyword[and] literal[string] keyword[not] keyword[in] identifier[processed_list] : keyword[if] identifier[input_fields] [ literal[string] ]: identifier[kw_args] ={ literal[string] : identifier[bucket_name] , literal[string] :{ literal[string] :[]} } keyword[for] identifier[rule] keyword[in] identifier[input_fields] [ literal[string] ]: identifier[details] ={ literal[string] : identifier[rule] [ literal[string] ], literal[string] : literal[string] } keyword[if] identifier[rule] [ literal[string] ]== literal[string] : keyword[if] identifier[rule] [ literal[string] ]: identifier[details] [ literal[string] ]={ literal[string] : identifier[rule] [ literal[string] ], literal[string] : literal[string] } keyword[else] : identifier[details] [ literal[string] ]={ literal[string] : identifier[rule] [ literal[string] ], literal[string] : literal[string] } keyword[else] : keyword[if] identifier[rule] [ literal[string] ]: identifier[details] [ literal[string] ]={ literal[string] : identifier[rule] [ literal[string] ]} keyword[else] : identifier[details] [ literal[string] ]={ literal[string] : identifier[rule] [ literal[string] ]} identifier[kw_args] [ literal[string] ][ literal[string] ]. identifier[append] ( identifier[details] ) keyword[try] : identifier[self] . identifier[connection] . identifier[put_bucket_lifecycle] (** identifier[kw_args] ) identifier[self] . identifier[iam] . identifier[printer] ( literal[string] , identifier[flush] = keyword[True] ) keyword[except] : keyword[raise] identifier[AWSConnectionError] ( identifier[title] ) keyword[else] : keyword[try] : identifier[self] . identifier[connection] . identifier[delete_bucket_lifecycle] ( identifier[Bucket] = identifier[bucket_name] ) identifier[self] . identifier[iam] . identifier[printer] ( literal[string] , identifier[flush] = keyword[True] ) keyword[except] : keyword[raise] identifier[AWSConnectionError] ( identifier[title] ) identifier[processed_list] . identifier[append] ( literal[string] ) keyword[if] identifier[change] [ literal[string] ][ literal[int] ]== literal[string] keyword[and] literal[string] keyword[not] keyword[in] identifier[processed_list] : keyword[if] identifier[input_fields] [ literal[string] ]: keyword[try] : identifier[self] . 
identifier[connection] . identifier[put_bucket_tagging] ( identifier[Bucket] = identifier[bucket_name] , identifier[Tagging] ={ literal[string] : identifier[self] . identifier[iam] . identifier[prepare] ( identifier[input_fields] [ literal[string] ])} ) identifier[self] . identifier[iam] . identifier[printer] ( literal[string] , identifier[flush] = keyword[True] ) keyword[except] : keyword[raise] identifier[AWSConnectionError] ( identifier[title] ) keyword[else] : keyword[try] : identifier[self] . identifier[connection] . identifier[delete_bucket_tagging] ( identifier[Bucket] = identifier[bucket_name] ) identifier[self] . identifier[iam] . identifier[printer] ( literal[string] , identifier[flush] = keyword[True] ) keyword[except] : keyword[raise] identifier[AWSConnectionError] ( identifier[title] ) identifier[processed_list] . identifier[append] ( literal[string] ) keyword[if] identifier[change] [ literal[string] ][ literal[int] ]== literal[string] keyword[and] literal[string] keyword[not] keyword[in] identifier[processed_list] : identifier[kw_args] ={ literal[string] : identifier[bucket_name] , literal[string] :{} } keyword[if] identifier[input_fields] [ literal[string] ]: keyword[for] identifier[notification] keyword[in] identifier[input_fields] [ literal[string] ]: identifier[details] ={ literal[string] :[], literal[string] :{ literal[string] :{ literal[string] :[]}} } identifier[details] [ literal[string] ]. identifier[append] ( identifier[notification] [ literal[string] ]) keyword[if] identifier[notification] [ literal[string] ]: keyword[for] identifier[key] , identifier[value] keyword[in] identifier[notification] [ literal[string] ]. identifier[items] (): identifier[filter_details] ={ literal[string] : identifier[key] , literal[string] : identifier[value] } identifier[details] [ literal[string] ][ literal[string] ][ literal[string] ]. identifier[append] ( identifier[filter_details] ) keyword[if] identifier[notification] [ literal[string] ]== literal[string] : identifier[details] [ literal[string] ]= identifier[notification] [ literal[string] ] keyword[if] keyword[not] literal[string] keyword[in] identifier[kw_args] [ literal[string] ]: identifier[kw_args] [ literal[string] ][ literal[string] ]=[] identifier[kw_args] [ literal[string] ][ literal[string] ]. identifier[append] ( identifier[details] ) keyword[elif] identifier[notification] [ literal[string] ]== literal[string] : identifier[details] [ literal[string] ]= identifier[notification] [ literal[string] ] keyword[if] keyword[not] literal[string] keyword[in] identifier[kw_args] [ literal[string] ]: identifier[kw_args] [ literal[string] ][ literal[string] ]=[] identifier[kw_args] [ literal[string] ][ literal[string] ]. identifier[append] ( identifier[details] ) keyword[elif] identifier[notification] [ literal[string] ]== literal[string] : keyword[if] keyword[not] literal[string] keyword[in] identifier[kw_args] [ literal[string] ]: identifier[kw_args] [ literal[string] ][ literal[string] ]=[] identifier[details] [ literal[string] ]= identifier[notification] [ literal[string] ] identifier[kw_args] [ literal[string] ][ literal[string] ]. identifier[append] ( identifier[details] ) keyword[try] : identifier[self] . identifier[iam] . identifier[printer] ( literal[string] , identifier[flush] = keyword[True] ) keyword[except] : keyword[raise] identifier[AWSConnectionError] ( identifier[title] ) identifier[processed_list] . 
identifier[append] ( literal[string] ) keyword[if] identifier[change] [ literal[string] ][ literal[int] ]== literal[string] keyword[and] literal[string] keyword[not] keyword[in] identifier[processed_list] : keyword[if] identifier[input_fields] [ literal[string] ]: keyword[pass] keyword[else] : keyword[pass] identifier[processed_list] . identifier[append] ( literal[string] ) keyword[if] identifier[change] [ literal[string] ][ literal[int] ]== literal[string] keyword[and] literal[string] keyword[not] keyword[in] identifier[processed_list] : keyword[if] identifier[input_fields] [ literal[string] ]: keyword[pass] keyword[else] : keyword[pass] identifier[processed_list] . identifier[append] ( literal[string] ) identifier[self] . identifier[iam] . identifier[printer] ( literal[string] ) keyword[return] identifier[change_list]
def update_bucket(self, bucket_name, access_control='private', version_control=False, log_destination=None, lifecycle_rules=None, tag_list=None, notification_settings=None, region_replication=None, access_policy=None): """ a method for updating the properties of a bucket in S3 :param bucket_name: string with name of bucket :param access_control: string with type of access control policy :param version_control: [optional] boolean to enable versioning of records :param log_destination: [optional] dictionary with bucket name and prefix of log bucket :param lifecycle_rules: [optional] list of dictionaries with rules for aging data :param tag_list: [optional] list of dictionaries with key and value for tag :param notification_settings: [optional] list of dictionaries with notification details :param region_replication: [optional] dictionary with replication settings (WIP) :param access_policy: [optional] dictionary with policy for user access (WIP) :return: list of dictionaries with changes to bucket """ title = '%s.update_bucket' % self.__class__.__name__ # validate inputs input_fields = {'bucket_name': bucket_name, 'access_control': access_control, 'version_control': version_control, 'log_destination': log_destination, 'lifecycle_rules': lifecycle_rules, 'tag_list': tag_list, 'notification_settings': notification_settings, 'region_replication': region_replication, 'access_policy': access_policy} for (key, value) in input_fields.items(): if value: object_title = '%s(%s=%s)' % (title, key, str(value)) self.fields.validate(value, '.%s' % key, object_title) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] if log_destination == None: input_fields['log_destination'] = {} # depends on [control=['if'], data=[]] if lifecycle_rules == None: input_fields['lifecycle_rules'] = [] # depends on [control=['if'], data=[]] if tag_list == None: input_fields['tag_list'] = [] # depends on [control=['if'], data=[]] if notification_settings == None: input_fields['notification_settings'] = [] # depends on [control=['if'], data=[]] if region_replication == None: input_fields['region_replication'] = {} # depends on [control=['if'], data=[]] if access_policy == None: input_fields['access_policy'] = {} # depends on [control=['if'], data=[]] # verify requirements and limits self.list_buckets() if not bucket_name in self.bucket_list: raise ValueError('S3 bucket "%s" does not exist in aws region %s. Update not applicable.' % (bucket_name, self.iam.region_name)) # depends on [control=['if'], data=[]] if log_destination: log_name = log_destination['name'] if not log_name in self.bucket_list: raise ValueError('S3 Bucket "%s" for logging does not exist in aws region %s.' % (log_name, self.iam.region_name)) # depends on [control=['if'], data=[]] else: log_details = self.read_bucket(log_name) if log_details['access_control'] != 'log-delivery-write': raise ValueError('S3 Bucket "%s" for logging does not have "log-delivery-write" access control.' 
% log_name) # depends on [control=['if'], data=[]] if not 'prefix' in log_destination.keys(): input_fields['log_destination']['prefix'] = '' # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # TODO: check to see if required notification arns exist if notification_settings: for notification in notification_settings: arn_id = notification['arn'] # depends on [control=['for'], data=['notification']] # depends on [control=['if'], data=[]] # retrieve existing bucket fields existing_fields = self.read_bucket(bucket_name) # alphabetize tag list if existing_fields['tag_list']: existing_fields['tag_list'] = sorted(existing_fields['tag_list'], key=lambda k: k['key']) # depends on [control=['if'], data=[]] if input_fields['tag_list']: input_fields['tag_list'] = sorted(input_fields['tag_list'], key=lambda k: k['key']) # depends on [control=['if'], data=[]] # determine difference between new and old versions from labpack.parsing.comparison import compare_records change_list = compare_records(input_fields, existing_fields) if not change_list: self.iam.printer('There are no changes to make to bucket "%s".' % bucket_name) return change_list # depends on [control=['if'], data=[]] # process changes self.iam.printer('Updating bucket "%s".' % bucket_name, flush=True) processed_list = [] for change in change_list: # replace access control if change['path'][0] == 'access_control' and 'access_control' not in processed_list: kw_args = {'Bucket': bucket_name, 'ACL': input_fields['access_control']} try: self.connection.put_bucket_acl(**kw_args) self.iam.printer('.', flush=True) # depends on [control=['try'], data=[]] except: raise AWSConnectionError(title) # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]] processed_list.append('access_control') # replace version control if change['path'][0] == 'version_control' and 'version_control' not in processed_list: if input_fields['version_control']: try: self.connection.put_bucket_versioning(Bucket=bucket_name, VersioningConfiguration={'Status': 'Enabled'}) self.iam.printer('.', flush=True) # depends on [control=['try'], data=[]] except: raise AWSConnectionError(title) # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]] else: try: self.connection.put_bucket_versioning(Bucket=bucket_name, VersioningConfiguration={'Status': 'Suspended'}) self.iam.printer('.', flush=True) # depends on [control=['try'], data=[]] except: raise AWSConnectionError(title) # depends on [control=['except'], data=[]] processed_list.append('version_control') # depends on [control=['if'], data=[]] # replace log destination if change['path'][0] == 'log_destination' and 'log_destination' not in processed_list: if input_fields['log_destination']: log_name = input_fields['log_destination']['name'] log_prefix = input_fields['log_destination']['prefix'] kw_args = {'Bucket': bucket_name, 'BucketLoggingStatus': {'LoggingEnabled': {'TargetBucket': log_name}}} if log_prefix: kw_args['BucketLoggingStatus']['LoggingEnabled']['TargetPrefix'] = log_prefix # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] else: kw_args = {'Bucket': bucket_name, 'BucketLoggingStatus': {}} try: self.connection.put_bucket_logging(**kw_args) self.iam.printer('.', flush=True) # depends on [control=['try'], data=[]] except: raise AWSConnectionError(title) # depends on [control=['except'], data=[]] processed_list.append('log_destination') # depends on [control=['if'], data=[]] # replace lifecycle rules if change['path'][0] == 
'lifecycle_rules' and 'lifecycle_rules' not in processed_list: if input_fields['lifecycle_rules']: kw_args = {'Bucket': bucket_name, 'LifecycleConfiguration': {'Rules': []}} for rule in input_fields['lifecycle_rules']: details = {'Prefix': rule['prefix'], 'Status': 'Enabled'} if rule['action'] == 'archive': if rule['current_version']: details['Transition'] = {'Days': rule['longevity'], 'StorageClass': 'GLACIER'} # depends on [control=['if'], data=[]] else: details['NoncurrentVersionTransition'] = {'NoncurrentDays': rule['longevity'], 'StorageClass': 'GLACIER'} # depends on [control=['if'], data=[]] elif rule['current_version']: details['Expiration'] = {'Days': rule['longevity']} # depends on [control=['if'], data=[]] else: details['NoncurrentVersionExpiration'] = {'NoncurrentDays': rule['longevity']} kw_args['LifecycleConfiguration']['Rules'].append(details) # depends on [control=['for'], data=['rule']] try: self.connection.put_bucket_lifecycle(**kw_args) self.iam.printer('.', flush=True) # depends on [control=['try'], data=[]] except: raise AWSConnectionError(title) # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]] else: try: self.connection.delete_bucket_lifecycle(Bucket=bucket_name) self.iam.printer('.', flush=True) # depends on [control=['try'], data=[]] except: raise AWSConnectionError(title) # depends on [control=['except'], data=[]] processed_list.append('lifecycle_rules') # depends on [control=['if'], data=[]] # replace bucket tags if change['path'][0] == 'tag_list' and 'tag_list' not in processed_list: if input_fields['tag_list']: try: self.connection.put_bucket_tagging(Bucket=bucket_name, Tagging={'TagSet': self.iam.prepare(input_fields['tag_list'])}) self.iam.printer('.', flush=True) # depends on [control=['try'], data=[]] except: raise AWSConnectionError(title) # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]] else: try: self.connection.delete_bucket_tagging(Bucket=bucket_name) self.iam.printer('.', flush=True) # depends on [control=['try'], data=[]] except: raise AWSConnectionError(title) # depends on [control=['except'], data=[]] processed_list.append('tag_list') # depends on [control=['if'], data=[]] # replace notification settings if change['path'][0] == 'notification_settings' and 'notification_settings' not in processed_list: kw_args = {'Bucket': bucket_name, 'NotificationConfiguration': {}} if input_fields['notification_settings']: for notification in input_fields['notification_settings']: details = {'Events': [], 'Filter': {'Key': {'FilterRules': []}}} details['Events'].append(notification['event']) if notification['filters']: for (key, value) in notification['filters'].items(): filter_details = {'Name': key, 'Value': value} details['Filter']['Key']['FilterRules'].append(filter_details) # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]] if notification['service'] == 'sns': details['TopicArn'] = notification['arn'] if not 'TopicConfigurations' in kw_args['NotificationConfiguration']: kw_args['NotificationConfiguration']['TopicConfigurations'] = [] # depends on [control=['if'], data=[]] kw_args['NotificationConfiguration']['TopicConfigurations'].append(details) # depends on [control=['if'], data=[]] elif notification['service'] == 'sqs': details['QueueArn'] = notification['arn'] if not 'QueueConfigurations' in kw_args['NotificationConfiguration']: kw_args['NotificationConfiguration']['QueueConfigurations'] = [] # depends on [control=['if'], data=[]] 
kw_args['NotificationConfiguration']['QueueConfigurations'].append(details) # depends on [control=['if'], data=[]] elif notification['service'] == 'lambda': if not 'LambdaFunctionConfigurations' in kw_args['NotificationConfiguration']: kw_args['NotificationConfiguration']['LambdaFunctionConfigurations'] = [] # depends on [control=['if'], data=[]] details['LambdaFunctionArn'] = notification['arn'] kw_args['NotificationConfiguration']['LambdaFunctionConfigurations'].append(details) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['notification']] # depends on [control=['if'], data=[]] try: # TODO: response = self.connection.put_bucket_notification_configuration(**kw_args) self.iam.printer('.', flush=True) # depends on [control=['try'], data=[]] except: raise AWSConnectionError(title) # depends on [control=['except'], data=[]] processed_list.append('notification_settings') # depends on [control=['if'], data=[]] # TODO: replace region replication if change['path'][0] == 'region_replication' and 'region_replication' not in processed_list: if input_fields['region_replication']: pass # depends on [control=['if'], data=[]] else: pass processed_list.append('region_replication') # depends on [control=['if'], data=[]] # TODO: replace access policy if change['path'][0] == 'access_policy' and 'access_policy' not in processed_list: if input_fields['access_policy']: pass # depends on [control=['if'], data=[]] else: pass processed_list.append('access_policy') # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['change']] # report and return change list self.iam.printer(' done.') return change_list
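Stripped of the boto3 plumbing, the update_bucket record above follows a compare-then-apply pattern: diff the desired bucket fields against the current ones, then dispatch one API call per changed field. A minimal stand-alone sketch of that pattern follows; diff_fields is a toy stand-in invented for illustration, not labpack's compare_records.

# Minimal compare-then-apply sketch; diff_fields is a toy stand-in for
# labpack.parsing.comparison.compare_records (invented for illustration).
def diff_fields(desired, existing):
    """Yield a {'path': [...], 'value': ...} record per changed top-level field."""
    for key, value in desired.items():
        if existing.get(key) != value:
            yield {'path': [key], 'value': value}

existing = {'version_control': False, 'tag_list': []}
desired = {'version_control': True, 'tag_list': [{'key': 'env', 'value': 'dev'}]}
for change in diff_fields(desired, existing):
    print(change['path'][0], '->', change['value'])
# version_control -> True
# tag_list -> [{'key': 'env', 'value': 'dev'}]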
def generate_reset_password_token(self, user): """ Generates a unique reset password token for the specified user. :param user: The user to work with """ password_hash = self.hash_data(user.password) if user.password else None data = [str(user.id), password_hash] return self.security.reset_serializer.dumps(data)
def function[generate_reset_password_token, parameter[self, user]]: constant[ Generates a unique reset password token for the specified user. :param user: The user to work with ] variable[password_hash] assign[=] <ast.IfExp object at 0x7da20c6c7c10> variable[data] assign[=] list[[<ast.Call object at 0x7da20c6c7cd0>, <ast.Name object at 0x7da207f99810>]] return[call[name[self].security.reset_serializer.dumps, parameter[name[data]]]]
keyword[def] identifier[generate_reset_password_token] ( identifier[self] , identifier[user] ): literal[string] identifier[password_hash] = identifier[self] . identifier[hash_data] ( identifier[user] . identifier[password] ) keyword[if] identifier[user] . identifier[password] keyword[else] keyword[None] identifier[data] =[ identifier[str] ( identifier[user] . identifier[id] ), identifier[password_hash] ] keyword[return] identifier[self] . identifier[security] . identifier[reset_serializer] . identifier[dumps] ( identifier[data] )
def generate_reset_password_token(self, user): """ Generates a unique reset password token for the specified user. :param user: The user to work with """ password_hash = self.hash_data(user.password) if user.password else None data = [str(user.id), password_hash] return self.security.reset_serializer.dumps(data)
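For context, here is how such a token typically round-trips. The sketch assumes the security extension's reset_serializer is an itsdangerous URLSafeTimedSerializer, as in Flask-Security-style setups; the secret, salt, and payload values are placeholders.

# Token round-trip sketch; secret/salt/payload are placeholders, and the
# serializer type is an assumption based on Flask-Security conventions.
from itsdangerous import URLSafeTimedSerializer

serializer = URLSafeTimedSerializer('not-a-real-secret', salt='reset-salt')
token = serializer.dumps(['42', 'fake-password-hash'])          # [user id, password hash]
user_id, password_hash = serializer.loads(token, max_age=3600)  # raises if expired
print(user_id, password_hash)  # 42 fake-password-hash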
def __we_c(cls, calib, tc, temp, we_v): """ Compute weC from sensor temperature compensation of weV, aeV """ we_t = we_v - (calib.we_elc_mv / 1000.0) # remove electronic we zero we_c = tc.correct(calib, temp, we_t) # print("D4Datum__we_c: we_t:%f we_c:%s" % (we_t, we_c), file=sys.stderr) return we_c
def function[__we_c, parameter[cls, calib, tc, temp, we_v]]: constant[ Compute weC from sensor temperature compensation of weV, aeV ] variable[we_t] assign[=] binary_operation[name[we_v] - binary_operation[name[calib].we_elc_mv / constant[1000.0]]] variable[we_c] assign[=] call[name[tc].correct, parameter[name[calib], name[temp], name[we_t]]] return[name[we_c]]
keyword[def] identifier[__we_c] ( identifier[cls] , identifier[calib] , identifier[tc] , identifier[temp] , identifier[we_v] ): literal[string] identifier[we_t] = identifier[we_v] -( identifier[calib] . identifier[we_elc_mv] / literal[int] ) identifier[we_c] = identifier[tc] . identifier[correct] ( identifier[calib] , identifier[temp] , identifier[we_t] ) keyword[return] identifier[we_c]
def __we_c(cls, calib, tc, temp, we_v): """ Compute weC from sensor temperature compensation of weV, aeV """ we_t = we_v - calib.we_elc_mv / 1000.0 # remove electronic we zero we_c = tc.correct(calib, temp, we_t) # print("D4Datum__we_c: we_t:%f we_c:%s" % (we_t, we_c), file=sys.stderr) return we_c
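A small worked example of the correction above, using stand-in objects; the calib field value and the identity temperature correction are invented purely to show the arithmetic.

# Stand-ins for the calib and tc collaborators (values invented for illustration).
class Calib:
    we_elc_mv = 310                      # electronic zero offset, millivolts

class IdentityTC:
    @staticmethod
    def correct(calib, temp, we_t):
        return we_t                      # a real table would adjust for temperature

we_v = 0.345                             # measured working-electrode voltage, volts
we_t = we_v - Calib.we_elc_mv / 1000.0   # remove electronic zero: ~0.035 V
print(IdentityTC.correct(Calib, 20.0, we_t))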
def precision_recall_by_user(observed_user_items,
                             recommendations,
                             cutoffs=[10]):
    """
    Compute precision and recall at a given cutoff for each user. In information
    retrieval terms, precision represents the ratio of relevant, retrieved items
    to the number of retrieved items. Recall represents the ratio of relevant,
    retrieved items to the number of relevant items.

    Let :math:`p_k` be a vector of the first :math:`k` elements in the
    recommendations for a particular user, and let :math:`a` be the set of items
    in ``observed_user_items`` for that user. The "precision at cutoff k" for
    this user is defined as

    .. math::
        P(k) = \\frac{ | a \\cap p_k | }{k},

    while "recall at cutoff k" is defined as

    .. math::
        R(k) = \\frac{ | a \\cap p_k | }{|a|}

    The order of the elements in the recommendations affects the returned
    precision and recall scores.

    Parameters
    ----------
    observed_user_items : SFrame
        An SFrame containing observed user item pairs, where the first
        column contains user ids and the second column contains item ids.

    recommendations : SFrame
        An SFrame containing columns pertaining to the user id, the item id,
        the score given to that pair, and the rank of that item among the
        recommendations made for user id. For example, see the output of
        recommend() produced by any turicreate.recommender model.

    cutoffs : list[int], optional
        The cutoffs to use when computing precision and recall.

    Returns
    -------
    out : SFrame
        An SFrame containing columns user id, cutoff, precision, recall, and
        count where the precision and recall are reported for each user at
        each requested cutoff, and count is the number of observations for
        that user id.

    Notes
    -----
    The corner cases that involve empty lists were chosen to be consistent
    with the feasible set of precision-recall curves, which start at
    (precision, recall) = (1,0) and end at (0,1). However, we do not believe
    there is a well-known consensus on this choice.

    Examples
    --------
    Given SFrames ``train_data`` and ``test_data`` with columns user_id
    and item_id:

    >>> from turicreate.toolkits.recommender.util import precision_recall_by_user
    >>> m = turicreate.recommender.create(train_data)
    >>> recs = m.recommend()
    >>> precision_recall_by_user(test_data, recs, cutoffs=[5, 10])
    """
    assert type(observed_user_items) == _SFrame
    assert type(recommendations) == _SFrame
    assert type(cutoffs) == list
    assert min(cutoffs) > 0, "All cutoffs must be positive integers."
    assert recommendations.num_columns() >= 2
    user_id = recommendations.column_names()[0]
    item_id = recommendations.column_names()[1]

    assert observed_user_items.num_rows() > 0, \
           "Evaluating precision and recall requires a non-empty " + \
           "observed_user_items."
    assert user_id in observed_user_items.column_names(), \
           "User column required in observed_user_items."
    assert item_id in observed_user_items.column_names(), \
           "Item column required in observed_user_items."

    assert observed_user_items[user_id].dtype == \
           recommendations[user_id].dtype, \
           "The user column in the two provided SFrames must have the same type."
    assert observed_user_items[item_id].dtype == \
           recommendations[item_id].dtype, \
           "The item column in the two provided SFrames must have the same type."

    cutoffs = _array.array('f', cutoffs)

    opts = {'data': observed_user_items,
            'recommendations': recommendations,
            'cutoffs': cutoffs}

    response = _turicreate.toolkits._main.run('evaluation_precision_recall_by_user', opts)
    sf = _SFrame(None, _proxy=response['pr'])
    return sf.sort([user_id, 'cutoff'])
def function[precision_recall_by_user, parameter[observed_user_items, recommendations, cutoffs]]: constant[ Compute precision and recall at a given cutoff for each user. In information retrieval terms, precision represents the ratio of relevant, retrieved items to the number of relevant items. Recall represents the ratio of relevant, retrieved items to the number of relevant items. Let :math:`p_k` be a vector of the first :math:`k` elements in the recommendations for a particular user, and let :math:`a` be the set of items in ``observed_user_items`` for that user. The "precision at cutoff k" for this user is defined as .. math:: P(k) = \frac{ | a \cap p_k | }{k}, while "recall at cutoff k" is defined as .. math:: R(k) = \frac{ | a \cap p_k | }{|a|} The order of the elements in the recommendations affects the returned precision and recall scores. Parameters ---------- observed_user_items : SFrame An SFrame containing observed user item pairs, where the first column contains user ids and the second column contains item ids. recommendations : SFrame An SFrame containing columns pertaining to the user id, the item id, the score given to that pair, and the rank of that item among the recommendations made for user id. For example, see the output of recommend() produced by any turicreate.recommender model. cutoffs : list[int], optional The cutoffs to use when computing precision and recall. Returns ------- out : SFrame An SFrame containing columns user id, cutoff, precision, recall, and count where the precision and recall are reported for each user at each requested cutoff, and count is the number of observations for that user id. Notes ----- The corner cases that involve empty lists were chosen to be consistent with the feasible set of precision-recall curves, which start at (precision, recall) = (1,0) and end at (0,1). However, we do not believe there is a well-known consensus on this choice. 
Examples -------- Given SFrames ``train_data`` and ``test_data`` with columns user_id and item_id: >>> from turicreate.toolkits.recommender.util import precision_recall_by_user >>> m = turicreate.recommender.create(train_data) >>> recs = m.recommend() >>> precision_recall_by_user(test_data, recs, cutoffs=[5, 10]) ] assert[compare[call[name[type], parameter[name[observed_user_items]]] equal[==] name[_SFrame]]] assert[compare[call[name[type], parameter[name[recommendations]]] equal[==] name[_SFrame]]] assert[compare[call[name[type], parameter[name[cutoffs]]] equal[==] name[list]]] assert[compare[call[name[min], parameter[name[cutoffs]]] greater[>] constant[0]]] assert[compare[call[name[recommendations].num_columns, parameter[]] greater_or_equal[>=] constant[2]]] variable[user_id] assign[=] call[call[name[recommendations].column_names, parameter[]]][constant[0]] variable[item_id] assign[=] call[call[name[recommendations].column_names, parameter[]]][constant[1]] assert[compare[call[name[observed_user_items].num_rows, parameter[]] greater[>] constant[0]]] assert[compare[name[user_id] in call[name[observed_user_items].column_names, parameter[]]]] assert[compare[name[item_id] in call[name[observed_user_items].column_names, parameter[]]]] assert[compare[call[name[observed_user_items]][name[user_id]].dtype equal[==] call[name[recommendations]][name[user_id]].dtype]] assert[compare[call[name[observed_user_items]][name[item_id]].dtype equal[==] call[name[recommendations]][name[item_id]].dtype]] variable[cutoffs] assign[=] call[name[_array].array, parameter[constant[f], name[cutoffs]]] variable[opts] assign[=] dictionary[[<ast.Constant object at 0x7da1b1f77a60>, <ast.Constant object at 0x7da1b1f74f10>, <ast.Constant object at 0x7da1b1f77310>], [<ast.Name object at 0x7da1b1f75ae0>, <ast.Name object at 0x7da1b1f74400>, <ast.Name object at 0x7da1b1f77280>]] variable[response] assign[=] call[name[_turicreate].toolkits._main.run, parameter[constant[evaluation_precision_recall_by_user], name[opts]]] variable[sf] assign[=] call[name[_SFrame], parameter[constant[None]]] return[call[name[sf].sort, parameter[list[[<ast.Name object at 0x7da20c992c50>, <ast.Constant object at 0x7da20c991240>]]]]]
keyword[def] identifier[precision_recall_by_user] ( identifier[observed_user_items] , identifier[recommendations] , identifier[cutoffs] =[ literal[int] ]): literal[string] keyword[assert] identifier[type] ( identifier[observed_user_items] )== identifier[_SFrame] keyword[assert] identifier[type] ( identifier[recommendations] )== identifier[_SFrame] keyword[assert] identifier[type] ( identifier[cutoffs] )== identifier[list] keyword[assert] identifier[min] ( identifier[cutoffs] )> literal[int] , literal[string] keyword[assert] identifier[recommendations] . identifier[num_columns] ()>= literal[int] identifier[user_id] = identifier[recommendations] . identifier[column_names] ()[ literal[int] ] identifier[item_id] = identifier[recommendations] . identifier[column_names] ()[ literal[int] ] keyword[assert] identifier[observed_user_items] . identifier[num_rows] ()> literal[int] , literal[string] + literal[string] keyword[assert] identifier[user_id] keyword[in] identifier[observed_user_items] . identifier[column_names] (), literal[string] keyword[assert] identifier[item_id] keyword[in] identifier[observed_user_items] . identifier[column_names] (), literal[string] keyword[assert] identifier[observed_user_items] [ identifier[user_id] ]. identifier[dtype] == identifier[recommendations] [ identifier[user_id] ]. identifier[dtype] , literal[string] keyword[assert] identifier[observed_user_items] [ identifier[item_id] ]. identifier[dtype] == identifier[recommendations] [ identifier[item_id] ]. identifier[dtype] , literal[string] identifier[cutoffs] = identifier[_array] . identifier[array] ( literal[string] , identifier[cutoffs] ) identifier[opts] ={ literal[string] : identifier[observed_user_items] , literal[string] : identifier[recommendations] , literal[string] : identifier[cutoffs] } identifier[response] = identifier[_turicreate] . identifier[toolkits] . identifier[_main] . identifier[run] ( literal[string] , identifier[opts] ) identifier[sf] = identifier[_SFrame] ( keyword[None] , identifier[_proxy] = identifier[response] [ literal[string] ]) keyword[return] identifier[sf] . identifier[sort] ([ identifier[user_id] , literal[string] ])
def precision_recall_by_user(observed_user_items, recommendations, cutoffs=[10]):
    """
    Compute precision and recall at a given cutoff for each user. In information
    retrieval terms, precision represents the ratio of relevant, retrieved items
    to the number of retrieved items. Recall represents the ratio of relevant,
    retrieved items to the number of relevant items.

    Let :math:`p_k` be a vector of the first :math:`k` elements in the
    recommendations for a particular user, and let :math:`a` be the set of items
    in ``observed_user_items`` for that user. The "precision at cutoff k" for
    this user is defined as

    .. math:: P(k) = \\frac{ | a \\cap p_k | }{k},

    while "recall at cutoff k" is defined as

    .. math:: R(k) = \\frac{ | a \\cap p_k | }{|a|}

    The order of the elements in the recommendations affects the returned
    precision and recall scores.

    Parameters
    ----------
    observed_user_items : SFrame
        An SFrame containing observed user item pairs, where the first
        column contains user ids and the second column contains item ids.

    recommendations : SFrame
        An SFrame containing columns pertaining to the user id, the item id,
        the score given to that pair, and the rank of that item among the
        recommendations made for user id. For example, see the output of
        recommend() produced by any turicreate.recommender model.

    cutoffs : list[int], optional
        The cutoffs to use when computing precision and recall.

    Returns
    -------
    out : SFrame
        An SFrame containing columns user id, cutoff, precision, recall, and
        count where the precision and recall are reported for each user at
        each requested cutoff, and count is the number of observations for
        that user id.

    Notes
    -----
    The corner cases that involve empty lists were chosen to be consistent
    with the feasible set of precision-recall curves, which start at
    (precision, recall) = (1,0) and end at (0,1). However, we do not believe
    there is a well-known consensus on this choice.

    Examples
    --------
    Given SFrames ``train_data`` and ``test_data`` with columns user_id
    and item_id:

    >>> from turicreate.toolkits.recommender.util import precision_recall_by_user
    >>> m = turicreate.recommender.create(train_data)
    >>> recs = m.recommend()
    >>> precision_recall_by_user(test_data, recs, cutoffs=[5, 10])
    """
    assert type(observed_user_items) == _SFrame
    assert type(recommendations) == _SFrame
    assert type(cutoffs) == list
    assert min(cutoffs) > 0, 'All cutoffs must be positive integers.'
    assert recommendations.num_columns() >= 2
    user_id = recommendations.column_names()[0]
    item_id = recommendations.column_names()[1]
    assert observed_user_items.num_rows() > 0, 'Evaluating precision and recall requires a non-empty ' + 'observed_user_items.'
    assert user_id in observed_user_items.column_names(), 'User column required in observed_user_items.'
    assert item_id in observed_user_items.column_names(), 'Item column required in observed_user_items.'
    assert observed_user_items[user_id].dtype == recommendations[user_id].dtype, 'The user column in the two provided SFrames must have the same type.'
    assert observed_user_items[item_id].dtype == recommendations[item_id].dtype, 'The item column in the two provided SFrames must have the same type.'
    cutoffs = _array.array('f', cutoffs)
    opts = {'data': observed_user_items, 'recommendations': recommendations, 'cutoffs': cutoffs}
    response = _turicreate.toolkits._main.run('evaluation_precision_recall_by_user', opts)
    sf = _SFrame(None, _proxy=response['pr'])
    return sf.sort([user_id, 'cutoff'])
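The P(k)/R(k) definitions in this record's docstring can be checked by hand with plain Python; the sketch below uses made-up data and has no turicreate dependency, so the item ids and ranking are purely illustrative.

# Plain-Python illustration of the precision/recall-at-k definitions above;
# 'observed' plays the role of one user's rows in observed_user_items and
# 'ranked' that user's recommendations, best first (made-up values).
observed = {'a', 'b', 'c'}
ranked = ['b', 'x', 'a', 'y', 'z']

def precision_recall_at(k, ranked, observed):
    hits = len(observed & set(ranked[:k]))
    return hits / k, hits / len(observed)

print(precision_recall_at(2, ranked, observed))  # (0.5, 0.333...): 1 hit in top 2
print(precision_recall_at(5, ranked, observed))  # (0.4, 0.666...): 2 hits in top 5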
def construct_streamreader_callback(process, handler):
    """ here we're constructing a closure for our streamreader callback.  this
    is used in the case that we pass a callback into _out or _err, meaning we
    want our callback to handle each bit of output

    we construct the closure based on how many arguments it takes.  the reason
    for this is to make it as easy as possible for people to use, without
    limiting them.  a new user will assume the callback takes 1 argument (the
    data).  as they get more advanced, they may want to terminate the process,
    or pass some stdin back, and will realize that they can pass a callback of
    more args """

    # implied arg refers to the "self" that methods will pass in.  we need to
    # account for this implied arg when figuring out what function the user
    # passed in based on number of args
    implied_arg = 0

    partial_args = 0
    handler_to_inspect = handler

    if isinstance(handler, partial):
        partial_args = len(handler.args)
        handler_to_inspect = handler.func

    if inspect.ismethod(handler_to_inspect):
        implied_arg = 1
        num_args = get_num_args(handler_to_inspect)

    else:
        if inspect.isfunction(handler_to_inspect):
            num_args = get_num_args(handler_to_inspect)

        # is an object instance with __call__ method
        else:
            implied_arg = 1
            num_args = get_num_args(handler_to_inspect.__call__)

    net_args = num_args - implied_arg - partial_args

    handler_args = ()

    # just the chunk
    if net_args == 1:
        handler_args = ()

    # chunk, stdin
    if net_args == 2:
        handler_args = (process.stdin,)

    # chunk, stdin, process
    elif net_args == 3:
        # notice we're only storing a weakref, to prevent cyclic references
        # (where the process holds a streamreader, and a streamreader holds a
        # handler-closure with a reference to the process)
        handler_args = (process.stdin, weakref.ref(process))

    def fn(chunk):
        # this is pretty ugly, but we're evaluating the process at call-time,
        # because it's a weakref
        args = handler_args
        if len(args) == 2:
            args = (handler_args[0], handler_args[1]())
        return handler(chunk, *args)

    return fn
def function[construct_streamreader_callback, parameter[process, handler]]: constant[ here we're constructing a closure for our streamreader callback. this is used in the case that we pass a callback into _out or _err, meaning we want to our callback to handle each bit of output we construct the closure based on how many arguments it takes. the reason for this is to make it as easy as possible for people to use, without limiting them. a new user will assume the callback takes 1 argument (the data). as they get more advanced, they may want to terminate the process, or pass some stdin back, and will realize that they can pass a callback of more args ] variable[implied_arg] assign[=] constant[0] variable[partial_args] assign[=] constant[0] variable[handler_to_inspect] assign[=] name[handler] if call[name[isinstance], parameter[name[handler], name[partial]]] begin[:] variable[partial_args] assign[=] call[name[len], parameter[name[handler].args]] variable[handler_to_inspect] assign[=] name[handler].func if call[name[inspect].ismethod, parameter[name[handler_to_inspect]]] begin[:] variable[implied_arg] assign[=] constant[1] variable[num_args] assign[=] call[name[get_num_args], parameter[name[handler_to_inspect]]] variable[net_args] assign[=] binary_operation[binary_operation[name[num_args] - name[implied_arg]] - name[partial_args]] variable[handler_args] assign[=] tuple[[]] if compare[name[net_args] equal[==] constant[1]] begin[:] variable[handler_args] assign[=] tuple[[]] if compare[name[net_args] equal[==] constant[2]] begin[:] variable[handler_args] assign[=] tuple[[<ast.Attribute object at 0x7da18fe93400>]] def function[fn, parameter[chunk]]: variable[args] assign[=] name[handler_args] if compare[call[name[len], parameter[name[args]]] equal[==] constant[2]] begin[:] variable[args] assign[=] tuple[[<ast.Subscript object at 0x7da1b21c7520>, <ast.Call object at 0x7da1b21c7670>]] return[call[name[handler], parameter[name[chunk], <ast.Starred object at 0x7da1b21c7850>]]] return[name[fn]]
keyword[def] identifier[construct_streamreader_callback] ( identifier[process] , identifier[handler] ): literal[string] identifier[implied_arg] = literal[int] identifier[partial_args] = literal[int] identifier[handler_to_inspect] = identifier[handler] keyword[if] identifier[isinstance] ( identifier[handler] , identifier[partial] ): identifier[partial_args] = identifier[len] ( identifier[handler] . identifier[args] ) identifier[handler_to_inspect] = identifier[handler] . identifier[func] keyword[if] identifier[inspect] . identifier[ismethod] ( identifier[handler_to_inspect] ): identifier[implied_arg] = literal[int] identifier[num_args] = identifier[get_num_args] ( identifier[handler_to_inspect] ) keyword[else] : keyword[if] identifier[inspect] . identifier[isfunction] ( identifier[handler_to_inspect] ): identifier[num_args] = identifier[get_num_args] ( identifier[handler_to_inspect] ) keyword[else] : identifier[implied_arg] = literal[int] identifier[num_args] = identifier[get_num_args] ( identifier[handler_to_inspect] . identifier[__call__] ) identifier[net_args] = identifier[num_args] - identifier[implied_arg] - identifier[partial_args] identifier[handler_args] =() keyword[if] identifier[net_args] == literal[int] : identifier[handler_args] =() keyword[if] identifier[net_args] == literal[int] : identifier[handler_args] =( identifier[process] . identifier[stdin] ,) keyword[elif] identifier[net_args] == literal[int] : identifier[handler_args] =( identifier[process] . identifier[stdin] , identifier[weakref] . identifier[ref] ( identifier[process] )) keyword[def] identifier[fn] ( identifier[chunk] ): identifier[args] = identifier[handler_args] keyword[if] identifier[len] ( identifier[args] )== literal[int] : identifier[args] =( identifier[handler_args] [ literal[int] ], identifier[handler_args] [ literal[int] ]()) keyword[return] identifier[handler] ( identifier[chunk] ,* identifier[args] ) keyword[return] identifier[fn]
def construct_streamreader_callback(process, handler):
    """ here we're constructing a closure for our streamreader callback.  this
    is used in the case that we pass a callback into _out or _err, meaning we
    want our callback to handle each bit of output

    we construct the closure based on how many arguments it takes.  the reason
    for this is to make it as easy as possible for people to use, without
    limiting them.  a new user will assume the callback takes 1 argument (the
    data).  as they get more advanced, they may want to terminate the process,
    or pass some stdin back, and will realize that they can pass a callback of
    more args """
    # implied arg refers to the "self" that methods will pass in. we need to
    # account for this implied arg when figuring out what function the user
    # passed in based on number of args
    implied_arg = 0
    partial_args = 0
    handler_to_inspect = handler
    if isinstance(handler, partial):
        partial_args = len(handler.args)
        handler_to_inspect = handler.func # depends on [control=['if'], data=[]]
    if inspect.ismethod(handler_to_inspect):
        implied_arg = 1
        num_args = get_num_args(handler_to_inspect) # depends on [control=['if'], data=[]]
    elif inspect.isfunction(handler_to_inspect):
        num_args = get_num_args(handler_to_inspect) # depends on [control=['if'], data=[]]
    else:
        # is an object instance with __call__ method
        implied_arg = 1
        num_args = get_num_args(handler_to_inspect.__call__)
    net_args = num_args - implied_arg - partial_args
    handler_args = ()
    # just the chunk
    if net_args == 1:
        handler_args = () # depends on [control=['if'], data=[]]
    # chunk, stdin
    if net_args == 2:
        handler_args = (process.stdin,) # depends on [control=['if'], data=[]]
    # chunk, stdin, process
    elif net_args == 3:
        # notice we're only storing a weakref, to prevent cyclic references
        # (where the process holds a streamreader, and a streamreader holds a
        # handler-closure with a reference to the process)
        handler_args = (process.stdin, weakref.ref(process)) # depends on [control=['if'], data=[]]

    def fn(chunk):
        # this is pretty ugly, but we're evaluating the process at call-time,
        # because it's a weakref
        args = handler_args
        if len(args) == 2:
            args = (handler_args[0], handler_args[1]()) # depends on [control=['if'], data=[]]
        return handler(chunk, *args)
    return fn
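The heart of the record above is arity dispatch: count a callback's positional parameters, then close over the matching argument tuple. Below is a simplified stand-alone sketch of that idea using inspect.signature; unlike the original's get_num_args path, it ignores functools.partial and bound-method bookkeeping.

# Simplified arity dispatch (no partial/method handling, unlike the original).
import inspect

def arity(fn):
    params = inspect.signature(fn).parameters.values()
    return sum(p.kind in (p.POSITIONAL_ONLY, p.POSITIONAL_OR_KEYWORD) for p in params)

def make_callback(handler, stdin, process):
    extra = {1: (), 2: (stdin,), 3: (stdin, process)}[arity(handler)]
    return lambda chunk: handler(chunk, *extra)

cb = make_callback(lambda chunk, stdin: print(chunk, stdin), '<stdin>', None)
cb(b'hello')  # b'hello' <stdin>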
def search(self, term, type='place', page=False, retry=3, **options):
    """
    Search for an item in the Graph API.

    :param term: A string describing the search term.
    :param type: A string describing the type of items to search for.
    :param page: A boolean describing whether to return a generator that
                 iterates over each page of results.
    :param retry: An integer describing how many times the request may be retried.
    :param options: Graph API parameters, such as 'center' and 'distance'.

    Only the ``place`` type is supported since Graph API 2.0.

    See `Facebook's Graph API documentation <http://developers.facebook.com/docs/reference/api/>`_
    for an exhaustive list of options.
    """

    if type != 'place':
        raise ValueError('Unsupported type "%s". The only supported type is "place" since Graph API 2.0.' % type)

    options = dict({
        'q': term,
        'type': type,
    }, **options)

    response = self._query('GET', 'search', options, page, retry)

    return response
def function[search, parameter[self, term, type, page, retry]]: constant[ Search for an item in the Graph API. :param term: A string describing the search term. :param type: A string describing the type of items to search for. :param page: A boolean describing whether to return a generator that iterates over each page of results. :param retry: An integer describing how many times the request may be retried. :param options: Graph API parameters, such as 'center' and 'distance'. Supported types are only ``place`` since Graph API 2.0. See `Facebook's Graph API documentation <http://developers.facebook.com/docs/reference/api/>`_ for an exhaustive list of options. ] if compare[name[type] not_equal[!=] constant[place]] begin[:] <ast.Raise object at 0x7da20c76de70> variable[options] assign[=] call[name[dict], parameter[dictionary[[<ast.Constant object at 0x7da20c76f640>, <ast.Constant object at 0x7da20c76d3f0>], [<ast.Name object at 0x7da20c76dba0>, <ast.Name object at 0x7da20c76e110>]]]] variable[response] assign[=] call[name[self]._query, parameter[constant[GET], constant[search], name[options], name[page], name[retry]]] return[name[response]]
keyword[def] identifier[search] ( identifier[self] , identifier[term] , identifier[type] = literal[string] , identifier[page] = keyword[False] , identifier[retry] = literal[int] ,** identifier[options] ): literal[string] keyword[if] identifier[type] != literal[string] : keyword[raise] identifier[ValueError] ( literal[string] % identifier[type] ) identifier[options] = identifier[dict] ({ literal[string] : identifier[term] , literal[string] : identifier[type] , },** identifier[options] ) identifier[response] = identifier[self] . identifier[_query] ( literal[string] , literal[string] , identifier[options] , identifier[page] , identifier[retry] ) keyword[return] identifier[response]
def search(self, term, type='place', page=False, retry=3, **options):
    """
    Search for an item in the Graph API.

    :param term: A string describing the search term.
    :param type: A string describing the type of items to search for.
    :param page: A boolean describing whether to return a generator that
                 iterates over each page of results.
    :param retry: An integer describing how many times the request may be retried.
    :param options: Graph API parameters, such as 'center' and 'distance'.

    Only the ``place`` type is supported since Graph API 2.0.

    See `Facebook's Graph API documentation <http://developers.facebook.com/docs/reference/api/>`_
    for an exhaustive list of options.
    """
    if type != 'place':
        raise ValueError('Unsupported type "%s". The only supported type is "place" since Graph API 2.0.' % type) # depends on [control=['if'], data=['type']]
    options = dict({'q': term, 'type': type}, **options)
    response = self._query('GET', 'search', options, page, retry)
    return response
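A usage sketch for the method above. It matches facepy's GraphAPI.search signature, but treating the host class as facepy's client (and the token and coordinates below) is an assumption for illustration.

# Hypothetical call; the access token and coordinates are placeholders.
from facepy import GraphAPI

graph = GraphAPI('a-valid-access-token')
places = graph.search('coffee', type='place', center='37.76,-122.42', distance=1000)
for place in places['data']:        # Graph API responses wrap results in 'data'
    print(place['name'])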
def slot_remove_nio_binding(self, slot_number, port_number): """ Removes a slot NIO binding. :param slot_number: slot number :param port_number: port number :returns: removed NIO instance """ try: adapter = self._slots[slot_number] except IndexError: raise DynamipsError('Slot {slot_number} does not exist on router "{name}"'.format(name=self._name, slot_number=slot_number)) if adapter is None: raise DynamipsError("Adapter is missing in slot {slot_number}".format(slot_number=slot_number)) if not adapter.port_exists(port_number): raise DynamipsError("Port {port_number} does not exist in adapter {adapter}".format(adapter=adapter, port_number=port_number)) yield from self.slot_disable_nio(slot_number, port_number) yield from self._hypervisor.send('vm slot_remove_nio_binding "{name}" {slot_number} {port_number}'.format(name=self._name, slot_number=slot_number, port_number=port_number)) nio = adapter.get_nio(port_number) if nio is None: return yield from nio.close() adapter.remove_nio(port_number) log.info('Router "{name}" [{id}]: NIO {nio_name} removed from port {slot_number}/{port_number}'.format(name=self._name, id=self._id, nio_name=nio.name, slot_number=slot_number, port_number=port_number)) return nio
def function[slot_remove_nio_binding, parameter[self, slot_number, port_number]]: constant[ Removes a slot NIO binding. :param slot_number: slot number :param port_number: port number :returns: removed NIO instance ] <ast.Try object at 0x7da20e74be20> if compare[name[adapter] is constant[None]] begin[:] <ast.Raise object at 0x7da204564c10> if <ast.UnaryOp object at 0x7da204567250> begin[:] <ast.Raise object at 0x7da20e957550> <ast.YieldFrom object at 0x7da20e957c70> <ast.YieldFrom object at 0x7da20e957010> variable[nio] assign[=] call[name[adapter].get_nio, parameter[name[port_number]]] if compare[name[nio] is constant[None]] begin[:] return[None] <ast.YieldFrom object at 0x7da20e955960> call[name[adapter].remove_nio, parameter[name[port_number]]] call[name[log].info, parameter[call[constant[Router "{name}" [{id}]: NIO {nio_name} removed from port {slot_number}/{port_number}].format, parameter[]]]] return[name[nio]]
keyword[def] identifier[slot_remove_nio_binding] ( identifier[self] , identifier[slot_number] , identifier[port_number] ): literal[string] keyword[try] : identifier[adapter] = identifier[self] . identifier[_slots] [ identifier[slot_number] ] keyword[except] identifier[IndexError] : keyword[raise] identifier[DynamipsError] ( literal[string] . identifier[format] ( identifier[name] = identifier[self] . identifier[_name] , identifier[slot_number] = identifier[slot_number] )) keyword[if] identifier[adapter] keyword[is] keyword[None] : keyword[raise] identifier[DynamipsError] ( literal[string] . identifier[format] ( identifier[slot_number] = identifier[slot_number] )) keyword[if] keyword[not] identifier[adapter] . identifier[port_exists] ( identifier[port_number] ): keyword[raise] identifier[DynamipsError] ( literal[string] . identifier[format] ( identifier[adapter] = identifier[adapter] , identifier[port_number] = identifier[port_number] )) keyword[yield] keyword[from] identifier[self] . identifier[slot_disable_nio] ( identifier[slot_number] , identifier[port_number] ) keyword[yield] keyword[from] identifier[self] . identifier[_hypervisor] . identifier[send] ( literal[string] . identifier[format] ( identifier[name] = identifier[self] . identifier[_name] , identifier[slot_number] = identifier[slot_number] , identifier[port_number] = identifier[port_number] )) identifier[nio] = identifier[adapter] . identifier[get_nio] ( identifier[port_number] ) keyword[if] identifier[nio] keyword[is] keyword[None] : keyword[return] keyword[yield] keyword[from] identifier[nio] . identifier[close] () identifier[adapter] . identifier[remove_nio] ( identifier[port_number] ) identifier[log] . identifier[info] ( literal[string] . identifier[format] ( identifier[name] = identifier[self] . identifier[_name] , identifier[id] = identifier[self] . identifier[_id] , identifier[nio_name] = identifier[nio] . identifier[name] , identifier[slot_number] = identifier[slot_number] , identifier[port_number] = identifier[port_number] )) keyword[return] identifier[nio]
def slot_remove_nio_binding(self, slot_number, port_number): """ Removes a slot NIO binding. :param slot_number: slot number :param port_number: port number :returns: removed NIO instance """ try: adapter = self._slots[slot_number] # depends on [control=['try'], data=[]] except IndexError: raise DynamipsError('Slot {slot_number} does not exist on router "{name}"'.format(name=self._name, slot_number=slot_number)) # depends on [control=['except'], data=[]] if adapter is None: raise DynamipsError('Adapter is missing in slot {slot_number}'.format(slot_number=slot_number)) # depends on [control=['if'], data=[]] if not adapter.port_exists(port_number): raise DynamipsError('Port {port_number} does not exist in adapter {adapter}'.format(adapter=adapter, port_number=port_number)) # depends on [control=['if'], data=[]] yield from self.slot_disable_nio(slot_number, port_number) yield from self._hypervisor.send('vm slot_remove_nio_binding "{name}" {slot_number} {port_number}'.format(name=self._name, slot_number=slot_number, port_number=port_number)) nio = adapter.get_nio(port_number) if nio is None: return # depends on [control=['if'], data=[]] yield from nio.close() adapter.remove_nio(port_number) log.info('Router "{name}" [{id}]: NIO {nio_name} removed from port {slot_number}/{port_number}'.format(name=self._name, id=self._id, nio_name=nio.name, slot_number=slot_number, port_number=port_number)) return nio
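# A hedged usage sketch for slot_remove_nio_binding() above: the body uses
# `yield from`, so it must be driven as a coroutine under the generator-based
# asyncio style this code targets; `router` is a hypothetical Router instance
# attached to a running Dynamips hypervisor.
import asyncio

@asyncio.coroutine
def detach_port(router, slot_number=0, port_number=0):
    nio = yield from router.slot_remove_nio_binding(slot_number, port_number)
    return nio  # the removed NIO instance, per the docstring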
def write(self, page, data): """Send a WRITE command to store data on the tag. The *page* argument specifies the offset in multiples of 4 bytes. The *data* argument must be a string or bytearray of length 4. Command execution errors raise :exc:`Type2TagCommandError`. """ if len(data) != 4: raise ValueError("data must be a four byte string or array") log.debug("write {0} to page {1}".format(hexlify(data), page)) rsp = self.transceive("\xA2" + chr(page % 256) + data) if len(rsp) != 1: log.debug("invalid response " + hexlify(data)) raise Type2TagCommandError(INVALID_RESPONSE_ERROR) if rsp[0] != 0x0A: # NAK log.debug("invalid page, received nak") raise Type2TagCommandError(INVALID_PAGE_ERROR) return True
def function[write, parameter[self, page, data]]: constant[Send a WRITE command to store data on the tag. The *page* argument specifies the offset in multiples of 4 bytes. The *data* argument must be a string or bytearray of length 4. Command execution errors raise :exc:`Type2TagCommandError`. ] if compare[call[name[len], parameter[name[data]]] not_equal[!=] constant[4]] begin[:] <ast.Raise object at 0x7da1b1897880> call[name[log].debug, parameter[call[constant[write {0} to page {1}].format, parameter[call[name[hexlify], parameter[name[data]]], name[page]]]]] variable[rsp] assign[=] call[name[self].transceive, parameter[binary_operation[binary_operation[constant[¢] + call[name[chr], parameter[binary_operation[name[page] <ast.Mod object at 0x7da2590d6920> constant[256]]]]] + name[data]]]] if compare[call[name[len], parameter[name[rsp]]] not_equal[!=] constant[1]] begin[:] call[name[log].debug, parameter[binary_operation[constant[invalid response ] + call[name[hexlify], parameter[name[data]]]]]] <ast.Raise object at 0x7da18fe91ae0> if compare[call[name[rsp]][constant[0]] not_equal[!=] constant[10]] begin[:] call[name[log].debug, parameter[constant[invalid page, received nak]]] <ast.Raise object at 0x7da18fe93070> return[constant[True]]
keyword[def] identifier[write] ( identifier[self] , identifier[page] , identifier[data] ): literal[string] keyword[if] identifier[len] ( identifier[data] )!= literal[int] : keyword[raise] identifier[ValueError] ( literal[string] ) identifier[log] . identifier[debug] ( literal[string] . identifier[format] ( identifier[hexlify] ( identifier[data] ), identifier[page] )) identifier[rsp] = identifier[self] . identifier[transceive] ( literal[string] + identifier[chr] ( identifier[page] % literal[int] )+ identifier[data] ) keyword[if] identifier[len] ( identifier[rsp] )!= literal[int] : identifier[log] . identifier[debug] ( literal[string] + identifier[hexlify] ( identifier[data] )) keyword[raise] identifier[Type2TagCommandError] ( identifier[INVALID_RESPONSE_ERROR] ) keyword[if] identifier[rsp] [ literal[int] ]!= literal[int] : identifier[log] . identifier[debug] ( literal[string] ) keyword[raise] identifier[Type2TagCommandError] ( identifier[INVALID_PAGE_ERROR] ) keyword[return] keyword[True]
def write(self, page, data): """Send a WRITE command to store data on the tag. The *page* argument specifies the offset in multiples of 4 bytes. The *data* argument must be a string or bytearray of length 4. Command execution errors raise :exc:`Type2TagCommandError`. """ if len(data) != 4: raise ValueError('data must be a four byte string or array') # depends on [control=['if'], data=[]] log.debug('write {0} to page {1}'.format(hexlify(data), page)) rsp = self.transceive('¢' + chr(page % 256) + data) if len(rsp) != 1: log.debug('invalid response ' + hexlify(data)) raise Type2TagCommandError(INVALID_RESPONSE_ERROR) # depends on [control=['if'], data=[]] if rsp[0] != 10: # NAK log.debug('invalid page, received nak') raise Type2TagCommandError(INVALID_PAGE_ERROR) # depends on [control=['if'], data=[]] return True
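# A hedged usage sketch for write() above; `tag` is a hypothetical Type2Tag
# instance and the payload bytes are placeholders. The command is assembled
# with chr()/str concatenation, so under Python 2 `data` is a plain 4-char str.
try:
    tag.write(4, "\x01\x02\x03\x04")  # store 4 bytes at page 4 (byte offset 16)
except Type2TagCommandError as error:
    log.debug("write to page 4 failed: %s" % error)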
def _file_op(source, destination, func, adapter, fatal, logger, must_exist=True, ignore=None): """Call func(source, destination) Args: source (str | None): Source file or folder destination (str | None): Destination file or folder func (callable): Implementation function adapter (callable | None): Optional function to call on 'source' before copy fatal (bool | None): Abort execution on failure if True logger (callable | None): Logger to use must_exist (bool): If True, verify that source does indeed exist ignore (callable | list | str | None): Names to be ignored Returns: (int): 1 if effectively done, 0 if no-op, -1 on failure """ if not source or not destination or source == destination: return 0 action = func.__name__[1:] indicator = "<-" if action == "symlink" else "->" psource = parent_folder(source) pdest = resolved_path(destination) if psource != pdest and psource.startswith(pdest): return abort( "Can't %s %s %s %s: source contained in destination", action, short(source), indicator, short(destination), fatal=(fatal, -1) ) if is_dryrun(): LOG.debug("Would %s %s %s %s", action, short(source), indicator, short(destination)) return 1 if must_exist and not os.path.exists(source): return abort("%s does not exist, can't %s to %s", short(source), action.title(), short(destination), fatal=(fatal, -1)) try: # Delete destination, but ensure that its parent folder exists delete(destination, fatal=fatal, logger=None) ensure_folder(destination, fatal=fatal, logger=None) if logger: note = adapter(source, destination, fatal=fatal, logger=logger) if adapter else "" if logger: logger("%s %s %s %s%s", action.title(), short(source), indicator, short(destination), note) if ignore is not None: if callable(ignore): func(source, destination, ignore=ignore) else: func(source, destination, ignore=lambda *_: ignore) else: func(source, destination) return 1 except Exception as e: return abort("Can't %s %s %s %s: %s", action, short(source), indicator, short(destination), e, fatal=(fatal, -1))
def function[_file_op, parameter[source, destination, func, adapter, fatal, logger, must_exist, ignore]]: constant[Call func(source, destination) Args: source (str | None): Source file or folder destination (str | None): Destination file or folder func (callable): Implementation function adapter (callable | None): Optional function to call on 'source' before copy fatal (bool | None): Abort execution on failure if True logger (callable | None): Logger to use must_exist (bool): If True, verify that source does indeed exist ignore (callable | list | str | None): Names to be ignored Returns: (int): 1 if effectively done, 0 if no-op, -1 on failure ] if <ast.BoolOp object at 0x7da1b24aee90> begin[:] return[constant[0]] variable[action] assign[=] call[name[func].__name__][<ast.Slice object at 0x7da1b24ae7d0>] variable[indicator] assign[=] <ast.IfExp object at 0x7da1b24ae560> variable[psource] assign[=] call[name[parent_folder], parameter[name[source]]] variable[pdest] assign[=] call[name[resolved_path], parameter[name[destination]]] if <ast.BoolOp object at 0x7da1b24af250> begin[:] return[call[name[abort], parameter[constant[Can't %s %s %s %s: source contained in destination], name[action], call[name[short], parameter[name[source]]], name[indicator], call[name[short], parameter[name[destination]]]]]] if call[name[is_dryrun], parameter[]] begin[:] call[name[LOG].debug, parameter[constant[Would %s %s %s %s], name[action], call[name[short], parameter[name[source]]], name[indicator], call[name[short], parameter[name[destination]]]]] return[constant[1]] if <ast.BoolOp object at 0x7da1b24af6d0> begin[:] return[call[name[abort], parameter[constant[%s does not exist, can't %s to %s], call[name[short], parameter[name[source]]], call[name[action].title, parameter[]], call[name[short], parameter[name[destination]]]]]] <ast.Try object at 0x7da1b24acd60>
keyword[def] identifier[_file_op] ( identifier[source] , identifier[destination] , identifier[func] , identifier[adapter] , identifier[fatal] , identifier[logger] , identifier[must_exist] = keyword[True] , identifier[ignore] = keyword[None] ): literal[string] keyword[if] keyword[not] identifier[source] keyword[or] keyword[not] identifier[destination] keyword[or] identifier[source] == identifier[destination] : keyword[return] literal[int] identifier[action] = identifier[func] . identifier[__name__] [ literal[int] :] identifier[indicator] = literal[string] keyword[if] identifier[action] == literal[string] keyword[else] literal[string] identifier[psource] = identifier[parent_folder] ( identifier[source] ) identifier[pdest] = identifier[resolved_path] ( identifier[destination] ) keyword[if] identifier[psource] != identifier[pdest] keyword[and] identifier[psource] . identifier[startswith] ( identifier[pdest] ): keyword[return] identifier[abort] ( literal[string] , identifier[action] , identifier[short] ( identifier[source] ), identifier[indicator] , identifier[short] ( identifier[destination] ), identifier[fatal] =( identifier[fatal] ,- literal[int] ) ) keyword[if] identifier[is_dryrun] (): identifier[LOG] . identifier[debug] ( literal[string] , identifier[action] , identifier[short] ( identifier[source] ), identifier[indicator] , identifier[short] ( identifier[destination] )) keyword[return] literal[int] keyword[if] identifier[must_exist] keyword[and] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[source] ): keyword[return] identifier[abort] ( literal[string] , identifier[short] ( identifier[source] ), identifier[action] . identifier[title] (), identifier[short] ( identifier[destination] ), identifier[fatal] =( identifier[fatal] ,- literal[int] )) keyword[try] : identifier[delete] ( identifier[destination] , identifier[fatal] = identifier[fatal] , identifier[logger] = keyword[None] ) identifier[ensure_folder] ( identifier[destination] , identifier[fatal] = identifier[fatal] , identifier[logger] = keyword[None] ) keyword[if] identifier[logger] : identifier[note] = identifier[adapter] ( identifier[source] , identifier[destination] , identifier[fatal] = identifier[fatal] , identifier[logger] = identifier[logger] ) keyword[if] identifier[adapter] keyword[else] literal[string] keyword[if] identifier[logger] : identifier[logger] ( literal[string] , identifier[action] . identifier[title] (), identifier[short] ( identifier[source] ), identifier[indicator] , identifier[short] ( identifier[destination] ), identifier[note] ) keyword[if] identifier[ignore] keyword[is] keyword[not] keyword[None] : keyword[if] identifier[callable] ( identifier[ignore] ): identifier[func] ( identifier[source] , identifier[destination] , identifier[ignore] = identifier[ignore] ) keyword[else] : identifier[func] ( identifier[source] , identifier[destination] , identifier[ignore] = keyword[lambda] * identifier[_] : identifier[ignore] ) keyword[else] : identifier[func] ( identifier[source] , identifier[destination] ) keyword[return] literal[int] keyword[except] identifier[Exception] keyword[as] identifier[e] : keyword[return] identifier[abort] ( literal[string] , identifier[action] , identifier[short] ( identifier[source] ), identifier[indicator] , identifier[short] ( identifier[destination] ), identifier[e] , identifier[fatal] =( identifier[fatal] ,- literal[int] ))
def _file_op(source, destination, func, adapter, fatal, logger, must_exist=True, ignore=None): """Call func(source, destination) Args: source (str | None): Source file or folder destination (str | None): Destination file or folder func (callable): Implementation function adapter (callable | None): Optional function to call on 'source' before copy fatal (bool | None): Abort execution on failure if True logger (callable | None): Logger to use must_exist (bool): If True, verify that source does indeed exist ignore (callable | list | str | None): Names to be ignored Returns: (int): 1 if effectively done, 0 if no-op, -1 on failure """ if not source or not destination or source == destination: return 0 # depends on [control=['if'], data=[]] action = func.__name__[1:] indicator = '<-' if action == 'symlink' else '->' psource = parent_folder(source) pdest = resolved_path(destination) if psource != pdest and psource.startswith(pdest): return abort("Can't %s %s %s %s: source contained in destination", action, short(source), indicator, short(destination), fatal=(fatal, -1)) # depends on [control=['if'], data=[]] if is_dryrun(): LOG.debug('Would %s %s %s %s', action, short(source), indicator, short(destination)) return 1 # depends on [control=['if'], data=[]] if must_exist and (not os.path.exists(source)): return abort("%s does not exist, can't %s to %s", short(source), action.title(), short(destination), fatal=(fatal, -1)) # depends on [control=['if'], data=[]] try: # Delete destination, but ensure that its parent folder exists delete(destination, fatal=fatal, logger=None) ensure_folder(destination, fatal=fatal, logger=None) if logger: note = adapter(source, destination, fatal=fatal, logger=logger) if adapter else '' if logger: logger('%s %s %s %s%s', action.title(), short(source), indicator, short(destination), note) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] if ignore is not None: if callable(ignore): func(source, destination, ignore=ignore) # depends on [control=['if'], data=[]] else: func(source, destination, ignore=lambda *_: ignore) # depends on [control=['if'], data=['ignore']] else: func(source, destination) return 1 # depends on [control=['try'], data=[]] except Exception as e: return abort("Can't %s %s %s %s: %s", action, short(source), indicator, short(destination), e, fatal=(fatal, -1)) # depends on [control=['except'], data=['e']]
def coordinates(value): """ Convert a non-empty string into a list of lon-lat coordinates. >>> coordinates('') Traceback (most recent call last): ... ValueError: Empty list of coordinates: '' >>> coordinates('1.1 1.2') [(1.1, 1.2, 0.0)] >>> coordinates('1.1 1.2, 2.2 2.3') [(1.1, 1.2, 0.0), (2.2, 2.3, 0.0)] >>> coordinates('1.1 1.2 -0.4, 2.2 2.3 -0.5') [(1.1, 1.2, -0.4), (2.2, 2.3, -0.5)] >>> coordinates('0 0 0, 0 0 -1') Traceback (most recent call last): ... ValueError: Found overlapping site #2, 0 0 -1 """ if not value.strip(): raise ValueError('Empty list of coordinates: %r' % value) points = [] pointset = set() for i, line in enumerate(value.split(','), 1): pnt = point(line) if pnt[:2] in pointset: raise ValueError("Found overlapping site #%d, %s" % (i, line)) pointset.add(pnt[:2]) points.append(pnt) return points
def function[coordinates, parameter[value]]: constant[ Convert a non-empty string into a list of lon-lat coordinates. >>> coordinates('') Traceback (most recent call last): ... ValueError: Empty list of coordinates: '' >>> coordinates('1.1 1.2') [(1.1, 1.2, 0.0)] >>> coordinates('1.1 1.2, 2.2 2.3') [(1.1, 1.2, 0.0), (2.2, 2.3, 0.0)] >>> coordinates('1.1 1.2 -0.4, 2.2 2.3 -0.5') [(1.1, 1.2, -0.4), (2.2, 2.3, -0.5)] >>> coordinates('0 0 0, 0 0 -1') Traceback (most recent call last): ... ValueError: Found overlapping site #2, 0 0 -1 ] if <ast.UnaryOp object at 0x7da18f812710> begin[:] <ast.Raise object at 0x7da18f8137c0> variable[points] assign[=] list[[]] variable[pointset] assign[=] call[name[set], parameter[]] for taget[tuple[[<ast.Name object at 0x7da18f812950>, <ast.Name object at 0x7da18f8132b0>]]] in starred[call[name[enumerate], parameter[call[name[value].split, parameter[constant[,]]], constant[1]]]] begin[:] variable[pnt] assign[=] call[name[point], parameter[name[line]]] if compare[call[name[pnt]][<ast.Slice object at 0x7da18f813d60>] in name[pointset]] begin[:] <ast.Raise object at 0x7da18f812f80> call[name[pointset].add, parameter[call[name[pnt]][<ast.Slice object at 0x7da207f9b3a0>]]] call[name[points].append, parameter[name[pnt]]] return[name[points]]
keyword[def] identifier[coordinates] ( identifier[value] ): literal[string] keyword[if] keyword[not] identifier[value] . identifier[strip] (): keyword[raise] identifier[ValueError] ( literal[string] % identifier[value] ) identifier[points] =[] identifier[pointset] = identifier[set] () keyword[for] identifier[i] , identifier[line] keyword[in] identifier[enumerate] ( identifier[value] . identifier[split] ( literal[string] ), literal[int] ): identifier[pnt] = identifier[point] ( identifier[line] ) keyword[if] identifier[pnt] [: literal[int] ] keyword[in] identifier[pointset] : keyword[raise] identifier[ValueError] ( literal[string] %( identifier[i] , identifier[line] )) identifier[pointset] . identifier[add] ( identifier[pnt] [: literal[int] ]) identifier[points] . identifier[append] ( identifier[pnt] ) keyword[return] identifier[points]
def coordinates(value): """ Convert a non-empty string into a list of lon-lat coordinates. >>> coordinates('') Traceback (most recent call last): ... ValueError: Empty list of coordinates: '' >>> coordinates('1.1 1.2') [(1.1, 1.2, 0.0)] >>> coordinates('1.1 1.2, 2.2 2.3') [(1.1, 1.2, 0.0), (2.2, 2.3, 0.0)] >>> coordinates('1.1 1.2 -0.4, 2.2 2.3 -0.5') [(1.1, 1.2, -0.4), (2.2, 2.3, -0.5)] >>> coordinates('0 0 0, 0 0 -1') Traceback (most recent call last): ... ValueError: Found overlapping site #2, 0 0 -1 """ if not value.strip(): raise ValueError('Empty list of coordinates: %r' % value) # depends on [control=['if'], data=[]] points = [] pointset = set() for (i, line) in enumerate(value.split(','), 1): pnt = point(line) if pnt[:2] in pointset: raise ValueError('Found overlapping site #%d, %s' % (i, line)) # depends on [control=['if'], data=[]] pointset.add(pnt[:2]) points.append(pnt) # depends on [control=['for'], data=[]] return points
def find_root_path(absolute_path, relative_path):
    """
    Return the root path of a path relative to an absolute path.

    Example:

        find_root_path('/foo/bar/a/b', 'a/b')  # returns '/foo/bar/'

    @param absolute_path: an absolute path that is ended by the specified relative path.
    @param relative_path: a relative path that ends the specified absolute path.
    @return: the root path of the relative path.
    """
    _absolute_path = os.path.normpath(absolute_path)
    _relative_path = os.path.normpath(relative_path)

    index = _absolute_path.rfind(_relative_path)
    if index == -1 or len(_relative_path) + index < len(_absolute_path):
        raise ValueError('The relative path does not end the specified absolute path')

    return _absolute_path[:index]
def function[find_root_path, parameter[absolute_path, relative_path]]:
    constant[
    Return the root path of a path relative to an absolute path.

    Example:

        find_root_path('/foo/bar/a/b', 'a/b')  # returns '/foo/bar/'

    @param absolute_path: an absolute path that is ended by the specified relative path.
    @param relative_path: a relative path that ends the specified absolute path.
    @return: the root path of the relative path.
    ]
    variable[_absolute_path] assign[=] call[name[os].path.normpath, parameter[name[absolute_path]]]
    variable[_relative_path] assign[=] call[name[os].path.normpath, parameter[name[relative_path]]]
    variable[index] assign[=] call[name[_absolute_path].rfind, parameter[name[_relative_path]]]
    if <ast.BoolOp object at 0x7da20c6aa290> begin[:]
        <ast.Raise object at 0x7da20c6a8670>
    return[call[name[_absolute_path]][<ast.Slice object at 0x7da20c6a9420>]]
keyword[def] identifier[find_root_path] ( identifier[absolute_path] , identifier[relative_path] ): literal[string] identifier[_absolute_path] = identifier[os] . identifier[path] . identifier[normpath] ( identifier[absolute_path] ) identifier[_relative_path] = identifier[os] . identifier[path] . identifier[normpath] ( identifier[relative_path] ) identifier[index] = identifier[_absolute_path] . identifier[rfind] ( identifier[_relative_path] ) keyword[if] identifier[index] ==- literal[int] keyword[or] identifier[len] ( identifier[_relative_path] )+ identifier[index] < identifier[len] ( identifier[_absolute_path] ): keyword[raise] identifier[ValueError] ( literal[string] ) keyword[return] identifier[_absolute_path] [: identifier[index] ]
def find_root_path(absolute_path, relative_path):
    """
    Return the root path of a path relative to an absolute path.

    Example:

        find_root_path('/foo/bar/a/b', 'a/b')  # returns '/foo/bar/'

    @param absolute_path: an absolute path that is ended by the specified relative path.
    @param relative_path: a relative path that ends the specified absolute path.
    @return: the root path of the relative path.
    """
    _absolute_path = os.path.normpath(absolute_path)
    _relative_path = os.path.normpath(relative_path)
    index = _absolute_path.rfind(_relative_path)
    if index == -1 or len(_relative_path) + index < len(_absolute_path):
        raise ValueError('The relative path does not end the specified absolute path') # depends on [control=['if'], data=[]]
    return _absolute_path[:index]
def set_wts_get_npred_wt(gta, maskname): """Set a weights file and get the weighted npred for all the sources Parameters ---------- gta : `fermipy.GTAnalysis` The analysis object maskname : str The path to the file with the mask Returns ------- odict : dict Dictionary mapping from source name to weighted npred """ if is_null(maskname): maskname = None gta.set_weights_map(maskname) for name in gta.like.sourceNames(): gta._init_source(name) gta._update_roi() return build_srcdict(gta, 'npred_wt')
def function[set_wts_get_npred_wt, parameter[gta, maskname]]: constant[Set a weights file and get the weighted npred for all the sources Parameters ---------- gta : `fermipy.GTAnalysis` The analysis object maskname : str The path to the file with the mask Returns ------- odict : dict Dictionary mapping from source name to weighted npred ] if call[name[is_null], parameter[name[maskname]]] begin[:] variable[maskname] assign[=] constant[None] call[name[gta].set_weights_map, parameter[name[maskname]]] for taget[name[name]] in starred[call[name[gta].like.sourceNames, parameter[]]] begin[:] call[name[gta]._init_source, parameter[name[name]]] call[name[gta]._update_roi, parameter[]] return[call[name[build_srcdict], parameter[name[gta], constant[npred_wt]]]]
keyword[def] identifier[set_wts_get_npred_wt] ( identifier[gta] , identifier[maskname] ): literal[string] keyword[if] identifier[is_null] ( identifier[maskname] ): identifier[maskname] = keyword[None] identifier[gta] . identifier[set_weights_map] ( identifier[maskname] ) keyword[for] identifier[name] keyword[in] identifier[gta] . identifier[like] . identifier[sourceNames] (): identifier[gta] . identifier[_init_source] ( identifier[name] ) identifier[gta] . identifier[_update_roi] () keyword[return] identifier[build_srcdict] ( identifier[gta] , literal[string] )
def set_wts_get_npred_wt(gta, maskname): """Set a weights file and get the weighted npred for all the sources Parameters ---------- gta : `fermipy.GTAnalysis` The analysis object maskname : str The path to the file with the mask Returns ------- odict : dict Dictionary mapping from source name to weighted npred """ if is_null(maskname): maskname = None # depends on [control=['if'], data=[]] gta.set_weights_map(maskname) for name in gta.like.sourceNames(): gta._init_source(name) # depends on [control=['for'], data=['name']] gta._update_roi() return build_srcdict(gta, 'npred_wt')
def rmsd(df1, df2, heavy_only=True): """Compute the Root Mean Square Deviation between molecules Parameters ---------- df1 : pandas.DataFrame DataFrame with HETATM, ATOM, and/or ANISOU entries df2 : pandas.DataFrame Second DataFrame for RMSD computation against df1. Must have the same number of entries as df1 heavy_only : bool (default: True) Which atoms to compare to compute the RMSD. If `True` (default), computes the RMSD between non-hydrogen atoms only. Returns --------- rmsd : float Root Mean Square Deviation between df1 and df2 """ if df1.shape[0] != df2.shape[0]: raise AttributeError('DataFrames have unequal lengths') if heavy_only: d1 = df1[df1['atom_type'] != 'H'] d2 = df2[df2['atom_type'] != 'H'] else: d1, d2 = df1, df2 total = ((d1['x'].values - d2['x'].values)**2 + (d1['y'].values - d2['y'].values)**2 + (d1['z'].values - d2['z'].values)**2) rmsd = round((total.sum() / df1.shape[0])**0.5, 4) return rmsd
def function[rmsd, parameter[df1, df2, heavy_only]]: constant[Compute the Root Mean Square Deviation between molecules Parameters ---------- df1 : pandas.DataFrame DataFrame with HETATM, ATOM, and/or ANISOU entries df2 : pandas.DataFrame Second DataFrame for RMSD computation against df1. Must have the same number of entries as df1 heavy_only : bool (default: True) Which atoms to compare to compute the RMSD. If `True` (default), computes the RMSD between non-hydrogen atoms only. Returns --------- rmsd : float Root Mean Square Deviation between df1 and df2 ] if compare[call[name[df1].shape][constant[0]] not_equal[!=] call[name[df2].shape][constant[0]]] begin[:] <ast.Raise object at 0x7da1b0e2c0d0> if name[heavy_only] begin[:] variable[d1] assign[=] call[name[df1]][compare[call[name[df1]][constant[atom_type]] not_equal[!=] constant[H]]] variable[d2] assign[=] call[name[df2]][compare[call[name[df2]][constant[atom_type]] not_equal[!=] constant[H]]] variable[total] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[call[name[d1]][constant[x]].values - call[name[d2]][constant[x]].values] ** constant[2]] + binary_operation[binary_operation[call[name[d1]][constant[y]].values - call[name[d2]][constant[y]].values] ** constant[2]]] + binary_operation[binary_operation[call[name[d1]][constant[z]].values - call[name[d2]][constant[z]].values] ** constant[2]]] variable[rmsd] assign[=] call[name[round], parameter[binary_operation[binary_operation[call[name[total].sum, parameter[]] / call[name[df1].shape][constant[0]]] ** constant[0.5]], constant[4]]] return[name[rmsd]]
keyword[def] identifier[rmsd] ( identifier[df1] , identifier[df2] , identifier[heavy_only] = keyword[True] ): literal[string] keyword[if] identifier[df1] . identifier[shape] [ literal[int] ]!= identifier[df2] . identifier[shape] [ literal[int] ]: keyword[raise] identifier[AttributeError] ( literal[string] ) keyword[if] identifier[heavy_only] : identifier[d1] = identifier[df1] [ identifier[df1] [ literal[string] ]!= literal[string] ] identifier[d2] = identifier[df2] [ identifier[df2] [ literal[string] ]!= literal[string] ] keyword[else] : identifier[d1] , identifier[d2] = identifier[df1] , identifier[df2] identifier[total] =(( identifier[d1] [ literal[string] ]. identifier[values] - identifier[d2] [ literal[string] ]. identifier[values] )** literal[int] + ( identifier[d1] [ literal[string] ]. identifier[values] - identifier[d2] [ literal[string] ]. identifier[values] )** literal[int] + ( identifier[d1] [ literal[string] ]. identifier[values] - identifier[d2] [ literal[string] ]. identifier[values] )** literal[int] ) identifier[rmsd] = identifier[round] (( identifier[total] . identifier[sum] ()/ identifier[df1] . identifier[shape] [ literal[int] ])** literal[int] , literal[int] ) keyword[return] identifier[rmsd]
def rmsd(df1, df2, heavy_only=True): """Compute the Root Mean Square Deviation between molecules Parameters ---------- df1 : pandas.DataFrame DataFrame with HETATM, ATOM, and/or ANISOU entries df2 : pandas.DataFrame Second DataFrame for RMSD computation against df1. Must have the same number of entries as df1 heavy_only : bool (default: True) Which atoms to compare to compute the RMSD. If `True` (default), computes the RMSD between non-hydrogen atoms only. Returns --------- rmsd : float Root Mean Square Deviation between df1 and df2 """ if df1.shape[0] != df2.shape[0]: raise AttributeError('DataFrames have unequal lengths') # depends on [control=['if'], data=[]] if heavy_only: d1 = df1[df1['atom_type'] != 'H'] d2 = df2[df2['atom_type'] != 'H'] # depends on [control=['if'], data=[]] else: (d1, d2) = (df1, df2) total = (d1['x'].values - d2['x'].values) ** 2 + (d1['y'].values - d2['y'].values) ** 2 + (d1['z'].values - d2['z'].values) ** 2 rmsd = round((total.sum() / df1.shape[0]) ** 0.5, 4) return rmsd
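# A hedged usage sketch for rmsd() above, assuming the function is in scope;
# the two-atom coordinates are made up and use the column names the body reads.
import pandas as pd

d1 = pd.DataFrame({'x': [0.0, 1.0], 'y': [0.0, 0.0],
                   'z': [0.0, 0.0], 'atom_type': ['C', 'C']})
d2 = pd.DataFrame({'x': [0.0, 2.0], 'y': [0.0, 0.0],
                   'z': [0.0, 0.0], 'atom_type': ['C', 'C']})
print(rmsd(d1, d2))  # ((0 + 1) / 2) ** 0.5 rounded to 4 places -> 0.7071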
def is_submodule(self, name): """ Returns `True` if and only if `name` starts with the full import path of `self` and has length at least one greater than `len(self.name)`. """ return self.name != name and name.startswith(self.name)
def function[is_submodule, parameter[self, name]]: constant[ Returns `True` if and only if `name` starts with the full import path of `self` and has length at least one greater than `len(self.name)`. ] return[<ast.BoolOp object at 0x7da2054a4ac0>]
keyword[def] identifier[is_submodule] ( identifier[self] , identifier[name] ): literal[string] keyword[return] identifier[self] . identifier[name] != identifier[name] keyword[and] identifier[name] . identifier[startswith] ( identifier[self] . identifier[name] )
def is_submodule(self, name): """ Returns `True` if and only if `name` starts with the full import path of `self` and has length at least one greater than `len(self.name)`. """ return self.name != name and name.startswith(self.name)
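# A hedged illustration of is_submodule() above, assuming the function is also
# reachable at module level; Module is a stand-in class that only supplies the
# .name attribute the method reads.
class Module(object):
    def __init__(self, name):
        self.name = name
    is_submodule = is_submodule

pkg = Module('foo.bar')
print(pkg.is_submodule('foo.bar.baz'))  # True
print(pkg.is_submodule('foo.bar'))      # False: equal names are excluded
print(pkg.is_submodule('foo.barbaz'))   # True: the check is a plain prefix test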
def get_fastq_files_props(self,barcode=None):
        """
        Returns the DNAnexus file properties for all FASTQ files in the project that match the specified
        barcode, or all FASTQ files if no barcode is specified.

        Args:
            barcode: `str`. If set, then only FASTQ file properties for FASTQ files having the specified barcode are returned.

        Returns:
            `dict`. Keys are the FASTQ file DXFile objects; values are the dict of associated properties
            on DNAnexus on the file. In addition to the properties on the file in DNAnexus, an additional
            property is added here called 'fastq_file_name'.

        Raises:
            dnanexus_utils.FastqNotFound exception if no FASTQ files were found.
        """
        fastqs = self.get_fastq_dxfile_objects(barcode=barcode) #FastqNotFound Exception here if no FASTQs found for specified barcode.
        dico = {}
        for f in fastqs:
            #props = dxpy.api.file_describe(object_id=f.id, input_params={"fields": {"properties": True}})["properties"]
            props = f.get_properties()
            dico[f] = props
            dico[f]["fastq_file_name"] = f.name
        return dico
def function[get_fastq_files_props, parameter[self, barcode]]:
    constant[
    Returns the DNAnexus file properties for all FASTQ files in the project that match the specified
    barcode, or all FASTQ files if no barcode is specified.

    Args:
        barcode: `str`. If set, then only FASTQ file properties for FASTQ files having the specified barcode are returned.

    Returns:
        `dict`. Keys are the FASTQ file DXFile objects; values are the dict of associated properties
        on DNAnexus on the file. In addition to the properties on the file in DNAnexus, an additional
        property is added here called 'fastq_file_name'.

    Raises:
        dnanexus_utils.FastqNotFound exception if no FASTQ files were found.
    ]
    variable[fastqs] assign[=] call[name[self].get_fastq_dxfile_objects, parameter[]]
    variable[dico] assign[=] dictionary[[], []]
    for taget[name[f]] in starred[name[fastqs]] begin[:]
        variable[props] assign[=] call[name[f].get_properties, parameter[]]
        call[name[dico]][name[f]] assign[=] name[props]
        call[call[name[dico]][name[f]]][constant[fastq_file_name]] assign[=] name[f].name
    return[name[dico]]
keyword[def] identifier[get_fastq_files_props] ( identifier[self] , identifier[barcode] = keyword[None] ): literal[string] identifier[fastqs] = identifier[self] . identifier[get_fastq_dxfile_objects] ( identifier[barcode] = identifier[barcode] ) identifier[dico] ={} keyword[for] identifier[f] keyword[in] identifier[fastqs] : identifier[props] = identifier[f] . identifier[get_properties] () identifier[dico] [ identifier[f] ]= identifier[props] identifier[dico] [ identifier[f] ][ literal[string] ]= identifier[f] . identifier[name] keyword[return] identifier[dico]
def get_fastq_files_props(self, barcode=None):
    """
    Returns the DNAnexus file properties for all FASTQ files in the project that match the specified
    barcode, or all FASTQ files if no barcode is specified.

    Args:
        barcode: `str`. If set, then only FASTQ file properties for FASTQ files having the specified barcode are returned.

    Returns:
        `dict`. Keys are the FASTQ file DXFile objects; values are the dict of associated properties
        on DNAnexus on the file. In addition to the properties on the file in DNAnexus, an additional
        property is added here called 'fastq_file_name'.

    Raises:
        dnanexus_utils.FastqNotFound exception if no FASTQ files were found.
    """
    fastqs = self.get_fastq_dxfile_objects(barcode=barcode) #FastqNotFound Exception here if no FASTQs found for specified barcode.
    dico = {}
    for f in fastqs:
        #props = dxpy.api.file_describe(object_id=f.id, input_params={"fields": {"properties": True}})["properties"]
        props = f.get_properties()
        dico[f] = props
        dico[f]['fastq_file_name'] = f.name # depends on [control=['for'], data=['f']]
    return dico
def processTPED(uniqueSNPs, mapF, fileName, tfam, prefix):
    """Process the TPED file.

    :param uniqueSNPs: the unique markers.
    :param mapF: a representation of the ``map`` file.
    :param fileName: the name of the ``tped`` file.
    :param tfam: the name of the ``tfam`` file.
    :param prefix: the prefix of all the files.

    :type uniqueSNPs: dict
    :type mapF: list
    :type fileName: str
    :type tfam: str
    :type prefix: str

    :returns: a tuple with the representation of the ``tped`` file
              (:py:class:`numpy.array`) as first element, and the updated
              position of the duplicated markers in the ``tped``
              representation.

    Copies the ``tfam`` file into ``prefix.unique_snps.tfam``. While reading
    the ``tped`` file, creates a new one (``prefix.unique_snps.tped``)
    containing only unique markers.

    """
    # Copying the tfam file
    try:
        shutil.copy(tfam, prefix + ".unique_snps.tfam")
    except IOError:
        msg = "%s: can't write file" % prefix + ".unique_snps.tfam"
        raise ProgramError(msg)

    tped = []
    updatedSNPs = defaultdict(list)

    outputFile = None
    try:
        outputFile = open(prefix + ".unique_snps.tped", "w")
    except IOError:
        msg = "%s: can't write to file" % prefix + ".unique_snps.tped"
        raise ProgramError(msg)

    nbSNP = 0
    with open(fileName, 'r') as inputFile:
        for line in inputFile:
            nbSNP += 1
            row = line.rstrip("\r\n").split("\t")

            snpInfo = row[:4]
            genotype = [i.upper() for i in row[4:]]

            chromosome = snpInfo[0]
            position = snpInfo[3]

            if (chromosome, position) in uniqueSNPs:
                # Printing the new TPED file (unique SNPs only)
                print >>outputFile, "\t".join(snpInfo + genotype)

            else:
                # Saving the TPED file (duplicated markers only)
                currPos = len(tped)
                tped.append(tuple(snpInfo + genotype))

                updatedSNPs[(chromosome, position)].append(currPos)

    outputFile.close()

    if len(mapF) != nbSNP:
        msg = "%(fileName)s: not the same number of SNPs as the MAP " \
              "file" % locals()
        raise ProgramError(msg)

    tped = np.array(tped)

    return tped, updatedSNPs
def function[processTPED, parameter[uniqueSNPs, mapF, fileName, tfam, prefix]]:
    constant[Process the TPED file.

    :param uniqueSNPs: the unique markers.
    :param mapF: a representation of the ``map`` file.
    :param fileName: the name of the ``tped`` file.
    :param tfam: the name of the ``tfam`` file.
    :param prefix: the prefix of all the files.

    :type uniqueSNPs: dict
    :type mapF: list
    :type fileName: str
    :type tfam: str
    :type prefix: str

    :returns: a tuple with the representation of the ``tped`` file
              (:py:class:`numpy.array`) as first element, and the updated
              position of the duplicated markers in the ``tped``
              representation.

    Copies the ``tfam`` file into ``prefix.unique_snps.tfam``. While reading
    the ``tped`` file, creates a new one (``prefix.unique_snps.tped``)
    containing only unique markers.

    ]
    <ast.Try object at 0x7da1b096b160>
    variable[tped] assign[=] list[[]]
    variable[updatedSNPs] assign[=] call[name[defaultdict], parameter[name[list]]]
    variable[outputFile] assign[=] constant[None]
    <ast.Try object at 0x7da1b0ae3df0>
    variable[nbSNP] assign[=] constant[0]
    with call[name[open], parameter[name[fileName], constant[r]]] begin[:]
        for taget[name[line]] in starred[name[inputFile]] begin[:]
            <ast.AugAssign object at 0x7da1b0ae2110>
            variable[row] assign[=] call[call[name[line].rstrip, parameter[constant[
]]].split, parameter[constant[	]]]
            variable[snpInfo] assign[=] call[name[row]][<ast.Slice object at 0x7da1b0a70af0>]
            variable[genotype] assign[=] <ast.ListComp object at 0x7da204567160>
            variable[chromosome] assign[=] call[name[snpInfo]][constant[0]]
            variable[position] assign[=] call[name[snpInfo]][constant[3]]
            if compare[tuple[[<ast.Name object at 0x7da1b0ae1750>, <ast.Name object at 0x7da1b0ae1cf0>]] in name[uniqueSNPs]] begin[:]
                tuple[[<ast.BinOp object at 0x7da1b0ae2860>, <ast.Call object at 0x7da1b0ae3f70>]]
    call[name[outputFile].close, parameter[]]
    if compare[call[name[len], parameter[name[mapF]]] not_equal[!=] name[nbSNP]] begin[:]
        variable[msg] assign[=] binary_operation[constant[%(fileName)s: not the same number of SNPs as the MAP file] <ast.Mod object at 0x7da2590d6920> call[name[locals], parameter[]]]
        <ast.Raise object at 0x7da1b0ae3400>
    variable[tped] assign[=] call[name[np].array, parameter[name[tped]]]
    return[tuple[[<ast.Name object at 0x7da1b0ae0fa0>, <ast.Name object at 0x7da1b0ae1630>]]]
keyword[def] identifier[processTPED] ( identifier[uniqueSNPs] , identifier[mapF] , identifier[fileName] , identifier[tfam] , identifier[prefix] ): literal[string] keyword[try] : identifier[shutil] . identifier[copy] ( identifier[tfam] , identifier[prefix] + literal[string] ) keyword[except] identifier[IOError] : identifier[msg] = literal[string] % identifier[prefix] + literal[string] keyword[raise] identifier[ProgramError] ( identifier[msg] ) identifier[tped] =[] identifier[updatedSNPs] = identifier[defaultdict] ( identifier[list] ) identifier[outputFile] = keyword[None] keyword[try] : identifier[outputFile] = identifier[open] ( identifier[prefix] + literal[string] , literal[string] ) keyword[except] identifier[IOError] : identifier[msg] = literal[string] % identifier[prefix] + literal[string] keyword[raise] identifier[ProgramError] ( identifier[msg] ) identifier[nbSNP] = literal[int] keyword[with] identifier[open] ( identifier[fileName] , literal[string] ) keyword[as] identifier[inputFile] : keyword[for] identifier[line] keyword[in] identifier[inputFile] : identifier[nbSNP] += literal[int] identifier[row] = identifier[line] . identifier[rstrip] ( literal[string] ). identifier[split] ( literal[string] ) identifier[snpInfo] = identifier[row] [: literal[int] ] identifier[genotype] =[ identifier[i] . identifier[upper] () keyword[for] identifier[i] keyword[in] identifier[row] [ literal[int] :]] identifier[chromosome] = identifier[snpInfo] [ literal[int] ] identifier[position] = identifier[snpInfo] [ literal[int] ] keyword[if] ( identifier[chromosome] , identifier[position] ) keyword[in] identifier[uniqueSNPs] : identifier[print] >> identifier[outputFile] , literal[string] . identifier[join] ( identifier[snpInfo] + identifier[genotype] ) keyword[else] : identifier[currPos] = identifier[len] ( identifier[tped] ) identifier[tped] . identifier[append] ( identifier[tuple] ( identifier[snpInfo] + identifier[genotype] )) identifier[updatedSNPs] [( identifier[chromosome] , identifier[position] )]. identifier[append] ( identifier[currPos] ) identifier[outputFile] . identifier[close] () keyword[if] identifier[len] ( identifier[mapF] )!= identifier[nbSNP] : identifier[msg] = literal[string] literal[string] % identifier[locals] () keyword[raise] identifier[ProgramError] ( identifier[msg] ) identifier[tped] = identifier[np] . identifier[array] ( identifier[tped] ) keyword[return] identifier[tped] , identifier[updatedSNPs]
def processTPED(uniqueSNPs, mapF, fileName, tfam, prefix):
    """Process the TPED file.

    :param uniqueSNPs: the unique markers.
    :param mapF: a representation of the ``map`` file.
    :param fileName: the name of the ``tped`` file.
    :param tfam: the name of the ``tfam`` file.
    :param prefix: the prefix of all the files.

    :type uniqueSNPs: dict
    :type mapF: list
    :type fileName: str
    :type tfam: str
    :type prefix: str

    :returns: a tuple with the representation of the ``tped`` file
              (:py:class:`numpy.array`) as first element, and the updated
              position of the duplicated markers in the ``tped``
              representation.

    Copies the ``tfam`` file into ``prefix.unique_snps.tfam``. While reading
    the ``tped`` file, creates a new one (``prefix.unique_snps.tped``)
    containing only unique markers.

    """
    # Copying the tfam file
    try:
        shutil.copy(tfam, prefix + '.unique_snps.tfam') # depends on [control=['try'], data=[]]
    except IOError:
        msg = "%s: can't write file" % prefix + '.unique_snps.tfam'
        raise ProgramError(msg) # depends on [control=['except'], data=[]]
    tped = []
    updatedSNPs = defaultdict(list)
    outputFile = None
    try:
        outputFile = open(prefix + '.unique_snps.tped', 'w') # depends on [control=['try'], data=[]]
    except IOError:
        msg = "%s: can't write to file" % prefix + '.unique_snps.tped'
        raise ProgramError(msg) # depends on [control=['except'], data=[]]
    nbSNP = 0
    with open(fileName, 'r') as inputFile:
        for line in inputFile:
            nbSNP += 1
            row = line.rstrip('\r\n').split('\t')
            snpInfo = row[:4]
            genotype = [i.upper() for i in row[4:]]
            chromosome = snpInfo[0]
            position = snpInfo[3]
            if (chromosome, position) in uniqueSNPs:
                # Printing the new TPED file (unique SNPs only)
                (print >> outputFile, '\t'.join(snpInfo + genotype)) # depends on [control=['if'], data=[]]
            else:
                # Saving the TPED file (duplicated markers only)
                currPos = len(tped)
                tped.append(tuple(snpInfo + genotype))
                updatedSNPs[chromosome, position].append(currPos) # depends on [control=['for'], data=['line']] # depends on [control=['with'], data=['inputFile']]
    outputFile.close()
    if len(mapF) != nbSNP:
        msg = '%(fileName)s: not the same number of SNPs as the MAP file' % locals()
        raise ProgramError(msg) # depends on [control=['if'], data=[]]
    tped = np.array(tped)
    return (tped, updatedSNPs)
def strftime(date_time=None, time_format=None):
    """
    Convert a datetime object to a str

    :param:
        * date_time: (obj) a datetime object
        * time_format: (string) a date format string

    :return:
        * date_time_str: (string) the formatted date string

    """
    if not date_time:
        datetime_now = datetime.now()
    else:
        datetime_now = date_time

    if not time_format:
        time_format = '%Y/%m/%d %H:%M:%S'

    return datetime.strftime(datetime_now, time_format)
def function[strftime, parameter[date_time, time_format]]:
    constant[
    Convert a datetime object to a str

    :param:
        * date_time: (obj) a datetime object
        * time_format: (string) a date format string

    :return:
        * date_time_str: (string) the formatted date string

    ]
    if <ast.UnaryOp object at 0x7da1b26ae6b0> begin[:]
        variable[datetime_now] assign[=] call[name[datetime].now, parameter[]]
    if <ast.UnaryOp object at 0x7da1b26ac940> begin[:]
        variable[time_format] assign[=] constant[%Y/%m/%d %H:%M:%S]
    return[call[name[datetime].strftime, parameter[name[datetime_now], name[time_format]]]]
keyword[def] identifier[strftime] ( identifier[date_time] = keyword[None] , identifier[time_format] = keyword[None] ): literal[string] keyword[if] keyword[not] identifier[date_time] : identifier[datetime_now] = identifier[datetime] . identifier[now] () keyword[else] : identifier[datetime_now] = identifier[date_time] keyword[if] keyword[not] identifier[time_format] : identifier[time_format] = literal[string] keyword[return] identifier[datetime] . identifier[strftime] ( identifier[datetime_now] , identifier[time_format] )
def strftime(date_time=None, time_format=None):
    """
    Convert a datetime object to a str

    :param:
        * date_time: (obj) a datetime object
        * time_format: (string) a date format string

    :return:
        * date_time_str: (string) the formatted date string

    """
    if not date_time:
        datetime_now = datetime.now() # depends on [control=['if'], data=[]]
    else:
        datetime_now = date_time
    if not time_format:
        time_format = '%Y/%m/%d %H:%M:%S' # depends on [control=['if'], data=[]]
    return datetime.strftime(datetime_now, time_format)
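# A hedged usage sketch for strftime() above, assuming it is in scope; it falls
# back to datetime.now() and to the '%Y/%m/%d %H:%M:%S' format when arguments
# are omitted.
from datetime import datetime

print(strftime(datetime(2020, 1, 2, 3, 4, 5)))     # '2020/01/02 03:04:05'
print(strftime(datetime(2020, 1, 2), '%Y-%m-%d'))  # '2020-01-02'
print(strftime())                                  # current time, default format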
def which(executable_name, env_var='PATH'): """Equivalent to ``which executable_name`` in a *nix environment. Will return ``None`` if ``executable_name`` cannot be found in ``env_var`` or if ``env_var`` is not set. Otherwise will return the first match in ``env_var``. Note: this function will likely not work on Windows. Code taken and modified from: http://www.velocityreviews.com/forums/ t689526-python-library-call-equivalent-to-which-command.html """ exec_fp = None if env_var in os.environ: paths = os.environ[env_var] for path in paths.split(os.pathsep): curr_exec_fp = os.path.join(path, executable_name) if os.access(curr_exec_fp, os.X_OK): exec_fp = curr_exec_fp break return exec_fp
def function[which, parameter[executable_name, env_var]]: constant[Equivalent to ``which executable_name`` in a *nix environment. Will return ``None`` if ``executable_name`` cannot be found in ``env_var`` or if ``env_var`` is not set. Otherwise will return the first match in ``env_var``. Note: this function will likely not work on Windows. Code taken and modified from: http://www.velocityreviews.com/forums/ t689526-python-library-call-equivalent-to-which-command.html ] variable[exec_fp] assign[=] constant[None] if compare[name[env_var] in name[os].environ] begin[:] variable[paths] assign[=] call[name[os].environ][name[env_var]] for taget[name[path]] in starred[call[name[paths].split, parameter[name[os].pathsep]]] begin[:] variable[curr_exec_fp] assign[=] call[name[os].path.join, parameter[name[path], name[executable_name]]] if call[name[os].access, parameter[name[curr_exec_fp], name[os].X_OK]] begin[:] variable[exec_fp] assign[=] name[curr_exec_fp] break return[name[exec_fp]]
keyword[def] identifier[which] ( identifier[executable_name] , identifier[env_var] = literal[string] ): literal[string] identifier[exec_fp] = keyword[None] keyword[if] identifier[env_var] keyword[in] identifier[os] . identifier[environ] : identifier[paths] = identifier[os] . identifier[environ] [ identifier[env_var] ] keyword[for] identifier[path] keyword[in] identifier[paths] . identifier[split] ( identifier[os] . identifier[pathsep] ): identifier[curr_exec_fp] = identifier[os] . identifier[path] . identifier[join] ( identifier[path] , identifier[executable_name] ) keyword[if] identifier[os] . identifier[access] ( identifier[curr_exec_fp] , identifier[os] . identifier[X_OK] ): identifier[exec_fp] = identifier[curr_exec_fp] keyword[break] keyword[return] identifier[exec_fp]
def which(executable_name, env_var='PATH'): """Equivalent to ``which executable_name`` in a *nix environment. Will return ``None`` if ``executable_name`` cannot be found in ``env_var`` or if ``env_var`` is not set. Otherwise will return the first match in ``env_var``. Note: this function will likely not work on Windows. Code taken and modified from: http://www.velocityreviews.com/forums/ t689526-python-library-call-equivalent-to-which-command.html """ exec_fp = None if env_var in os.environ: paths = os.environ[env_var] for path in paths.split(os.pathsep): curr_exec_fp = os.path.join(path, executable_name) if os.access(curr_exec_fp, os.X_OK): exec_fp = curr_exec_fp break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['path']] # depends on [control=['if'], data=['env_var']] return exec_fp
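# A hedged usage sketch for which() above; results depend on the caller's
# environment, and the path shown is only typical for a *nix box.
print(which('ls'))             # e.g. '/bin/ls', or None if not on PATH
print(which('ls', 'MY_PATH'))  # None unless MY_PATH is set in os.environ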
def get_parent_families(self, family_id): """Gets the parent families of the given ``id``. arg: family_id (osid.id.Id): the ``Id`` of the ``Family`` to query return: (osid.relationship.FamilyList) - the parent families of the ``id`` raise: NotFound - a ``Family`` identified by ``Id is`` not found raise: NullArgument - ``family_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for # osid.resource.BinHierarchySession.get_parent_bins if self._catalog_session is not None: return self._catalog_session.get_parent_catalogs(catalog_id=family_id) return FamilyLookupSession( self._proxy, self._runtime).get_families_by_ids( list(self.get_parent_family_ids(family_id)))
def function[get_parent_families, parameter[self, family_id]]: constant[Gets the parent families of the given ``id``. arg: family_id (osid.id.Id): the ``Id`` of the ``Family`` to query return: (osid.relationship.FamilyList) - the parent families of the ``id`` raise: NotFound - a ``Family`` identified by ``Id is`` not found raise: NullArgument - ``family_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.* ] if compare[name[self]._catalog_session is_not constant[None]] begin[:] return[call[name[self]._catalog_session.get_parent_catalogs, parameter[]]] return[call[call[name[FamilyLookupSession], parameter[name[self]._proxy, name[self]._runtime]].get_families_by_ids, parameter[call[name[list], parameter[call[name[self].get_parent_family_ids, parameter[name[family_id]]]]]]]]
keyword[def] identifier[get_parent_families] ( identifier[self] , identifier[family_id] ): literal[string] keyword[if] identifier[self] . identifier[_catalog_session] keyword[is] keyword[not] keyword[None] : keyword[return] identifier[self] . identifier[_catalog_session] . identifier[get_parent_catalogs] ( identifier[catalog_id] = identifier[family_id] ) keyword[return] identifier[FamilyLookupSession] ( identifier[self] . identifier[_proxy] , identifier[self] . identifier[_runtime] ). identifier[get_families_by_ids] ( identifier[list] ( identifier[self] . identifier[get_parent_family_ids] ( identifier[family_id] )))
def get_parent_families(self, family_id): """Gets the parent families of the given ``id``. arg: family_id (osid.id.Id): the ``Id`` of the ``Family`` to query return: (osid.relationship.FamilyList) - the parent families of the ``id`` raise: NotFound - a ``Family`` identified by ``Id is`` not found raise: NullArgument - ``family_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for # osid.resource.BinHierarchySession.get_parent_bins if self._catalog_session is not None: return self._catalog_session.get_parent_catalogs(catalog_id=family_id) # depends on [control=['if'], data=[]] return FamilyLookupSession(self._proxy, self._runtime).get_families_by_ids(list(self.get_parent_family_ids(family_id)))
def register (self, target):
        """ Registers a new virtual target. Checks if there's already a registered target, with
            the same name, type, project and subvariant properties, and also with the same sources
            and equal action. If such target is found it is returned and 'target' is not registered.
            Otherwise, 'target' is registered and returned.
        """
        assert isinstance(target, VirtualTarget)
        if target.path():
            signature = target.path() + "-" + target.name()

        else:
            signature = "-" + target.name()

        result = None
        if signature not in self.cache_:
            self.cache_ [signature] = []

        for t in self.cache_ [signature]:
            a1 = t.action ()
            a2 = target.action ()

            # TODO: why are we checking for not result?
            if not result:
                if not a1 and not a2:
                    result = t

                else:
                    if a1 and a2 and a1.action_name () == a2.action_name () and a1.sources () == a2.sources ():
                        ps1 = a1.properties ()
                        ps2 = a2.properties ()

                        p1 = ps1.base () + ps1.free () +\
                            b2.util.set.difference(ps1.dependency(), ps1.incidental())

                        p2 = ps2.base () + ps2.free () +\
                            b2.util.set.difference(ps2.dependency(), ps2.incidental())

                        if p1 == p2:
                            result = t

        if not result:
            self.cache_ [signature].append (target)
            result = target

        # TODO: Don't append if we found pre-existing target?
        self.recent_targets_.append(result)
        self.all_targets_.append(result)

        return result
def function[register, parameter[self, target]]:
    constant[ Registers a new virtual target. Checks if there's already a registered target, with
        the same name, type, project and subvariant properties, and also with the same sources
        and equal action. If such target is found it is returned and 'target' is not registered.
        Otherwise, 'target' is registered and returned.
    ]
    assert[call[name[isinstance], parameter[name[target], name[VirtualTarget]]]]
    if call[name[target].path, parameter[]] begin[:]
        variable[signature] assign[=] binary_operation[binary_operation[call[name[target].path, parameter[]] + constant[-]] + call[name[target].name, parameter[]]]
    variable[result] assign[=] constant[None]
    if compare[name[signature] <ast.NotIn object at 0x7da2590d7190> name[self].cache_] begin[:]
        call[name[self].cache_][name[signature]] assign[=] list[[]]
    for taget[name[t]] in starred[call[name[self].cache_][name[signature]]] begin[:]
        variable[a1] assign[=] call[name[t].action, parameter[]]
        variable[a2] assign[=] call[name[target].action, parameter[]]
        if <ast.UnaryOp object at 0x7da1b1f8d1e0> begin[:]
            if <ast.BoolOp object at 0x7da1b1f8c850> begin[:]
                variable[result] assign[=] name[t]
    if <ast.UnaryOp object at 0x7da1b1f8d7e0> begin[:]
        call[call[name[self].cache_][name[signature]].append, parameter[name[target]]]
        variable[result] assign[=] name[target]
    call[name[self].recent_targets_.append, parameter[name[result]]]
    call[name[self].all_targets_.append, parameter[name[result]]]
    return[name[result]]
keyword[def] identifier[register] ( identifier[self] , identifier[target] ): literal[string] keyword[assert] identifier[isinstance] ( identifier[target] , identifier[VirtualTarget] ) keyword[if] identifier[target] . identifier[path] (): identifier[signature] = identifier[target] . identifier[path] ()+ literal[string] + identifier[target] . identifier[name] () keyword[else] : identifier[signature] = literal[string] + identifier[target] . identifier[name] () identifier[result] = keyword[None] keyword[if] identifier[signature] keyword[not] keyword[in] identifier[self] . identifier[cache_] : identifier[self] . identifier[cache_] [ identifier[signature] ]=[] keyword[for] identifier[t] keyword[in] identifier[self] . identifier[cache_] [ identifier[signature] ]: identifier[a1] = identifier[t] . identifier[action] () identifier[a2] = identifier[target] . identifier[action] () keyword[if] keyword[not] identifier[result] : keyword[if] keyword[not] identifier[a1] keyword[and] keyword[not] identifier[a2] : identifier[result] = identifier[t] keyword[else] : keyword[if] identifier[a1] keyword[and] identifier[a2] keyword[and] identifier[a1] . identifier[action_name] ()== identifier[a2] . identifier[action_name] () keyword[and] identifier[a1] . identifier[sources] ()== identifier[a2] . identifier[sources] (): identifier[ps1] = identifier[a1] . identifier[properties] () identifier[ps2] = identifier[a2] . identifier[properties] () identifier[p1] = identifier[ps1] . identifier[base] ()+ identifier[ps1] . identifier[free] ()+ identifier[b2] . identifier[util] . identifier[set] . identifier[difference] ( identifier[ps1] . identifier[dependency] (), identifier[ps1] . identifier[incidental] ()) identifier[p2] = identifier[ps2] . identifier[base] ()+ identifier[ps2] . identifier[free] ()+ identifier[b2] . identifier[util] . identifier[set] . identifier[difference] ( identifier[ps2] . identifier[dependency] (), identifier[ps2] . identifier[incidental] ()) keyword[if] identifier[p1] == identifier[p2] : identifier[result] = identifier[t] keyword[if] keyword[not] identifier[result] : identifier[self] . identifier[cache_] [ identifier[signature] ]. identifier[append] ( identifier[target] ) identifier[result] = identifier[target] identifier[self] . identifier[recent_targets_] . identifier[append] ( identifier[result] ) identifier[self] . identifier[all_targets_] . identifier[append] ( identifier[result] ) keyword[return] identifier[result]
def register(self, target):
    """ Registers a new virtual target. Checks if there's an already
        registered target with the same name, type, project and
        subvariant properties, and also with the same sources
        and equal action. If such a target is found it is returned and
        'target' is not registered. Otherwise, 'target' is registered
        and returned.
    """
    assert isinstance(target, VirtualTarget)
    if target.path():
        signature = target.path() + '-' + target.name() # depends on [control=['if'], data=[]]
    else:
        signature = '-' + target.name()
    result = None
    if signature not in self.cache_:
        self.cache_[signature] = [] # depends on [control=['if'], data=['signature']]
    for t in self.cache_[signature]:
        a1 = t.action()
        a2 = target.action()
        # TODO: why are we checking for not result?
        if not result:
            if not a1 and (not a2):
                result = t # depends on [control=['if'], data=[]]
            elif a1 and a2 and (a1.action_name() == a2.action_name()) and (a1.sources() == a2.sources()):
                ps1 = a1.properties()
                ps2 = a2.properties()
                p1 = ps1.base() + ps1.free() + b2.util.set.difference(ps1.dependency(), ps1.incidental())
                p2 = ps2.base() + ps2.free() + b2.util.set.difference(ps2.dependency(), ps2.incidental())
                if p1 == p2:
                    result = t # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['t']]
    if not result:
        self.cache_[signature].append(target)
        result = target # depends on [control=['if'], data=[]]
    # TODO: Don't append if we found pre-existing target?
    self.recent_targets_.append(result)
    self.all_targets_.append(result)
    return result
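The registry above dedupes on a path+name signature and only falls back to comparing actions on a collision. A minimal self-contained sketch of that dedup-by-signature pattern (the Item/ItemRegistry names are illustrative, not Boost.Build API):

class Item:
    """Stand-in for a virtual target: identity is (path, name, action)."""
    def __init__(self, path, name, action=None):
        self.path, self.name, self.action = path, name, action

class ItemRegistry:
    def __init__(self):
        self.cache = {}

    def register(self, item):
        # Same signature scheme as above: path + "-" + name.
        signature = (item.path or "") + "-" + item.name
        for existing in self.cache.setdefault(signature, []):
            if existing.action == item.action:
                return existing  # equal action: reuse the cached target
        self.cache[signature].append(item)
        return item

reg = ItemRegistry()
a = reg.register(Item("bin", "lib.o", action="compile"))
b = reg.register(Item("bin", "lib.o", action="compile"))
assert a is b  # the second registration returned the first object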
def clear(name): ''' Clear the namespace from the register USAGE: .. code-block:: yaml clearns: reg.clear: - name: myregister ''' ret = {'name': name, 'changes': {}, 'comment': '', 'result': True} if name in __reg__: __reg__[name].clear() return ret
def function[clear, parameter[name]]: constant[ Clear the namespace from the register USAGE: .. code-block:: yaml clearns: reg.clear: - name: myregister ] variable[ret] assign[=] dictionary[[<ast.Constant object at 0x7da1b1fa3610>, <ast.Constant object at 0x7da1b1fa1240>, <ast.Constant object at 0x7da1b1fa1150>, <ast.Constant object at 0x7da1b1fa1480>], [<ast.Name object at 0x7da1b1fa1f30>, <ast.Dict object at 0x7da1b1fa1300>, <ast.Constant object at 0x7da1b1fa1fc0>, <ast.Constant object at 0x7da1b1fa1c90>]] if compare[name[name] in name[__reg__]] begin[:] call[call[name[__reg__]][name[name]].clear, parameter[]] return[name[ret]]
keyword[def] identifier[clear] ( identifier[name] ): literal[string] identifier[ret] ={ literal[string] : identifier[name] , literal[string] :{}, literal[string] : literal[string] , literal[string] : keyword[True] } keyword[if] identifier[name] keyword[in] identifier[__reg__] : identifier[__reg__] [ identifier[name] ]. identifier[clear] () keyword[return] identifier[ret]
def clear(name): """ Clear the namespace from the register USAGE: .. code-block:: yaml clearns: reg.clear: - name: myregister """ ret = {'name': name, 'changes': {}, 'comment': '', 'result': True} if name in __reg__: __reg__[name].clear() # depends on [control=['if'], data=['name', '__reg__']] return ret
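In Salt, `__reg__` is a global injected by the loader; a stand-in sketch (local `__reg__` dict, assuming `clear` is defined in the same module) showing what the state returns and does:

__reg__ = {'myregister': {'val': {'data': 42}}}

ret = clear('myregister')
assert ret == {'name': 'myregister', 'changes': {}, 'comment': '', 'result': True}
assert __reg__['myregister'] == {}  # namespace emptied in place, key kept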
def get_page_kwargs(**kwargs): """Construct page and page size kwargs (if present).""" page_kwargs = {} page = kwargs.get("page") if page is not None and page > 0: page_kwargs["page"] = page page_size = kwargs.get("page_size") if page_size is not None and page_size > 0: page_kwargs["page_size"] = page_size return page_kwargs
def function[get_page_kwargs, parameter[]]: constant[Construct page and page size kwargs (if present).] variable[page_kwargs] assign[=] dictionary[[], []] variable[page] assign[=] call[name[kwargs].get, parameter[constant[page]]] if <ast.BoolOp object at 0x7da1b1a3e8f0> begin[:] call[name[page_kwargs]][constant[page]] assign[=] name[page] variable[page_size] assign[=] call[name[kwargs].get, parameter[constant[page_size]]] if <ast.BoolOp object at 0x7da1b1a3ce50> begin[:] call[name[page_kwargs]][constant[page_size]] assign[=] name[page_size] return[name[page_kwargs]]
keyword[def] identifier[get_page_kwargs] (** identifier[kwargs] ): literal[string] identifier[page_kwargs] ={} identifier[page] = identifier[kwargs] . identifier[get] ( literal[string] ) keyword[if] identifier[page] keyword[is] keyword[not] keyword[None] keyword[and] identifier[page] > literal[int] : identifier[page_kwargs] [ literal[string] ]= identifier[page] identifier[page_size] = identifier[kwargs] . identifier[get] ( literal[string] ) keyword[if] identifier[page_size] keyword[is] keyword[not] keyword[None] keyword[and] identifier[page_size] > literal[int] : identifier[page_kwargs] [ literal[string] ]= identifier[page_size] keyword[return] identifier[page_kwargs]
def get_page_kwargs(**kwargs): """Construct page and page size kwargs (if present).""" page_kwargs = {} page = kwargs.get('page') if page is not None and page > 0: page_kwargs['page'] = page # depends on [control=['if'], data=[]] page_size = kwargs.get('page_size') if page_size is not None and page_size > 0: page_kwargs['page_size'] = page_size # depends on [control=['if'], data=[]] return page_kwargs
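A quick check of the filtering behaviour: non-positive or missing values are dropped rather than passed through.

assert get_page_kwargs(page=2, page_size=50) == {'page': 2, 'page_size': 50}
assert get_page_kwargs(page=0, page_size=-1) == {}   # non-positive values dropped
assert get_page_kwargs(q='ignored') == {}            # unrelated kwargs ignored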
def read_source_models(fnames, converter, monitor): """ :param fnames: list of source model files :param converter: a SourceConverter instance :param monitor: a :class:`openquake.performance.Monitor` instance :yields: SourceModel instances """ for fname in fnames: if fname.endswith(('.xml', '.nrml')): sm = to_python(fname, converter) elif fname.endswith('.hdf5'): sm = sourceconverter.to_python(fname, converter) else: raise ValueError('Unrecognized extension in %s' % fname) sm.fname = fname yield sm
def function[read_source_models, parameter[fnames, converter, monitor]]: constant[ :param fnames: list of source model files :param converter: a SourceConverter instance :param monitor: a :class:`openquake.performance.Monitor` instance :yields: SourceModel instances ] for taget[name[fname]] in starred[name[fnames]] begin[:] if call[name[fname].endswith, parameter[tuple[[<ast.Constant object at 0x7da204621000>, <ast.Constant object at 0x7da204623820>]]]] begin[:] variable[sm] assign[=] call[name[to_python], parameter[name[fname], name[converter]]] name[sm].fname assign[=] name[fname] <ast.Yield object at 0x7da18f58df90>
keyword[def] identifier[read_source_models] ( identifier[fnames] , identifier[converter] , identifier[monitor] ): literal[string] keyword[for] identifier[fname] keyword[in] identifier[fnames] : keyword[if] identifier[fname] . identifier[endswith] (( literal[string] , literal[string] )): identifier[sm] = identifier[to_python] ( identifier[fname] , identifier[converter] ) keyword[elif] identifier[fname] . identifier[endswith] ( literal[string] ): identifier[sm] = identifier[sourceconverter] . identifier[to_python] ( identifier[fname] , identifier[converter] ) keyword[else] : keyword[raise] identifier[ValueError] ( literal[string] % identifier[fname] ) identifier[sm] . identifier[fname] = identifier[fname] keyword[yield] identifier[sm]
def read_source_models(fnames, converter, monitor): """ :param fnames: list of source model files :param converter: a SourceConverter instance :param monitor: a :class:`openquake.performance.Monitor` instance :yields: SourceModel instances """ for fname in fnames: if fname.endswith(('.xml', '.nrml')): sm = to_python(fname, converter) # depends on [control=['if'], data=[]] elif fname.endswith('.hdf5'): sm = sourceconverter.to_python(fname, converter) # depends on [control=['if'], data=[]] else: raise ValueError('Unrecognized extension in %s' % fname) sm.fname = fname yield sm # depends on [control=['for'], data=['fname']]
def _pythonized_comments(tokens): """ Similar to tokens but converts strings after a colon (:) to comments. """ is_after_colon = True for token_type, token_text in tokens: if is_after_colon and (token_type in pygments.token.String): token_type = pygments.token.Comment elif token_text == ':': is_after_colon = True elif token_type not in pygments.token.Comment: is_whitespace = len(token_text.rstrip(' \f\n\r\t')) == 0 if not is_whitespace: is_after_colon = False yield token_type, token_text
def function[_pythonized_comments, parameter[tokens]]: constant[ Similar to tokens but converts strings after a colon (:) to comments. ] variable[is_after_colon] assign[=] constant[True] for taget[tuple[[<ast.Name object at 0x7da204623820>, <ast.Name object at 0x7da204623700>]]] in starred[name[tokens]] begin[:] if <ast.BoolOp object at 0x7da204622fb0> begin[:] variable[token_type] assign[=] name[pygments].token.Comment <ast.Yield object at 0x7da18f00cca0>
keyword[def] identifier[_pythonized_comments] ( identifier[tokens] ): literal[string] identifier[is_after_colon] = keyword[True] keyword[for] identifier[token_type] , identifier[token_text] keyword[in] identifier[tokens] : keyword[if] identifier[is_after_colon] keyword[and] ( identifier[token_type] keyword[in] identifier[pygments] . identifier[token] . identifier[String] ): identifier[token_type] = identifier[pygments] . identifier[token] . identifier[Comment] keyword[elif] identifier[token_text] == literal[string] : identifier[is_after_colon] = keyword[True] keyword[elif] identifier[token_type] keyword[not] keyword[in] identifier[pygments] . identifier[token] . identifier[Comment] : identifier[is_whitespace] = identifier[len] ( identifier[token_text] . identifier[rstrip] ( literal[string] ))== literal[int] keyword[if] keyword[not] identifier[is_whitespace] : identifier[is_after_colon] = keyword[False] keyword[yield] identifier[token_type] , identifier[token_text]
def _pythonized_comments(tokens): """ Similar to tokens but converts strings after a colon (:) to comments. """ is_after_colon = True for (token_type, token_text) in tokens: if is_after_colon and token_type in pygments.token.String: token_type = pygments.token.Comment # depends on [control=['if'], data=[]] elif token_text == ':': is_after_colon = True # depends on [control=['if'], data=[]] elif token_type not in pygments.token.Comment: is_whitespace = len(token_text.rstrip(' \x0c\n\r\t')) == 0 if not is_whitespace: is_after_colon = False # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] yield (token_type, token_text) # depends on [control=['for'], data=[]]
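One way to exercise the filter is to feed it Pygments tokens; a string literal that opens a suite (i.e. follows a `:`) comes back retagged as a comment, while ordinary strings pass through:

import pygments.token
from pygments.lexers import PythonLexer

source = "if flag:\n    'suite-opening string'\nx = 'ordinary string'\n"
for tok_type, tok_text in _pythonized_comments(PythonLexer().get_tokens(source)):
    if tok_text.strip():
        print(tok_type, repr(tok_text))
# prints Token.Comment for the first literal, Token.Literal.String.* for the second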
def write_json_response(self, response): """ write back json response """ self.write(tornado.escape.json_encode(response)) self.set_header("Content-Type", "application/json")
def function[write_json_response, parameter[self, response]]: constant[ write back json response ] call[name[self].write, parameter[call[name[tornado].escape.json_encode, parameter[name[response]]]]] call[name[self].set_header, parameter[constant[Content-Type], constant[application/json]]]
keyword[def] identifier[write_json_response] ( identifier[self] , identifier[response] ): literal[string] identifier[self] . identifier[write] ( identifier[tornado] . identifier[escape] . identifier[json_encode] ( identifier[response] )) identifier[self] . identifier[set_header] ( literal[string] , literal[string] )
def write_json_response(self, response): """ write back json response """ self.write(tornado.escape.json_encode(response)) self.set_header('Content-Type', 'application/json')
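In context this helper belongs on a `tornado.web.RequestHandler`; a minimal handler wiring it up (the StatusHandler class and route are illustrative):

import tornado.escape
import tornado.web

class StatusHandler(tornado.web.RequestHandler):
    def write_json_response(self, response):
        self.write(tornado.escape.json_encode(response))
        self.set_header("Content-Type", "application/json")

    def get(self):
        self.write_json_response({"status": "ok"})

app = tornado.web.Application([(r"/status", StatusHandler)])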
def perspective(img, startpoints, endpoints, interpolation=Image.BICUBIC):
    """Perform perspective transform of the given PIL Image.

    Args:
        img (PIL Image): Image to be transformed.
        startpoints (list of tuple): four source points in the original image.
        endpoints (list of tuple): four target points the source points are
            mapped to; together the pairs define the perspective transform.
        interpolation: Default- Image.BICUBIC

    Returns:
        PIL Image: Perspectively transformed Image.
    """
    if not _is_pil_image(img):
        raise TypeError('img should be PIL Image. Got {}'.format(type(img)))

    coeffs = _get_perspective_coeffs(startpoints, endpoints)
    return img.transform(img.size, Image.PERSPECTIVE, coeffs, interpolation)
def function[perspective, parameter[img, startpoints, endpoints, interpolation]]: constant[Perform perspective transform of the given PIL Image. Args: img (PIL Image): Image to be transformed. coeffs (tuple) : 8-tuple (a, b, c, d, e, f, g, h) which contains the coefficients. for a perspective transform. interpolation: Default- Image.BICUBIC Returns: PIL Image: Perspectively transformed Image. ] if <ast.UnaryOp object at 0x7da1b033dbd0> begin[:] <ast.Raise object at 0x7da1b033dcc0> variable[coeffs] assign[=] call[name[_get_perspective_coeffs], parameter[name[startpoints], name[endpoints]]] return[call[name[img].transform, parameter[name[img].size, name[Image].PERSPECTIVE, name[coeffs], name[interpolation]]]]
keyword[def] identifier[perspective] ( identifier[img] , identifier[startpoints] , identifier[endpoints] , identifier[interpolation] = identifier[Image] . identifier[BICUBIC] ): literal[string] keyword[if] keyword[not] identifier[_is_pil_image] ( identifier[img] ): keyword[raise] identifier[TypeError] ( literal[string] . identifier[format] ( identifier[type] ( identifier[img] ))) identifier[coeffs] = identifier[_get_perspective_coeffs] ( identifier[startpoints] , identifier[endpoints] ) keyword[return] identifier[img] . identifier[transform] ( identifier[img] . identifier[size] , identifier[Image] . identifier[PERSPECTIVE] , identifier[coeffs] , identifier[interpolation] )
def perspective(img, startpoints, endpoints, interpolation=Image.BICUBIC):
    """Perform perspective transform of the given PIL Image.

    Args:
        img (PIL Image): Image to be transformed.
        startpoints (list of tuple): four source points in the original image.
        endpoints (list of tuple): four target points the source points are
            mapped to; together the pairs define the perspective transform.
        interpolation: Default- Image.BICUBIC

    Returns:
        PIL Image: Perspectively transformed Image.
    """
    if not _is_pil_image(img):
        raise TypeError('img should be PIL Image. Got {}'.format(type(img))) # depends on [control=['if'], data=[]]
    coeffs = _get_perspective_coeffs(startpoints, endpoints)
    return img.transform(img.size, Image.PERSPECTIVE, coeffs, interpolation)
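Assuming the module-private helpers (`_is_pil_image`, `_get_perspective_coeffs`) are available alongside the function, as in torchvision's functional transforms, a usage sketch:

from PIL import Image

img = Image.new("RGB", (100, 100), "white")
startpoints = [(0, 0), (99, 0), (99, 99), (0, 99)]   # source corners
endpoints = [(10, 5), (90, 10), (95, 95), (5, 90)]   # where each corner lands
warped = perspective(img, startpoints, endpoints)
assert warped.size == img.size  # the canvas size is preserved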
def multipath_flush(device): ''' Device-Mapper Multipath flush CLI Example: .. code-block:: bash salt '*' devmap.multipath_flush mpath1 ''' if not os.path.exists(device): return '{0} does not exist'.format(device) cmd = 'multipath -f {0}'.format(device) return __salt__['cmd.run'](cmd).splitlines()
def function[multipath_flush, parameter[device]]: constant[ Device-Mapper Multipath flush CLI Example: .. code-block:: bash salt '*' devmap.multipath_flush mpath1 ] if <ast.UnaryOp object at 0x7da20e74b1f0> begin[:] return[call[constant[{0} does not exist].format, parameter[name[device]]]] variable[cmd] assign[=] call[constant[multipath -f {0}].format, parameter[name[device]]] return[call[call[call[name[__salt__]][constant[cmd.run]], parameter[name[cmd]]].splitlines, parameter[]]]
keyword[def] identifier[multipath_flush] ( identifier[device] ): literal[string] keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[device] ): keyword[return] literal[string] . identifier[format] ( identifier[device] ) identifier[cmd] = literal[string] . identifier[format] ( identifier[device] ) keyword[return] identifier[__salt__] [ literal[string] ]( identifier[cmd] ). identifier[splitlines] ()
def multipath_flush(device): """ Device-Mapper Multipath flush CLI Example: .. code-block:: bash salt '*' devmap.multipath_flush mpath1 """ if not os.path.exists(device): return '{0} does not exist'.format(device) # depends on [control=['if'], data=[]] cmd = 'multipath -f {0}'.format(device) return __salt__['cmd.run'](cmd).splitlines()
def get_nameserver_detail_output_show_nameserver_nameserver_permanent_portname(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") get_nameserver_detail = ET.Element("get_nameserver_detail") config = get_nameserver_detail output = ET.SubElement(get_nameserver_detail, "output") show_nameserver = ET.SubElement(output, "show-nameserver") nameserver_portid_key = ET.SubElement(show_nameserver, "nameserver-portid") nameserver_portid_key.text = kwargs.pop('nameserver_portid') nameserver_permanent_portname = ET.SubElement(show_nameserver, "nameserver-permanent-portname") nameserver_permanent_portname.text = kwargs.pop('nameserver_permanent_portname') callback = kwargs.pop('callback', self._callback) return callback(config)
def function[get_nameserver_detail_output_show_nameserver_nameserver_permanent_portname, parameter[self]]: constant[Auto Generated Code ] variable[config] assign[=] call[name[ET].Element, parameter[constant[config]]] variable[get_nameserver_detail] assign[=] call[name[ET].Element, parameter[constant[get_nameserver_detail]]] variable[config] assign[=] name[get_nameserver_detail] variable[output] assign[=] call[name[ET].SubElement, parameter[name[get_nameserver_detail], constant[output]]] variable[show_nameserver] assign[=] call[name[ET].SubElement, parameter[name[output], constant[show-nameserver]]] variable[nameserver_portid_key] assign[=] call[name[ET].SubElement, parameter[name[show_nameserver], constant[nameserver-portid]]] name[nameserver_portid_key].text assign[=] call[name[kwargs].pop, parameter[constant[nameserver_portid]]] variable[nameserver_permanent_portname] assign[=] call[name[ET].SubElement, parameter[name[show_nameserver], constant[nameserver-permanent-portname]]] name[nameserver_permanent_portname].text assign[=] call[name[kwargs].pop, parameter[constant[nameserver_permanent_portname]]] variable[callback] assign[=] call[name[kwargs].pop, parameter[constant[callback], name[self]._callback]] return[call[name[callback], parameter[name[config]]]]
keyword[def] identifier[get_nameserver_detail_output_show_nameserver_nameserver_permanent_portname] ( identifier[self] ,** identifier[kwargs] ): literal[string] identifier[config] = identifier[ET] . identifier[Element] ( literal[string] ) identifier[get_nameserver_detail] = identifier[ET] . identifier[Element] ( literal[string] ) identifier[config] = identifier[get_nameserver_detail] identifier[output] = identifier[ET] . identifier[SubElement] ( identifier[get_nameserver_detail] , literal[string] ) identifier[show_nameserver] = identifier[ET] . identifier[SubElement] ( identifier[output] , literal[string] ) identifier[nameserver_portid_key] = identifier[ET] . identifier[SubElement] ( identifier[show_nameserver] , literal[string] ) identifier[nameserver_portid_key] . identifier[text] = identifier[kwargs] . identifier[pop] ( literal[string] ) identifier[nameserver_permanent_portname] = identifier[ET] . identifier[SubElement] ( identifier[show_nameserver] , literal[string] ) identifier[nameserver_permanent_portname] . identifier[text] = identifier[kwargs] . identifier[pop] ( literal[string] ) identifier[callback] = identifier[kwargs] . identifier[pop] ( literal[string] , identifier[self] . identifier[_callback] ) keyword[return] identifier[callback] ( identifier[config] )
def get_nameserver_detail_output_show_nameserver_nameserver_permanent_portname(self, **kwargs): """Auto Generated Code """ config = ET.Element('config') get_nameserver_detail = ET.Element('get_nameserver_detail') config = get_nameserver_detail output = ET.SubElement(get_nameserver_detail, 'output') show_nameserver = ET.SubElement(output, 'show-nameserver') nameserver_portid_key = ET.SubElement(show_nameserver, 'nameserver-portid') nameserver_portid_key.text = kwargs.pop('nameserver_portid') nameserver_permanent_portname = ET.SubElement(show_nameserver, 'nameserver-permanent-portname') nameserver_permanent_portname.text = kwargs.pop('nameserver_permanent_portname') callback = kwargs.pop('callback', self._callback) return callback(config)
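The builder assembles its payload with `xml.etree.ElementTree`; a standalone sketch of the same element structure shows roughly what is sent (element names copied from the code; the port id and WWPN values are invented):

import xml.etree.ElementTree as ET

output = ET.Element("output")
show = ET.SubElement(output, "show-nameserver")
ET.SubElement(show, "nameserver-portid").text = "010200"
ET.SubElement(show, "nameserver-permanent-portname").text = "20:00:00:11:22:33:44:55"
print(ET.tostring(output).decode())
# <output><show-nameserver><nameserver-portid>010200</nameserver-portid>...</output>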
def generate_records(self, infile): """ Process a file of rest and yield dictionaries """ state = 0 record = {} for item in self.generate_lines(infile): line = item['line'] heading = item['heading'] # any Markdown heading is just a caption, no image if heading: record['heading'] = True record['caption'] = line[1:].strip() state = 'caption' continue if not line[0].isspace(): # at a potential image if state == 'caption': yield record record = {} state = 0 if state == 'caption': record['caption'] += '\n' + line[:-1] continue fields = line.split(',') # nothing there, carry on if not fields: continue image = fields[0].strip() if not image: continue record['image'] = image try: time = float(fields[1]) except: time = 0 record['time'] = time try: caption = fields[2].strip() except: caption = None if caption: record['caption'] = caption # yield it if we have anything if record: yield record record = {}
def function[generate_records, parameter[self, infile]]: constant[ Process a file of rest and yield dictionaries ] variable[state] assign[=] constant[0] variable[record] assign[=] dictionary[[], []] for taget[name[item]] in starred[call[name[self].generate_lines, parameter[name[infile]]]] begin[:] variable[line] assign[=] call[name[item]][constant[line]] variable[heading] assign[=] call[name[item]][constant[heading]] if name[heading] begin[:] call[name[record]][constant[heading]] assign[=] constant[True] call[name[record]][constant[caption]] assign[=] call[call[name[line]][<ast.Slice object at 0x7da1b1343cd0>].strip, parameter[]] variable[state] assign[=] constant[caption] continue if <ast.UnaryOp object at 0x7da1b13422f0> begin[:] if compare[name[state] equal[==] constant[caption]] begin[:] <ast.Yield object at 0x7da1b1463ee0> variable[record] assign[=] dictionary[[], []] variable[state] assign[=] constant[0] if compare[name[state] equal[==] constant[caption]] begin[:] <ast.AugAssign object at 0x7da1b1460e20> continue variable[fields] assign[=] call[name[line].split, parameter[constant[,]]] if <ast.UnaryOp object at 0x7da1b1341390> begin[:] continue variable[image] assign[=] call[call[name[fields]][constant[0]].strip, parameter[]] if <ast.UnaryOp object at 0x7da1b1341ff0> begin[:] continue call[name[record]][constant[image]] assign[=] name[image] <ast.Try object at 0x7da1b1340bb0> call[name[record]][constant[time]] assign[=] name[time] <ast.Try object at 0x7da1b13423e0> if name[caption] begin[:] call[name[record]][constant[caption]] assign[=] name[caption] if name[record] begin[:] <ast.Yield object at 0x7da1b15b18d0> variable[record] assign[=] dictionary[[], []]
keyword[def] identifier[generate_records] ( identifier[self] , identifier[infile] ): literal[string] identifier[state] = literal[int] identifier[record] ={} keyword[for] identifier[item] keyword[in] identifier[self] . identifier[generate_lines] ( identifier[infile] ): identifier[line] = identifier[item] [ literal[string] ] identifier[heading] = identifier[item] [ literal[string] ] keyword[if] identifier[heading] : identifier[record] [ literal[string] ]= keyword[True] identifier[record] [ literal[string] ]= identifier[line] [ literal[int] :]. identifier[strip] () identifier[state] = literal[string] keyword[continue] keyword[if] keyword[not] identifier[line] [ literal[int] ]. identifier[isspace] (): keyword[if] identifier[state] == literal[string] : keyword[yield] identifier[record] identifier[record] ={} identifier[state] = literal[int] keyword[if] identifier[state] == literal[string] : identifier[record] [ literal[string] ]+= literal[string] + identifier[line] [:- literal[int] ] keyword[continue] identifier[fields] = identifier[line] . identifier[split] ( literal[string] ) keyword[if] keyword[not] identifier[fields] : keyword[continue] identifier[image] = identifier[fields] [ literal[int] ]. identifier[strip] () keyword[if] keyword[not] identifier[image] : keyword[continue] identifier[record] [ literal[string] ]= identifier[image] keyword[try] : identifier[time] = identifier[float] ( identifier[fields] [ literal[int] ]) keyword[except] : identifier[time] = literal[int] identifier[record] [ literal[string] ]= identifier[time] keyword[try] : identifier[caption] = identifier[fields] [ literal[int] ]. identifier[strip] () keyword[except] : identifier[caption] = keyword[None] keyword[if] identifier[caption] : identifier[record] [ literal[string] ]= identifier[caption] keyword[if] identifier[record] : keyword[yield] identifier[record] identifier[record] ={}
def generate_records(self, infile): """ Process a file of rest and yield dictionaries """ state = 0 record = {} for item in self.generate_lines(infile): line = item['line'] heading = item['heading'] # any Markdown heading is just a caption, no image if heading: record['heading'] = True record['caption'] = line[1:].strip() state = 'caption' continue # depends on [control=['if'], data=[]] if not line[0].isspace(): # at a potential image if state == 'caption': yield record record = {} state = 0 # depends on [control=['if'], data=['state']] # depends on [control=['if'], data=[]] if state == 'caption': record['caption'] += '\n' + line[:-1] continue # depends on [control=['if'], data=[]] fields = line.split(',') # nothing there, carry on if not fields: continue # depends on [control=['if'], data=[]] image = fields[0].strip() if not image: continue # depends on [control=['if'], data=[]] record['image'] = image try: time = float(fields[1]) # depends on [control=['try'], data=[]] except: time = 0 # depends on [control=['except'], data=[]] record['time'] = time try: caption = fields[2].strip() # depends on [control=['try'], data=[]] except: caption = None # depends on [control=['except'], data=[]] if caption: record['caption'] = caption # depends on [control=['if'], data=[]] # yield it if we have anything if record: yield record record = {} # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['item']]
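A small harness makes the record shapes concrete; it treats the def above as a module-level function and supplies a minimal `generate_lines()` (the heading detection in the stub is an assumption):

import io

class SlideSource:
    def generate_lines(self, infile):
        # Minimal stand-in for the real line generator.
        for line in infile:
            yield {'line': line, 'heading': line.startswith('#')}

SlideSource.generate_records = generate_records  # illustrative wiring only

src = SlideSource()
script = io.StringIO("# Intro\nslide1.png, 2.5, first slide\n")
for record in src.generate_records(script):
    print(record)
# {'heading': True, 'caption': 'Intro'}
# {'image': 'slide1.png', 'time': 2.5, 'caption': 'first slide'}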
def Serialize(self, writer): """ Serialize full object. Args: writer (neo.IO.BinaryWriter): """ super(Header, self).Serialize(writer) writer.WriteByte(0)
def function[Serialize, parameter[self, writer]]: constant[ Serialize full object. Args: writer (neo.IO.BinaryWriter): ] call[call[name[super], parameter[name[Header], name[self]]].Serialize, parameter[name[writer]]] call[name[writer].WriteByte, parameter[constant[0]]]
keyword[def] identifier[Serialize] ( identifier[self] , identifier[writer] ): literal[string] identifier[super] ( identifier[Header] , identifier[self] ). identifier[Serialize] ( identifier[writer] ) identifier[writer] . identifier[WriteByte] ( literal[int] )
def Serialize(self, writer): """ Serialize full object. Args: writer (neo.IO.BinaryWriter): """ super(Header, self).Serialize(writer) writer.WriteByte(0)
def samtools_index(self, bam_file): """Index a bam file.""" cmd = self.tools.samtools + " index {0}".format(bam_file) return cmd
def function[samtools_index, parameter[self, bam_file]]: constant[Index a bam file.] variable[cmd] assign[=] binary_operation[name[self].tools.samtools + call[constant[ index {0}].format, parameter[name[bam_file]]]] return[name[cmd]]
keyword[def] identifier[samtools_index] ( identifier[self] , identifier[bam_file] ): literal[string] identifier[cmd] = identifier[self] . identifier[tools] . identifier[samtools] + literal[string] . identifier[format] ( identifier[bam_file] ) keyword[return] identifier[cmd]
def samtools_index(self, bam_file): """Index a bam file.""" cmd = self.tools.samtools + ' index {0}'.format(bam_file) return cmd
def _build_zmat(self, construction_table):
        """Create the Zmatrix from a construction table.

        Args:
            construction_table (pd.DataFrame):

        Returns:
            Zmat: A new instance of :class:`Zmat`.
        """
        c_table = construction_table
        default_cols = ['atom', 'b', 'bond', 'a', 'angle', 'd', 'dihedral']
        optional_cols = list(set(self.columns) - {'atom', 'x', 'y', 'z'})

        zmat_frame = pd.DataFrame(columns=default_cols + optional_cols,
                                  dtype='float', index=c_table.index)

        zmat_frame.loc[:, optional_cols] = self.loc[c_table.index, optional_cols]

        zmat_frame.loc[:, 'atom'] = self.loc[c_table.index, 'atom']
        zmat_frame.loc[:, ['b', 'a', 'd']] = c_table

        zmat_values = self._calculate_zmat_values(c_table)
        zmat_frame.loc[:, ['bond', 'angle', 'dihedral']] = zmat_values

        zmatrix = Zmat(zmat_frame, metadata=self.metadata,
                       _metadata={'last_valid_cartesian': self.copy()})
        return zmatrix
def function[_build_zmat, parameter[self, construction_table]]: constant[Create the Zmatrix from a construction table. Args: Construction table (pd.DataFrame): Returns: Zmat: A new instance of :class:`Zmat`. ] variable[c_table] assign[=] name[construction_table] variable[default_cols] assign[=] list[[<ast.Constant object at 0x7da1b26f2ef0>, <ast.Constant object at 0x7da1b26f2e00>, <ast.Constant object at 0x7da1b26f3040>, <ast.Constant object at 0x7da1b26f2fe0>, <ast.Constant object at 0x7da1b26f2fb0>, <ast.Constant object at 0x7da1b26f1b40>, <ast.Constant object at 0x7da1b26f37f0>]] variable[optional_cols] assign[=] call[name[list], parameter[binary_operation[call[name[set], parameter[name[self].columns]] - <ast.Set object at 0x7da1b26f3d90>]]] variable[zmat_frame] assign[=] call[name[pd].DataFrame, parameter[]] call[name[zmat_frame].loc][tuple[[<ast.Slice object at 0x7da1b26f2770>, <ast.Name object at 0x7da1b26f1e10>]]] assign[=] call[name[self].loc][tuple[[<ast.Attribute object at 0x7da207f9ab90>, <ast.Name object at 0x7da207f996f0>]]] call[name[zmat_frame].loc][tuple[[<ast.Slice object at 0x7da207f99390>, <ast.Constant object at 0x7da207f9aad0>]]] assign[=] call[name[self].loc][tuple[[<ast.Attribute object at 0x7da207f9a9b0>, <ast.Constant object at 0x7da207f99ba0>]]] call[name[zmat_frame].loc][tuple[[<ast.Slice object at 0x7da207f9b9a0>, <ast.List object at 0x7da207f98070>]]] assign[=] name[c_table] variable[zmat_values] assign[=] call[name[self]._calculate_zmat_values, parameter[name[c_table]]] call[name[zmat_frame].loc][tuple[[<ast.Slice object at 0x7da207f990c0>, <ast.List object at 0x7da207f981f0>]]] assign[=] name[zmat_values] variable[zmatrix] assign[=] call[name[Zmat], parameter[name[zmat_frame]]] return[name[zmatrix]]
keyword[def] identifier[_build_zmat] ( identifier[self] , identifier[construction_table] ): literal[string] identifier[c_table] = identifier[construction_table] identifier[default_cols] =[ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ] identifier[optional_cols] = identifier[list] ( identifier[set] ( identifier[self] . identifier[columns] )-{ literal[string] , literal[string] , literal[string] , literal[string] }) identifier[zmat_frame] = identifier[pd] . identifier[DataFrame] ( identifier[columns] = identifier[default_cols] + identifier[optional_cols] , identifier[dtype] = literal[string] , identifier[index] = identifier[c_table] . identifier[index] ) identifier[zmat_frame] . identifier[loc] [:, identifier[optional_cols] ]= identifier[self] . identifier[loc] [ identifier[c_table] . identifier[index] , identifier[optional_cols] ] identifier[zmat_frame] . identifier[loc] [:, literal[string] ]= identifier[self] . identifier[loc] [ identifier[c_table] . identifier[index] , literal[string] ] identifier[zmat_frame] . identifier[loc] [:,[ literal[string] , literal[string] , literal[string] ]]= identifier[c_table] identifier[zmat_values] = identifier[self] . identifier[_calculate_zmat_values] ( identifier[c_table] ) identifier[zmat_frame] . identifier[loc] [:,[ literal[string] , literal[string] , literal[string] ]]= identifier[zmat_values] identifier[zmatrix] = identifier[Zmat] ( identifier[zmat_frame] , identifier[metadata] = identifier[self] . identifier[metadata] , identifier[_metadata] ={ literal[string] : identifier[self] . identifier[copy] ()}) keyword[return] identifier[zmatrix]
def _build_zmat(self, construction_table):
    """Create the Zmatrix from a construction table.

    Args:
        construction_table (pd.DataFrame):

    Returns:
        Zmat: A new instance of :class:`Zmat`.
    """
    c_table = construction_table
    default_cols = ['atom', 'b', 'bond', 'a', 'angle', 'd', 'dihedral']
    optional_cols = list(set(self.columns) - {'atom', 'x', 'y', 'z'})
    zmat_frame = pd.DataFrame(columns=default_cols + optional_cols, dtype='float', index=c_table.index)
    zmat_frame.loc[:, optional_cols] = self.loc[c_table.index, optional_cols]
    zmat_frame.loc[:, 'atom'] = self.loc[c_table.index, 'atom']
    zmat_frame.loc[:, ['b', 'a', 'd']] = c_table
    zmat_values = self._calculate_zmat_values(c_table)
    zmat_frame.loc[:, ['bond', 'angle', 'dihedral']] = zmat_values
    zmatrix = Zmat(zmat_frame, metadata=self.metadata, _metadata={'last_valid_cartesian': self.copy()})
    return zmatrix
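The load-bearing detail in `_build_zmat` is index-aligned `.loc` assignment: the new frame is created on the construction table's index, so values pulled from `self` line up row-by-row. A toy demonstration of the pattern (toy names and values):

import pandas as pd

source = pd.DataFrame({'atom': ['O', 'H', 'H']}, index=[10, 11, 12])
c_index = [11, 10, 12]  # construction-table row order
frame = pd.DataFrame(columns=['atom', 'bond'], index=c_index)
frame.loc[:, 'atom'] = source.loc[c_index, 'atom']  # aligned on the shared index
frame.loc[:, 'bond'] = [0.0, 0.96, 0.96]
print(frame)  # rows appear in construction-table order: 11, 10, 12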
def select_where(self, table, cols, pk_att, pk): ''' SELECT with WHERE clause. :param table: target table :param cols: list of columns to select :param pk_att: attribute for the where clause :param pk: the id that the pk_att should match :return: rows from the given table and cols, with the condition pk_att==pk :rtype: list ''' if self.orng_tables: data = [] for ex in self.orng_tables[table]: if str(ex[str(pk_att)]) == str(pk): data.append([ex[str(col)] for col in cols]) return data else: return self.src.select_where(table, cols, pk_att, pk)
def function[select_where, parameter[self, table, cols, pk_att, pk]]: constant[ SELECT with WHERE clause. :param table: target table :param cols: list of columns to select :param pk_att: attribute for the where clause :param pk: the id that the pk_att should match :return: rows from the given table and cols, with the condition pk_att==pk :rtype: list ] if name[self].orng_tables begin[:] variable[data] assign[=] list[[]] for taget[name[ex]] in starred[call[name[self].orng_tables][name[table]]] begin[:] if compare[call[name[str], parameter[call[name[ex]][call[name[str], parameter[name[pk_att]]]]]] equal[==] call[name[str], parameter[name[pk]]]] begin[:] call[name[data].append, parameter[<ast.ListComp object at 0x7da18c4cc550>]] return[name[data]]
keyword[def] identifier[select_where] ( identifier[self] , identifier[table] , identifier[cols] , identifier[pk_att] , identifier[pk] ): literal[string] keyword[if] identifier[self] . identifier[orng_tables] : identifier[data] =[] keyword[for] identifier[ex] keyword[in] identifier[self] . identifier[orng_tables] [ identifier[table] ]: keyword[if] identifier[str] ( identifier[ex] [ identifier[str] ( identifier[pk_att] )])== identifier[str] ( identifier[pk] ): identifier[data] . identifier[append] ([ identifier[ex] [ identifier[str] ( identifier[col] )] keyword[for] identifier[col] keyword[in] identifier[cols] ]) keyword[return] identifier[data] keyword[else] : keyword[return] identifier[self] . identifier[src] . identifier[select_where] ( identifier[table] , identifier[cols] , identifier[pk_att] , identifier[pk] )
def select_where(self, table, cols, pk_att, pk): """ SELECT with WHERE clause. :param table: target table :param cols: list of columns to select :param pk_att: attribute for the where clause :param pk: the id that the pk_att should match :return: rows from the given table and cols, with the condition pk_att==pk :rtype: list """ if self.orng_tables: data = [] for ex in self.orng_tables[table]: if str(ex[str(pk_att)]) == str(pk): data.append([ex[str(col)] for col in cols]) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['ex']] return data # depends on [control=['if'], data=[]] else: return self.src.select_where(table, cols, pk_att, pk)
def get(cls, resource_id): """Returns the class object identified by `resource_id` Args: resource_id (str): Unique EC2 Instance ID to load from database Returns: EC2 Instance object if found, else None """ res = Resource.get(resource_id) return cls(res) if res else None
def function[get, parameter[cls, resource_id]]: constant[Returns the class object identified by `resource_id` Args: resource_id (str): Unique EC2 Instance ID to load from database Returns: EC2 Instance object if found, else None ] variable[res] assign[=] call[name[Resource].get, parameter[name[resource_id]]] return[<ast.IfExp object at 0x7da1b1e94430>]
keyword[def] identifier[get] ( identifier[cls] , identifier[resource_id] ): literal[string] identifier[res] = identifier[Resource] . identifier[get] ( identifier[resource_id] ) keyword[return] identifier[cls] ( identifier[res] ) keyword[if] identifier[res] keyword[else] keyword[None]
def get(cls, resource_id): """Returns the class object identified by `resource_id` Args: resource_id (str): Unique EC2 Instance ID to load from database Returns: EC2 Instance object if found, else None """ res = Resource.get(resource_id) return cls(res) if res else None
def _RunAndWaitForVFSFileUpdate(self, path): """Runs a flow on the client, and waits for it to finish.""" client_id = rdf_client.GetClientURNFromPath(path) # If we're not actually in a directory on a client, no need to run a flow. if client_id is None: return flow_utils.UpdateVFSFileAndWait( client_id, token=self.token, vfs_file_urn=self.root.Add(path), timeout=self.timeout)
def function[_RunAndWaitForVFSFileUpdate, parameter[self, path]]: constant[Runs a flow on the client, and waits for it to finish.] variable[client_id] assign[=] call[name[rdf_client].GetClientURNFromPath, parameter[name[path]]] if compare[name[client_id] is constant[None]] begin[:] return[None] call[name[flow_utils].UpdateVFSFileAndWait, parameter[name[client_id]]]
keyword[def] identifier[_RunAndWaitForVFSFileUpdate] ( identifier[self] , identifier[path] ): literal[string] identifier[client_id] = identifier[rdf_client] . identifier[GetClientURNFromPath] ( identifier[path] ) keyword[if] identifier[client_id] keyword[is] keyword[None] : keyword[return] identifier[flow_utils] . identifier[UpdateVFSFileAndWait] ( identifier[client_id] , identifier[token] = identifier[self] . identifier[token] , identifier[vfs_file_urn] = identifier[self] . identifier[root] . identifier[Add] ( identifier[path] ), identifier[timeout] = identifier[self] . identifier[timeout] )
def _RunAndWaitForVFSFileUpdate(self, path): """Runs a flow on the client, and waits for it to finish.""" client_id = rdf_client.GetClientURNFromPath(path) # If we're not actually in a directory on a client, no need to run a flow. if client_id is None: return # depends on [control=['if'], data=[]] flow_utils.UpdateVFSFileAndWait(client_id, token=self.token, vfs_file_urn=self.root.Add(path), timeout=self.timeout)
def _fsync_files(filenames): """Call fsync() a list of file names The filenames should be absolute paths already. """ touched_directories = set() mode = os.O_RDONLY # Windows if hasattr(os, 'O_BINARY'): mode |= os.O_BINARY for filename in filenames: fd = os.open(filename, mode) os.fsync(fd) os.close(fd) touched_directories.add(os.path.dirname(filename)) # Some OSes also require us to fsync the directory where we've # created files or subdirectories. if hasattr(os, 'O_DIRECTORY'): for dirname in touched_directories: fd = os.open(dirname, os.O_RDONLY | os.O_DIRECTORY) os.fsync(fd) os.close(fd)
def function[_fsync_files, parameter[filenames]]: constant[Call fsync() a list of file names The filenames should be absolute paths already. ] variable[touched_directories] assign[=] call[name[set], parameter[]] variable[mode] assign[=] name[os].O_RDONLY if call[name[hasattr], parameter[name[os], constant[O_BINARY]]] begin[:] <ast.AugAssign object at 0x7da20c6aa620> for taget[name[filename]] in starred[name[filenames]] begin[:] variable[fd] assign[=] call[name[os].open, parameter[name[filename], name[mode]]] call[name[os].fsync, parameter[name[fd]]] call[name[os].close, parameter[name[fd]]] call[name[touched_directories].add, parameter[call[name[os].path.dirname, parameter[name[filename]]]]] if call[name[hasattr], parameter[name[os], constant[O_DIRECTORY]]] begin[:] for taget[name[dirname]] in starred[name[touched_directories]] begin[:] variable[fd] assign[=] call[name[os].open, parameter[name[dirname], binary_operation[name[os].O_RDONLY <ast.BitOr object at 0x7da2590d6aa0> name[os].O_DIRECTORY]]] call[name[os].fsync, parameter[name[fd]]] call[name[os].close, parameter[name[fd]]]
keyword[def] identifier[_fsync_files] ( identifier[filenames] ): literal[string] identifier[touched_directories] = identifier[set] () identifier[mode] = identifier[os] . identifier[O_RDONLY] keyword[if] identifier[hasattr] ( identifier[os] , literal[string] ): identifier[mode] |= identifier[os] . identifier[O_BINARY] keyword[for] identifier[filename] keyword[in] identifier[filenames] : identifier[fd] = identifier[os] . identifier[open] ( identifier[filename] , identifier[mode] ) identifier[os] . identifier[fsync] ( identifier[fd] ) identifier[os] . identifier[close] ( identifier[fd] ) identifier[touched_directories] . identifier[add] ( identifier[os] . identifier[path] . identifier[dirname] ( identifier[filename] )) keyword[if] identifier[hasattr] ( identifier[os] , literal[string] ): keyword[for] identifier[dirname] keyword[in] identifier[touched_directories] : identifier[fd] = identifier[os] . identifier[open] ( identifier[dirname] , identifier[os] . identifier[O_RDONLY] | identifier[os] . identifier[O_DIRECTORY] ) identifier[os] . identifier[fsync] ( identifier[fd] ) identifier[os] . identifier[close] ( identifier[fd] )
def _fsync_files(filenames): """Call fsync() a list of file names The filenames should be absolute paths already. """ touched_directories = set() mode = os.O_RDONLY # Windows if hasattr(os, 'O_BINARY'): mode |= os.O_BINARY # depends on [control=['if'], data=[]] for filename in filenames: fd = os.open(filename, mode) os.fsync(fd) os.close(fd) touched_directories.add(os.path.dirname(filename)) # depends on [control=['for'], data=['filename']] # Some OSes also require us to fsync the directory where we've # created files or subdirectories. if hasattr(os, 'O_DIRECTORY'): for dirname in touched_directories: fd = os.open(dirname, os.O_RDONLY | os.O_DIRECTORY) os.fsync(fd) os.close(fd) # depends on [control=['for'], data=['dirname']] # depends on [control=['if'], data=[]]
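A runnable exercise of the helper: create two files, then flush them (and, where the OS supports `O_DIRECTORY`, their parent directory) to stable storage:

import os
import tempfile

with tempfile.TemporaryDirectory() as tmp:
    paths = []
    for name in ('a.txt', 'b.txt'):
        path = os.path.join(tmp, name)
        with open(path, 'w') as fh:
            fh.write('data')
        paths.append(path)
    _fsync_files(paths)  # fsyncs both files, then the shared directory on POSIX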
def create_file(self, share_name, directory_name, file_name, content_length, content_settings=None, metadata=None, timeout=None): ''' Creates a new file. See create_file_from_* for high level functions that handle the creation and upload of large files with automatic chunking and progress notifications. :param str share_name: Name of existing share. :param str directory_name: The path to the directory. :param str file_name: Name of file to create or update. :param int content_length: Length of the file in bytes. :param ~azure.storage.file.models.ContentSettings content_settings: ContentSettings object used to set file properties. :param metadata: Name-value pairs associated with the file as metadata. :type metadata: dict(str, str) :param int timeout: The timeout parameter is expressed in seconds. ''' _validate_not_none('share_name', share_name) _validate_not_none('file_name', file_name) _validate_not_none('content_length', content_length) request = HTTPRequest() request.method = 'PUT' request.host_locations = self._get_host_locations() request.path = _get_path(share_name, directory_name, file_name) request.query = {'timeout': _int_to_str(timeout)} request.headers = { 'x-ms-content-length': _to_str(content_length), 'x-ms-type': 'file' } _add_metadata_headers(metadata, request) if content_settings is not None: request.headers.update(content_settings._to_headers()) self._perform_request(request)
def function[create_file, parameter[self, share_name, directory_name, file_name, content_length, content_settings, metadata, timeout]]: constant[ Creates a new file. See create_file_from_* for high level functions that handle the creation and upload of large files with automatic chunking and progress notifications. :param str share_name: Name of existing share. :param str directory_name: The path to the directory. :param str file_name: Name of file to create or update. :param int content_length: Length of the file in bytes. :param ~azure.storage.file.models.ContentSettings content_settings: ContentSettings object used to set file properties. :param metadata: Name-value pairs associated with the file as metadata. :type metadata: dict(str, str) :param int timeout: The timeout parameter is expressed in seconds. ] call[name[_validate_not_none], parameter[constant[share_name], name[share_name]]] call[name[_validate_not_none], parameter[constant[file_name], name[file_name]]] call[name[_validate_not_none], parameter[constant[content_length], name[content_length]]] variable[request] assign[=] call[name[HTTPRequest], parameter[]] name[request].method assign[=] constant[PUT] name[request].host_locations assign[=] call[name[self]._get_host_locations, parameter[]] name[request].path assign[=] call[name[_get_path], parameter[name[share_name], name[directory_name], name[file_name]]] name[request].query assign[=] dictionary[[<ast.Constant object at 0x7da1b1da0910>], [<ast.Call object at 0x7da1b1da3070>]] name[request].headers assign[=] dictionary[[<ast.Constant object at 0x7da1b1da3880>, <ast.Constant object at 0x7da1b1da20b0>], [<ast.Call object at 0x7da1b1da1fc0>, <ast.Constant object at 0x7da1b1da3640>]] call[name[_add_metadata_headers], parameter[name[metadata], name[request]]] if compare[name[content_settings] is_not constant[None]] begin[:] call[name[request].headers.update, parameter[call[name[content_settings]._to_headers, parameter[]]]] call[name[self]._perform_request, parameter[name[request]]]
keyword[def] identifier[create_file] ( identifier[self] , identifier[share_name] , identifier[directory_name] , identifier[file_name] , identifier[content_length] , identifier[content_settings] = keyword[None] , identifier[metadata] = keyword[None] , identifier[timeout] = keyword[None] ): literal[string] identifier[_validate_not_none] ( literal[string] , identifier[share_name] ) identifier[_validate_not_none] ( literal[string] , identifier[file_name] ) identifier[_validate_not_none] ( literal[string] , identifier[content_length] ) identifier[request] = identifier[HTTPRequest] () identifier[request] . identifier[method] = literal[string] identifier[request] . identifier[host_locations] = identifier[self] . identifier[_get_host_locations] () identifier[request] . identifier[path] = identifier[_get_path] ( identifier[share_name] , identifier[directory_name] , identifier[file_name] ) identifier[request] . identifier[query] ={ literal[string] : identifier[_int_to_str] ( identifier[timeout] )} identifier[request] . identifier[headers] ={ literal[string] : identifier[_to_str] ( identifier[content_length] ), literal[string] : literal[string] } identifier[_add_metadata_headers] ( identifier[metadata] , identifier[request] ) keyword[if] identifier[content_settings] keyword[is] keyword[not] keyword[None] : identifier[request] . identifier[headers] . identifier[update] ( identifier[content_settings] . identifier[_to_headers] ()) identifier[self] . identifier[_perform_request] ( identifier[request] )
def create_file(self, share_name, directory_name, file_name, content_length, content_settings=None, metadata=None, timeout=None): """ Creates a new file. See create_file_from_* for high level functions that handle the creation and upload of large files with automatic chunking and progress notifications. :param str share_name: Name of existing share. :param str directory_name: The path to the directory. :param str file_name: Name of file to create or update. :param int content_length: Length of the file in bytes. :param ~azure.storage.file.models.ContentSettings content_settings: ContentSettings object used to set file properties. :param metadata: Name-value pairs associated with the file as metadata. :type metadata: dict(str, str) :param int timeout: The timeout parameter is expressed in seconds. """ _validate_not_none('share_name', share_name) _validate_not_none('file_name', file_name) _validate_not_none('content_length', content_length) request = HTTPRequest() request.method = 'PUT' request.host_locations = self._get_host_locations() request.path = _get_path(share_name, directory_name, file_name) request.query = {'timeout': _int_to_str(timeout)} request.headers = {'x-ms-content-length': _to_str(content_length), 'x-ms-type': 'file'} _add_metadata_headers(metadata, request) if content_settings is not None: request.headers.update(content_settings._to_headers()) # depends on [control=['if'], data=['content_settings']] self._perform_request(request)
def decode_keys(store, encoding='utf-8'):
    """
    If a dictionary has keys that are bytes decode them to a str.

    Parameters
    ---------
    store : dict
      Dictionary with data
    encoding : str
      Encoding used to decode byte keys, 'utf-8' by default

    Returns
    ---------
    result : dict
      Values are untouched but keys that were bytes
      are decoded to str using the given encoding

    Example
    -----------
    In [1]: d
    Out[1]: {1020: 'nah', b'hi': 'stuff'}

    In [2]: trimesh.util.decode_keys(d)
    Out[2]: {1020: 'nah', 'hi': 'stuff'}
    """
    # snapshot the keys: the dict is mutated while we iterate
    keys = list(store.keys())
    for key in keys:
        if hasattr(key, 'decode'):
            decoded = key.decode(encoding)
            if key != decoded:
                store[decoded] = store[key]
                store.pop(key)
    return store
def function[decode_keys, parameter[store, encoding]]: constant[ If a dictionary has keys that are bytes decode them to a str. Parameters --------- store : dict Dictionary with data Returns --------- result : dict Values are untouched but keys that were bytes are converted to ASCII strings. Example ----------- In [1]: d Out[1]: {1020: 'nah', b'hi': 'stuff'} In [2]: trimesh.util.decode_keys(d) Out[2]: {1020: 'nah', 'hi': 'stuff'} ] variable[keys] assign[=] call[name[store].keys, parameter[]] for taget[name[key]] in starred[name[keys]] begin[:] if call[name[hasattr], parameter[name[key], constant[decode]]] begin[:] variable[decoded] assign[=] call[name[key].decode, parameter[name[encoding]]] if compare[name[key] not_equal[!=] name[decoded]] begin[:] call[name[store]][call[name[key].decode, parameter[name[encoding]]]] assign[=] call[name[store]][name[key]] call[name[store].pop, parameter[name[key]]] return[name[store]]
keyword[def] identifier[decode_keys] ( identifier[store] , identifier[encoding] = literal[string] ): literal[string] identifier[keys] = identifier[store] . identifier[keys] () keyword[for] identifier[key] keyword[in] identifier[keys] : keyword[if] identifier[hasattr] ( identifier[key] , literal[string] ): identifier[decoded] = identifier[key] . identifier[decode] ( identifier[encoding] ) keyword[if] identifier[key] != identifier[decoded] : identifier[store] [ identifier[key] . identifier[decode] ( identifier[encoding] )]= identifier[store] [ identifier[key] ] identifier[store] . identifier[pop] ( identifier[key] ) keyword[return] identifier[store]
def decode_keys(store, encoding='utf-8'):
    """
    If a dictionary has keys that are bytes decode them to a str.

    Parameters
    ---------
    store : dict
      Dictionary with data
    encoding : str
      Encoding used to decode byte keys, 'utf-8' by default

    Returns
    ---------
    result : dict
      Values are untouched but keys that were bytes
      are decoded to str using the given encoding

    Example
    -----------
    In [1]: d
    Out[1]: {1020: 'nah', b'hi': 'stuff'}

    In [2]: trimesh.util.decode_keys(d)
    Out[2]: {1020: 'nah', 'hi': 'stuff'}
    """
    # snapshot the keys: the dict is mutated while we iterate
    keys = list(store.keys())
    for key in keys:
        if hasattr(key, 'decode'):
            decoded = key.decode(encoding)
            if key != decoded:
                store[decoded] = store[key]
                store.pop(key) # depends on [control=['if'], data=['key']] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['key']]
    return store
def distance_inches_ping(self): """ Measurement of the distance detected by the sensor, in inches. The sensor will take a single measurement then stop broadcasting. If you use this property too frequently (e.g. every 100msec), the sensor will sometimes lock up and writing to the mode attribute will return an error. A delay of 250msec between each usage seems sufficient to keep the sensor from locking up. """ # This mode is special; setting the mode causes the sensor to send out # a "ping", but the mode isn't actually changed. self.mode = self.MODE_US_SI_IN return self.value(0) * self._scale('US_DIST_IN')
def function[distance_inches_ping, parameter[self]]: constant[ Measurement of the distance detected by the sensor, in inches. The sensor will take a single measurement then stop broadcasting. If you use this property too frequently (e.g. every 100msec), the sensor will sometimes lock up and writing to the mode attribute will return an error. A delay of 250msec between each usage seems sufficient to keep the sensor from locking up. ] name[self].mode assign[=] name[self].MODE_US_SI_IN return[binary_operation[call[name[self].value, parameter[constant[0]]] * call[name[self]._scale, parameter[constant[US_DIST_IN]]]]]
keyword[def] identifier[distance_inches_ping] ( identifier[self] ): literal[string] identifier[self] . identifier[mode] = identifier[self] . identifier[MODE_US_SI_IN] keyword[return] identifier[self] . identifier[value] ( literal[int] )* identifier[self] . identifier[_scale] ( literal[string] )
def distance_inches_ping(self): """ Measurement of the distance detected by the sensor, in inches. The sensor will take a single measurement then stop broadcasting. If you use this property too frequently (e.g. every 100msec), the sensor will sometimes lock up and writing to the mode attribute will return an error. A delay of 250msec between each usage seems sufficient to keep the sensor from locking up. """ # This mode is special; setting the mode causes the sensor to send out # a "ping", but the mode isn't actually changed. self.mode = self.MODE_US_SI_IN return self.value(0) * self._scale('US_DIST_IN')
def url(self): """Returns the public URL for the given key.""" if self.is_public: return '{0}/{1}/{2}'.format( self.bucket._boto_s3.meta.client.meta.endpoint_url, self.bucket.name, self.name ) else: raise ValueError('{0!r} does not have the public-read ACL set. ' 'Use the make_public() method to allow for ' 'public URL sharing.'.format(self.name))
def function[url, parameter[self]]: constant[Returns the public URL for the given key.] if name[self].is_public begin[:] return[call[constant[{0}/{1}/{2}].format, parameter[name[self].bucket._boto_s3.meta.client.meta.endpoint_url, name[self].bucket.name, name[self].name]]]
keyword[def] identifier[url] ( identifier[self] ): literal[string] keyword[if] identifier[self] . identifier[is_public] : keyword[return] literal[string] . identifier[format] ( identifier[self] . identifier[bucket] . identifier[_boto_s3] . identifier[meta] . identifier[client] . identifier[meta] . identifier[endpoint_url] , identifier[self] . identifier[bucket] . identifier[name] , identifier[self] . identifier[name] ) keyword[else] : keyword[raise] identifier[ValueError] ( literal[string] literal[string] literal[string] . identifier[format] ( identifier[self] . identifier[name] ))
def url(self): """Returns the public URL for the given key.""" if self.is_public: return '{0}/{1}/{2}'.format(self.bucket._boto_s3.meta.client.meta.endpoint_url, self.bucket.name, self.name) # depends on [control=['if'], data=[]] else: raise ValueError('{0!r} does not have the public-read ACL set. Use the make_public() method to allow for public URL sharing.'.format(self.name))
def square_batch_region(data, region, bam_files, vrn_files, out_file): """Perform squaring of a batch in a supplied region, with input BAMs """ from bcbio.variation import sentieon, strelka2 if not utils.file_exists(out_file): jointcaller = tz.get_in(("config", "algorithm", "jointcaller"), data) if jointcaller in ["%s-joint" % x for x in SUPPORTED["general"]]: _square_batch_bcbio_variation(data, region, bam_files, vrn_files, out_file, "square") elif jointcaller in ["%s-merge" % x for x in SUPPORTED["general"]]: _square_batch_bcbio_variation(data, region, bam_files, vrn_files, out_file, "merge") elif jointcaller in ["%s-joint" % x for x in SUPPORTED["gatk"]]: gatkjoint.run_region(data, region, vrn_files, out_file) elif jointcaller in ["%s-joint" % x for x in SUPPORTED["gvcf"]]: strelka2.run_gvcfgenotyper(data, region, vrn_files, out_file) elif jointcaller in ["%s-joint" % x for x in SUPPORTED["sentieon"]]: sentieon.run_gvcftyper(vrn_files, out_file, region, data) else: raise ValueError("Unexpected joint calling approach: %s." % jointcaller) if region: data["region"] = region data = _fix_orig_vcf_refs(data) data["vrn_file"] = out_file return [data]
def function[square_batch_region, parameter[data, region, bam_files, vrn_files, out_file]]: constant[Perform squaring of a batch in a supplied region, with input BAMs ] from relative_module[bcbio.variation] import module[sentieon], module[strelka2] if <ast.UnaryOp object at 0x7da1b1833700> begin[:] variable[jointcaller] assign[=] call[name[tz].get_in, parameter[tuple[[<ast.Constant object at 0x7da1b1830610>, <ast.Constant object at 0x7da1b1831570>, <ast.Constant object at 0x7da1b1832800>]], name[data]]] if compare[name[jointcaller] in <ast.ListComp object at 0x7da1b1833f70>] begin[:] call[name[_square_batch_bcbio_variation], parameter[name[data], name[region], name[bam_files], name[vrn_files], name[out_file], constant[square]]] if name[region] begin[:] call[name[data]][constant[region]] assign[=] name[region] variable[data] assign[=] call[name[_fix_orig_vcf_refs], parameter[name[data]]] call[name[data]][constant[vrn_file]] assign[=] name[out_file] return[list[[<ast.Name object at 0x7da1b1833520>]]]
keyword[def] identifier[square_batch_region] ( identifier[data] , identifier[region] , identifier[bam_files] , identifier[vrn_files] , identifier[out_file] ): literal[string] keyword[from] identifier[bcbio] . identifier[variation] keyword[import] identifier[sentieon] , identifier[strelka2] keyword[if] keyword[not] identifier[utils] . identifier[file_exists] ( identifier[out_file] ): identifier[jointcaller] = identifier[tz] . identifier[get_in] (( literal[string] , literal[string] , literal[string] ), identifier[data] ) keyword[if] identifier[jointcaller] keyword[in] [ literal[string] % identifier[x] keyword[for] identifier[x] keyword[in] identifier[SUPPORTED] [ literal[string] ]]: identifier[_square_batch_bcbio_variation] ( identifier[data] , identifier[region] , identifier[bam_files] , identifier[vrn_files] , identifier[out_file] , literal[string] ) keyword[elif] identifier[jointcaller] keyword[in] [ literal[string] % identifier[x] keyword[for] identifier[x] keyword[in] identifier[SUPPORTED] [ literal[string] ]]: identifier[_square_batch_bcbio_variation] ( identifier[data] , identifier[region] , identifier[bam_files] , identifier[vrn_files] , identifier[out_file] , literal[string] ) keyword[elif] identifier[jointcaller] keyword[in] [ literal[string] % identifier[x] keyword[for] identifier[x] keyword[in] identifier[SUPPORTED] [ literal[string] ]]: identifier[gatkjoint] . identifier[run_region] ( identifier[data] , identifier[region] , identifier[vrn_files] , identifier[out_file] ) keyword[elif] identifier[jointcaller] keyword[in] [ literal[string] % identifier[x] keyword[for] identifier[x] keyword[in] identifier[SUPPORTED] [ literal[string] ]]: identifier[strelka2] . identifier[run_gvcfgenotyper] ( identifier[data] , identifier[region] , identifier[vrn_files] , identifier[out_file] ) keyword[elif] identifier[jointcaller] keyword[in] [ literal[string] % identifier[x] keyword[for] identifier[x] keyword[in] identifier[SUPPORTED] [ literal[string] ]]: identifier[sentieon] . identifier[run_gvcftyper] ( identifier[vrn_files] , identifier[out_file] , identifier[region] , identifier[data] ) keyword[else] : keyword[raise] identifier[ValueError] ( literal[string] % identifier[jointcaller] ) keyword[if] identifier[region] : identifier[data] [ literal[string] ]= identifier[region] identifier[data] = identifier[_fix_orig_vcf_refs] ( identifier[data] ) identifier[data] [ literal[string] ]= identifier[out_file] keyword[return] [ identifier[data] ]
def square_batch_region(data, region, bam_files, vrn_files, out_file): """Perform squaring of a batch in a supplied region, with input BAMs """ from bcbio.variation import sentieon, strelka2 if not utils.file_exists(out_file): jointcaller = tz.get_in(('config', 'algorithm', 'jointcaller'), data) if jointcaller in ['%s-joint' % x for x in SUPPORTED['general']]: _square_batch_bcbio_variation(data, region, bam_files, vrn_files, out_file, 'square') # depends on [control=['if'], data=[]] elif jointcaller in ['%s-merge' % x for x in SUPPORTED['general']]: _square_batch_bcbio_variation(data, region, bam_files, vrn_files, out_file, 'merge') # depends on [control=['if'], data=[]] elif jointcaller in ['%s-joint' % x for x in SUPPORTED['gatk']]: gatkjoint.run_region(data, region, vrn_files, out_file) # depends on [control=['if'], data=[]] elif jointcaller in ['%s-joint' % x for x in SUPPORTED['gvcf']]: strelka2.run_gvcfgenotyper(data, region, vrn_files, out_file) # depends on [control=['if'], data=[]] elif jointcaller in ['%s-joint' % x for x in SUPPORTED['sentieon']]: sentieon.run_gvcftyper(vrn_files, out_file, region, data) # depends on [control=['if'], data=[]] else: raise ValueError('Unexpected joint calling approach: %s.' % jointcaller) # depends on [control=['if'], data=[]] if region: data['region'] = region # depends on [control=['if'], data=[]] data = _fix_orig_vcf_refs(data) data['vrn_file'] = out_file return [data]
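The elif chain above is a suffix-based dispatch over families of callers: each branch tests membership in a list built by appending "-joint" or "-merge" to names from a SUPPORTED table. A small sketch of the same pattern, with invented SUPPORTED contents (not bcbio's actual table):

# Illustrative only: the caller names and families below are made up.
SUPPORTED = {'general': ['caller_a', 'caller_b'],
             'gatk': ['caller_c'],
             'gvcf': ['caller_d'],
             'sentieon': ['caller_e']}

def classify(jointcaller):
    for family, callers in SUPPORTED.items():
        if jointcaller in ['%s-joint' % c for c in callers]:
            return family, 'joint'
        # only the general family also supports a merge variant
        if family == 'general' and jointcaller in ['%s-merge' % c for c in callers]:
            return family, 'merge'
    raise ValueError('Unexpected joint calling approach: %s.' % jointcaller)

print(classify('caller_a-joint'))   # -> ('general', 'joint')
print(classify('caller_a-merge'))   # -> ('general', 'merge')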
def polylinesFromBinImage(img, minimum_cluster_size=6,
                          remove_small_obj_size=3,
                          reconnect_size=3,
                          max_n_contours=None, max_len_contour=None,
                          copy=True):
    '''
    return a list of arrays of un-branching contours

    img -> (boolean) array

    optional:
    ---------
    minimum_cluster_size -> minimum number of pixels connected together to build a contour

    ##search_kernel_size -> TODO
    ##min_search_kernel_moment -> TODO

    numeric:
    -------------
    max_n_contours -> maximum number of possible contours in img
    max_len_contour -> maximum contour length
    '''
    assert minimum_cluster_size > 1
    assert reconnect_size % 2, 'reconnect_size needs to be odd'
    # assert search_kernel_size == 0 or search_kernel_size > 2 and search_kernel_size%2, 'kernel size needs to be odd'

    # assume array size parameters, if not given:
    if max_n_contours is None:
        max_n_contours = max(img.shape)
    if max_len_contour is None:
        max_len_contour = sum(img.shape[:2])
    # array containing coord. of all contours:
    contours = np.zeros(shape=(max_n_contours, max_len_contour, 2),
                        dtype=np.uint16)  # if not search_kernel_size else np.float32)

    if img.dtype != bool:
        img = img.astype(bool)
    elif copy:
        img = img.copy()

    if remove_small_obj_size:
        remove_small_objects(img, remove_small_obj_size,
                             connectivity=2, in_place=True)
    if reconnect_size:
        # remove gaps
        maximum_filter(img, reconnect_size, output=img)
        # reduce contour width to 1
        img = skeletonize(img)

    n_contours = _populateContoursArray(img, contours, minimum_cluster_size)
    contours = contours[:n_contours]

    l = []
    for c in contours:
        ind = np.zeros(shape=len(c), dtype=bool)
        _getValidInd(c, ind)
        # remove all empty spaces:
        l.append(c[ind])
    return l
def function[polylinesFromBinImage, parameter[img, minimum_cluster_size, remove_small_obj_size, reconnect_size, max_n_contours, max_len_contour, copy]]: constant[ return a list of arrays of un-branching contours img -> (boolean) array optional: --------- minimum_cluster_size -> minimum number of pixels connected together to build a contour ##search_kernel_size -> TODO ##min_search_kernel_moment -> TODO numeric: ------------- max_n_contours -> maximum number of possible contours in img max_len_contour -> maximum contour length ] assert[compare[name[minimum_cluster_size] greater[>] constant[1]]] assert[binary_operation[name[reconnect_size] <ast.Mod object at 0x7da2590d6920> constant[2]]] if compare[name[max_n_contours] is constant[None]] begin[:] variable[max_n_contours] assign[=] call[name[max], parameter[name[img].shape]] if compare[name[max_len_contour] is constant[None]] begin[:] variable[max_len_contour] assign[=] call[name[sum], parameter[call[name[img].shape][<ast.Slice object at 0x7da1b2347760>]]] variable[contours] assign[=] call[name[np].zeros, parameter[]] if compare[name[img].dtype not_equal[!=] name[bool]] begin[:] variable[img] assign[=] call[name[img].astype, parameter[name[bool]]] if name[remove_small_obj_size] begin[:] call[name[remove_small_objects], parameter[name[img], name[remove_small_obj_size]]] if name[reconnect_size] begin[:] call[name[maximum_filter], parameter[name[img], name[reconnect_size]]] variable[img] assign[=] call[name[skeletonize], parameter[name[img]]] variable[n_contours] assign[=] call[name[_populateContoursArray], parameter[name[img], name[contours], name[minimum_cluster_size]]] variable[contours] assign[=] call[name[contours]][<ast.Slice object at 0x7da18fe93460>] variable[l] assign[=] list[[]] for taget[name[c]] in starred[name[contours]] begin[:] variable[ind] assign[=] call[name[np].zeros, parameter[]] call[name[_getValidInd], parameter[name[c], name[ind]]] call[name[l].append, parameter[call[name[c]][name[ind]]]] return[name[l]]
keyword[def] identifier[polylinesFromBinImage] ( identifier[img] , identifier[minimum_cluster_size] = literal[int] , identifier[remove_small_obj_size] = literal[int] , identifier[reconnect_size] = literal[int] , identifier[max_n_contours] = keyword[None] , identifier[max_len_contour] = keyword[None] , identifier[copy] = keyword[True] ): literal[string] keyword[assert] identifier[minimum_cluster_size] > literal[int] keyword[assert] identifier[reconnect_size] % literal[int] , literal[string] keyword[if] identifier[max_n_contours] keyword[is] keyword[None] : identifier[max_n_contours] = identifier[max] ( identifier[img] . identifier[shape] ) keyword[if] identifier[max_len_contour] keyword[is] keyword[None] : identifier[max_len_contour] = identifier[sum] ( identifier[img] . identifier[shape] [: literal[int] ]) identifier[contours] = identifier[np] . identifier[zeros] ( identifier[shape] =( identifier[max_n_contours] , identifier[max_len_contour] , literal[int] ), identifier[dtype] = identifier[np] . identifier[uint16] ) keyword[if] identifier[img] . identifier[dtype] != identifier[bool] : identifier[img] = identifier[img] . identifier[astype] ( identifier[bool] ) keyword[elif] identifier[copy] : identifier[img] = identifier[img] . identifier[copy] () keyword[if] identifier[remove_small_obj_size] : identifier[remove_small_objects] ( identifier[img] , identifier[remove_small_obj_size] , identifier[connectivity] = literal[int] , identifier[in_place] = keyword[True] ) keyword[if] identifier[reconnect_size] : identifier[maximum_filter] ( identifier[img] , identifier[reconnect_size] , identifier[output] = identifier[img] ) identifier[img] = identifier[skeletonize] ( identifier[img] ) identifier[n_contours] = identifier[_populateContoursArray] ( identifier[img] , identifier[contours] , identifier[minimum_cluster_size] ) identifier[contours] = identifier[contours] [: identifier[n_contours] ] identifier[l] =[] keyword[for] identifier[c] keyword[in] identifier[contours] : identifier[ind] = identifier[np] . identifier[zeros] ( identifier[shape] = identifier[len] ( identifier[c] ), identifier[dtype] = identifier[bool] ) identifier[_getValidInd] ( identifier[c] , identifier[ind] ) identifier[l] . identifier[append] ( identifier[c] [ identifier[ind] ]) keyword[return] identifier[l]
def polylinesFromBinImage(img, minimum_cluster_size=6, remove_small_obj_size=3, reconnect_size=3, max_n_contours=None, max_len_contour=None, copy=True):
    """
    return a list of arrays of un-branching contours

    img -> (boolean) array

    optional:
    ---------
    minimum_cluster_size -> minimum number of pixels connected together to build a contour

    ##search_kernel_size -> TODO
    ##min_search_kernel_moment -> TODO

    numeric:
    -------------
    max_n_contours -> maximum number of possible contours in img
    max_len_contour -> maximum contour length
    """
    assert minimum_cluster_size > 1
    assert reconnect_size % 2, 'reconnect_size needs to be odd'
    # assert search_kernel_size == 0 or search_kernel_size > 2 and search_kernel_size%2, 'kernel size needs to be odd'
    # assume array size parameters, if not given:
    if max_n_contours is None:
        max_n_contours = max(img.shape) # depends on [control=['if'], data=['max_n_contours']]
    if max_len_contour is None:
        max_len_contour = sum(img.shape[:2]) # depends on [control=['if'], data=['max_len_contour']]
    # array containing coord. of all contours:
    contours = np.zeros(shape=(max_n_contours, max_len_contour, 2), dtype=np.uint16) # if not search_kernel_size else np.float32)
    if img.dtype != bool:
        img = img.astype(bool) # depends on [control=['if'], data=[]]
    elif copy:
        img = img.copy() # depends on [control=['if'], data=[]]
    if remove_small_obj_size:
        remove_small_objects(img, remove_small_obj_size, connectivity=2, in_place=True) # depends on [control=['if'], data=[]]
    if reconnect_size:
        # remove gaps
        maximum_filter(img, reconnect_size, output=img)
        # reduce contour width to 1
        img = skeletonize(img) # depends on [control=['if'], data=[]]
    n_contours = _populateContoursArray(img, contours, minimum_cluster_size)
    contours = contours[:n_contours]
    l = []
    for c in contours:
        ind = np.zeros(shape=len(c), dtype=bool)
        _getValidInd(c, ind)
        # remove all empty spaces:
        l.append(c[ind]) # depends on [control=['for'], data=['c']]
    return l
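A quick synthetic smoke test for the function above. It assumes the module defining polylinesFromBinImage (together with its _populateContoursArray and _getValidInd helpers, plus numpy, scipy, and scikit-image) is importable; the import path and the drawn segments are illustrative:

import numpy as np
# from your_module import polylinesFromBinImage  # hypothetical path: wherever the function above lives

# two thin, non-touching strokes on an otherwise empty boolean image
img = np.zeros((64, 64), dtype=bool)
img[10, 5:40] = True    # horizontal segment
img[20:50, 30] = True   # vertical segment

polylines = polylinesFromBinImage(img, minimum_cluster_size=6)
for p in polylines:
    print(len(p), 'points; endpoints:', p[0], p[-1])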