Dataset columns (all string-valued): code (lengths 75 to 104k), code_sememe (lengths 47 to 309k), token_type (lengths 215 to 214k), code_dependency (lengths 75 to 155k). Each record below repeats these four fields in that order.
def in_order(self) -> Iterator["BSP"]:
    """Iterate over this BSP's hierarchy in order.

    .. versionadded:: 8.3
    """
    if self.children:
        yield from self.children[0].in_order()
        yield self
        yield from self.children[1].in_order()
    else:
        yield self
def function[in_order, parameter[self]]: constant[Iterate over this BSP's hierarchy in order. .. versionadded:: 8.3 ] if name[self].children begin[:] <ast.YieldFrom object at 0x7da18eb55b40> <ast.Yield object at 0x7da18eb55bd0> <ast.YieldFrom object at 0x7da18eb55cf0>
keyword[def] identifier[in_order] ( identifier[self] )-> identifier[Iterator] [ literal[string] ]: literal[string] keyword[if] identifier[self] . identifier[children] : keyword[yield] keyword[from] identifier[self] . identifier[children] [ literal[int] ]. identifier[in_order] () keyword[yield] identifier[self] keyword[yield] keyword[from] identifier[self] . identifier[children] [ literal[int] ]. identifier[in_order] () keyword[else] : keyword[yield] identifier[self]
def in_order(self) -> Iterator['BSP']: """Iterate over this BSP's hierarchy in order. .. versionadded:: 8.3 """ if self.children: yield from self.children[0].in_order() yield self yield from self.children[1].in_order() # depends on [control=['if'], data=[]] else: yield self
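A minimal usage sketch of the in-order traversal above; the Node class is a hypothetical stand-in for the real BSP class and only assumes a children sequence that is either empty or holds exactly two subtrees.

class Node:
    """Stand-in for BSP: a node with either zero or two children."""

    def __init__(self, label, children=None):
        self.label = label
        self.children = children or []

    def in_order(self):
        # Same shape as BSP.in_order: left subtree, then self, then right.
        if self.children:
            yield from self.children[0].in_order()
            yield self
            yield from self.children[1].in_order()
        else:
            yield self

root = Node("root", [Node("left"), Node("right")])
print([n.label for n in root.in_order()])   # ['left', 'root', 'right']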
def reset(self):
    """
    Empties all internal storage containers
    """
    self.X = []
    self.Y = []
    self.w = []
    self.Xnorm = []
    self.graph_rep = None
def function[reset, parameter[self]]: constant[ Empties all internal storage containers ] name[self].X assign[=] list[[]] name[self].Y assign[=] list[[]] name[self].w assign[=] list[[]] name[self].Xnorm assign[=] list[[]] name[self].graph_rep assign[=] constant[None]
keyword[def] identifier[reset] ( identifier[self] ): literal[string] identifier[self] . identifier[X] =[] identifier[self] . identifier[Y] =[] identifier[self] . identifier[w] =[] identifier[self] . identifier[Xnorm] =[] identifier[self] . identifier[graph_rep] = keyword[None]
def reset(self): """ Empties all internal storage containers """ self.X = [] self.Y = [] self.w = [] self.Xnorm = [] self.graph_rep = None
def stored_bind(self, instance):
    """Bind an instance to this Pangler, using the bound Pangler store.

    This method functions identically to `bind`, except that it might
    return a Pangler which was previously bound to the provided instance.
    """
    if self.id is None:
        return self.bind(instance)
    store = self._bound_pangler_store.setdefault(instance, {})
    p = store.get(self.id)
    if p is None:
        p = store[self.id] = self.bind(instance)
    return p
def function[stored_bind, parameter[self, instance]]: constant[Bind an instance to this Pangler, using the bound Pangler store. This method functions identically to `bind`, except that it might return a Pangler which was previously bound to the provided instance. ] if compare[name[self].id is constant[None]] begin[:] return[call[name[self].bind, parameter[name[instance]]]] variable[store] assign[=] call[name[self]._bound_pangler_store.setdefault, parameter[name[instance], dictionary[[], []]]] variable[p] assign[=] call[name[store].get, parameter[name[self].id]] if compare[name[p] is constant[None]] begin[:] variable[p] assign[=] call[name[self].bind, parameter[name[instance]]] return[name[p]]
keyword[def] identifier[stored_bind] ( identifier[self] , identifier[instance] ): literal[string] keyword[if] identifier[self] . identifier[id] keyword[is] keyword[None] : keyword[return] identifier[self] . identifier[bind] ( identifier[instance] ) identifier[store] = identifier[self] . identifier[_bound_pangler_store] . identifier[setdefault] ( identifier[instance] ,{}) identifier[p] = identifier[store] . identifier[get] ( identifier[self] . identifier[id] ) keyword[if] identifier[p] keyword[is] keyword[None] : identifier[p] = identifier[store] [ identifier[self] . identifier[id] ]= identifier[self] . identifier[bind] ( identifier[instance] ) keyword[return] identifier[p]
def stored_bind(self, instance): """Bind an instance to this Pangler, using the bound Pangler store. This method functions identically to `bind`, except that it might return a Pangler which was previously bound to the provided instance. """ if self.id is None: return self.bind(instance) # depends on [control=['if'], data=[]] store = self._bound_pangler_store.setdefault(instance, {}) p = store.get(self.id) if p is None: p = store[self.id] = self.bind(instance) # depends on [control=['if'], data=['p']] return p
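A sketch of the caching behaviour, assuming a much-reduced Pangler whose bind() simply produces a fresh bound copy; the real class does more, and the store here is a per-object dict rather than whatever the library actually uses.

class Pangler:
    """Reduced stand-in showing the stored-bind memoization pattern."""

    def __init__(self, id=None):
        self.id = id
        self._bound_pangler_store = {}

    def bind(self, instance):
        bound = Pangler(self.id)
        bound.instance = instance
        return bound

    def stored_bind(self, instance):
        if self.id is None:
            return self.bind(instance)
        # One sub-dict per instance, keyed by this Pangler's id.
        store = self._bound_pangler_store.setdefault(instance, {})
        p = store.get(self.id)
        if p is None:
            p = store[self.id] = self.bind(instance)
        return p

class Target:
    pass

p = Pangler(id="on_event")
t = Target()
assert p.stored_bind(t) is p.stored_bind(t)   # cached: same object back
assert p.bind(t) is not p.bind(t)             # plain bind: fresh each time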
def get_section_data(self, name):
    """Get the data of the section."""
    logging.debug(_('Obtaining PE section: %s'), name)
    for section in self.binary.sections:
        if section.Name.rstrip(b'\x00') == name:
            return section.get_data()
    return b''
def function[get_section_data, parameter[self, name]]: constant[Get the data of the section.] call[name[logging].debug, parameter[call[name[_], parameter[constant[Obtaining PE section: %s]]], name[name]]] for taget[name[section]] in starred[name[self].binary.sections] begin[:] if compare[call[name[section].Name.rstrip, parameter[constant[b'\x00']]] equal[==] name[name]] begin[:] return[call[name[section].get_data, parameter[]]] return[constant[b'']]
keyword[def] identifier[get_section_data] ( identifier[self] , identifier[name] ): literal[string] identifier[logging] . identifier[debug] ( identifier[_] ( literal[string] ), identifier[name] ) keyword[for] identifier[section] keyword[in] identifier[self] . identifier[binary] . identifier[sections] : keyword[if] identifier[section] . identifier[Name] . identifier[rstrip] ( literal[string] )== identifier[name] : keyword[return] identifier[section] . identifier[get_data] () keyword[return] literal[string]
def get_section_data(self, name): """Get the data of the section.""" logging.debug(_('Obtaining PE section: %s'), name) for section in self.binary.sections: if section.Name.rstrip(b'\x00') == name: return section.get_data() # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['section']] return b''
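The self.binary attribute above looks like a pefile.PE object (an assumption based on the sections API); a standalone sketch against a file on disk, with the original's gettext hook (_) dropped:

import pefile  # third-party; assumed because the section API matches pefile's

def section_data(path, name):
    """Find a PE section by name and return its raw bytes."""
    pe = pefile.PE(path)
    for section in pe.sections:
        # Section names are NUL-padded fixed-width fields in the PE header.
        if section.Name.rstrip(b'\x00') == name:
            return section.get_data()
    return b''

# data = section_data('example.exe', b'.text')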
def download_object(self, object_name):
    """
    Download an object.

    :param str object_name: The object to fetch.
    """
    return self._client.download_object(
        self._instance, self.name, object_name)
def function[download_object, parameter[self, object_name]]: constant[ Download an object. :param str object_name: The object to fetch. ] return[call[name[self]._client.download_object, parameter[name[self]._instance, name[self].name, name[object_name]]]]
keyword[def] identifier[download_object] ( identifier[self] , identifier[object_name] ): literal[string] keyword[return] identifier[self] . identifier[_client] . identifier[download_object] ( identifier[self] . identifier[_instance] , identifier[self] . identifier[name] , identifier[object_name] )
def download_object(self, object_name): """ Download an object. :param str object_name: The object to fetch. """ return self._client.download_object(self._instance, self.name, object_name)
def remove(self, item):
    """Remove an item from the list

    :param item: The item to remove from the list.
    :raises ValueError: If the item is not present in the list.
    """
    if item not in self:
        raise ValueError('objectlist.remove(item) failed, item not in list')
    item_id = int(self._view_path_for(item))
    giter = self._iter_for(item)
    del self[giter]
    self.emit('item-removed', item, item_id)
def function[remove, parameter[self, item]]: constant[Remove an item from the list :param item: The item to remove from the list. :raises ValueError: If the item is not present in the list. ] if compare[name[item] <ast.NotIn object at 0x7da2590d7190> name[self]] begin[:] <ast.Raise object at 0x7da2047eb850> variable[item_id] assign[=] call[name[int], parameter[call[name[self]._view_path_for, parameter[name[item]]]]] variable[giter] assign[=] call[name[self]._iter_for, parameter[name[item]]] <ast.Delete object at 0x7da2047eb460> call[name[self].emit, parameter[constant[item-removed], name[item], name[item_id]]]
keyword[def] identifier[remove] ( identifier[self] , identifier[item] ): literal[string] keyword[if] identifier[item] keyword[not] keyword[in] identifier[self] : keyword[raise] identifier[ValueError] ( literal[string] ) identifier[item_id] = identifier[int] ( identifier[self] . identifier[_view_path_for] ( identifier[item] )) identifier[giter] = identifier[self] . identifier[_iter_for] ( identifier[item] ) keyword[del] identifier[self] [ identifier[giter] ] identifier[self] . identifier[emit] ( literal[string] , identifier[item] , identifier[item_id] )
def remove(self, item): """Remove an item from the list :param item: The item to remove from the list. :raises ValueError: If the item is not present in the list. """ if item not in self: raise ValueError('objectlist.remove(item) failed, item not in list') # depends on [control=['if'], data=[]] item_id = int(self._view_path_for(item)) giter = self._iter_for(item) del self[giter] self.emit('item-removed', item, item_id)
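The remove-then-emit pattern above depends on GObject signal machinery; a plain-Python sketch of the same idea, where connect/emit are hypothetical simplifications of the real signal system:

class ObjectListSketch:
    """Stand-in for the signal-emitting list."""

    def __init__(self):
        self._items = []
        self._handlers = {}          # signal name -> list of callbacks

    def connect(self, signal, callback):
        self._handlers.setdefault(signal, []).append(callback)

    def emit(self, signal, *args):
        for callback in self._handlers.get(signal, []):
            callback(*args)

    def __contains__(self, item):
        return item in self._items

    def append(self, item):
        self._items.append(item)

    def remove(self, item):
        if item not in self:
            raise ValueError('remove(item) failed, item not in list')
        item_id = self._items.index(item)
        self._items.remove(item)
        self.emit('item-removed', item, item_id)

ol = ObjectListSketch()
ol.connect('item-removed', lambda item, item_id: print('removed', item, 'at', item_id))
ol.append('a')
ol.remove('a')   # prints: removed a at 0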
def check_compatibility(self, other, check_edges=False, precision=1E-7):
    """
    Test whether two histograms are considered compatible by the number of
    dimensions, number of bins along each axis, and optionally the bin
    edges.

    Parameters
    ----------
    other : histogram
        A rootpy histogram
    check_edges : bool, optional (default=False)
        If True then also check that the bin edges are equal within the
        specified precision.
    precision : float, optional (default=1E-7)
        The value below which differences between floats are treated as
        nil when comparing bin edges.

    Raises
    ------
    TypeError
        If the histogram dimensionalities do not match
    ValueError
        If the histogram sizes, number of bins along an axis, or
        optionally the bin edges do not match
    """
    if self.GetDimension() != other.GetDimension():
        raise TypeError("histogram dimensionalities do not match")
    if len(self) != len(other):
        raise ValueError("histogram sizes do not match")
    for axis in range(self.GetDimension()):
        if self.nbins(axis=axis) != other.nbins(axis=axis):
            raise ValueError(
                "numbers of bins along axis {0:d} do not match".format(
                    axis))
    if check_edges:
        for axis in range(self.GetDimension()):
            if not all([abs(l - r) < precision
                        for l, r in zip(self._edges(axis),
                                        other._edges(axis))]):
                raise ValueError(
                    "edges do not match along axis {0:d}".format(axis))
def function[check_compatibility, parameter[self, other, check_edges, precision]]: constant[ Test whether two histograms are considered compatible by the number of dimensions, number of bins along each axis, and optionally the bin edges. Parameters ---------- other : histogram A rootpy histogram check_edges : bool, optional (default=False) If True then also check that the bin edges are equal within the specified precision. precision : float, optional (default=1E-7) The value below which differences between floats are treated as nil when comparing bin edges. Raises ------ TypeError If the histogram dimensionalities do not match ValueError If the histogram sizes, number of bins along an axis, or optionally the bin edges do not match ] if compare[call[name[self].GetDimension, parameter[]] not_equal[!=] call[name[other].GetDimension, parameter[]]] begin[:] <ast.Raise object at 0x7da1b11f2680> if compare[call[name[len], parameter[name[self]]] not_equal[!=] call[name[len], parameter[name[other]]]] begin[:] <ast.Raise object at 0x7da1b11f3250> for taget[name[axis]] in starred[call[name[range], parameter[call[name[self].GetDimension, parameter[]]]]] begin[:] if compare[call[name[self].nbins, parameter[]] not_equal[!=] call[name[other].nbins, parameter[]]] begin[:] <ast.Raise object at 0x7da1b11f0c40> if name[check_edges] begin[:] for taget[name[axis]] in starred[call[name[range], parameter[call[name[self].GetDimension, parameter[]]]]] begin[:] if <ast.UnaryOp object at 0x7da1b11f2260> begin[:] <ast.Raise object at 0x7da1b11f31c0>
keyword[def] identifier[check_compatibility] ( identifier[self] , identifier[other] , identifier[check_edges] = keyword[False] , identifier[precision] = literal[int] ): literal[string] keyword[if] identifier[self] . identifier[GetDimension] ()!= identifier[other] . identifier[GetDimension] (): keyword[raise] identifier[TypeError] ( literal[string] ) keyword[if] identifier[len] ( identifier[self] )!= identifier[len] ( identifier[other] ): keyword[raise] identifier[ValueError] ( literal[string] ) keyword[for] identifier[axis] keyword[in] identifier[range] ( identifier[self] . identifier[GetDimension] ()): keyword[if] identifier[self] . identifier[nbins] ( identifier[axis] = identifier[axis] )!= identifier[other] . identifier[nbins] ( identifier[axis] = identifier[axis] ): keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[axis] )) keyword[if] identifier[check_edges] : keyword[for] identifier[axis] keyword[in] identifier[range] ( identifier[self] . identifier[GetDimension] ()): keyword[if] keyword[not] identifier[all] ([ identifier[abs] ( identifier[l] - identifier[r] )< identifier[precision] keyword[for] identifier[l] , identifier[r] keyword[in] identifier[zip] ( identifier[self] . identifier[_edges] ( identifier[axis] ), identifier[other] . identifier[_edges] ( identifier[axis] ))]): keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[axis] ))
def check_compatibility(self, other, check_edges=False, precision=1e-07): """ Test whether two histograms are considered compatible by the number of dimensions, number of bins along each axis, and optionally the bin edges. Parameters ---------- other : histogram A rootpy histogram check_edges : bool, optional (default=False) If True then also check that the bin edges are equal within the specified precision. precision : float, optional (default=1E-7) The value below which differences between floats are treated as nil when comparing bin edges. Raises ------ TypeError If the histogram dimensionalities do not match ValueError If the histogram sizes, number of bins along an axis, or optionally the bin edges do not match """ if self.GetDimension() != other.GetDimension(): raise TypeError('histogram dimensionalities do not match') # depends on [control=['if'], data=[]] if len(self) != len(other): raise ValueError('histogram sizes do not match') # depends on [control=['if'], data=[]] for axis in range(self.GetDimension()): if self.nbins(axis=axis) != other.nbins(axis=axis): raise ValueError('numbers of bins along axis {0:d} do not match'.format(axis)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['axis']] if check_edges: for axis in range(self.GetDimension()): if not all([abs(l - r) < precision for (l, r) in zip(self._edges(axis), other._edges(axis))]): raise ValueError('edges do not match along axis {0:d}'.format(axis)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['axis']] # depends on [control=['if'], data=[]]
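The same compatibility rules can be exercised without ROOT/rootpy; the sketch below applies them to plain per-axis edge lists and raises under the same conditions as the method above.

def check_edge_compatibility(edges_a, edges_b, precision=1e-7):
    """edges_a / edges_b: one list of bin edges per axis."""
    if len(edges_a) != len(edges_b):
        raise TypeError("histogram dimensionalities do not match")
    for axis, (ea, eb) in enumerate(zip(edges_a, edges_b)):
        if len(ea) != len(eb):
            raise ValueError(
                "numbers of bins along axis {0:d} do not match".format(axis))
        # Differences below `precision` are treated as nil.
        if not all(abs(l - r) < precision for l, r in zip(ea, eb)):
            raise ValueError(
                "edges do not match along axis {0:d}".format(axis))

check_edge_compatibility([[0.0, 1.0, 2.0]], [[0.0, 1.0, 2.0]])  # passes silently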
def differing_constants(block_a, block_b):
    """
    Compares two basic blocks and finds all the constants that differ
    from the first block to the second.

    :param block_a: The first block to compare.
    :param block_b: The second block to compare.
    :returns:       Returns a list of differing constants in the form of
                    ConstantChange, which has the offset in the block and
                    the respective constants.
    """
    statements_a = [s for s in block_a.vex.statements
                    if s.tag != "Ist_IMark"] + [block_a.vex.next]
    statements_b = [s for s in block_b.vex.statements
                    if s.tag != "Ist_IMark"] + [block_b.vex.next]
    if len(statements_a) != len(statements_b):
        raise UnmatchedStatementsException(
            "Blocks have different numbers of statements")

    start_1 = min(block_a.instruction_addrs)
    start_2 = min(block_b.instruction_addrs)

    changes = []

    # check statements
    current_offset = None
    for statement, statement_2 in zip(statements_a, statements_b):
        # sanity check
        if statement.tag != statement_2.tag:
            raise UnmatchedStatementsException("Statement tag has changed")

        if statement.tag == "Ist_IMark":
            if statement.addr - start_1 != statement_2.addr - start_2:
                raise UnmatchedStatementsException(
                    "Instruction length has changed")
            current_offset = statement.addr - start_1
            continue

        differences = compare_statement_dict(statement, statement_2)
        for d in differences:
            if d.type != DIFF_VALUE:
                raise UnmatchedStatementsException("Instruction has changed")
            else:
                changes.append(
                    ConstantChange(current_offset, d.value_a, d.value_b))

    return changes
def function[differing_constants, parameter[block_a, block_b]]: constant[ Compares two basic blocks and finds all the constants that differ from the first block to the second. :param block_a: The first block to compare. :param block_b: The second block to compare. :returns: Returns a list of differing constants in the form of ConstantChange, which has the offset in the block and the respective constants. ] variable[statements_a] assign[=] binary_operation[<ast.ListComp object at 0x7da1b1c33c40> + list[[<ast.Attribute object at 0x7da1b1c30dc0>]]] variable[statements_b] assign[=] binary_operation[<ast.ListComp object at 0x7da1b1c31390> + list[[<ast.Attribute object at 0x7da1b1c30850>]]] if compare[call[name[len], parameter[name[statements_a]]] not_equal[!=] call[name[len], parameter[name[statements_b]]]] begin[:] <ast.Raise object at 0x7da1b1c32740> variable[start_1] assign[=] call[name[min], parameter[name[block_a].instruction_addrs]] variable[start_2] assign[=] call[name[min], parameter[name[block_b].instruction_addrs]] variable[changes] assign[=] list[[]] variable[current_offset] assign[=] constant[None] for taget[tuple[[<ast.Name object at 0x7da1b1c30100>, <ast.Name object at 0x7da1b1c32350>]]] in starred[call[name[zip], parameter[name[statements_a], name[statements_b]]]] begin[:] if compare[name[statement].tag not_equal[!=] name[statement_2].tag] begin[:] <ast.Raise object at 0x7da1b1c31930> if compare[name[statement].tag equal[==] constant[Ist_IMark]] begin[:] if compare[binary_operation[name[statement].addr - name[start_1]] not_equal[!=] binary_operation[name[statement_2].addr - name[start_2]]] begin[:] <ast.Raise object at 0x7da18ede7010> variable[current_offset] assign[=] binary_operation[name[statement].addr - name[start_1]] continue variable[differences] assign[=] call[name[compare_statement_dict], parameter[name[statement], name[statement_2]]] for taget[name[d]] in starred[name[differences]] begin[:] if compare[name[d].type not_equal[!=] name[DIFF_VALUE]] begin[:] <ast.Raise object at 0x7da18ede7eb0> return[name[changes]]
keyword[def] identifier[differing_constants] ( identifier[block_a] , identifier[block_b] ): literal[string] identifier[statements_a] =[ identifier[s] keyword[for] identifier[s] keyword[in] identifier[block_a] . identifier[vex] . identifier[statements] keyword[if] identifier[s] . identifier[tag] != literal[string] ]+[ identifier[block_a] . identifier[vex] . identifier[next] ] identifier[statements_b] =[ identifier[s] keyword[for] identifier[s] keyword[in] identifier[block_b] . identifier[vex] . identifier[statements] keyword[if] identifier[s] . identifier[tag] != literal[string] ]+[ identifier[block_b] . identifier[vex] . identifier[next] ] keyword[if] identifier[len] ( identifier[statements_a] )!= identifier[len] ( identifier[statements_b] ): keyword[raise] identifier[UnmatchedStatementsException] ( literal[string] ) identifier[start_1] = identifier[min] ( identifier[block_a] . identifier[instruction_addrs] ) identifier[start_2] = identifier[min] ( identifier[block_b] . identifier[instruction_addrs] ) identifier[changes] =[] identifier[current_offset] = keyword[None] keyword[for] identifier[statement] , identifier[statement_2] keyword[in] identifier[zip] ( identifier[statements_a] , identifier[statements_b] ): keyword[if] identifier[statement] . identifier[tag] != identifier[statement_2] . identifier[tag] : keyword[raise] identifier[UnmatchedStatementsException] ( literal[string] ) keyword[if] identifier[statement] . identifier[tag] == literal[string] : keyword[if] identifier[statement] . identifier[addr] - identifier[start_1] != identifier[statement_2] . identifier[addr] - identifier[start_2] : keyword[raise] identifier[UnmatchedStatementsException] ( literal[string] ) identifier[current_offset] = identifier[statement] . identifier[addr] - identifier[start_1] keyword[continue] identifier[differences] = identifier[compare_statement_dict] ( identifier[statement] , identifier[statement_2] ) keyword[for] identifier[d] keyword[in] identifier[differences] : keyword[if] identifier[d] . identifier[type] != identifier[DIFF_VALUE] : keyword[raise] identifier[UnmatchedStatementsException] ( literal[string] ) keyword[else] : identifier[changes] . identifier[append] ( identifier[ConstantChange] ( identifier[current_offset] , identifier[d] . identifier[value_a] , identifier[d] . identifier[value_b] )) keyword[return] identifier[changes]
def differing_constants(block_a, block_b): """ Compares two basic blocks and finds all the constants that differ from the first block to the second. :param block_a: The first block to compare. :param block_b: The second block to compare. :returns: Returns a list of differing constants in the form of ConstantChange, which has the offset in the block and the respective constants. """ statements_a = [s for s in block_a.vex.statements if s.tag != 'Ist_IMark'] + [block_a.vex.next] statements_b = [s for s in block_b.vex.statements if s.tag != 'Ist_IMark'] + [block_b.vex.next] if len(statements_a) != len(statements_b): raise UnmatchedStatementsException('Blocks have different numbers of statements') # depends on [control=['if'], data=[]] start_1 = min(block_a.instruction_addrs) start_2 = min(block_b.instruction_addrs) changes = [] # check statements current_offset = None for (statement, statement_2) in zip(statements_a, statements_b): # sanity check if statement.tag != statement_2.tag: raise UnmatchedStatementsException('Statement tag has changed') # depends on [control=['if'], data=[]] if statement.tag == 'Ist_IMark': if statement.addr - start_1 != statement_2.addr - start_2: raise UnmatchedStatementsException('Instruction length has changed') # depends on [control=['if'], data=[]] current_offset = statement.addr - start_1 continue # depends on [control=['if'], data=[]] differences = compare_statement_dict(statement, statement_2) for d in differences: if d.type != DIFF_VALUE: raise UnmatchedStatementsException('Instruction has changed') # depends on [control=['if'], data=[]] else: changes.append(ConstantChange(current_offset, d.value_a, d.value_b)) # depends on [control=['for'], data=['d']] # depends on [control=['for'], data=[]] return changes
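The real function walks VEX IR statements via angr/pyvex; a much-simplified model of the same diffing idea, where each statement is reduced to an (offset, constant) pair, makes the control flow easy to see. Everything here is hypothetical scaffolding, not the library's API.

from collections import namedtuple

ConstantChange = namedtuple("ConstantChange", ["offset", "value_a", "value_b"])

def differing_constants_simple(stmts_a, stmts_b):
    """stmts_*: lists of (offset, constant) pairs from two block versions."""
    if len(stmts_a) != len(stmts_b):
        raise ValueError("blocks have different numbers of statements")
    changes = []
    for (off_a, const_a), (off_b, const_b) in zip(stmts_a, stmts_b):
        if off_a != off_b:
            # Mirrors the "instruction length has changed" check above.
            raise ValueError("instruction layout has changed")
        if const_a != const_b:
            changes.append(ConstantChange(off_a, const_a, const_b))
    return changes

print(differing_constants_simple([(0, 4), (4, 100)], [(0, 4), (4, 200)]))
# [ConstantChange(offset=4, value_a=100, value_b=200)]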
def create_delete_model(record):
    """Create a vpc model from a record."""
    data = cloudwatch.get_historical_base_info(record)

    vpc_id = cloudwatch.filter_request_parameters('vpcId', record)

    arn = get_arn(vpc_id, cloudwatch.get_region(record), record['account'])

    LOG.debug(F'[-] Deleting Dynamodb Records. Hash Key: {arn}')

    # tombstone these records so that the deletion event time can be
    # accurately tracked.
    data.update({
        'configuration': {}
    })

    items = list(CurrentVPCModel.query(arn, limit=1))
    if items:
        model_dict = items[0].__dict__['attribute_values'].copy()
        model_dict.update(data)
        model = CurrentVPCModel(**model_dict)
        model.save()
        return model

    return None
def function[create_delete_model, parameter[record]]: constant[Create a vpc model from a record.] variable[data] assign[=] call[name[cloudwatch].get_historical_base_info, parameter[name[record]]] variable[vpc_id] assign[=] call[name[cloudwatch].filter_request_parameters, parameter[constant[vpcId], name[record]]] variable[arn] assign[=] call[name[get_arn], parameter[name[vpc_id], call[name[cloudwatch].get_region, parameter[name[record]]], call[name[record]][constant[account]]]] call[name[LOG].debug, parameter[<ast.JoinedStr object at 0x7da1b12954b0>]] call[name[data].update, parameter[dictionary[[<ast.Constant object at 0x7da1b1295840>], [<ast.Dict object at 0x7da1b1295720>]]]] variable[items] assign[=] call[name[list], parameter[call[name[CurrentVPCModel].query, parameter[name[arn]]]]] if name[items] begin[:] variable[model_dict] assign[=] call[call[call[name[items]][constant[0]].__dict__][constant[attribute_values]].copy, parameter[]] call[name[model_dict].update, parameter[name[data]]] variable[model] assign[=] call[name[CurrentVPCModel], parameter[]] call[name[model].save, parameter[]] return[name[model]] return[constant[None]]
keyword[def] identifier[create_delete_model] ( identifier[record] ): literal[string] identifier[data] = identifier[cloudwatch] . identifier[get_historical_base_info] ( identifier[record] ) identifier[vpc_id] = identifier[cloudwatch] . identifier[filter_request_parameters] ( literal[string] , identifier[record] ) identifier[arn] = identifier[get_arn] ( identifier[vpc_id] , identifier[cloudwatch] . identifier[get_region] ( identifier[record] ), identifier[record] [ literal[string] ]) identifier[LOG] . identifier[debug] ( literal[string] ) identifier[data] . identifier[update] ({ literal[string] :{} }) identifier[items] = identifier[list] ( identifier[CurrentVPCModel] . identifier[query] ( identifier[arn] , identifier[limit] = literal[int] )) keyword[if] identifier[items] : identifier[model_dict] = identifier[items] [ literal[int] ]. identifier[__dict__] [ literal[string] ]. identifier[copy] () identifier[model_dict] . identifier[update] ( identifier[data] ) identifier[model] = identifier[CurrentVPCModel] (** identifier[model_dict] ) identifier[model] . identifier[save] () keyword[return] identifier[model] keyword[return] keyword[None]
def create_delete_model(record): """Create a vpc model from a record.""" data = cloudwatch.get_historical_base_info(record) vpc_id = cloudwatch.filter_request_parameters('vpcId', record) arn = get_arn(vpc_id, cloudwatch.get_region(record), record['account']) LOG.debug(f'[-] Deleting Dynamodb Records. Hash Key: {arn}') # tombstone these records so that the deletion event time can be accurately tracked. data.update({'configuration': {}}) items = list(CurrentVPCModel.query(arn, limit=1)) if items: model_dict = items[0].__dict__['attribute_values'].copy() model_dict.update(data) model = CurrentVPCModel(**model_dict) model.save() return model # depends on [control=['if'], data=[]] return None
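The tombstone idea above (keep the row, blank its configuration, merge in the deletion event's metadata) is independent of DynamoDB; a sketch with a plain dict standing in for the table, where the ARN key and event_data shape are illustrative assumptions:

def tombstone_record(store, arn, event_data):
    """store: dict keyed by ARN, standing in for the DynamoDB table."""
    existing = store.get(arn)
    if existing is None:
        return None
    updated = dict(existing)
    updated.update(event_data)
    updated["configuration"] = {}   # tombstone: config gone, row remains
    store[arn] = updated
    return updated

store = {"arn:vpc-123": {"configuration": {"CidrBlock": "10.0.0.0/16"}}}
tombstone_record(store, "arn:vpc-123", {"eventTime": "2019-01-01T00:00:00Z"})
print(store["arn:vpc-123"])
# {'configuration': {}, 'eventTime': '2019-01-01T00:00:00Z'}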
def get_types(json_type: StrOrList) -> typing.Tuple[str, str]:
    """Returns the json and native python type based on the json_type
    input. If json_type is a list of types it will return the first
    non 'null' value.

    :param json_type: A json type or a list of json types.
    :returns: A tuple containing the json type and native python type.
    """
    # If the type is a list, use the first non 'null' value as the type.
    if isinstance(json_type, list):
        for j_type in json_type:
            if j_type != 'null':
                json_type = j_type
                break
    return (json_type, JSON_TYPES_TO_NATIVE[json_type])
def function[get_types, parameter[json_type]]: constant[Returns the json and native python type based on the json_type input. If json_type is a list of types it will return the first non 'null' value. :param json_type: A json type or a list of json types. :returns: A tuple containing the json type and native python type. ] if call[name[isinstance], parameter[name[json_type], name[list]]] begin[:] for taget[name[j_type]] in starred[name[json_type]] begin[:] if compare[name[j_type] not_equal[!=] constant[null]] begin[:] variable[json_type] assign[=] name[j_type] break return[tuple[[<ast.Name object at 0x7da20e9559f0>, <ast.Subscript object at 0x7da20e955420>]]]
keyword[def] identifier[get_types] ( identifier[json_type] : identifier[StrOrList] )-> identifier[typing] . identifier[Tuple] [ identifier[str] , identifier[str] ]: literal[string] keyword[if] identifier[isinstance] ( identifier[json_type] , identifier[list] ): keyword[for] identifier[j_type] keyword[in] identifier[json_type] : keyword[if] identifier[j_type] != literal[string] : identifier[json_type] = identifier[j_type] keyword[break] keyword[return] ( identifier[json_type] , identifier[JSON_TYPES_TO_NATIVE] [ identifier[json_type] ])
def get_types(json_type: StrOrList) -> typing.Tuple[str, str]: """Returns the json and native python type based on the json_type input. If json_type is a list of types it will return the first non 'null' value. :param json_type: A json type or a list of json types. :returns: A tuple containing the json type and native python type. """ # If the type is a list, use the first non 'null' value as the type. if isinstance(json_type, list): for j_type in json_type: if j_type != 'null': json_type = j_type break # depends on [control=['if'], data=['j_type']] # depends on [control=['for'], data=['j_type']] # depends on [control=['if'], data=[]] return (json_type, JSON_TYPES_TO_NATIVE[json_type])
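A quick check of the null-filtering behaviour; the mapping below is a reduced stand-in for the real JSON_TYPES_TO_NATIVE table, which is not shown in this record.

import typing

StrOrList = typing.Union[str, typing.List[str]]

# Reduced stand-in for the real JSON_TYPES_TO_NATIVE mapping.
JSON_TYPES_TO_NATIVE = {"string": "str", "integer": "int", "boolean": "bool"}

def get_types(json_type: StrOrList) -> typing.Tuple[str, str]:
    if isinstance(json_type, list):
        # Take the first non-'null' entry as the effective type.
        for j_type in json_type:
            if j_type != "null":
                json_type = j_type
                break
    return (json_type, JSON_TYPES_TO_NATIVE[json_type])

print(get_types(["null", "integer"]))  # ('integer', 'int')
print(get_types("string"))             # ('string', 'str')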
def sample(self, n_samples, divs=1, visual=False, safe=False):
    """
    Sample

    Sampling

    Inputs :
        n_samples : number of samples to generate

    Optional Inputs :
        divs : (1) number of divisions
        visual : show progress
        safe : save the chain at every division
    """
    if visual:
        print("Sampling: 0%")
    for i in xrange(divs):
        self._sample(int(n_samples/divs))
        if visual:
            sys.stdout.write("\033[F")  # cursor up
            print("Sampling: "+str(int(i*100./divs)+1)+'%')
        if safe:
            self.save(path="chain_{:}.dat".format(i))
    if n_samples % divs != 0:
        self._sample(n_samples % divs)
        if safe:
            self.save(path="chain_{:}.dat".format(divs))
def function[sample, parameter[self, n_samples, divs, visual, safe]]: constant[ Sample Sampling Inputs : n_samples : number of samples to generate Optional Inputs : divs : (1) number of divisions visual : show progress safe : save the chain at every division ] if name[visual] begin[:] call[name[print], parameter[constant[Sampling: 0%]]] for taget[name[i]] in starred[call[name[xrange], parameter[name[divs]]]] begin[:] call[name[self]._sample, parameter[call[name[int], parameter[binary_operation[name[n_samples] / name[divs]]]]]] if name[visual] begin[:] call[name[sys].stdout.write, parameter[constant[]]] call[name[print], parameter[binary_operation[binary_operation[constant[Sampling: ] + call[name[str], parameter[binary_operation[call[name[int], parameter[binary_operation[binary_operation[name[i] * constant[100.0]] / name[divs]]]] + constant[1]]]]] + constant[%]]]] if name[safe] begin[:] call[name[self].save, parameter[]] if compare[binary_operation[name[n_samples] <ast.Mod object at 0x7da2590d6920> name[divs]] not_equal[!=] constant[0]] begin[:] call[name[self]._sample, parameter[binary_operation[name[n_samples] <ast.Mod object at 0x7da2590d6920> name[divs]]]] if name[safe] begin[:] call[name[self].save, parameter[]]
keyword[def] identifier[sample] ( identifier[self] , identifier[n_samples] , identifier[divs] = literal[int] , identifier[visual] = keyword[False] , identifier[safe] = keyword[False] ): literal[string] keyword[if] identifier[visual] : identifier[print] ( literal[string] ) keyword[for] identifier[i] keyword[in] identifier[xrange] ( identifier[divs] ): identifier[self] . identifier[_sample] ( identifier[int] ( identifier[n_samples] / identifier[divs] )) keyword[if] identifier[visual] : identifier[sys] . identifier[stdout] . identifier[write] ( literal[string] ) identifier[print] ( literal[string] + identifier[str] ( identifier[int] ( identifier[i] * literal[int] / identifier[divs] )+ literal[int] )+ literal[string] ) keyword[if] identifier[safe] : identifier[self] . identifier[save] ( identifier[path] = literal[string] . identifier[format] ( identifier[i] )) keyword[if] identifier[n_samples] % identifier[divs] != literal[int] : identifier[self] . identifier[_sample] ( identifier[n_samples] % identifier[divs] ) keyword[if] identifier[safe] : identifier[self] . identifier[save] ( identifier[path] = literal[string] . identifier[format] ( identifier[divs] ))
def sample(self, n_samples, divs=1, visual=False, safe=False): """ Sample Sampling Inputs : n_samples : number of samples to generate Optional Inputs : divs : (1) number of divisions visual : show progress safe : save the chain at every division """ if visual: print('Sampling: 0%') # depends on [control=['if'], data=[]] for i in xrange(divs): self._sample(int(n_samples / divs)) if visual: sys.stdout.write('\x1b[F') # curser up print('Sampling: ' + str(int(i * 100.0 / divs) + 1) + '%') # depends on [control=['if'], data=[]] if safe: self.save(path='chain_{:}.dat'.format(i)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']] if n_samples % divs != 0: self._sample(n_samples % divs) if safe: self.save(path='chain_{:}.dat'.format(divs)) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
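The division logic is easy to mis-read: int(n_samples/divs) truncates, so each of the divs passes draws the truncated count and a final pass draws the remainder. A small hypothetical helper to show the resulting chunk sizes:

def chunk_counts(n_samples, divs):
    """How sample() splits the work: divs equal chunks plus a remainder."""
    chunks = [int(n_samples / divs)] * divs
    if n_samples % divs != 0:
        chunks.append(n_samples % divs)
    return chunks

print(chunk_counts(10, 3))  # [3, 3, 3, 1] -> 10 samples total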
def check_against_chunks(self, chunks):
    # type: (Iterator[bytes]) -> None
    """Check good hashes against ones built from iterable of chunks of
    data.

    Raise HashMismatch if none match.
    """
    gots = {}
    for hash_name in iterkeys(self._allowed):
        try:
            gots[hash_name] = hashlib.new(hash_name)
        except (ValueError, TypeError):
            raise InstallationError('Unknown hash name: %s' % hash_name)

    for chunk in chunks:
        for hash in itervalues(gots):
            hash.update(chunk)

    for hash_name, got in iteritems(gots):
        if got.hexdigest() in self._allowed[hash_name]:
            return
    self._raise(gots)
def function[check_against_chunks, parameter[self, chunks]]: constant[Check good hashes against ones built from iterable of chunks of data. Raise HashMismatch if none match. ] variable[gots] assign[=] dictionary[[], []] for taget[name[hash_name]] in starred[call[name[iterkeys], parameter[name[self]._allowed]]] begin[:] <ast.Try object at 0x7da18bcca560> for taget[name[chunk]] in starred[name[chunks]] begin[:] for taget[name[hash]] in starred[call[name[itervalues], parameter[name[gots]]]] begin[:] call[name[hash].update, parameter[name[chunk]]] for taget[tuple[[<ast.Name object at 0x7da18bccb5b0>, <ast.Name object at 0x7da18bccacb0>]]] in starred[call[name[iteritems], parameter[name[gots]]]] begin[:] if compare[call[name[got].hexdigest, parameter[]] in call[name[self]._allowed][name[hash_name]]] begin[:] return[None] call[name[self]._raise, parameter[name[gots]]]
keyword[def] identifier[check_against_chunks] ( identifier[self] , identifier[chunks] ): literal[string] identifier[gots] ={} keyword[for] identifier[hash_name] keyword[in] identifier[iterkeys] ( identifier[self] . identifier[_allowed] ): keyword[try] : identifier[gots] [ identifier[hash_name] ]= identifier[hashlib] . identifier[new] ( identifier[hash_name] ) keyword[except] ( identifier[ValueError] , identifier[TypeError] ): keyword[raise] identifier[InstallationError] ( literal[string] % identifier[hash_name] ) keyword[for] identifier[chunk] keyword[in] identifier[chunks] : keyword[for] identifier[hash] keyword[in] identifier[itervalues] ( identifier[gots] ): identifier[hash] . identifier[update] ( identifier[chunk] ) keyword[for] identifier[hash_name] , identifier[got] keyword[in] identifier[iteritems] ( identifier[gots] ): keyword[if] identifier[got] . identifier[hexdigest] () keyword[in] identifier[self] . identifier[_allowed] [ identifier[hash_name] ]: keyword[return] identifier[self] . identifier[_raise] ( identifier[gots] )
def check_against_chunks(self, chunks): # type: (Iterator[bytes]) -> None 'Check good hashes against ones built from iterable of chunks of\n data.\n\n Raise HashMismatch if none match.\n\n ' gots = {} for hash_name in iterkeys(self._allowed): try: gots[hash_name] = hashlib.new(hash_name) # depends on [control=['try'], data=[]] except (ValueError, TypeError): raise InstallationError('Unknown hash name: %s' % hash_name) # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['hash_name']] for chunk in chunks: for hash in itervalues(gots): hash.update(chunk) # depends on [control=['for'], data=['hash']] # depends on [control=['for'], data=['chunk']] for (hash_name, got) in iteritems(gots): if got.hexdigest() in self._allowed[hash_name]: return # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] self._raise(gots)
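A self-contained version of the same multi-hash verification, with pip's InstallationError/HashMismatch replaced by plain RuntimeError for the sketch:

import hashlib

def check_against_chunks(allowed, chunks):
    """allowed: {hash_name: set of acceptable hex digests}.
    chunks: iterable of bytes. Returns silently on any match."""
    gots = {}
    for hash_name in allowed:
        try:
            gots[hash_name] = hashlib.new(hash_name)
        except (ValueError, TypeError):
            raise RuntimeError('Unknown hash name: %s' % hash_name)
    for chunk in chunks:
        # Feed every chunk to every hasher in one pass over the data.
        for h in gots.values():
            h.update(chunk)
    for hash_name, got in gots.items():
        if got.hexdigest() in allowed[hash_name]:
            return
    raise RuntimeError('hash mismatch: %r'
                       % {k: v.hexdigest() for k, v in gots.items()})

digest = hashlib.sha256(b"hello world").hexdigest()
check_against_chunks({"sha256": {digest}}, [b"hello ", b"world"])  # passes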
def findFlippedSNPs(goldFrqFile1, sourceAlleles, outPrefix):
    """Find flipped SNPs and flip them in the data1."""
    goldAlleles = {}
    with open(goldFrqFile1, "r") as inputFile:
        headerIndex = None
        for i, line in enumerate(inputFile):
            row = createRowFromPlinkSpacedOutput(line)
            if i == 0:
                # This is the header
                headerIndex = dict([
                    (row[j], j) for j in xrange(len(row))
                ])
                # Checking the columns
                for columnName in ["SNP", "A1", "A2"]:
                    if columnName not in headerIndex:
                        msg = "%(fileName)s: no column named " \
                              "%(columnName)s" % locals()
                        raise ProgramError(msg)
            else:
                snpName = row[headerIndex["SNP"]]
                allele1 = row[headerIndex["A1"]]
                allele2 = row[headerIndex["A2"]]
                alleles = set([allele1, allele2])
                if "0" in alleles:
                    alleles.remove("0")
                goldAlleles[snpName] = alleles

    # Finding the SNPs to flip
    toFlipOutputFile = None
    try:
        toFlipOutputFile = open(outPrefix + ".snp_to_flip_in_reference", "w")
    except IOError:
        msg = "%(outPrefix)s.snp_to_flip_in_reference: can't write " \
              "file" % locals()
        raise ProgramError(msg)

    toRemoveOutputFile = None
    try:
        toRemoveOutputFile = open(outPrefix + ".snp_to_remove", "w")
    except IOError:
        msg = "%(outPrefix)s.snp_to_remove: can't write file" % locals()
        raise ProgramError(msg)

    toRemoveOutputFileExplanation = None
    try:
        toRemoveOutputFileExplanation = open(
            outPrefix + ".snp_to_remove.explanation", "w",
        )
        print >>toRemoveOutputFileExplanation, "\t".join(
            ["Name", "Reason", "Alleles 1", "Alleles 2"])
    except IOError:
        msg = "%(outPrefix)s.snp_to_remove: can't write file" % locals()
        raise ProgramError(msg)

    for snpName in goldAlleles.iterkeys():
        alleles1 = goldAlleles[snpName]
        alleles2 = sourceAlleles[snpName]

        if (len(alleles1) == 2) and (len(alleles2) == 2):
            # Both are heterozygous
            if (({"A", "T"} == alleles1 and {"A", "T"} == alleles2) or
                    ({"C", "G"} == alleles1 and {"C", "G"} == alleles2)):
                # We can't flip those..., so we remove them
                print >>toRemoveOutputFile, snpName
                print >>toRemoveOutputFileExplanation, "\t".join([
                    snpName,
                    "Undetermined",
                    "".join(alleles1),
                    "".join(alleles2),
                ])
            else:
                if alleles1 != alleles2:
                    # Let's try the flip one
                    if flipGenotype(alleles1) == alleles2:
                        # We need to flip it
                        print >>toFlipOutputFile, snpName
                    else:
                        # Those SNP are discordant...
                        print >>toRemoveOutputFile, snpName
                        print >>toRemoveOutputFileExplanation, "\t".join([
                            snpName,
                            "Invalid",
                            "".join(alleles1),
                            "".join(alleles2),
                        ])
        else:
            # We want to remove this SNP, because there is at least one
            # homozygous individual
            print >>toRemoveOutputFile, snpName
            tmp_allele1 = "".join(alleles1)
            if len(alleles1) == 1:
                tmp_allele1 += tmp_allele1
            tmp_allele2 = "".join(alleles2)
            if len(alleles2) == 1:
                tmp_allele2 += tmp_allele2
            print >>toRemoveOutputFileExplanation, "\t".join(
                [snpName, "Homozygous", tmp_allele1, tmp_allele2])

    # Closing output files
    toFlipOutputFile.close()
    toRemoveOutputFile.close()
    toRemoveOutputFileExplanation.close()
def function[findFlippedSNPs, parameter[goldFrqFile1, sourceAlleles, outPrefix]]: constant[Find flipped SNPs and flip them in the data1.] variable[goldAlleles] assign[=] dictionary[[], []] with call[name[open], parameter[name[goldFrqFile1], constant[r]]] begin[:] variable[headerIndex] assign[=] constant[None] for taget[tuple[[<ast.Name object at 0x7da1b0a4f9a0>, <ast.Name object at 0x7da1b0a4f9d0>]]] in starred[call[name[enumerate], parameter[name[inputFile]]]] begin[:] variable[row] assign[=] call[name[createRowFromPlinkSpacedOutput], parameter[name[line]]] if compare[name[i] equal[==] constant[0]] begin[:] variable[headerIndex] assign[=] call[name[dict], parameter[<ast.ListComp object at 0x7da1b0a4c0d0>]] for taget[name[columnName]] in starred[list[[<ast.Constant object at 0x7da1b0a4c610>, <ast.Constant object at 0x7da1b0a4c640>, <ast.Constant object at 0x7da1b0a4c490>]]] begin[:] if compare[name[columnName] <ast.NotIn object at 0x7da2590d7190> name[headerIndex]] begin[:] variable[msg] assign[=] binary_operation[constant[%(fileName)s: no column named %(columnName)s] <ast.Mod object at 0x7da2590d6920> call[name[locals], parameter[]]] <ast.Raise object at 0x7da1b0a4c700> variable[toFlipOutputFile] assign[=] constant[None] <ast.Try object at 0x7da1b0a4d270> variable[toRemoveOutputFile] assign[=] constant[None] <ast.Try object at 0x7da1b0a4d990> variable[toRemoveOutputFileExplanation] assign[=] constant[None] <ast.Try object at 0x7da1b0a4d630> for taget[name[snpName]] in starred[call[name[goldAlleles].iterkeys, parameter[]]] begin[:] variable[alleles1] assign[=] call[name[goldAlleles]][name[snpName]] variable[alleles2] assign[=] call[name[sourceAlleles]][name[snpName]] if <ast.BoolOp object at 0x7da1b0a4efb0> begin[:] if <ast.BoolOp object at 0x7da1b0a4e530> begin[:] tuple[[<ast.BinOp object at 0x7da1b0a4ea70>, <ast.Name object at 0x7da1b0a4eb00>]] tuple[[<ast.BinOp object at 0x7da1b0a4ece0>, <ast.Call object at 0x7da1b0a4ed40>]] call[name[toFlipOutputFile].close, parameter[]] call[name[toRemoveOutputFile].close, parameter[]] call[name[toRemoveOutputFileExplanation].close, parameter[]]
keyword[def] identifier[findFlippedSNPs] ( identifier[goldFrqFile1] , identifier[sourceAlleles] , identifier[outPrefix] ): literal[string] identifier[goldAlleles] ={} keyword[with] identifier[open] ( identifier[goldFrqFile1] , literal[string] ) keyword[as] identifier[inputFile] : identifier[headerIndex] = keyword[None] keyword[for] identifier[i] , identifier[line] keyword[in] identifier[enumerate] ( identifier[inputFile] ): identifier[row] = identifier[createRowFromPlinkSpacedOutput] ( identifier[line] ) keyword[if] identifier[i] == literal[int] : identifier[headerIndex] = identifier[dict] ([ ( identifier[row] [ identifier[j] ], identifier[j] ) keyword[for] identifier[j] keyword[in] identifier[xrange] ( identifier[len] ( identifier[row] )) ]) keyword[for] identifier[columnName] keyword[in] [ literal[string] , literal[string] , literal[string] ]: keyword[if] identifier[columnName] keyword[not] keyword[in] identifier[headerIndex] : identifier[msg] = literal[string] literal[string] % identifier[locals] () keyword[raise] identifier[ProgramError] ( identifier[msg] ) keyword[else] : identifier[snpName] = identifier[row] [ identifier[headerIndex] [ literal[string] ]] identifier[allele1] = identifier[row] [ identifier[headerIndex] [ literal[string] ]] identifier[allele2] = identifier[row] [ identifier[headerIndex] [ literal[string] ]] identifier[alleles] = identifier[set] ([ identifier[allele1] , identifier[allele2] ]) keyword[if] literal[string] keyword[in] identifier[alleles] : identifier[alleles] . identifier[remove] ( literal[string] ) identifier[goldAlleles] [ identifier[snpName] ]= identifier[alleles] identifier[toFlipOutputFile] = keyword[None] keyword[try] : identifier[toFlipOutputFile] = identifier[open] ( identifier[outPrefix] + literal[string] , literal[string] ) keyword[except] identifier[IOError] : identifier[msg] = literal[string] literal[string] % identifier[locals] () keyword[raise] identifier[ProgramError] ( identifier[msg] ) identifier[toRemoveOutputFile] = keyword[None] keyword[try] : identifier[toRemoveOutputFile] = identifier[open] ( identifier[outPrefix] + literal[string] , literal[string] ) keyword[except] identifier[IOError] : identifier[msg] = literal[string] % identifier[locals] () keyword[raise] identifier[ProgramError] ( identifier[msg] ) identifier[toRemoveOutputFileExplanation] = keyword[None] keyword[try] : identifier[toRemoveOutputFileExplanation] = identifier[open] ( identifier[outPrefix] + literal[string] , literal[string] , ) identifier[print] >> identifier[toRemoveOutputFileExplanation] , literal[string] . identifier[join] ([ literal[string] , literal[string] , literal[string] , literal[string] ]) keyword[except] identifier[IOError] : identifier[msg] = literal[string] % identifier[locals] () keyword[raise] identifier[ProgramError] ( identifier[msg] ) keyword[for] identifier[snpName] keyword[in] identifier[goldAlleles] . 
identifier[iterkeys] (): identifier[alleles1] = identifier[goldAlleles] [ identifier[snpName] ] identifier[alleles2] = identifier[sourceAlleles] [ identifier[snpName] ] keyword[if] ( identifier[len] ( identifier[alleles1] )== literal[int] ) keyword[and] ( identifier[len] ( identifier[alleles2] )== literal[int] ): keyword[if] (({ literal[string] , literal[string] }== identifier[alleles1] keyword[and] { literal[string] , literal[string] }== identifier[alleles2] ) keyword[or] ({ literal[string] , literal[string] }== identifier[alleles1] keyword[and] { literal[string] , literal[string] }== identifier[alleles2] )): identifier[print] >> identifier[toRemoveOutputFile] , identifier[snpName] identifier[print] >> identifier[toRemoveOutputFileExplanation] , literal[string] . identifier[join] ([ identifier[snpName] , literal[string] , literal[string] . identifier[join] ( identifier[alleles1] ), literal[string] . identifier[join] ( identifier[alleles2] ), ]) keyword[else] : keyword[if] identifier[alleles1] != identifier[alleles2] : keyword[if] identifier[flipGenotype] ( identifier[alleles1] )== identifier[alleles2] : identifier[print] >> identifier[toFlipOutputFile] , identifier[snpName] keyword[else] : identifier[print] >> identifier[toRemoveOutputFile] , identifier[snpName] identifier[print] >> identifier[toRemoveOutputFileExplanation] , literal[string] . identifier[join] ([ identifier[snpName] , literal[string] , literal[string] . identifier[join] ( identifier[alleles1] ), literal[string] . identifier[join] ( identifier[alleles2] ), ]) keyword[else] : identifier[print] >> identifier[toRemoveOutputFile] , identifier[snpName] identifier[tmp_allele1] = literal[string] . identifier[join] ( identifier[alleles1] ) keyword[if] identifier[len] ( identifier[alleles1] )== literal[int] : identifier[tmp_allele1] += identifier[tmp_allele1] identifier[tmp_allele2] = literal[string] . identifier[join] ( identifier[alleles1] ) keyword[if] identifier[len] ( identifier[alleles1] )== literal[int] : identifier[tmp_allele2] += identifier[tmp_allele2] identifier[print] >> identifier[toRemoveOutputFileExplanation] , literal[string] . identifier[join] ([ identifier[snpName] , literal[string] , identifier[tmp_allele1] , identifier[tmp_allele2] ]) identifier[toFlipOutputFile] . identifier[close] () identifier[toRemoveOutputFile] . identifier[close] () identifier[toRemoveOutputFileExplanation] . identifier[close] ()
def findFlippedSNPs(goldFrqFile1, sourceAlleles, outPrefix): """Find flipped SNPs and flip them in the data1.""" goldAlleles = {} with open(goldFrqFile1, 'r') as inputFile: headerIndex = None for (i, line) in enumerate(inputFile): row = createRowFromPlinkSpacedOutput(line) if i == 0: # This is the header headerIndex = dict([(row[j], j) for j in xrange(len(row))]) # Checking the columns for columnName in ['SNP', 'A1', 'A2']: if columnName not in headerIndex: msg = '%(fileName)s: no column named %(columnName)s' % locals() raise ProgramError(msg) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['columnName']] # depends on [control=['if'], data=[]] else: snpName = row[headerIndex['SNP']] allele1 = row[headerIndex['A1']] allele2 = row[headerIndex['A2']] alleles = set([allele1, allele2]) if '0' in alleles: alleles.remove('0') # depends on [control=['if'], data=['alleles']] goldAlleles[snpName] = alleles # depends on [control=['for'], data=[]] # depends on [control=['with'], data=['inputFile']] # Finding the SNPs to flip toFlipOutputFile = None try: toFlipOutputFile = open(outPrefix + '.snp_to_flip_in_reference', 'w') # depends on [control=['try'], data=[]] except IOError: msg = "%(outPrefix)s.snp_to_flip_in_reference: can't write file" % locals() raise ProgramError(msg) # depends on [control=['except'], data=[]] toRemoveOutputFile = None try: toRemoveOutputFile = open(outPrefix + '.snp_to_remove', 'w') # depends on [control=['try'], data=[]] except IOError: msg = "%(outPrefix)s.snp_to_remove: can't write file" % locals() raise ProgramError(msg) # depends on [control=['except'], data=[]] toRemoveOutputFileExplanation = None try: toRemoveOutputFileExplanation = open(outPrefix + '.snp_to_remove.explanation', 'w') (print >> toRemoveOutputFileExplanation, '\t'.join(['Name', 'Reason', 'Alleles 1', 'Alleles 2'])) # depends on [control=['try'], data=[]] except IOError: msg = "%(outPrefix)s.snp_to_remove: can't write file" % locals() raise ProgramError(msg) # depends on [control=['except'], data=[]] for snpName in goldAlleles.iterkeys(): alleles1 = goldAlleles[snpName] alleles2 = sourceAlleles[snpName] if len(alleles1) == 2 and len(alleles2) == 2: # Both are heterozygous if {'A', 'T'} == alleles1 and {'A', 'T'} == alleles2 or ({'C', 'G'} == alleles1 and {'C', 'G'} == alleles2): # We can't flip those..., so we remove them (print >> toRemoveOutputFile, snpName) (print >> toRemoveOutputFileExplanation, '\t'.join([snpName, 'Undetermined', ''.join(alleles1), ''.join(alleles2)])) # depends on [control=['if'], data=[]] elif alleles1 != alleles2: # Let's try the flip one if flipGenotype(alleles1) == alleles2: # We need to flip it (print >> toFlipOutputFile, snpName) # depends on [control=['if'], data=[]] else: # Those SNP are discordant... 
(print >> toRemoveOutputFile, snpName) (print >> toRemoveOutputFileExplanation, '\t'.join([snpName, 'Invalid', ''.join(alleles1), ''.join(alleles2)])) # depends on [control=['if'], data=['alleles1', 'alleles2']] # depends on [control=['if'], data=[]] else: # We want to remove this SNP, because there is at least one # homozygous individual (print >> toRemoveOutputFile, snpName) tmp_allele1 = ''.join(alleles1) if len(alleles1) == 1: tmp_allele1 += tmp_allele1 # depends on [control=['if'], data=[]] tmp_allele2 = ''.join(alleles1) if len(alleles1) == 1: tmp_allele2 += tmp_allele2 # depends on [control=['if'], data=[]] (print >> toRemoveOutputFileExplanation, '\t'.join([snpName, 'Homozygous', tmp_allele1, tmp_allele2])) # depends on [control=['for'], data=['snpName']] # Closing output files toFlipOutputFile.close() toRemoveOutputFile.close() toRemoveOutputFileExplanation.close()
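The flip test above relies on a flipGenotype helper that this record does not include. A plausible minimal version complements each allele, which is exactly why A/T and C/G SNPs land in the "Undetermined" bucket: they are their own complement. The sketch below is an assumption about that helper's behaviour, not the original implementation.

# Assumed behaviour of flipGenotype: strand flip by base complement.
COMPLEMENT = {"A": "T", "T": "A", "C": "G", "G": "C"}

def flip_genotype(alleles):
    """Complement both alleles of a biallelic SNP (strand flip)."""
    return {COMPLEMENT[a] for a in alleles}

print(flip_genotype({"A", "C"}) == {"T", "G"})   # True: flippable
print(flip_genotype({"A", "T"}) == {"A", "T"})   # True: self-complementary,
                                                 # hence "Undetermined" above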
def osm_net_download(lat_min=None, lng_min=None, lat_max=None, lng_max=None,
                     network_type='walk', timeout=180, memory=None,
                     max_query_area_size=50*1000*50*1000,
                     custom_osm_filter=None):
    """
    Download OSM ways and nodes within a bounding box from the
    Overpass API.

    Parameters
    ----------
    lat_min : float
        southern latitude of bounding box
    lng_min : float
        eastern longitude of bounding box
    lat_max : float
        northern latitude of bounding box
    lng_max : float
        western longitude of bounding box
    network_type : string
        Specify the network type where value of 'walk' includes roadways
        where pedestrians are allowed and pedestrian pathways and 'drive'
        includes driveable roadways.
    timeout : int
        the timeout interval for requests and to pass to Overpass API
    memory : int
        server memory allocation size for the query, in bytes. If none,
        server will use its default allocation size
    max_query_area_size : float
        max area for any part of the geometry, in the units the geometry
        is in: any polygon bigger will get divided up for multiple queries
        to Overpass API (default is 50,000 * 50,000 units (ie, 50km x 50km
        in area, if units are meters))
    custom_osm_filter : string, optional
        specify custom arguments for the way["highway"] query to OSM. Must
        follow Overpass API schema. For example to request highway ways
        that are service roads use: '["highway"="service"]'

    Returns
    -------
    response_json : dict
    """
    # create a filter to exclude certain kinds of ways based on the
    # requested network_type
    if custom_osm_filter is None:
        request_filter = osm_filter(network_type)
    else:
        request_filter = custom_osm_filter

    response_jsons_list = []
    response_jsons = []

    # server memory allocation in bytes formatted for Overpass API query
    if memory is None:
        maxsize = ''
    else:
        maxsize = '[maxsize:{}]'.format(memory)

    # define the Overpass API query
    # way["highway"] denotes ways with highway keys and {filters} returns
    # ways with the requested key/value. the '>' makes it recurse so we get
    # ways and way nodes. maxsize is in bytes.

    # turn bbox into a polygon and project to local UTM
    polygon = Polygon([(lng_max, lat_min), (lng_min, lat_min),
                       (lng_min, lat_max), (lng_max, lat_max)])
    geometry_proj, crs_proj = project_geometry(polygon,
                                               crs={'init': 'epsg:4326'})

    # subdivide the bbox area poly if it exceeds the max area size
    # (in meters), then project back to WGS84
    geometry_proj_consolidated_subdivided = consolidate_subdivide_geometry(
        geometry_proj, max_query_area_size=max_query_area_size)
    geometry, crs = project_geometry(geometry_proj_consolidated_subdivided,
                                     crs=crs_proj, to_latlong=True)
    log('Requesting network data within bounding box from Overpass API '
        'in {:,} request(s)'.format(len(geometry)))
    start_time = time.time()

    # loop through each polygon in the geometry
    for poly in geometry:
        # represent bbox as lng_max, lat_min, lng_min, lat_max and round
        # lat-longs to 8 decimal places to create consistent URL strings
        lng_max, lat_min, lng_min, lat_max = poly.bounds
        query_template = '[out:json][timeout:{timeout}]{maxsize};' \
                         '(way["highway"]' \
                         '{filters}({lat_min:.8f},{lng_max:.8f},' \
                         '{lat_max:.8f},{lng_min:.8f});>;);out;'
        query_str = query_template.format(lat_max=lat_max, lat_min=lat_min,
                                          lng_min=lng_min, lng_max=lng_max,
                                          filters=request_filter,
                                          timeout=timeout, maxsize=maxsize)
        response_json = overpass_request(data={'data': query_str},
                                         timeout=timeout)
        response_jsons_list.append(response_json)
    log('Downloaded OSM network data within bounding box from Overpass '
        'API in {:,} request(s) and'
        ' {:,.2f} seconds'.format(len(geometry), time.time()-start_time))

    # stitch together individual json results
    for json in response_jsons_list:
        try:
            response_jsons.extend(json['elements'])
        except KeyError:
            pass

    # remove duplicate records resulting from the json stitching
    start_time = time.time()
    record_count = len(response_jsons)

    if record_count == 0:
        raise Exception('Query resulted in no data. Check your query '
                        'parameters: {}'.format(query_str))
    else:
        response_jsons_df = pd.DataFrame.from_records(response_jsons,
                                                      index='id')
        nodes = response_jsons_df[response_jsons_df['type'] == 'node']
        nodes = nodes[~nodes.index.duplicated(keep='first')]
        ways = response_jsons_df[response_jsons_df['type'] == 'way']
        ways = ways[~ways.index.duplicated(keep='first')]
        response_jsons_df = pd.concat([nodes, ways], axis=0)
        response_jsons_df.reset_index(inplace=True)
        response_jsons = response_jsons_df.to_dict(orient='records')
        if record_count - len(response_jsons) > 0:
            log('{:,} duplicate records removed. Took {:,.2f} seconds'.format(
                record_count - len(response_jsons),
                time.time() - start_time))

    return {'elements': response_jsons}
def function[osm_net_download, parameter[lat_min, lng_min, lat_max, lng_max, network_type, timeout, memory, max_query_area_size, custom_osm_filter]]: constant[ Download OSM ways and nodes within a bounding box from the Overpass API. Parameters ---------- lat_min : float southern latitude of bounding box lng_min : float eastern longitude of bounding box lat_max : float northern latitude of bounding box lng_max : float western longitude of bounding box network_type : string Specify the network type where value of 'walk' includes roadways where pedestrians are allowed and pedestrian pathways and 'drive' includes driveable roadways. timeout : int the timeout interval for requests and to pass to Overpass API memory : int server memory allocation size for the query, in bytes. If none, server will use its default allocation size max_query_area_size : float max area for any part of the geometry, in the units the geometry is in: any polygon bigger will get divided up for multiple queries to Overpass API (default is 50,000 * 50,000 units (ie, 50km x 50km in area, if units are meters)) custom_osm_filter : string, optional specify custom arguments for the way["highway"] query to OSM. Must follow Overpass API schema. For example to request highway ways that are service roads use: '["highway"="service"]' Returns ------- response_json : dict ] if compare[name[custom_osm_filter] is constant[None]] begin[:] variable[request_filter] assign[=] call[name[osm_filter], parameter[name[network_type]]] variable[response_jsons_list] assign[=] list[[]] variable[response_jsons] assign[=] list[[]] if compare[name[memory] is constant[None]] begin[:] variable[maxsize] assign[=] constant[] variable[polygon] assign[=] call[name[Polygon], parameter[list[[<ast.Tuple object at 0x7da1b0f108e0>, <ast.Tuple object at 0x7da1b0f11690>, <ast.Tuple object at 0x7da1b0f122c0>, <ast.Tuple object at 0x7da1b0f131c0>]]]] <ast.Tuple object at 0x7da1b0f10d90> assign[=] call[name[project_geometry], parameter[name[polygon]]] variable[geometry_proj_consolidated_subdivided] assign[=] call[name[consolidate_subdivide_geometry], parameter[name[geometry_proj]]] <ast.Tuple object at 0x7da1b0f10dc0> assign[=] call[name[project_geometry], parameter[name[geometry_proj_consolidated_subdivided]]] call[name[log], parameter[call[constant[Requesting network data within bounding box from Overpass API in {:,} request(s)].format, parameter[call[name[len], parameter[name[geometry]]]]]]] variable[start_time] assign[=] call[name[time].time, parameter[]] for taget[name[poly]] in starred[name[geometry]] begin[:] <ast.Tuple object at 0x7da1b0f109a0> assign[=] name[poly].bounds variable[query_template] assign[=] constant[[out:json][timeout:{timeout}]{maxsize};(way["highway"]{filters}({lat_min:.8f},{lng_max:.8f},{lat_max:.8f},{lng_min:.8f});>;);out;] variable[query_str] assign[=] call[name[query_template].format, parameter[]] variable[response_json] assign[=] call[name[overpass_request], parameter[]] call[name[response_jsons_list].append, parameter[name[response_json]]] call[name[log], parameter[call[constant[Downloaded OSM network data within bounding box from Overpass API in {:,} request(s) and {:,.2f} seconds].format, parameter[call[name[len], parameter[name[geometry]]], binary_operation[call[name[time].time, parameter[]] - name[start_time]]]]]] for taget[name[json]] in starred[name[response_jsons_list]] begin[:] <ast.Try object at 0x7da1b0f11090> variable[start_time] assign[=] call[name[time].time, parameter[]] variable[record_count] assign[=] call[name[len], 
parameter[name[response_jsons]]] if compare[name[record_count] equal[==] constant[0]] begin[:] <ast.Raise object at 0x7da1b1139540> return[dictionary[[<ast.Constant object at 0x7da1b11a3cd0>], [<ast.Name object at 0x7da1b11a12a0>]]]
keyword[def] identifier[osm_net_download] ( identifier[lat_min] = keyword[None] , identifier[lng_min] = keyword[None] , identifier[lat_max] = keyword[None] , identifier[lng_max] = keyword[None] , identifier[network_type] = literal[string] , identifier[timeout] = literal[int] , identifier[memory] = keyword[None] , identifier[max_query_area_size] = literal[int] * literal[int] * literal[int] * literal[int] , identifier[custom_osm_filter] = keyword[None] ): literal[string] keyword[if] identifier[custom_osm_filter] keyword[is] keyword[None] : identifier[request_filter] = identifier[osm_filter] ( identifier[network_type] ) keyword[else] : identifier[request_filter] = identifier[custom_osm_filter] identifier[response_jsons_list] =[] identifier[response_jsons] =[] keyword[if] identifier[memory] keyword[is] keyword[None] : identifier[maxsize] = literal[string] keyword[else] : identifier[maxsize] = literal[string] . identifier[format] ( identifier[memory] ) identifier[polygon] = identifier[Polygon] ([( identifier[lng_max] , identifier[lat_min] ),( identifier[lng_min] , identifier[lat_min] ), ( identifier[lng_min] , identifier[lat_max] ),( identifier[lng_max] , identifier[lat_max] )]) identifier[geometry_proj] , identifier[crs_proj] = identifier[project_geometry] ( identifier[polygon] , identifier[crs] ={ literal[string] : literal[string] }) identifier[geometry_proj_consolidated_subdivided] = identifier[consolidate_subdivide_geometry] ( identifier[geometry_proj] , identifier[max_query_area_size] = identifier[max_query_area_size] ) identifier[geometry] , identifier[crs] = identifier[project_geometry] ( identifier[geometry_proj_consolidated_subdivided] , identifier[crs] = identifier[crs_proj] , identifier[to_latlong] = keyword[True] ) identifier[log] ( literal[string] literal[string] . identifier[format] ( identifier[len] ( identifier[geometry] ))) identifier[start_time] = identifier[time] . identifier[time] () keyword[for] identifier[poly] keyword[in] identifier[geometry] : identifier[lng_max] , identifier[lat_min] , identifier[lng_min] , identifier[lat_max] = identifier[poly] . identifier[bounds] identifier[query_template] = literal[string] literal[string] literal[string] literal[string] identifier[query_str] = identifier[query_template] . identifier[format] ( identifier[lat_max] = identifier[lat_max] , identifier[lat_min] = identifier[lat_min] , identifier[lng_min] = identifier[lng_min] , identifier[lng_max] = identifier[lng_max] , identifier[filters] = identifier[request_filter] , identifier[timeout] = identifier[timeout] , identifier[maxsize] = identifier[maxsize] ) identifier[response_json] = identifier[overpass_request] ( identifier[data] ={ literal[string] : identifier[query_str] }, identifier[timeout] = identifier[timeout] ) identifier[response_jsons_list] . identifier[append] ( identifier[response_json] ) identifier[log] ( literal[string] literal[string] literal[string] . identifier[format] ( identifier[len] ( identifier[geometry] ), identifier[time] . identifier[time] ()- identifier[start_time] )) keyword[for] identifier[json] keyword[in] identifier[response_jsons_list] : keyword[try] : identifier[response_jsons] . identifier[extend] ( identifier[json] [ literal[string] ]) keyword[except] identifier[KeyError] : keyword[pass] identifier[start_time] = identifier[time] . identifier[time] () identifier[record_count] = identifier[len] ( identifier[response_jsons] ) keyword[if] identifier[record_count] == literal[int] : keyword[raise] identifier[Exception] ( literal[string] literal[string] . 
identifier[format] ( identifier[query_str] )) keyword[else] : identifier[response_jsons_df] = identifier[pd] . identifier[DataFrame] . identifier[from_records] ( identifier[response_jsons] , identifier[index] = literal[string] ) identifier[nodes] = identifier[response_jsons_df] [ identifier[response_jsons_df] [ literal[string] ]== literal[string] ] identifier[nodes] = identifier[nodes] [~ identifier[nodes] . identifier[index] . identifier[duplicated] ( identifier[keep] = literal[string] )] identifier[ways] = identifier[response_jsons_df] [ identifier[response_jsons_df] [ literal[string] ]== literal[string] ] identifier[ways] = identifier[ways] [~ identifier[ways] . identifier[index] . identifier[duplicated] ( identifier[keep] = literal[string] )] identifier[response_jsons_df] = identifier[pd] . identifier[concat] ([ identifier[nodes] , identifier[ways] ], identifier[axis] = literal[int] ) identifier[response_jsons_df] . identifier[reset_index] ( identifier[inplace] = keyword[True] ) identifier[response_jsons] = identifier[response_jsons_df] . identifier[to_dict] ( identifier[orient] = literal[string] ) keyword[if] identifier[record_count] - identifier[len] ( identifier[response_jsons] )> literal[int] : identifier[log] ( literal[string] . identifier[format] ( identifier[record_count] - identifier[len] ( identifier[response_jsons] ), identifier[time] . identifier[time] ()- identifier[start_time] )) keyword[return] { literal[string] : identifier[response_jsons] }
def osm_net_download(lat_min=None, lng_min=None, lat_max=None, lng_max=None, network_type='walk', timeout=180, memory=None, max_query_area_size=50 * 1000 * 50 * 1000, custom_osm_filter=None): """ Download OSM ways and nodes within a bounding box from the Overpass API. Parameters ---------- lat_min : float southern latitude of bounding box lng_min : float eastern longitude of bounding box lat_max : float northern latitude of bounding box lng_max : float western longitude of bounding box network_type : string Specify the network type where value of 'walk' includes roadways where pedestrians are allowed and pedestrian pathways and 'drive' includes driveable roadways. timeout : int the timeout interval for requests and to pass to Overpass API memory : int server memory allocation size for the query, in bytes. If none, server will use its default allocation size max_query_area_size : float max area for any part of the geometry, in the units the geometry is in: any polygon bigger will get divided up for multiple queries to Overpass API (default is 50,000 * 50,000 units (ie, 50km x 50km in area, if units are meters)) custom_osm_filter : string, optional specify custom arguments for the way["highway"] query to OSM. Must follow Overpass API schema. For example to request highway ways that are service roads use: '["highway"="service"]' Returns ------- response_json : dict """ # create a filter to exclude certain kinds of ways based on the requested # network_type if custom_osm_filter is None: request_filter = osm_filter(network_type) # depends on [control=['if'], data=[]] else: request_filter = custom_osm_filter response_jsons_list = [] response_jsons = [] # server memory allocation in bytes formatted for Overpass API query if memory is None: maxsize = '' # depends on [control=['if'], data=[]] else: maxsize = '[maxsize:{}]'.format(memory) # define the Overpass API query # way["highway"] denotes ways with highway keys and {filters} returns # ways with the requested key/value. the '>' makes it recurse so we get # ways and way nodes. maxsize is in bytes. 
# turn bbox into a polygon and project to local UTM polygon = Polygon([(lng_max, lat_min), (lng_min, lat_min), (lng_min, lat_max), (lng_max, lat_max)]) (geometry_proj, crs_proj) = project_geometry(polygon, crs={'init': 'epsg:4326'}) # subdivide the bbox area poly if it exceeds the max area size # (in meters), then project back to WGS84 geometry_proj_consolidated_subdivided = consolidate_subdivide_geometry(geometry_proj, max_query_area_size=max_query_area_size) (geometry, crs) = project_geometry(geometry_proj_consolidated_subdivided, crs=crs_proj, to_latlong=True) log('Requesting network data within bounding box from Overpass API in {:,} request(s)'.format(len(geometry))) start_time = time.time() # loop through each polygon in the geometry for poly in geometry: # represent bbox as lng_max, lat_min, lng_min, lat_max and round # lat-longs to 8 decimal places to create # consistent URL strings (lng_max, lat_min, lng_min, lat_max) = poly.bounds query_template = '[out:json][timeout:{timeout}]{maxsize};(way["highway"]{filters}({lat_min:.8f},{lng_max:.8f},{lat_max:.8f},{lng_min:.8f});>;);out;' query_str = query_template.format(lat_max=lat_max, lat_min=lat_min, lng_min=lng_min, lng_max=lng_max, filters=request_filter, timeout=timeout, maxsize=maxsize) response_json = overpass_request(data={'data': query_str}, timeout=timeout) response_jsons_list.append(response_json) # depends on [control=['for'], data=['poly']] log('Downloaded OSM network data within bounding box from Overpass API in {:,} request(s) and {:,.2f} seconds'.format(len(geometry), time.time() - start_time)) # stitch together individual json results for json in response_jsons_list: try: response_jsons.extend(json['elements']) # depends on [control=['try'], data=[]] except KeyError: pass # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['json']] # remove duplicate records resulting from the json stitching start_time = time.time() record_count = len(response_jsons) if record_count == 0: raise Exception('Query resulted in no data. Check your query parameters: {}'.format(query_str)) # depends on [control=['if'], data=[]] else: response_jsons_df = pd.DataFrame.from_records(response_jsons, index='id') nodes = response_jsons_df[response_jsons_df['type'] == 'node'] nodes = nodes[~nodes.index.duplicated(keep='first')] ways = response_jsons_df[response_jsons_df['type'] == 'way'] ways = ways[~ways.index.duplicated(keep='first')] response_jsons_df = pd.concat([nodes, ways], axis=0) response_jsons_df.reset_index(inplace=True) response_jsons = response_jsons_df.to_dict(orient='records') if record_count - len(response_jsons) > 0: log('{:,} duplicate records removed. Took {:,.2f} seconds'.format(record_count - len(response_jsons), time.time() - start_time)) # depends on [control=['if'], data=[]] return {'elements': response_jsons}
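A minimal usage sketch for the downloader above, assuming it is importable from the surrounding module; the import path, the Oakland-area bounding box, and the walk network choice are illustrative, not from the source.

# hypothetical import; adjust to wherever osm_net_download actually lives
from osm_network import osm_net_download

# a bbox of roughly 2km x 2km stays well below the default 50km x 50km
# max_query_area_size, so the geometry is not subdivided and a single
# Overpass request is issued
data = osm_net_download(lat_min=37.795, lng_min=-122.282,
                        lat_max=37.815, lng_max=-122.262,
                        network_type='walk')
print('{} OSM elements downloaded'.format(len(data['elements'])))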
def _get_path(): """Guarantee that /usr/local/bin and /usr/bin are in PATH""" if _path: return _path[0] environ_paths = set(os.environ['PATH'].split(':')) environ_paths.add('/usr/local/bin') environ_paths.add('/usr/bin') _path.append(':'.join(environ_paths)) logger.debug('PATH = %s', _path[-1]) return _path[0]
def function[_get_path, parameter[]]: constant[Guarantee that /usr/local/bin and /usr/bin are in PATH] if name[_path] begin[:] return[call[name[_path]][constant[0]]] variable[environ_paths] assign[=] call[name[set], parameter[call[call[name[os].environ][constant[PATH]].split, parameter[constant[:]]]]] call[name[environ_paths].add, parameter[constant[/usr/local/bin]]] call[name[environ_paths].add, parameter[constant[/usr/bin]]] call[name[_path].append, parameter[call[constant[:].join, parameter[name[environ_paths]]]]] call[name[logger].debug, parameter[constant[PATH = %s], call[name[_path]][<ast.UnaryOp object at 0x7da20c6c6200>]]] return[call[name[_path]][constant[0]]]
keyword[def] identifier[_get_path] (): literal[string] keyword[if] identifier[_path] : keyword[return] identifier[_path] [ literal[int] ] identifier[environ_paths] = identifier[set] ( identifier[os] . identifier[environ] [ literal[string] ]. identifier[split] ( literal[string] )) identifier[environ_paths] . identifier[add] ( literal[string] ) identifier[environ_paths] . identifier[add] ( literal[string] ) identifier[_path] . identifier[append] ( literal[string] . identifier[join] ( identifier[environ_paths] )) identifier[logger] . identifier[debug] ( literal[string] , identifier[_path] [- literal[int] ]) keyword[return] identifier[_path] [ literal[int] ]
def _get_path(): """Guarantee that /usr/local/bin and /usr/bin are in PATH""" if _path: return _path[0] # depends on [control=['if'], data=[]] environ_paths = set(os.environ['PATH'].split(':')) environ_paths.add('/usr/local/bin') environ_paths.add('/usr/bin') _path.append(':'.join(environ_paths)) logger.debug('PATH = %s', _path[-1]) return _path[0]
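The empty-list-or-cached check makes _path a one-element module-level memo: the first call builds the merged PATH string and later calls return it unchanged. A sketch of the module state the function relies on (logger and _path must exist at module scope in the source; the definitions shown here are the obvious ones, added for completeness):

import logging
import os

logger = logging.getLogger(__name__)
_path = []  # module-level cache read and written by _get_path()

merged = _get_path()            # first call: builds and caches the PATH string
assert _get_path() is merged    # later calls return the cached value

Note that environ_paths is a set, so the order of directories in the merged PATH string is not preserved from the original environment.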
def _replace_constant_methods(self): """Replaces conventional distribution methods by its constant counterparts.""" self.cumulative_distribution = self._constant_cumulative_distribution self.percent_point = self._constant_percent_point self.probability_density = self._constant_probability_density self.sample = self._constant_sample
def function[_replace_constant_methods, parameter[self]]: constant[Replaces conventional distribution methods by its constant counterparts.] name[self].cumulative_distribution assign[=] name[self]._constant_cumulative_distribution name[self].percent_point assign[=] name[self]._constant_percent_point name[self].probability_density assign[=] name[self]._constant_probability_density name[self].sample assign[=] name[self]._constant_sample
keyword[def] identifier[_replace_constant_methods] ( identifier[self] ): literal[string] identifier[self] . identifier[cumulative_distribution] = identifier[self] . identifier[_constant_cumulative_distribution] identifier[self] . identifier[percent_point] = identifier[self] . identifier[_constant_percent_point] identifier[self] . identifier[probability_density] = identifier[self] . identifier[_constant_probability_density] identifier[self] . identifier[sample] = identifier[self] . identifier[_constant_sample]
def _replace_constant_methods(self): """Replaces conventional distribution methods by its constant counterparts.""" self.cumulative_distribution = self._constant_cumulative_distribution self.percent_point = self._constant_percent_point self.probability_density = self._constant_probability_density self.sample = self._constant_sample
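Assigning bound methods onto the instance shadows the class-level implementations, so later calls dispatch to the constant variants. A toy of the same pattern with stand-in names (ConstantExample and its methods are illustrative, not from the source):

class ConstantExample(object):
    def sample(self, n):
        raise NotImplementedError('fit() has not detected constant data')

    def _constant_sample(self, n):
        return [self.constant_value] * n

    def fit(self, data):
        self.constant_value = data[0]
        if all(x == data[0] for x in data):
            # degenerate data: swap in the constant counterpart
            self.sample = self._constant_sample

dist = ConstantExample()
dist.fit([5, 5, 5])
print(dist.sample(3))  # [5, 5, 5]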
def load_locations(self, location_file=None): """Load locations into this resolver from the given *location_file*, which should contain one JSON object per line representing a location. If *location_file* is not specified, an internal location database is used.""" if location_file is None: contents = pkgutil.get_data(__package__, 'data/locations.json') contents_string = contents.decode("ascii") locations = contents_string.split('\n') else: from .cli import open_file with open_file(location_file, 'rb') as input: locations = input.readlines() for location_string in locations: if location_string.strip(): location = Location(known=True, **json.loads(location_string)) self.location_id_to_location[location.id] = location self.add_location(location)
def function[load_locations, parameter[self, location_file]]: constant[Load locations into this resolver from the given *location_file*, which should contain one JSON object per line representing a location. If *location_file* is not specified, an internal location database is used.] if compare[name[location_file] is constant[None]] begin[:] variable[contents] assign[=] call[name[pkgutil].get_data, parameter[name[__package__], constant[data/locations.json]]] variable[contents_string] assign[=] call[name[contents].decode, parameter[constant[ascii]]] variable[locations] assign[=] call[name[contents_string].split, parameter[constant[ ]]] for taget[name[location_string]] in starred[name[locations]] begin[:] if call[name[location_string].strip, parameter[]] begin[:] variable[location] assign[=] call[name[Location], parameter[]] call[name[self].location_id_to_location][name[location].id] assign[=] name[location] call[name[self].add_location, parameter[name[location]]]
keyword[def] identifier[load_locations] ( identifier[self] , identifier[location_file] = keyword[None] ): literal[string] keyword[if] identifier[location_file] keyword[is] keyword[None] : identifier[contents] = identifier[pkgutil] . identifier[get_data] ( identifier[__package__] , literal[string] ) identifier[contents_string] = identifier[contents] . identifier[decode] ( literal[string] ) identifier[locations] = identifier[contents_string] . identifier[split] ( literal[string] ) keyword[else] : keyword[from] . identifier[cli] keyword[import] identifier[open_file] keyword[with] identifier[open_file] ( identifier[location_file] , literal[string] ) keyword[as] identifier[input] : identifier[locations] = identifier[input] . identifier[readlines] () keyword[for] identifier[location_string] keyword[in] identifier[locations] : keyword[if] identifier[location_string] . identifier[strip] (): identifier[location] = identifier[Location] ( identifier[known] = keyword[True] ,** identifier[json] . identifier[loads] ( identifier[location_string] )) identifier[self] . identifier[location_id_to_location] [ identifier[location] . identifier[id] ]= identifier[location] identifier[self] . identifier[add_location] ( identifier[location] )
def load_locations(self, location_file=None): """Load locations into this resolver from the given *location_file*, which should contain one JSON object per line representing a location. If *location_file* is not specified, an internal location database is used.""" if location_file is None: contents = pkgutil.get_data(__package__, 'data/locations.json') contents_string = contents.decode('ascii') locations = contents_string.split('\n') # depends on [control=['if'], data=[]] else: from .cli import open_file with open_file(location_file, 'rb') as input: locations = input.readlines() # depends on [control=['with'], data=['input']] for location_string in locations: if location_string.strip(): location = Location(known=True, **json.loads(location_string)) self.location_id_to_location[location.id] = location self.add_location(location) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['location_string']]
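A sketch of the one-JSON-object-per-line file the loader expects; beyond the id field read by the code, the attribute names are hypothetical and depend on what the Location constructor accepts, since each parsed object is passed to Location as keyword arguments alongside known=True:

import json

with open('locations.json', 'w') as f:
    f.write(json.dumps({'id': 1, 'name': 'Berlin'}) + '\n')
    f.write(json.dumps({'id': 2, 'name': 'Paris'}) + '\n')

# resolver: an instance of the class defined above
resolver.load_locations('locations.json')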
def _restore_isolated(sampleset, bqm, isolated): """Return samples-like by adding isolated variables into sampleset in a way that minimizes the energy (relative to the other non-isolated variables). """ samples = sampleset.record.sample variables = sampleset.variables new_samples = np.empty((len(sampleset), len(isolated)), dtype=samples.dtype) # we don't let the isolated variables interact with each other for now because # it will slow this down substantially for col, v in enumerate(isolated): try: neighbours, biases = zip(*((u, bias) for u, bias in bqm.adj[v].items() if u in variables)) # ignore other isolates except ValueError: # happens when only neighbors are other isolated variables new_samples[:, col] = bqm.linear[v] <= 0 continue idxs = [variables.index[u] for u in neighbours] # figure out which value for v would minimize the energy # v(h_v + \sum_u J_uv * u) new_samples[:, col] = samples[:, idxs].dot(biases) < -bqm.linear[v] if bqm.vartype is dimod.SPIN: new_samples = 2*new_samples - 1 return np.concatenate((samples, new_samples), axis=1), list(variables) + isolated
def function[_restore_isolated, parameter[sampleset, bqm, isolated]]: constant[Return samples-like by adding isolated variables into sampleset in a way that minimizes the energy (relative to the other non-isolated variables). ] variable[samples] assign[=] name[sampleset].record.sample variable[variables] assign[=] name[sampleset].variables variable[new_samples] assign[=] call[name[np].empty, parameter[tuple[[<ast.Call object at 0x7da1b0f6a8c0>, <ast.Call object at 0x7da1b0f6a950>]]]] for taget[tuple[[<ast.Name object at 0x7da1b0f6aad0>, <ast.Name object at 0x7da1b0f6ab00>]]] in starred[call[name[enumerate], parameter[name[isolated]]]] begin[:] <ast.Try object at 0x7da1b0f6abc0> variable[idxs] assign[=] <ast.ListComp object at 0x7da1b0f6af20> call[name[new_samples]][tuple[[<ast.Slice object at 0x7da1b0f69900>, <ast.Name object at 0x7da1b0f698d0>]]] assign[=] compare[call[call[name[samples]][tuple[[<ast.Slice object at 0x7da1b0f6b280>, <ast.Name object at 0x7da1b0f6b250>]]].dot, parameter[name[biases]]] less[<] <ast.UnaryOp object at 0x7da1b0f6b1f0>] if compare[name[bqm].vartype is name[dimod].SPIN] begin[:] variable[new_samples] assign[=] binary_operation[binary_operation[constant[2] * name[new_samples]] - constant[1]] return[tuple[[<ast.Call object at 0x7da1b0f69330>, <ast.BinOp object at 0x7da1b0f69570>]]]
keyword[def] identifier[_restore_isolated] ( identifier[sampleset] , identifier[bqm] , identifier[isolated] ): literal[string] identifier[samples] = identifier[sampleset] . identifier[record] . identifier[sample] identifier[variables] = identifier[sampleset] . identifier[variables] identifier[new_samples] = identifier[np] . identifier[empty] (( identifier[len] ( identifier[sampleset] ), identifier[len] ( identifier[isolated] )), identifier[dtype] = identifier[samples] . identifier[dtype] ) keyword[for] identifier[col] , identifier[v] keyword[in] identifier[enumerate] ( identifier[isolated] ): keyword[try] : identifier[neighbours] , identifier[biases] = identifier[zip] (*(( identifier[u] , identifier[bias] ) keyword[for] identifier[u] , identifier[bias] keyword[in] identifier[bqm] . identifier[adj] [ identifier[v] ]. identifier[items] () keyword[if] identifier[u] keyword[in] identifier[variables] )) keyword[except] identifier[ValueError] : identifier[new_samples] [:, identifier[col] ]= identifier[bqm] . identifier[linear] [ identifier[v] ]<= literal[int] keyword[continue] identifier[idxs] =[ identifier[variables] . identifier[index] [ identifier[u] ] keyword[for] identifier[u] keyword[in] identifier[neighbours] ] identifier[new_samples] [:, identifier[col] ]= identifier[samples] [:, identifier[idxs] ]. identifier[dot] ( identifier[biases] )<- identifier[bqm] . identifier[linear] [ identifier[v] ] keyword[if] identifier[bqm] . identifier[vartype] keyword[is] identifier[dimod] . identifier[SPIN] : identifier[new_samples] = literal[int] * identifier[new_samples] - literal[int] keyword[return] identifier[np] . identifier[concatenate] (( identifier[samples] , identifier[new_samples] ), identifier[axis] = literal[int] ), identifier[list] ( identifier[variables] )+ identifier[isolated]
def _restore_isolated(sampleset, bqm, isolated): """Return samples-like by adding isolated variables into sampleset in a way that minimizes the energy (relative to the other non-isolated variables). """ samples = sampleset.record.sample variables = sampleset.variables new_samples = np.empty((len(sampleset), len(isolated)), dtype=samples.dtype) # we don't let the isolated variables interact with each other for now because # it will slow this down substantially for (col, v) in enumerate(isolated): try: (neighbours, biases) = zip(*((u, bias) for (u, bias) in bqm.adj[v].items() if u in variables)) # ignore other isolates # depends on [control=['try'], data=[]] except ValueError: # happens when only neighbors are other isolated variables new_samples[:, col] = bqm.linear[v] <= 0 continue # depends on [control=['except'], data=[]] idxs = [variables.index[u] for u in neighbours] # figure out which value for v would minimize the energy # v(h_v + \sum_u J_uv * u) new_samples[:, col] = samples[:, idxs].dot(biases) < -bqm.linear[v] # depends on [control=['for'], data=[]] if bqm.vartype is dimod.SPIN: new_samples = 2 * new_samples - 1 # depends on [control=['if'], data=[]] return (np.concatenate((samples, new_samples), axis=1), list(variables) + isolated)
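The column update encodes argmin over v of v*(h_v + sum_u J_uv*u): v is set to +1 exactly when sum_u J_uv*u < -h_v. A numpy check of that rule with made-up numbers:

import numpy as np

samples = np.array([[1, -1],
                    [1,  1]])           # spins for neighbours u1, u2
biases = np.array([1.0, -0.5])          # couplings J_{u1,v}, J_{u2,v}
h_v = 0.25                              # linear bias on the isolated v

col = samples.dot(biases) < -h_v        # True where v = +1 lowers the energy
v = 2 * col - 1                         # map {0, 1} -> {-1, +1} as in the SPIN branch
print(v)                                # [-1 -1]: both rows prefer v = -1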
def _generate_lastnames_variations(lastnames): """Generate variations for lastnames. Note: This method follows the assumption that the first last name is the main one. E.g. For 'Caro Estevez', this method generates: ['Caro', 'Caro Estevez']. In the case the lastnames are dashed, it splits them in two. """ if not lastnames: return [] split_lastnames = [split_lastname for lastname in lastnames for split_lastname in lastname.split('-')] lastnames_variations = split_lastnames if len(split_lastnames) > 1: # Generate lastnames concatenation if there are more than one lastname after split. lastnames_variations.append(u' '.join([lastname for lastname in split_lastnames])) return lastnames_variations
def function[_generate_lastnames_variations, parameter[lastnames]]: constant[Generate variations for lastnames. Note: This method follows the assumption that the first last name is the main one. E.g. For 'Caro Estevez', this method generates: ['Caro', 'Caro Estevez']. In the case the lastnames are dashed, it splits them in two. ] if <ast.UnaryOp object at 0x7da18dc07b50> begin[:] return[list[[]]] variable[split_lastnames] assign[=] <ast.ListComp object at 0x7da18dc04d00> variable[lastnames_variations] assign[=] name[split_lastnames] if compare[call[name[len], parameter[name[split_lastnames]]] greater[>] constant[1]] begin[:] call[name[lastnames_variations].append, parameter[call[constant[ ].join, parameter[<ast.ListComp object at 0x7da18dc04730>]]]] return[name[lastnames_variations]]
keyword[def] identifier[_generate_lastnames_variations] ( identifier[lastnames] ): literal[string] keyword[if] keyword[not] identifier[lastnames] : keyword[return] [] identifier[split_lastnames] =[ identifier[split_lastname] keyword[for] identifier[lastname] keyword[in] identifier[lastnames] keyword[for] identifier[split_lastname] keyword[in] identifier[lastname] . identifier[split] ( literal[string] )] identifier[lastnames_variations] = identifier[split_lastnames] keyword[if] identifier[len] ( identifier[split_lastnames] )> literal[int] : identifier[lastnames_variations] . identifier[append] ( literal[string] . identifier[join] ([ identifier[lastname] keyword[for] identifier[lastname] keyword[in] identifier[split_lastnames] ])) keyword[return] identifier[lastnames_variations]
def _generate_lastnames_variations(lastnames): """Generate variations for lastnames. Note: This method follows the assumption that the first last name is the main one. E.g. For 'Caro Estevez', this method generates: ['Caro', 'Caro Estevez']. In the case the lastnames are dashed, it splits them in two. """ if not lastnames: return [] # depends on [control=['if'], data=[]] split_lastnames = [split_lastname for lastname in lastnames for split_lastname in lastname.split('-')] lastnames_variations = split_lastnames if len(split_lastnames) > 1: # Generate lastnames concatenation if there are more than one lastname after split. lastnames_variations.append(u' '.join([lastname for lastname in split_lastnames])) # depends on [control=['if'], data=[]] return lastnames_variations
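A quick check of the splitting and concatenation behavior, assuming the function above is in scope; the dashed name extends the docstring's example:

print(_generate_lastnames_variations(['Caro-Estevez']))
# ['Caro', 'Estevez', 'Caro Estevez']
print(_generate_lastnames_variations(['Caro']))
# ['Caro']
print(_generate_lastnames_variations([]))
# []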
def setup_observations(self): """ main entry point for setting up observations """ obs_methods = [self.setup_water_budget_obs,self.setup_hyd, self.setup_smp,self.setup_hob,self.setup_hds, self.setup_sfr_obs] obs_types = ["mflist water budget obs","hyd file", "external obs-sim smp files","hob","hds","sfr"] self.obs_dfs = {} for obs_method, obs_type in zip(obs_methods,obs_types): self.log("processing obs type {0}".format(obs_type)) obs_method() self.log("processing obs type {0}".format(obs_type))
def function[setup_observations, parameter[self]]: constant[ main entry point for setting up observations ] variable[obs_methods] assign[=] list[[<ast.Attribute object at 0x7da1b1d51630>, <ast.Attribute object at 0x7da1b1d51690>, <ast.Attribute object at 0x7da1b1d516f0>, <ast.Attribute object at 0x7da1b1d51750>, <ast.Attribute object at 0x7da1b1d517b0>, <ast.Attribute object at 0x7da1b1d51810>]] variable[obs_types] assign[=] list[[<ast.Constant object at 0x7da1b1d51930>, <ast.Constant object at 0x7da1b1d51960>, <ast.Constant object at 0x7da1b1d51990>, <ast.Constant object at 0x7da1b1d519c0>, <ast.Constant object at 0x7da1b1d519f0>, <ast.Constant object at 0x7da1b1d51a20>]] name[self].obs_dfs assign[=] dictionary[[], []] for taget[tuple[[<ast.Name object at 0x7da1b1d51cf0>, <ast.Name object at 0x7da1b1d51d20>]]] in starred[call[name[zip], parameter[name[obs_methods], name[obs_types]]]] begin[:] call[name[self].log, parameter[call[constant[processing obs type {0}].format, parameter[name[obs_type]]]]] call[name[obs_method], parameter[]] call[name[self].log, parameter[call[constant[processing obs type {0}].format, parameter[name[obs_type]]]]]
keyword[def] identifier[setup_observations] ( identifier[self] ): literal[string] identifier[obs_methods] =[ identifier[self] . identifier[setup_water_budget_obs] , identifier[self] . identifier[setup_hyd] , identifier[self] . identifier[setup_smp] , identifier[self] . identifier[setup_hob] , identifier[self] . identifier[setup_hds] , identifier[self] . identifier[setup_sfr_obs] ] identifier[obs_types] =[ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ] identifier[self] . identifier[obs_dfs] ={} keyword[for] identifier[obs_method] , identifier[obs_type] keyword[in] identifier[zip] ( identifier[obs_methods] , identifier[obs_types] ): identifier[self] . identifier[log] ( literal[string] . identifier[format] ( identifier[obs_type] )) identifier[obs_method] () identifier[self] . identifier[log] ( literal[string] . identifier[format] ( identifier[obs_type] ))
def setup_observations(self): """ main entry point for setting up observations """ obs_methods = [self.setup_water_budget_obs, self.setup_hyd, self.setup_smp, self.setup_hob, self.setup_hds, self.setup_sfr_obs] obs_types = ['mflist water budget obs', 'hyd file', 'external obs-sim smp files', 'hob', 'hds', 'sfr'] self.obs_dfs = {} for (obs_method, obs_type) in zip(obs_methods, obs_types): self.log('processing obs type {0}'.format(obs_type)) obs_method() self.log('processing obs type {0}'.format(obs_type)) # depends on [control=['for'], data=[]]
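Zipping the bound methods with their labels gives a small dispatch table, so adding an observation type is one entry in each list. The same pattern reduced to a runnable toy (the step names are illustrative):

def load():
    print('loading')

def clean():
    print('cleaning')

for step, label in zip([load, clean], ['load step', 'clean step']):
    print('processing obs type {0}'.format(label))
    step()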
def _clean_global_uninteresting_paths(self):
        """Marks paths that do not have any route targets of interest
        for withdrawal.

        Since global tables can have paths with route targets that are not
        interesting any more, we have to clean these paths so that appropriate
        withdraws are sent out to NC and other peers. Interesting route
        targets change as VRFs are modified or as filters that specify which
        route targets are allowed are updated. This clean-up should only be
        done when a route target is no longer considered interesting and some
        paths with that route target were installed in any of the global
        tables.
        """
        uninteresting_dest_count = 0
        interested_rts = self._rt_mgr.global_interested_rts
        LOG.debug('Cleaning uninteresting paths. Global interested RTs %s',
                  interested_rts)
        for route_family in [RF_IPv4_VPN, RF_IPv6_VPN, RF_RTC_UC]:
            # TODO(PH): We currently do not install RT_NLRI paths based on
            # extended path attributes (RT)
            if route_family == RF_RTC_UC:
                continue
            table = self.get_global_table_by_route_family(route_family)
            uninteresting_dest_count += \
                table.clean_uninteresting_paths(interested_rts)

        LOG.debug('Found %s destinations with uninteresting paths.',
                  uninteresting_dest_count)
def function[_clean_global_uninteresting_paths, parameter[self]]: constant[Marks paths that do not have any route targets of interest for withdrawal. Since global tables can have paths with route targets that are not interesting any more, we have to clean these paths so that appropriate withdraws are sent out to NC and other peers. Interesting route targets change as VRFs are modified or as filters that specify which route targets are allowed are updated. This clean-up should only be done when a route target is no longer considered interesting and some paths with that route target were installed in any of the global tables. ] variable[uninteresting_dest_count] assign[=] constant[0] variable[interested_rts] assign[=] name[self]._rt_mgr.global_interested_rts call[name[LOG].debug, parameter[constant[Cleaning uninteresting paths. Global interested RTs %s], name[interested_rts]]] for taget[name[route_family]] in starred[list[[<ast.Name object at 0x7da1b1b0f670>, <ast.Name object at 0x7da1b1b0c580>, <ast.Name object at 0x7da1b1b0e710>]]] begin[:] if compare[name[route_family] equal[==] name[RF_RTC_UC]] begin[:] continue variable[table] assign[=] call[name[self].get_global_table_by_route_family, parameter[name[route_family]]] <ast.AugAssign object at 0x7da1b1b0f490> call[name[LOG].debug, parameter[constant[Found %s destinations with uninteresting paths.], name[uninteresting_dest_count]]]
keyword[def] identifier[_clean_global_uninteresting_paths] ( identifier[self] ): literal[string] identifier[uninteresting_dest_count] = literal[int] identifier[interested_rts] = identifier[self] . identifier[_rt_mgr] . identifier[global_interested_rts] identifier[LOG] . identifier[debug] ( literal[string] , identifier[interested_rts] ) keyword[for] identifier[route_family] keyword[in] [ identifier[RF_IPv4_VPN] , identifier[RF_IPv6_VPN] , identifier[RF_RTC_UC] ]: keyword[if] identifier[route_family] == identifier[RF_RTC_UC] : keyword[continue] identifier[table] = identifier[self] . identifier[get_global_table_by_route_family] ( identifier[route_family] ) identifier[uninteresting_dest_count] += identifier[table] . identifier[clean_uninteresting_paths] ( identifier[interested_rts] ) identifier[LOG] . identifier[debug] ( literal[string] , identifier[uninteresting_dest_count] )
def _clean_global_uninteresting_paths(self):
    """Marks paths that do not have any route targets of interest
        for withdrawal.

        Since global tables can have paths with route targets that are not
        interesting any more, we have to clean these paths so that appropriate
        withdraws are sent out to NC and other peers. Interesting route
        targets change as VRFs are modified or as filters that specify which
        route targets are allowed are updated. This clean-up should only be
        done when a route target is no longer considered interesting and some
        paths with that route target were installed in any of the global
        tables.
        """
    uninteresting_dest_count = 0
    interested_rts = self._rt_mgr.global_interested_rts
    LOG.debug('Cleaning uninteresting paths. Global interested RTs %s', interested_rts)
    for route_family in [RF_IPv4_VPN, RF_IPv6_VPN, RF_RTC_UC]:
        # TODO(PH): We currently do not install RT_NLRI paths based on
        # extended path attributes (RT)
        if route_family == RF_RTC_UC:
            continue # depends on [control=['if'], data=[]]
        table = self.get_global_table_by_route_family(route_family)
        uninteresting_dest_count += table.clean_uninteresting_paths(interested_rts) # depends on [control=['for'], data=['route_family']]
    LOG.debug('Found %s destinations with uninteresting paths.', uninteresting_dest_count)
def build_calendar_etc(pfeed): """ Given a ProtoFeed, return a DataFrame representing ``calendar.txt`` and a dictionary of the form <service window ID> -> <service ID>, respectively. """ windows = pfeed.service_windows.copy() # Create a service ID for each distinct days_active field and map the # service windows to those service IDs def get_sid(bitlist): return 'srv' + ''.join([str(b) for b in bitlist]) weekdays = ['monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'sunday'] bitlists = set() # Create a dictionary <service window ID> -> <service ID> d = dict() for index, window in windows.iterrows(): bitlist = window[weekdays].tolist() d[window['service_window_id']] = get_sid(bitlist) bitlists.add(tuple(bitlist)) service_by_window = d # Create calendar start_date = pfeed.meta['start_date'].iat[0] end_date = pfeed.meta['end_date'].iat[0] F = [] for bitlist in bitlists: F.append([get_sid(bitlist)] + list(bitlist) + [start_date, end_date]) calendar = pd.DataFrame(F, columns=( ['service_id'] + weekdays + ['start_date', 'end_date'])) return calendar, service_by_window
def function[build_calendar_etc, parameter[pfeed]]: constant[ Given a ProtoFeed, return a DataFrame representing ``calendar.txt`` and a dictionary of the form <service window ID> -> <service ID>, respectively. ] variable[windows] assign[=] call[name[pfeed].service_windows.copy, parameter[]] def function[get_sid, parameter[bitlist]]: return[binary_operation[constant[srv] + call[constant[].join, parameter[<ast.ListComp object at 0x7da204621c90>]]]] variable[weekdays] assign[=] list[[<ast.Constant object at 0x7da204623490>, <ast.Constant object at 0x7da204620be0>, <ast.Constant object at 0x7da204620220>, <ast.Constant object at 0x7da204623f40>, <ast.Constant object at 0x7da2046229b0>, <ast.Constant object at 0x7da204620940>, <ast.Constant object at 0x7da204623fd0>]] variable[bitlists] assign[=] call[name[set], parameter[]] variable[d] assign[=] call[name[dict], parameter[]] for taget[tuple[[<ast.Name object at 0x7da204622620>, <ast.Name object at 0x7da204622830>]]] in starred[call[name[windows].iterrows, parameter[]]] begin[:] variable[bitlist] assign[=] call[call[name[window]][name[weekdays]].tolist, parameter[]] call[name[d]][call[name[window]][constant[service_window_id]]] assign[=] call[name[get_sid], parameter[name[bitlist]]] call[name[bitlists].add, parameter[call[name[tuple], parameter[name[bitlist]]]]] variable[service_by_window] assign[=] name[d] variable[start_date] assign[=] call[call[name[pfeed].meta][constant[start_date]].iat][constant[0]] variable[end_date] assign[=] call[call[name[pfeed].meta][constant[end_date]].iat][constant[0]] variable[F] assign[=] list[[]] for taget[name[bitlist]] in starred[name[bitlists]] begin[:] call[name[F].append, parameter[binary_operation[binary_operation[list[[<ast.Call object at 0x7da18f09f520>]] + call[name[list], parameter[name[bitlist]]]] + list[[<ast.Name object at 0x7da18f09e530>, <ast.Name object at 0x7da18f09d5a0>]]]]] variable[calendar] assign[=] call[name[pd].DataFrame, parameter[name[F]]] return[tuple[[<ast.Name object at 0x7da18f09cf40>, <ast.Name object at 0x7da18f09f580>]]]
keyword[def] identifier[build_calendar_etc] ( identifier[pfeed] ): literal[string] identifier[windows] = identifier[pfeed] . identifier[service_windows] . identifier[copy] () keyword[def] identifier[get_sid] ( identifier[bitlist] ): keyword[return] literal[string] + literal[string] . identifier[join] ([ identifier[str] ( identifier[b] ) keyword[for] identifier[b] keyword[in] identifier[bitlist] ]) identifier[weekdays] =[ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ] identifier[bitlists] = identifier[set] () identifier[d] = identifier[dict] () keyword[for] identifier[index] , identifier[window] keyword[in] identifier[windows] . identifier[iterrows] (): identifier[bitlist] = identifier[window] [ identifier[weekdays] ]. identifier[tolist] () identifier[d] [ identifier[window] [ literal[string] ]]= identifier[get_sid] ( identifier[bitlist] ) identifier[bitlists] . identifier[add] ( identifier[tuple] ( identifier[bitlist] )) identifier[service_by_window] = identifier[d] identifier[start_date] = identifier[pfeed] . identifier[meta] [ literal[string] ]. identifier[iat] [ literal[int] ] identifier[end_date] = identifier[pfeed] . identifier[meta] [ literal[string] ]. identifier[iat] [ literal[int] ] identifier[F] =[] keyword[for] identifier[bitlist] keyword[in] identifier[bitlists] : identifier[F] . identifier[append] ([ identifier[get_sid] ( identifier[bitlist] )]+ identifier[list] ( identifier[bitlist] )+ [ identifier[start_date] , identifier[end_date] ]) identifier[calendar] = identifier[pd] . identifier[DataFrame] ( identifier[F] , identifier[columns] =( [ literal[string] ]+ identifier[weekdays] +[ literal[string] , literal[string] ])) keyword[return] identifier[calendar] , identifier[service_by_window]
def build_calendar_etc(pfeed): """ Given a ProtoFeed, return a DataFrame representing ``calendar.txt`` and a dictionary of the form <service window ID> -> <service ID>, respectively. """ windows = pfeed.service_windows.copy() # Create a service ID for each distinct days_active field and map the # service windows to those service IDs def get_sid(bitlist): return 'srv' + ''.join([str(b) for b in bitlist]) weekdays = ['monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'sunday'] bitlists = set() # Create a dictionary <service window ID> -> <service ID> d = dict() for (index, window) in windows.iterrows(): bitlist = window[weekdays].tolist() d[window['service_window_id']] = get_sid(bitlist) bitlists.add(tuple(bitlist)) # depends on [control=['for'], data=[]] service_by_window = d # Create calendar start_date = pfeed.meta['start_date'].iat[0] end_date = pfeed.meta['end_date'].iat[0] F = [] for bitlist in bitlists: F.append([get_sid(bitlist)] + list(bitlist) + [start_date, end_date]) # depends on [control=['for'], data=['bitlist']] calendar = pd.DataFrame(F, columns=['service_id'] + weekdays + ['start_date', 'end_date']) return (calendar, service_by_window)
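The service id is simply 'srv' followed by the seven weekday bits, so any two service windows active on the same days collapse onto a single calendar row. A standalone check of that mapping (get_sid reproduced from the function above):

def get_sid(bitlist):
    return 'srv' + ''.join([str(b) for b in bitlist])

weekday_service = [1, 1, 1, 1, 1, 0, 0]   # Monday-Friday
weekend_service = [0, 0, 0, 0, 0, 1, 1]   # Saturday-Sunday
print(get_sid(weekday_service))           # srv1111100
print(get_sid(weekend_service))           # srv0000011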
def resources(self, absolute_url=None): ''' Provide a :class:`~bokeh.resources.Resources` that specifies where Bokeh application sessions should load BokehJS resources from. Args: absolute_url (bool): An absolute URL prefix to use for locating resources. If None, relative URLs are used (default: None) ''' if absolute_url: return Resources(mode="server", root_url=absolute_url + self._prefix, path_versioner=StaticHandler.append_version) return Resources(mode="server", root_url=self._prefix, path_versioner=StaticHandler.append_version)
def function[resources, parameter[self, absolute_url]]: constant[ Provide a :class:`~bokeh.resources.Resources` that specifies where Bokeh application sessions should load BokehJS resources from. Args: absolute_url (bool): An absolute URL prefix to use for locating resources. If None, relative URLs are used (default: None) ] if name[absolute_url] begin[:] return[call[name[Resources], parameter[]]] return[call[name[Resources], parameter[]]]
keyword[def] identifier[resources] ( identifier[self] , identifier[absolute_url] = keyword[None] ): literal[string] keyword[if] identifier[absolute_url] : keyword[return] identifier[Resources] ( identifier[mode] = literal[string] , identifier[root_url] = identifier[absolute_url] + identifier[self] . identifier[_prefix] , identifier[path_versioner] = identifier[StaticHandler] . identifier[append_version] ) keyword[return] identifier[Resources] ( identifier[mode] = literal[string] , identifier[root_url] = identifier[self] . identifier[_prefix] , identifier[path_versioner] = identifier[StaticHandler] . identifier[append_version] )
def resources(self, absolute_url=None): """ Provide a :class:`~bokeh.resources.Resources` that specifies where Bokeh application sessions should load BokehJS resources from. Args: absolute_url (bool): An absolute URL prefix to use for locating resources. If None, relative URLs are used (default: None) """ if absolute_url: return Resources(mode='server', root_url=absolute_url + self._prefix, path_versioner=StaticHandler.append_version) # depends on [control=['if'], data=[]] return Resources(mode='server', root_url=self._prefix, path_versioner=StaticHandler.append_version)
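Despite the '(bool)' annotation in the docstring, absolute_url is used as a URL string that is prefixed onto self._prefix. A minimal sketch of what the absolute branch constructs, using bokeh's public Resources class; the host and '/bokeh/' prefix are illustrative:

from bokeh.resources import Resources

# equivalent of the absolute_url branch with an example prefix
# (bokeh expects root_url to end with a slash)
r = Resources(mode='server', root_url='https://example.com/bokeh/')
print(r.root_url)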
def get_serializer_class(self): """gets the class type of the serializer :return: `rest_framework.Serializer` """ klass = None lookup_url_kwarg = self.lookup_url_kwarg or self.lookup_field if lookup_url_kwarg in self.kwargs: # Looks like this is a detail... klass = self.get_object().__class__ elif "doctype" in self.request.REQUEST: base = self.model.get_base_class() doctypes = indexable_registry.families[base] try: klass = doctypes[self.request.REQUEST["doctype"]] except KeyError: raise Http404 if hasattr(klass, "get_serializer_class"): return klass.get_serializer_class() # TODO: fix deprecation warning here -- `get_serializer_class` is going away soon! return super(ContentViewSet, self).get_serializer_class()
def function[get_serializer_class, parameter[self]]: constant[gets the class type of the serializer :return: `rest_framework.Serializer` ] variable[klass] assign[=] constant[None] variable[lookup_url_kwarg] assign[=] <ast.BoolOp object at 0x7da1b0a873a0> if compare[name[lookup_url_kwarg] in name[self].kwargs] begin[:] variable[klass] assign[=] call[name[self].get_object, parameter[]].__class__ if call[name[hasattr], parameter[name[klass], constant[get_serializer_class]]] begin[:] return[call[name[klass].get_serializer_class, parameter[]]] return[call[call[name[super], parameter[name[ContentViewSet], name[self]]].get_serializer_class, parameter[]]]
keyword[def] identifier[get_serializer_class] ( identifier[self] ): literal[string] identifier[klass] = keyword[None] identifier[lookup_url_kwarg] = identifier[self] . identifier[lookup_url_kwarg] keyword[or] identifier[self] . identifier[lookup_field] keyword[if] identifier[lookup_url_kwarg] keyword[in] identifier[self] . identifier[kwargs] : identifier[klass] = identifier[self] . identifier[get_object] (). identifier[__class__] keyword[elif] literal[string] keyword[in] identifier[self] . identifier[request] . identifier[REQUEST] : identifier[base] = identifier[self] . identifier[model] . identifier[get_base_class] () identifier[doctypes] = identifier[indexable_registry] . identifier[families] [ identifier[base] ] keyword[try] : identifier[klass] = identifier[doctypes] [ identifier[self] . identifier[request] . identifier[REQUEST] [ literal[string] ]] keyword[except] identifier[KeyError] : keyword[raise] identifier[Http404] keyword[if] identifier[hasattr] ( identifier[klass] , literal[string] ): keyword[return] identifier[klass] . identifier[get_serializer_class] () keyword[return] identifier[super] ( identifier[ContentViewSet] , identifier[self] ). identifier[get_serializer_class] ()
def get_serializer_class(self): """gets the class type of the serializer :return: `rest_framework.Serializer` """ klass = None lookup_url_kwarg = self.lookup_url_kwarg or self.lookup_field if lookup_url_kwarg in self.kwargs: # Looks like this is a detail... klass = self.get_object().__class__ # depends on [control=['if'], data=[]] elif 'doctype' in self.request.REQUEST: base = self.model.get_base_class() doctypes = indexable_registry.families[base] try: klass = doctypes[self.request.REQUEST['doctype']] # depends on [control=['try'], data=[]] except KeyError: raise Http404 # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]] if hasattr(klass, 'get_serializer_class'): return klass.get_serializer_class() # depends on [control=['if'], data=[]] # TODO: fix deprecation warning here -- `get_serializer_class` is going away soon! return super(ContentViewSet, self).get_serializer_class()
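The presence of the lookup kwarg distinguishes detail requests (serialize the concrete object's class) from list requests (pick a class by the doctype parameter). Note that request.REQUEST, used above, was removed in Django 1.9; a rough modern equivalent of that branch (an assumption, not the library's code) would read the query string and body directly, preserving REQUEST's POST-then-GET search order:

def resolve_doctype(request, doctypes):
    # request.REQUEST searched POST first, then GET; reproduce that order
    doctype = request.POST.get('doctype', request.GET.get('doctype'))
    return doctypes.get(doctype)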
def express_route_gateways(self): """Instance depends on the API version: * 2018-08-01: :class:`ExpressRouteGatewaysOperations<azure.mgmt.network.v2018_08_01.operations.ExpressRouteGatewaysOperations>` """ api_version = self._get_api_version('express_route_gateways') if api_version == '2018-08-01': from .v2018_08_01.operations import ExpressRouteGatewaysOperations as OperationClass else: raise NotImplementedError("APIVersion {} is not available".format(api_version)) return OperationClass(self._client, self.config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
def function[express_route_gateways, parameter[self]]: constant[Instance depends on the API version: * 2018-08-01: :class:`ExpressRouteGatewaysOperations<azure.mgmt.network.v2018_08_01.operations.ExpressRouteGatewaysOperations>` ] variable[api_version] assign[=] call[name[self]._get_api_version, parameter[constant[express_route_gateways]]] if compare[name[api_version] equal[==] constant[2018-08-01]] begin[:] from relative_module[v2018_08_01.operations] import module[ExpressRouteGatewaysOperations] return[call[name[OperationClass], parameter[name[self]._client, name[self].config, call[name[Serializer], parameter[call[name[self]._models_dict, parameter[name[api_version]]]]], call[name[Deserializer], parameter[call[name[self]._models_dict, parameter[name[api_version]]]]]]]]
keyword[def] identifier[express_route_gateways] ( identifier[self] ): literal[string] identifier[api_version] = identifier[self] . identifier[_get_api_version] ( literal[string] ) keyword[if] identifier[api_version] == literal[string] : keyword[from] . identifier[v2018_08_01] . identifier[operations] keyword[import] identifier[ExpressRouteGatewaysOperations] keyword[as] identifier[OperationClass] keyword[else] : keyword[raise] identifier[NotImplementedError] ( literal[string] . identifier[format] ( identifier[api_version] )) keyword[return] identifier[OperationClass] ( identifier[self] . identifier[_client] , identifier[self] . identifier[config] , identifier[Serializer] ( identifier[self] . identifier[_models_dict] ( identifier[api_version] )), identifier[Deserializer] ( identifier[self] . identifier[_models_dict] ( identifier[api_version] )))
def express_route_gateways(self): """Instance depends on the API version: * 2018-08-01: :class:`ExpressRouteGatewaysOperations<azure.mgmt.network.v2018_08_01.operations.ExpressRouteGatewaysOperations>` """ api_version = self._get_api_version('express_route_gateways') if api_version == '2018-08-01': from .v2018_08_01.operations import ExpressRouteGatewaysOperations as OperationClass # depends on [control=['if'], data=[]] else: raise NotImplementedError('APIVersion {} is not available'.format(api_version)) return OperationClass(self._client, self.config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
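Each versioned property in this client follows the same shape: map the operation group to an API version, import the matching operations class lazily, and wire it to the shared client and serializers. A toy reduction of that dispatch (names illustrative):

def pick_operations(api_version, registry):
    # registry maps api_version -> operations class, mirroring the lazy imports
    if api_version in registry:
        return registry[api_version]
    raise NotImplementedError('APIVersion {} is not available'.format(api_version))

registry = {'2018-08-01': object}  # stand-in for ExpressRouteGatewaysOperations
print(pick_operations('2018-08-01', registry))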
def get_hwclock(): ''' Get current hardware clock setting (UTC or localtime) CLI Example: .. code-block:: bash salt '*' timezone.get_hwclock ''' if salt.utils.path.which('timedatectl'): ret = _timedatectl() for line in (x.strip() for x in ret['stdout'].splitlines()): if 'rtc in local tz' in line.lower(): try: if line.split(':')[-1].strip().lower() == 'yes': return 'localtime' else: return 'UTC' except IndexError: pass msg = ('Failed to parse timedatectl output: {0}\n' 'Please file an issue with SaltStack').format(ret['stdout']) raise CommandExecutionError(msg) else: os_family = __grains__['os_family'] for family in ('RedHat', 'Suse', 'NILinuxRT'): if family in os_family: return _get_adjtime_timezone() if 'Debian' in __grains__['os_family']: # Original way to look up hwclock on Debian-based systems try: with salt.utils.files.fopen('/etc/default/rcS', 'r') as fp_: for line in fp_: line = salt.utils.stringutils.to_unicode(line) if re.match(r'^\s*#', line): continue if 'UTC=' in line: is_utc = line.rstrip('\n').split('=')[-1].lower() if is_utc == 'yes': return 'UTC' else: return 'localtime' except IOError as exc: pass # Since Wheezy return _get_adjtime_timezone() if 'Gentoo' in __grains__['os_family']: if not os.path.exists('/etc/adjtime'): offset_file = '/etc/conf.d/hwclock' try: with salt.utils.files.fopen(offset_file, 'r') as fp_: for line in fp_: line = salt.utils.stringutils.to_unicode(line) if line.startswith('clock='): line = line.rstrip('\n') line = line.split('=')[-1].strip('\'"') if line == 'UTC': return line if line == 'local': return 'LOCAL' raise CommandExecutionError( 'Correct offset value not found in {0}' .format(offset_file) ) except IOError as exc: raise CommandExecutionError( 'Problem reading offset file {0}: {1}' .format(offset_file, exc.strerror) ) return _get_adjtime_timezone() if 'Solaris' in __grains__['os_family']: offset_file = '/etc/rtc_config' try: with salt.utils.files.fopen(offset_file, 'r') as fp_: for line in fp_: line = salt.utils.stringutils.to_unicode(line) if line.startswith('zone_info=GMT'): return 'UTC' return 'localtime' except IOError as exc: if exc.errno == errno.ENOENT: # offset file does not exist return 'UTC' raise CommandExecutionError( 'Problem reading offset file {0}: {1}' .format(offset_file, exc.strerror) ) if 'AIX' in __grains__['os_family']: offset_file = '/etc/environment' try: with salt.utils.files.fopen(offset_file, 'r') as fp_: for line in fp_: line = salt.utils.stringutils.to_unicode(line) if line.startswith('TZ=UTC'): return 'UTC' return 'localtime' except IOError as exc: if exc.errno == errno.ENOENT: # offset file does not exist return 'UTC' raise CommandExecutionError( 'Problem reading offset file {0}: {1}' .format(offset_file, exc.strerror) )
def function[get_hwclock, parameter[]]: constant[ Get current hardware clock setting (UTC or localtime) CLI Example: .. code-block:: bash salt '*' timezone.get_hwclock ] if call[name[salt].utils.path.which, parameter[constant[timedatectl]]] begin[:] variable[ret] assign[=] call[name[_timedatectl], parameter[]] for taget[name[line]] in starred[<ast.GeneratorExp object at 0x7da1b1fcbc40>] begin[:] if compare[constant[rtc in local tz] in call[name[line].lower, parameter[]]] begin[:] <ast.Try object at 0x7da1b1fcb8e0> variable[msg] assign[=] call[constant[Failed to parse timedatectl output: {0} Please file an issue with SaltStack].format, parameter[call[name[ret]][constant[stdout]]]] <ast.Raise object at 0x7da1b1fcb2b0>
keyword[def] identifier[get_hwclock] (): literal[string] keyword[if] identifier[salt] . identifier[utils] . identifier[path] . identifier[which] ( literal[string] ): identifier[ret] = identifier[_timedatectl] () keyword[for] identifier[line] keyword[in] ( identifier[x] . identifier[strip] () keyword[for] identifier[x] keyword[in] identifier[ret] [ literal[string] ]. identifier[splitlines] ()): keyword[if] literal[string] keyword[in] identifier[line] . identifier[lower] (): keyword[try] : keyword[if] identifier[line] . identifier[split] ( literal[string] )[- literal[int] ]. identifier[strip] (). identifier[lower] ()== literal[string] : keyword[return] literal[string] keyword[else] : keyword[return] literal[string] keyword[except] identifier[IndexError] : keyword[pass] identifier[msg] =( literal[string] literal[string] ). identifier[format] ( identifier[ret] [ literal[string] ]) keyword[raise] identifier[CommandExecutionError] ( identifier[msg] ) keyword[else] : identifier[os_family] = identifier[__grains__] [ literal[string] ] keyword[for] identifier[family] keyword[in] ( literal[string] , literal[string] , literal[string] ): keyword[if] identifier[family] keyword[in] identifier[os_family] : keyword[return] identifier[_get_adjtime_timezone] () keyword[if] literal[string] keyword[in] identifier[__grains__] [ literal[string] ]: keyword[try] : keyword[with] identifier[salt] . identifier[utils] . identifier[files] . identifier[fopen] ( literal[string] , literal[string] ) keyword[as] identifier[fp_] : keyword[for] identifier[line] keyword[in] identifier[fp_] : identifier[line] = identifier[salt] . identifier[utils] . identifier[stringutils] . identifier[to_unicode] ( identifier[line] ) keyword[if] identifier[re] . identifier[match] ( literal[string] , identifier[line] ): keyword[continue] keyword[if] literal[string] keyword[in] identifier[line] : identifier[is_utc] = identifier[line] . identifier[rstrip] ( literal[string] ). identifier[split] ( literal[string] )[- literal[int] ]. identifier[lower] () keyword[if] identifier[is_utc] == literal[string] : keyword[return] literal[string] keyword[else] : keyword[return] literal[string] keyword[except] identifier[IOError] keyword[as] identifier[exc] : keyword[pass] keyword[return] identifier[_get_adjtime_timezone] () keyword[if] literal[string] keyword[in] identifier[__grains__] [ literal[string] ]: keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( literal[string] ): identifier[offset_file] = literal[string] keyword[try] : keyword[with] identifier[salt] . identifier[utils] . identifier[files] . identifier[fopen] ( identifier[offset_file] , literal[string] ) keyword[as] identifier[fp_] : keyword[for] identifier[line] keyword[in] identifier[fp_] : identifier[line] = identifier[salt] . identifier[utils] . identifier[stringutils] . identifier[to_unicode] ( identifier[line] ) keyword[if] identifier[line] . identifier[startswith] ( literal[string] ): identifier[line] = identifier[line] . identifier[rstrip] ( literal[string] ) identifier[line] = identifier[line] . identifier[split] ( literal[string] )[- literal[int] ]. identifier[strip] ( literal[string] ) keyword[if] identifier[line] == literal[string] : keyword[return] identifier[line] keyword[if] identifier[line] == literal[string] : keyword[return] literal[string] keyword[raise] identifier[CommandExecutionError] ( literal[string] . 
identifier[format] ( identifier[offset_file] ) ) keyword[except] identifier[IOError] keyword[as] identifier[exc] : keyword[raise] identifier[CommandExecutionError] ( literal[string] . identifier[format] ( identifier[offset_file] , identifier[exc] . identifier[strerror] ) ) keyword[return] identifier[_get_adjtime_timezone] () keyword[if] literal[string] keyword[in] identifier[__grains__] [ literal[string] ]: identifier[offset_file] = literal[string] keyword[try] : keyword[with] identifier[salt] . identifier[utils] . identifier[files] . identifier[fopen] ( identifier[offset_file] , literal[string] ) keyword[as] identifier[fp_] : keyword[for] identifier[line] keyword[in] identifier[fp_] : identifier[line] = identifier[salt] . identifier[utils] . identifier[stringutils] . identifier[to_unicode] ( identifier[line] ) keyword[if] identifier[line] . identifier[startswith] ( literal[string] ): keyword[return] literal[string] keyword[return] literal[string] keyword[except] identifier[IOError] keyword[as] identifier[exc] : keyword[if] identifier[exc] . identifier[errno] == identifier[errno] . identifier[ENOENT] : keyword[return] literal[string] keyword[raise] identifier[CommandExecutionError] ( literal[string] . identifier[format] ( identifier[offset_file] , identifier[exc] . identifier[strerror] ) ) keyword[if] literal[string] keyword[in] identifier[__grains__] [ literal[string] ]: identifier[offset_file] = literal[string] keyword[try] : keyword[with] identifier[salt] . identifier[utils] . identifier[files] . identifier[fopen] ( identifier[offset_file] , literal[string] ) keyword[as] identifier[fp_] : keyword[for] identifier[line] keyword[in] identifier[fp_] : identifier[line] = identifier[salt] . identifier[utils] . identifier[stringutils] . identifier[to_unicode] ( identifier[line] ) keyword[if] identifier[line] . identifier[startswith] ( literal[string] ): keyword[return] literal[string] keyword[return] literal[string] keyword[except] identifier[IOError] keyword[as] identifier[exc] : keyword[if] identifier[exc] . identifier[errno] == identifier[errno] . identifier[ENOENT] : keyword[return] literal[string] keyword[raise] identifier[CommandExecutionError] ( literal[string] . identifier[format] ( identifier[offset_file] , identifier[exc] . identifier[strerror] ) )
def get_hwclock(): """ Get current hardware clock setting (UTC or localtime) CLI Example: .. code-block:: bash salt '*' timezone.get_hwclock """ if salt.utils.path.which('timedatectl'): ret = _timedatectl() for line in (x.strip() for x in ret['stdout'].splitlines()): if 'rtc in local tz' in line.lower(): try: if line.split(':')[-1].strip().lower() == 'yes': return 'localtime' # depends on [control=['if'], data=[]] else: return 'UTC' # depends on [control=['try'], data=[]] except IndexError: pass # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['line']] msg = 'Failed to parse timedatectl output: {0}\nPlease file an issue with SaltStack'.format(ret['stdout']) raise CommandExecutionError(msg) # depends on [control=['if'], data=[]] else: os_family = __grains__['os_family'] for family in ('RedHat', 'Suse', 'NILinuxRT'): if family in os_family: return _get_adjtime_timezone() # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['family']] if 'Debian' in __grains__['os_family']: # Original way to look up hwclock on Debian-based systems try: with salt.utils.files.fopen('/etc/default/rcS', 'r') as fp_: for line in fp_: line = salt.utils.stringutils.to_unicode(line) if re.match('^\\s*#', line): continue # depends on [control=['if'], data=[]] if 'UTC=' in line: is_utc = line.rstrip('\n').split('=')[-1].lower() if is_utc == 'yes': return 'UTC' # depends on [control=['if'], data=[]] else: return 'localtime' # depends on [control=['if'], data=['line']] # depends on [control=['for'], data=['line']] # depends on [control=['with'], data=['fp_']] # depends on [control=['try'], data=[]] except IOError as exc: pass # depends on [control=['except'], data=[]] # Since Wheezy return _get_adjtime_timezone() # depends on [control=['if'], data=[]] if 'Gentoo' in __grains__['os_family']: if not os.path.exists('/etc/adjtime'): offset_file = '/etc/conf.d/hwclock' try: with salt.utils.files.fopen(offset_file, 'r') as fp_: for line in fp_: line = salt.utils.stringutils.to_unicode(line) if line.startswith('clock='): line = line.rstrip('\n') line = line.split('=')[-1].strip('\'"') if line == 'UTC': return line # depends on [control=['if'], data=['line']] if line == 'local': return 'LOCAL' # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['line']] raise CommandExecutionError('Correct offset value not found in {0}'.format(offset_file)) # depends on [control=['with'], data=['fp_']] # depends on [control=['try'], data=[]] except IOError as exc: raise CommandExecutionError('Problem reading offset file {0}: {1}'.format(offset_file, exc.strerror)) # depends on [control=['except'], data=['exc']] # depends on [control=['if'], data=[]] return _get_adjtime_timezone() # depends on [control=['if'], data=[]] if 'Solaris' in __grains__['os_family']: offset_file = '/etc/rtc_config' try: with salt.utils.files.fopen(offset_file, 'r') as fp_: for line in fp_: line = salt.utils.stringutils.to_unicode(line) if line.startswith('zone_info=GMT'): return 'UTC' # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['line']] return 'localtime' # depends on [control=['with'], data=['fp_']] # depends on [control=['try'], data=[]] except IOError as exc: if exc.errno == errno.ENOENT: # offset file does not exist return 'UTC' # depends on [control=['if'], data=[]] raise CommandExecutionError('Problem reading offset file {0}: {1}'.format(offset_file, exc.strerror)) # depends on 
[control=['except'], data=['exc']] # depends on [control=['if'], data=[]] if 'AIX' in __grains__['os_family']: offset_file = '/etc/environment' try: with salt.utils.files.fopen(offset_file, 'r') as fp_: for line in fp_: line = salt.utils.stringutils.to_unicode(line) if line.startswith('TZ=UTC'): return 'UTC' # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['line']] return 'localtime' # depends on [control=['with'], data=['fp_']] # depends on [control=['try'], data=[]] except IOError as exc: if exc.errno == errno.ENOENT: # offset file does not exist return 'UTC' # depends on [control=['if'], data=[]] raise CommandExecutionError('Problem reading offset file {0}: {1}'.format(offset_file, exc.strerror)) # depends on [control=['except'], data=['exc']] # depends on [control=['if'], data=[]]
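Every branch above that is not handled by timedatectl funnels into _get_adjtime_timezone(), which is not part of this record. A minimal sketch of what that helper plausibly does, assuming the standard three-line /etc/adjtime format whose last line is "UTC" or "LOCAL" (the real Salt helper may differ, e.g. it uses salt.utils.files.fopen):

import os

def _get_adjtime_timezone():
    """Sketch: read the hardware-clock mode from /etc/adjtime."""
    adjtime_file = '/etc/adjtime'
    if not os.path.exists(adjtime_file):
        # assumption: a missing adjtime file means the RTC keeps UTC
        return 'UTC'
    with open(adjtime_file) as fp_:
        # the third (last) line is either 'UTC' or 'LOCAL'
        mode = fp_.read().splitlines()[-1].strip()
    return 'UTC' if mode == 'UTC' else 'localtime'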
def restore_collection(backup): """ Restore from a collection backup. Args: backup (dict): mapping from collection key to the list of items that collection should contain after restoration. """ for k, v in six.iteritems(backup): del tf.get_collection_ref(k)[:] tf.get_collection_ref(k).extend(v)
def function[restore_collection, parameter[backup]]: constant[ Restore from a collection backup. Args: backup (dict): ] for taget[tuple[[<ast.Name object at 0x7da18f721d20>, <ast.Name object at 0x7da18f7234c0>]]] in starred[call[name[six].iteritems, parameter[name[backup]]]] begin[:] <ast.Delete object at 0x7da18f7224d0> call[call[name[tf].get_collection_ref, parameter[name[k]]].extend, parameter[name[v]]]
keyword[def] identifier[restore_collection] ( identifier[backup] ): literal[string] keyword[for] identifier[k] , identifier[v] keyword[in] identifier[six] . identifier[iteritems] ( identifier[backup] ): keyword[del] identifier[tf] . identifier[get_collection_ref] ( identifier[k] )[:] identifier[tf] . identifier[get_collection_ref] ( identifier[k] ). identifier[extend] ( identifier[v] )
def restore_collection(backup): """ Restore from a collection backup. Args: backup (dict): """ for (k, v) in six.iteritems(backup): del tf.get_collection_ref(k)[:] tf.get_collection_ref(k).extend(v) # depends on [control=['for'], data=[]]
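restore_collection expects a mapping from collection key to the items to restore; the matching snapshot helper is a one-liner. A sketch against the TF1 graph-collection API (backup_collection is an assumed name, not necessarily the project's own helper):

import tensorflow as tf

def backup_collection(keys):
    # Copy each named collection into a plain list so later mutations of the
    # live collection do not leak into the snapshot.
    return {k: list(tf.get_collection(k)) for k in keys}

# Typical round-trip:
#   backup = backup_collection(tf.get_default_graph().get_all_collection_keys())
#   ... mutate the graph ...
#   restore_collection(backup)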
def handle_entityref(self, entity): ''' Internal for parsing ''' inTag = self._inTag if len(inTag) > 0: inTag[-1].appendText('&%s;' %(entity,)) else: raise MultipleRootNodeException()
def function[handle_entityref, parameter[self, entity]]: constant[ Internal for parsing ] variable[inTag] assign[=] name[self]._inTag if compare[call[name[len], parameter[name[inTag]]] greater[>] constant[0]] begin[:] call[call[name[inTag]][<ast.UnaryOp object at 0x7da1b10c17e0>].appendText, parameter[binary_operation[constant[&%s;] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b10c1570>]]]]]
keyword[def] identifier[handle_entityref] ( identifier[self] , identifier[entity] ): literal[string] identifier[inTag] = identifier[self] . identifier[_inTag] keyword[if] identifier[len] ( identifier[inTag] )> literal[int] : identifier[inTag] [- literal[int] ]. identifier[appendText] ( literal[string] %( identifier[entity] ,)) keyword[else] : keyword[raise] identifier[MultipleRootNodeException] ()
def handle_entityref(self, entity): """ Internal for parsing """ inTag = self._inTag if len(inTag) > 0: inTag[-1].appendText('&%s;' % (entity,)) # depends on [control=['if'], data=[]] else: raise MultipleRootNodeException()
def _setOptionValueAdvAudit(option, value): ''' Helper function to update the Advanced Audit policy on the machine. This function modifies the two ``audit.csv`` files in the following locations: C:\\Windows\\Security\\Audit\\audit.csv C:\\Windows\\System32\\GroupPolicy\\Machine\\Microsoft\\Windows NT\\Audit\\audit.csv Then it applies those settings using ``auditpol`` After that, it updates ``__context__`` with the new setting Args: option (str): The name of the option to set value (str): The value to set. ['None', '0', '1', '2', '3'] Returns: bool: ``True`` if successful. Raises ``CommandExecutionError`` if the ``audit.csv`` files cannot be updated; a failure to apply the setting with ``auditpol`` is only logged, since the setting takes effect on the next policy refresh. ''' # Set the values in both audit.csv files if not _set_audit_file_data(option=option, value=value): raise CommandExecutionError('Failed to set audit.csv option: {0}' ''.format(option)) # Apply the settings locally if not _set_auditpol_data(option=option, value=value): # Only log this error, it will be in effect the next time the machine # updates its policy log.debug('Failed to apply audit setting: {0}'.format(option)) # Update __context__ if value is None: log.debug('LGPO: Removing Advanced Audit data: {0}'.format(option)) __context__['lgpo.adv_audit_data'].pop(option) else: log.debug('LGPO: Updating Advanced Audit data: {0}: {1}' ''.format(option, value)) __context__['lgpo.adv_audit_data'][option] = value return True
def function[_setOptionValueAdvAudit, parameter[option, value]]: constant[ Helper function to update the Advanced Audit policy on the machine. This function modifies the two ``audit.csv`` files in the following locations: C:\Windows\Security\Audit\audit.csv C:\Windows\System32\GroupPolicy\Machine\Microsoft\Windows NT\Audit\audit.csv Then it applies those settings using ``auditpol`` After that, it updates ``__context__`` with the new setting Args: option (str): The name of the option to set value (str): The value to set. ['None', '0', '1', '2', '3'] Returns: bool: ``True`` if successful, otherwise ``False`` ] if <ast.UnaryOp object at 0x7da207f01240> begin[:] <ast.Raise object at 0x7da207f012a0> if <ast.UnaryOp object at 0x7da207f01ba0> begin[:] call[name[log].debug, parameter[call[constant[Failed to apply audit setting: {0}].format, parameter[name[option]]]]] if compare[name[value] is constant[None]] begin[:] call[name[log].debug, parameter[call[constant[LGPO: Removing Advanced Audit data: {0}].format, parameter[name[option]]]]] call[call[name[__context__]][constant[lgpo.adv_audit_data]].pop, parameter[name[option]]] return[constant[True]]
keyword[def] identifier[_setOptionValueAdvAudit] ( identifier[option] , identifier[value] ): literal[string] keyword[if] keyword[not] identifier[_set_audit_file_data] ( identifier[option] = identifier[option] , identifier[value] = identifier[value] ): keyword[raise] identifier[CommandExecutionError] ( literal[string] literal[string] . identifier[format] ( identifier[option] )) keyword[if] keyword[not] identifier[_set_auditpol_data] ( identifier[option] = identifier[option] , identifier[value] = identifier[value] ): identifier[log] . identifier[debug] ( literal[string] . identifier[format] ( identifier[option] )) keyword[if] identifier[value] keyword[is] keyword[None] : identifier[log] . identifier[debug] ( literal[string] . identifier[format] ( identifier[option] )) identifier[__context__] [ literal[string] ]. identifier[pop] ( identifier[option] ) keyword[else] : identifier[log] . identifier[debug] ( literal[string] literal[string] . identifier[format] ( identifier[option] , identifier[value] )) identifier[__context__] [ literal[string] ][ identifier[option] ]= identifier[value] keyword[return] keyword[True]
def _setOptionValueAdvAudit(option, value): """ Helper function to update the Advanced Audit policy on the machine. This function modifies the two ``audit.csv`` files in the following locations: C:\\Windows\\Security\\Audit\\audit.csv C:\\Windows\\System32\\GroupPolicy\\Machine\\Microsoft\\Windows NT\\Audit\\audit.csv Then it applies those settings using ``auditpol`` After that, it updates ``__context__`` with the new setting Args: option (str): The name of the option to set value (str): The value to set. ['None', '0', '1', '2', '3'] Returns: bool: ``True`` if successful, otherwise ``False`` """ # Set the values in both audit.csv files if not _set_audit_file_data(option=option, value=value): raise CommandExecutionError('Failed to set audit.csv option: {0}'.format(option)) # depends on [control=['if'], data=[]] # Apply the settings locally if not _set_auditpol_data(option=option, value=value): # Only log this error, it will be in effect the next time the machine # updates its policy log.debug('Failed to apply audit setting: {0}'.format(option)) # depends on [control=['if'], data=[]] # Update __context__ if value is None: log.debug('LGPO: Removing Advanced Audit data: {0}'.format(option)) __context__['lgpo.adv_audit_data'].pop(option) # depends on [control=['if'], data=[]] else: log.debug('LGPO: Updating Advanced Audit data: {0}: {1}'.format(option, value)) __context__['lgpo.adv_audit_data'][option] = value return True
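For context, the accepted value strings map onto the standard Windows Advanced Audit settings. This mapping is background knowledge about auditpol, not something the function itself defines:

# Meaning of the value strings accepted above (auditpol convention)
AUDIT_SETTINGS = {
    'None': 'Not Configured',
    '0': 'No Auditing',
    '1': 'Success',
    '2': 'Failure',
    '3': 'Success and Failure',
}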
def main(): """Main entry-point for oz's cli""" # Hack to make user code available for import sys.path.append(".") # Run the specified action oz.initialize() retr = optfn.run(list(oz._actions.values())) if retr == optfn.ERROR_RETURN_CODE: sys.exit(-1) elif retr is None: sys.exit(0) elif isinstance(retr, int): sys.exit(retr) else: raise Exception("Unexpected return value from action: %s" % retr)
def function[main, parameter[]]: constant[Main entry-point for oz's cli] call[name[sys].path.append, parameter[constant[.]]] call[name[oz].initialize, parameter[]] variable[retr] assign[=] call[name[optfn].run, parameter[call[name[list], parameter[call[name[oz]._actions.values, parameter[]]]]]] if compare[name[retr] equal[==] name[optfn].ERROR_RETURN_CODE] begin[:] call[name[sys].exit, parameter[<ast.UnaryOp object at 0x7da2047ead10>]]
keyword[def] identifier[main] (): literal[string] identifier[sys] . identifier[path] . identifier[append] ( literal[string] ) identifier[oz] . identifier[initialize] () identifier[retr] = identifier[optfn] . identifier[run] ( identifier[list] ( identifier[oz] . identifier[_actions] . identifier[values] ())) keyword[if] identifier[retr] == identifier[optfn] . identifier[ERROR_RETURN_CODE] : identifier[sys] . identifier[exit] (- literal[int] ) keyword[elif] identifier[retr] == keyword[None] : identifier[sys] . identifier[exit] ( literal[int] ) keyword[elif] identifier[isinstance] ( identifier[retr] , identifier[int] ): identifier[sys] . identifier[exit] ( identifier[retr] ) keyword[else] : keyword[raise] identifier[Exception] ( literal[string] % identifier[retr] )
def main(): """Main entry-point for oz's cli""" # Hack to make user code available for import sys.path.append('.') # Run the specified action oz.initialize() retr = optfn.run(list(oz._actions.values())) if retr == optfn.ERROR_RETURN_CODE: sys.exit(-1) # depends on [control=['if'], data=[]] elif retr == None: sys.exit(0) # depends on [control=['if'], data=[]] elif isinstance(retr, int): sys.exit(retr) # depends on [control=['if'], data=[]] else: raise Exception('Unexpected return value from action: %s' % retr)
def get_all_sources(self): """Returns all sources for all batches of this Executor.""" result = [] for batch in self.batches: result.extend(batch.sources) return result
def function[get_all_sources, parameter[self]]: constant[Returns all sources for all batches of this Executor.] variable[result] assign[=] list[[]] for taget[name[batch]] in starred[name[self].batches] begin[:] call[name[result].extend, parameter[name[batch].sources]] return[name[result]]
keyword[def] identifier[get_all_sources] ( identifier[self] ): literal[string] identifier[result] =[] keyword[for] identifier[batch] keyword[in] identifier[self] . identifier[batches] : identifier[result] . identifier[extend] ( identifier[batch] . identifier[sources] ) keyword[return] identifier[result]
def get_all_sources(self): """Returns all sources for all batches of this Executor.""" result = [] for batch in self.batches: result.extend(batch.sources) # depends on [control=['for'], data=['batch']] return result
def load(cli, yaml_filename): """Creates waybill shims from a given yaml file of definitions. Expected definition: - name: NAME docker_id: IMAGE - name: NAME docker_id: IMAGE """ with open(yaml_filename, 'rb') as filehandle: # safe_load avoids arbitrary object construction; each entry is a # plain dict, so use key access rather than attribute access for waybill in yaml.safe_load(filehandle.read()): cli.create(waybill['name'], waybill['docker_id'])
def function[load, parameter[cli, yaml_filename]]: constant[Creates waybill shims from a given yaml file definiations] constant[Expected Definition: - name: NAME docker_id: IMAGE - name: NAME docker_id: IMAGE ] with call[name[open], parameter[name[yaml_filename], constant[rb]]] begin[:] for taget[name[waybill]] in starred[call[name[yaml].load, parameter[call[name[filehandle].read, parameter[]]]]] begin[:] call[name[cli].create, parameter[name[waybill].name, name[waybill].docker_id]]
keyword[def] identifier[load] ( identifier[cli] , identifier[yaml_filename] ): literal[string] literal[string] keyword[with] identifier[open] ( identifier[yaml_filename] , literal[string] ) keyword[as] identifier[filehandle] : keyword[for] identifier[waybill] keyword[in] identifier[yaml] . identifier[load] ( identifier[filehandle] . identifier[read] ()): identifier[cli] . identifier[create] ( identifier[waybill] . identifier[name] , identifier[waybill] . identifier[docker_id] )
def load(cli, yaml_filename): """Creates waybill shims from a given yaml file definiations""" 'Expected Definition:\n - name: NAME\n docker_id: IMAGE\n - name: NAME\n docker_id: IMAGE\n ' with open(yaml_filename, 'rb') as filehandle: for waybill in yaml.load(filehandle.read()): cli.create(waybill.name, waybill.docker_id) # depends on [control=['for'], data=['waybill']] # depends on [control=['with'], data=['filehandle']]
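A hedged driver showing the YAML shape the docstring documents; WaybillCLI is a hypothetical stand-in for whatever object is passed as cli:

import yaml

DOC = """\
- name: redis
  docker_id: redis:7
- name: web
  docker_id: myorg/web:latest
"""

class WaybillCLI:  # hypothetical stand-in for the real cli object
    def create(self, name, docker_id):
        print('shim %s -> %s' % (name, docker_id))

cli = WaybillCLI()
for waybill in yaml.safe_load(DOC):
    cli.create(waybill['name'], waybill['docker_id'])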
def incremental_neighbor_graph(X, precomputed=False, k=None, epsilon=None, weighting='none'): '''See neighbor_graph.''' assert ((k is not None) or (epsilon is not None) ), "Must provide `k` or `epsilon`" assert (_issequence(k) ^ _issequence(epsilon) ), "Exactly one of `k` or `epsilon` must be a sequence." assert weighting in ('binary','none'), "Invalid weighting param: " + weighting is_weighted = weighting == 'none' if precomputed: D = X else: D = pairwise_distances(X, metric='euclidean') # pre-sort for efficiency order = np.argsort(D)[:,1:] if k is None: k = D.shape[0] # generate the sequence of graphs # TODO: convert the core of these loops to Cython for speed W = np.zeros_like(D) I = np.arange(D.shape[0]) if _issequence(k): # varied k, fixed epsilon if epsilon is not None: D[D > epsilon] = 0 old_k = 0 for new_k in k: idx = order[:, old_k:new_k] dist = D[I, idx.T] W[I, idx.T] = dist if is_weighted else 1 yield Graph.from_adj_matrix(W) old_k = new_k else: # varied epsilon, fixed k idx = order[:,:k] dist = D[I, idx.T].T old_i = np.zeros(D.shape[0], dtype=int) for eps in epsilon: for i, row in enumerate(dist): oi = old_i[i] ni = oi + np.searchsorted(row[oi:], eps) rr = row[oi:ni] W[i, idx[i,oi:ni]] = rr if is_weighted else 1 old_i[i] = ni yield Graph.from_adj_matrix(W)
def function[incremental_neighbor_graph, parameter[X, precomputed, k, epsilon, weighting]]: constant[See neighbor_graph.] assert[<ast.BoolOp object at 0x7da1b26adf90>] assert[binary_operation[call[name[_issequence], parameter[name[k]]] <ast.BitXor object at 0x7da2590d6b00> call[name[_issequence], parameter[name[epsilon]]]]] assert[compare[name[weighting] in tuple[[<ast.Constant object at 0x7da1b26addb0>, <ast.Constant object at 0x7da1b26af400>]]]] variable[is_weighted] assign[=] compare[name[weighting] equal[==] constant[none]] if name[precomputed] begin[:] variable[D] assign[=] name[X] variable[order] assign[=] call[call[name[np].argsort, parameter[name[D]]]][tuple[[<ast.Slice object at 0x7da20c6e4ca0>, <ast.Slice object at 0x7da20c6e6f50>]]] if compare[name[k] is constant[None]] begin[:] variable[k] assign[=] call[name[D].shape][constant[0]] variable[W] assign[=] call[name[np].zeros_like, parameter[name[D]]] variable[I] assign[=] call[name[np].arange, parameter[call[name[D].shape][constant[0]]]] if call[name[_issequence], parameter[name[k]]] begin[:] if compare[name[epsilon] is_not constant[None]] begin[:] call[name[D]][compare[name[D] greater[>] name[epsilon]]] assign[=] constant[0] variable[old_k] assign[=] constant[0] for taget[name[new_k]] in starred[name[k]] begin[:] variable[idx] assign[=] call[name[order]][tuple[[<ast.Slice object at 0x7da2041d8520>, <ast.Slice object at 0x7da2041d8550>]]] variable[dist] assign[=] call[name[D]][tuple[[<ast.Name object at 0x7da20c76c9a0>, <ast.Attribute object at 0x7da20c76e620>]]] call[name[W]][tuple[[<ast.Name object at 0x7da20c76d1b0>, <ast.Attribute object at 0x7da20c76dd50>]]] assign[=] <ast.IfExp object at 0x7da20c76e2f0> <ast.Yield object at 0x7da20c76f6a0> variable[old_k] assign[=] name[new_k]
keyword[def] identifier[incremental_neighbor_graph] ( identifier[X] , identifier[precomputed] = keyword[False] , identifier[k] = keyword[None] , identifier[epsilon] = keyword[None] , identifier[weighting] = literal[string] ): literal[string] keyword[assert] (( identifier[k] keyword[is] keyword[not] keyword[None] ) keyword[or] ( identifier[epsilon] keyword[is] keyword[not] keyword[None] ) ), literal[string] keyword[assert] ( identifier[_issequence] ( identifier[k] )^ identifier[_issequence] ( identifier[epsilon] ) ), literal[string] keyword[assert] identifier[weighting] keyword[in] ( literal[string] , literal[string] ), literal[string] + identifier[weighting] identifier[is_weighted] = identifier[weighting] == literal[string] keyword[if] identifier[precomputed] : identifier[D] = identifier[X] keyword[else] : identifier[D] = identifier[pairwise_distances] ( identifier[X] , identifier[metric] = literal[string] ) identifier[order] = identifier[np] . identifier[argsort] ( identifier[D] )[:, literal[int] :] keyword[if] identifier[k] keyword[is] keyword[None] : identifier[k] = identifier[D] . identifier[shape] [ literal[int] ] identifier[W] = identifier[np] . identifier[zeros_like] ( identifier[D] ) identifier[I] = identifier[np] . identifier[arange] ( identifier[D] . identifier[shape] [ literal[int] ]) keyword[if] identifier[_issequence] ( identifier[k] ): keyword[if] identifier[epsilon] keyword[is] keyword[not] keyword[None] : identifier[D] [ identifier[D] > identifier[epsilon] ]= literal[int] identifier[old_k] = literal[int] keyword[for] identifier[new_k] keyword[in] identifier[k] : identifier[idx] = identifier[order] [:, identifier[old_k] : identifier[new_k] ] identifier[dist] = identifier[D] [ identifier[I] , identifier[idx] . identifier[T] ] identifier[W] [ identifier[I] , identifier[idx] . identifier[T] ]= identifier[dist] keyword[if] identifier[is_weighted] keyword[else] literal[int] keyword[yield] identifier[Graph] . identifier[from_adj_matrix] ( identifier[W] ) identifier[old_k] = identifier[new_k] keyword[else] : identifier[idx] = identifier[order] [:,: identifier[k] ] identifier[dist] = identifier[D] [ identifier[I] , identifier[idx] . identifier[T] ]. identifier[T] identifier[old_i] = identifier[np] . identifier[zeros] ( identifier[D] . identifier[shape] [ literal[int] ], identifier[dtype] = identifier[int] ) keyword[for] identifier[eps] keyword[in] identifier[epsilon] : keyword[for] identifier[i] , identifier[row] keyword[in] identifier[enumerate] ( identifier[dist] ): identifier[oi] = identifier[old_i] [ identifier[i] ] identifier[ni] = identifier[oi] + identifier[np] . identifier[searchsorted] ( identifier[row] [ identifier[oi] :], identifier[eps] ) identifier[rr] = identifier[row] [ identifier[oi] : identifier[ni] ] identifier[W] [ identifier[i] , identifier[idx] [ identifier[i] , identifier[oi] : identifier[ni] ]]= identifier[rr] keyword[if] identifier[is_weighted] keyword[else] literal[int] identifier[old_i] [ identifier[i] ]= identifier[ni] keyword[yield] identifier[Graph] . identifier[from_adj_matrix] ( identifier[W] )
def incremental_neighbor_graph(X, precomputed=False, k=None, epsilon=None, weighting='none'): """See neighbor_graph.""" assert k is not None or epsilon is not None, 'Must provide `k` or `epsilon`' assert _issequence(k) ^ _issequence(epsilon), 'Exactly one of `k` or `epsilon` must be a sequence.' assert weighting in ('binary', 'none'), 'Invalid weighting param: ' + weighting is_weighted = weighting == 'none' if precomputed: D = X # depends on [control=['if'], data=[]] else: D = pairwise_distances(X, metric='euclidean') # pre-sort for efficiency order = np.argsort(D)[:, 1:] if k is None: k = D.shape[0] # depends on [control=['if'], data=['k']] # generate the sequence of graphs # TODO: convert the core of these loops to Cython for speed W = np.zeros_like(D) I = np.arange(D.shape[0]) if _issequence(k): # varied k, fixed epsilon if epsilon is not None: D[D > epsilon] = 0 # depends on [control=['if'], data=['epsilon']] old_k = 0 for new_k in k: idx = order[:, old_k:new_k] dist = D[I, idx.T] W[I, idx.T] = dist if is_weighted else 1 yield Graph.from_adj_matrix(W) old_k = new_k # depends on [control=['for'], data=['new_k']] # depends on [control=['if'], data=[]] else: # varied epsilon, fixed k idx = order[:, :k] dist = D[I, idx.T].T old_i = np.zeros(D.shape[0], dtype=int) for eps in epsilon: for (i, row) in enumerate(dist): oi = old_i[i] ni = oi + np.searchsorted(row[oi:], eps) rr = row[oi:ni] W[i, idx[i, oi:ni]] = rr if is_weighted else 1 old_i[i] = ni # depends on [control=['for'], data=[]] yield Graph.from_adj_matrix(W) # depends on [control=['for'], data=['eps']]
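A usage sketch for the varied-k branch; Graph and _issequence come from the surrounding module, which is assumed to be importable alongside the generator:

import numpy as np

X = np.random.rand(40, 3)
# One Graph per k in the schedule; edges only ever get added as k grows,
# because W accumulates across iterations of the generator.
for g in incremental_neighbor_graph(X, k=[2, 5, 10]):
    pass  # e.g. inspect g here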
def handle_reply(self, msg): """Handle a reply message related to the current request. Reply messages not related to the current request go up to the base class method. Parameters ---------- msg : Message object The reply message to dispatch. """ # this may also result in reply_cb being None if no # reply_cb was passed to the request method if msg.mid is not None: _request, reply_cb, _inform_cb, user_data, timeout_handle = \ self._pop_async_request(msg.mid, None) else: request, _reply_cb, _inform_cb, _user_data, timeout_handle = \ self._peek_async_request(None, msg.name) if request is not None and request.mid is None: # we didn't send a mid so this is the request we want _request, reply_cb, _inform_cb, user_data, timeout_handle = \ self._pop_async_request(None, msg.name) else: reply_cb, user_data = None, None if timeout_handle is not None: self.ioloop.remove_timeout(timeout_handle) if reply_cb is None: reply_cb = super(AsyncClient, self).handle_reply # override user_data since handle_reply takes no user_data user_data = None try: if user_data is None: reply_cb(msg) else: reply_cb(msg, *user_data) except Exception: e_type, e_value, trace = sys.exc_info() reason = "\n".join(traceback.format_exception( e_type, e_value, trace, self._tb_limit)) self._logger.error("Callback reply %s FAIL: %s" % (msg.name, reason))
def function[handle_reply, parameter[self, msg]]: constant[Handle a reply message related to the current request. Reply messages not related to the current request go up to the base class method. Parameters ---------- msg : Message object The reply message to dispatch. ] if compare[name[msg].mid is_not constant[None]] begin[:] <ast.Tuple object at 0x7da1b04312d0> assign[=] call[name[self]._pop_async_request, parameter[name[msg].mid, constant[None]]] if compare[name[timeout_handle] is_not constant[None]] begin[:] call[name[self].ioloop.remove_timeout, parameter[name[timeout_handle]]] if compare[name[reply_cb] is constant[None]] begin[:] variable[reply_cb] assign[=] call[name[super], parameter[name[AsyncClient], name[self]]].handle_reply variable[user_data] assign[=] constant[None] <ast.Try object at 0x7da1b05d9150>
keyword[def] identifier[handle_reply] ( identifier[self] , identifier[msg] ): literal[string] keyword[if] identifier[msg] . identifier[mid] keyword[is] keyword[not] keyword[None] : identifier[_request] , identifier[reply_cb] , identifier[_inform_cb] , identifier[user_data] , identifier[timeout_handle] = identifier[self] . identifier[_pop_async_request] ( identifier[msg] . identifier[mid] , keyword[None] ) keyword[else] : identifier[request] , identifier[_reply_cb] , identifier[_inform_cb] , identifier[_user_data] , identifier[timeout_handle] = identifier[self] . identifier[_peek_async_request] ( keyword[None] , identifier[msg] . identifier[name] ) keyword[if] identifier[request] keyword[is] keyword[not] keyword[None] keyword[and] identifier[request] . identifier[mid] keyword[is] keyword[None] : identifier[_request] , identifier[reply_cb] , identifier[_inform_cb] , identifier[user_data] , identifier[timeout_handle] = identifier[self] . identifier[_pop_async_request] ( keyword[None] , identifier[msg] . identifier[name] ) keyword[else] : identifier[reply_cb] , identifier[user_data] = keyword[None] , keyword[None] keyword[if] identifier[timeout_handle] keyword[is] keyword[not] keyword[None] : identifier[self] . identifier[ioloop] . identifier[remove_timeout] ( identifier[timeout_handle] ) keyword[if] identifier[reply_cb] keyword[is] keyword[None] : identifier[reply_cb] = identifier[super] ( identifier[AsyncClient] , identifier[self] ). identifier[handle_reply] identifier[user_data] = keyword[None] keyword[try] : keyword[if] identifier[user_data] keyword[is] keyword[None] : identifier[reply_cb] ( identifier[msg] ) keyword[else] : identifier[reply_cb] ( identifier[msg] ,* identifier[user_data] ) keyword[except] identifier[Exception] : identifier[e_type] , identifier[e_value] , identifier[trace] = identifier[sys] . identifier[exc_info] () identifier[reason] = literal[string] . identifier[join] ( identifier[traceback] . identifier[format_exception] ( identifier[e_type] , identifier[e_value] , identifier[trace] , identifier[self] . identifier[_tb_limit] )) identifier[self] . identifier[_logger] . identifier[error] ( literal[string] % ( identifier[msg] . identifier[name] , identifier[reason] ))
def handle_reply(self, msg): """Handle a reply message related to the current request. Reply messages not related to the current request go up to the base class method. Parameters ---------- msg : Message object The reply message to dispatch. """ # this may also result in reply_cb being None if no # reply_cb was passed to the request method if msg.mid is not None: (_request, reply_cb, _inform_cb, user_data, timeout_handle) = self._pop_async_request(msg.mid, None) # depends on [control=['if'], data=[]] else: (request, _reply_cb, _inform_cb, _user_data, timeout_handle) = self._peek_async_request(None, msg.name) if request is not None and request.mid is None: # we didn't send a mid so this is the request we want (_request, reply_cb, _inform_cb, user_data, timeout_handle) = self._pop_async_request(None, msg.name) # depends on [control=['if'], data=[]] else: (reply_cb, user_data) = (None, None) if timeout_handle is not None: self.ioloop.remove_timeout(timeout_handle) # depends on [control=['if'], data=['timeout_handle']] if reply_cb is None: reply_cb = super(AsyncClient, self).handle_reply # override user_data since handle_reply takes no user_data user_data = None # depends on [control=['if'], data=['reply_cb']] try: if user_data is None: reply_cb(msg) # depends on [control=['if'], data=[]] else: reply_cb(msg, *user_data) # depends on [control=['try'], data=[]] except Exception: (e_type, e_value, trace) = sys.exc_info() reason = '\n'.join(traceback.format_exception(e_type, e_value, trace, self._tb_limit)) self._logger.error('Callback reply %s FAIL: %s' % (msg.name, reason)) # depends on [control=['except'], data=[]]
def SaveResourceUsage(self, client_id, status): """Update the resource usage of the hunt.""" # Per client stats. self.hunt_obj.ProcessClientResourcesStats(client_id, status) # Overall hunt resource usage. self.UpdateProtoResources(status)
def function[SaveResourceUsage, parameter[self, client_id, status]]: constant[Update the resource usage of the hunt.] call[name[self].hunt_obj.ProcessClientResourcesStats, parameter[name[client_id], name[status]]] call[name[self].UpdateProtoResources, parameter[name[status]]]
keyword[def] identifier[SaveResourceUsage] ( identifier[self] , identifier[client_id] , identifier[status] ): literal[string] identifier[self] . identifier[hunt_obj] . identifier[ProcessClientResourcesStats] ( identifier[client_id] , identifier[status] ) identifier[self] . identifier[UpdateProtoResources] ( identifier[status] )
def SaveResourceUsage(self, client_id, status): """Update the resource usage of the hunt.""" # Per client stats. self.hunt_obj.ProcessClientResourcesStats(client_id, status) # Overall hunt resource usage. self.UpdateProtoResources(status)
def total_stored(self, wanted, slots=None): """ Calculates the total number of items of that type in the current window or given slot range. Args: wanted: function(Slot) or Slot or itemID or (itemID, metadata) """ if slots is None: slots = self.window.slots wanted = make_slot_check(wanted) return sum(slot.amount for slot in slots if wanted(slot))
def function[total_stored, parameter[self, wanted, slots]]: constant[ Calculates the total number of items of that type in the current window or given slot range. Args: wanted: function(Slot) or Slot or itemID or (itemID, metadata) ] if compare[name[slots] is constant[None]] begin[:] variable[slots] assign[=] name[self].window.slots variable[wanted] assign[=] call[name[make_slot_check], parameter[name[wanted]]] return[call[name[sum], parameter[<ast.GeneratorExp object at 0x7da1b28518d0>]]]
keyword[def] identifier[total_stored] ( identifier[self] , identifier[wanted] , identifier[slots] = keyword[None] ): literal[string] keyword[if] identifier[slots] keyword[is] keyword[None] : identifier[slots] = identifier[self] . identifier[window] . identifier[slots] identifier[wanted] = identifier[make_slot_check] ( identifier[wanted] ) keyword[return] identifier[sum] ( identifier[slot] . identifier[amount] keyword[for] identifier[slot] keyword[in] identifier[slots] keyword[if] identifier[wanted] ( identifier[slot] ))
def total_stored(self, wanted, slots=None): """ Calculates the total number of items of that type in the current window or given slot range. Args: wanted: function(Slot) or Slot or itemID or (itemID, metadata) """ if slots is None: slots = self.window.slots # depends on [control=['if'], data=['slots']] wanted = make_slot_check(wanted) return sum((slot.amount for slot in slots if wanted(slot)))
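make_slot_check is not shown in this record. A minimal sketch of the dispatch it plausibly performs over the accepted wanted forms; the attribute names (item_id, damage) are assumptions about the Slot type:

def make_slot_check(wanted):
    if callable(wanted):                          # already a predicate
        return wanted
    if hasattr(wanted, 'item_id'):                # a Slot-like object
        return lambda slot: slot.item_id == wanted.item_id
    if isinstance(wanted, tuple):                 # (item_id, metadata)
        item_id, meta = wanted
        return lambda slot: (slot.item_id, slot.damage) == (item_id, meta)
    return lambda slot: slot.item_id == wanted    # bare item id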
async def create_and_store_my_did(wallet_handle: int, did_json: str) -> (str, str): """ Creates keys (signing and encryption keys) for a new DID (owned by the caller of the library). Identity's DID must be either explicitly provided, or taken as the first 16 bit of verkey. Saves the Identity DID with keys in a secured Wallet, so that it can be used to sign and encrypt transactions. :param wallet_handle: wallet handler (created by open_wallet). :param did_json: Identity information as json. Example: { "did": string, (optional; if not provided and cid param is false then the first 16 bit of the verkey will be used as a new DID; if not provided and cid is true then the full verkey will be used as a new DID; if provided, then keys will be replaced - key rotation use case) "seed": string, (optional) Seed that allows deterministic key creation (if not set random one will be created). Can be UTF-8, base64 or hex string. "crypto_type": string, (optional; if not set then ed25519 curve is used; currently only 'ed25519' value is supported for this field) "cid": bool, (optional; if not set then false is used;) } :return: DID and verkey (for verification of signature) """ logger = logging.getLogger(__name__) logger.debug("create_and_store_my_did: >>> wallet_handle: %r, did_json: %r", wallet_handle, did_json) if not hasattr(create_and_store_my_did, "cb"): logger.debug("create_wallet: Creating callback") create_and_store_my_did.cb = create_cb(CFUNCTYPE(None, c_int32, c_int32, c_char_p, c_char_p)) c_wallet_handle = c_int32(wallet_handle) c_did_json = c_char_p(did_json.encode('utf-8')) did, verkey = await do_call('indy_create_and_store_my_did', c_wallet_handle, c_did_json, create_and_store_my_did.cb) res = (did.decode(), verkey.decode()) logger.debug("create_and_store_my_did: <<< res: %r", res) return res
<ast.AsyncFunctionDef object at 0x7da18c4cf9a0>
keyword[async] keyword[def] identifier[create_and_store_my_did] ( identifier[wallet_handle] : identifier[int] , identifier[did_json] : identifier[str] )->( identifier[str] , identifier[str] ): literal[string] identifier[logger] = identifier[logging] . identifier[getLogger] ( identifier[__name__] ) identifier[logger] . identifier[debug] ( literal[string] , identifier[wallet_handle] , identifier[did_json] ) keyword[if] keyword[not] identifier[hasattr] ( identifier[create_and_store_my_did] , literal[string] ): identifier[logger] . identifier[debug] ( literal[string] ) identifier[create_and_store_my_did] . identifier[cb] = identifier[create_cb] ( identifier[CFUNCTYPE] ( keyword[None] , identifier[c_int32] , identifier[c_int32] , identifier[c_char_p] , identifier[c_char_p] )) identifier[c_wallet_handle] = identifier[c_int32] ( identifier[wallet_handle] ) identifier[c_did_json] = identifier[c_char_p] ( identifier[did_json] . identifier[encode] ( literal[string] )) identifier[did] , identifier[verkey] = keyword[await] identifier[do_call] ( literal[string] , identifier[c_wallet_handle] , identifier[c_did_json] , identifier[create_and_store_my_did] . identifier[cb] ) identifier[res] =( identifier[did] . identifier[decode] (), identifier[verkey] . identifier[decode] ()) identifier[logger] . identifier[debug] ( literal[string] , identifier[res] ) keyword[return] identifier[res]
async def create_and_store_my_did(wallet_handle: int, did_json: str) -> (str, str): """ Creates keys (signing and encryption keys) for a new DID (owned by the caller of the library). Identity's DID must be either explicitly provided, or taken as the first 16 bit of verkey. Saves the Identity DID with keys in a secured Wallet, so that it can be used to sign and encrypt transactions. :param wallet_handle: wallet handler (created by open_wallet). :param did_json: Identity information as json. Example: { "did": string, (optional; if not provided and cid param is false then the first 16 bit of the verkey will be used as a new DID; if not provided and cid is true then the full verkey will be used as a new DID; if provided, then keys will be replaced - key rotation use case) "seed": string, (optional) Seed that allows deterministic key creation (if not set random one will be created). Can be UTF-8, base64 or hex string. "crypto_type": string, (optional; if not set then ed25519 curve is used; currently only 'ed25519' value is supported for this field) "cid": bool, (optional; if not set then false is used;) } :return: DID and verkey (for verification of signature) """ logger = logging.getLogger(__name__) logger.debug('create_and_store_my_did: >>> wallet_handle: %r, did_json: %r', wallet_handle, did_json) if not hasattr(create_and_store_my_did, 'cb'): logger.debug('create_wallet: Creating callback') create_and_store_my_did.cb = create_cb(CFUNCTYPE(None, c_int32, c_int32, c_char_p, c_char_p)) # depends on [control=['if'], data=[]] c_wallet_handle = c_int32(wallet_handle) c_did_json = c_char_p(did_json.encode('utf-8')) (did, verkey) = await do_call('indy_create_and_store_my_did', c_wallet_handle, c_did_json, create_and_store_my_did.cb) res = (did.decode(), verkey.decode()) logger.debug('create_and_store_my_did: <<< res: %r', res) return res
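A hedged asyncio driver for the coroutine above; wallet_handle would come from the SDK's wallet-opening call, which is not shown here, and the seed value is illustrative:

import asyncio
import json

async def demo(wallet_handle):
    did, verkey = await create_and_store_my_did(
        wallet_handle,
        json.dumps({'seed': '000000000000000000000000Steward1'}))
    print('DID:', did, 'verkey:', verkey)

# asyncio.get_event_loop().run_until_complete(demo(wallet_handle))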
def cut_microsoft_quote(html_message): ''' Cuts splitter block and all following blocks. ''' #use EXSLT extensions to have a regex match() function with lxml ns = {"re": "http://exslt.org/regular-expressions"} #general pattern: @style='border:none;border-top:solid <color> 1.0pt;padding:3.0pt 0<unit> 0<unit> 0<unit>' #outlook 2007, 2010 (international) <color=#B5C4DF> <unit=cm> #outlook 2007, 2010 (american) <color=#B5C4DF> <unit=pt> #outlook 2013 (international) <color=#E1E1E1> <unit=cm> #outlook 2013 (american) <color=#E1E1E1> <unit=pt> #also handles a variant with a space after the semicolon splitter = html_message.xpath( #outlook 2007, 2010, 2013 (international, american) "//div[@style[re:match(., 'border:none; ?border-top:solid #(E1E1E1|B5C4DF) 1.0pt; ?" "padding:3.0pt 0(in|cm) 0(in|cm) 0(in|cm)')]]|" #windows mail "//div[@style='padding-top: 5px; " "border-top-color: rgb(229, 229, 229); " "border-top-width: 1px; border-top-style: solid;']" , namespaces=ns ) if splitter: splitter = splitter[0] #outlook 2010 if splitter == splitter.getparent().getchildren()[0]: splitter = splitter.getparent() else: #outlook 2003 splitter = html_message.xpath( "//div" "/div[@class='MsoNormal' and @align='center' " "and @style='text-align:center']" "/font" "/span" "/hr[@size='3' and @width='100%' and @align='center' " "and @tabindex='-1']" ) if len(splitter): splitter = splitter[0] splitter = splitter.getparent().getparent() splitter = splitter.getparent().getparent() if len(splitter): parent = splitter.getparent() after_splitter = splitter.getnext() while after_splitter is not None: parent.remove(after_splitter) after_splitter = splitter.getnext() parent.remove(splitter) return True return False
def function[cut_microsoft_quote, parameter[html_message]]: constant[ Cuts splitter block and all following blocks. ] variable[ns] assign[=] dictionary[[<ast.Constant object at 0x7da1b22e8c40>], [<ast.Constant object at 0x7da1b22ebfa0>]] variable[splitter] assign[=] call[name[html_message].xpath, parameter[constant[//div[@style[re:match(., 'border:none; ?border-top:solid #(E1E1E1|B5C4DF) 1.0pt; ?padding:3.0pt 0(in|cm) 0(in|cm) 0(in|cm)')]]|//div[@style='padding-top: 5px; border-top-color: rgb(229, 229, 229); border-top-width: 1px; border-top-style: solid;']]]] if name[splitter] begin[:] variable[splitter] assign[=] call[name[splitter]][constant[0]] if compare[name[splitter] equal[==] call[call[call[name[splitter].getparent, parameter[]].getchildren, parameter[]]][constant[0]]] begin[:] variable[splitter] assign[=] call[name[splitter].getparent, parameter[]] if call[name[len], parameter[name[splitter]]] begin[:] variable[parent] assign[=] call[name[splitter].getparent, parameter[]] variable[after_splitter] assign[=] call[name[splitter].getnext, parameter[]] while compare[name[after_splitter] is_not constant[None]] begin[:] call[name[parent].remove, parameter[name[after_splitter]]] variable[after_splitter] assign[=] call[name[splitter].getnext, parameter[]] call[name[parent].remove, parameter[name[splitter]]] return[constant[True]] return[constant[False]]
keyword[def] identifier[cut_microsoft_quote] ( identifier[html_message] ): literal[string] identifier[ns] ={ literal[string] : literal[string] } identifier[splitter] = identifier[html_message] . identifier[xpath] ( literal[string] literal[string] literal[string] literal[string] literal[string] , identifier[namespaces] = identifier[ns] ) keyword[if] identifier[splitter] : identifier[splitter] = identifier[splitter] [ literal[int] ] keyword[if] identifier[splitter] == identifier[splitter] . identifier[getparent] (). identifier[getchildren] ()[ literal[int] ]: identifier[splitter] = identifier[splitter] . identifier[getparent] () keyword[else] : identifier[splitter] = identifier[html_message] . identifier[xpath] ( literal[string] literal[string] literal[string] literal[string] literal[string] literal[string] literal[string] ) keyword[if] identifier[len] ( identifier[splitter] ): identifier[splitter] = identifier[splitter] [ literal[int] ] identifier[splitter] = identifier[splitter] . identifier[getparent] (). identifier[getparent] () identifier[splitter] = identifier[splitter] . identifier[getparent] (). identifier[getparent] () keyword[if] identifier[len] ( identifier[splitter] ): identifier[parent] = identifier[splitter] . identifier[getparent] () identifier[after_splitter] = identifier[splitter] . identifier[getnext] () keyword[while] identifier[after_splitter] keyword[is] keyword[not] keyword[None] : identifier[parent] . identifier[remove] ( identifier[after_splitter] ) identifier[after_splitter] = identifier[splitter] . identifier[getnext] () identifier[parent] . identifier[remove] ( identifier[splitter] ) keyword[return] keyword[True] keyword[return] keyword[False]
def cut_microsoft_quote(html_message): """ Cuts splitter block and all following blocks. """ #use EXSLT extensions to have a regex match() function with lxml ns = {'re': 'http://exslt.org/regular-expressions'} #general pattern: @style='border:none;border-top:solid <color> 1.0pt;padding:3.0pt 0<unit> 0<unit> 0<unit>' #outlook 2007, 2010 (international) <color=#B5C4DF> <unit=cm> #outlook 2007, 2010 (american) <color=#B5C4DF> <unit=pt> #outlook 2013 (international) <color=#E1E1E1> <unit=cm> #outlook 2013 (american) <color=#E1E1E1> <unit=pt> #also handles a variant with a space after the semicolon #outlook 2007, 2010, 2013 (international, american) #windows mail splitter = html_message.xpath("//div[@style[re:match(., 'border:none; ?border-top:solid #(E1E1E1|B5C4DF) 1.0pt; ?padding:3.0pt 0(in|cm) 0(in|cm) 0(in|cm)')]]|//div[@style='padding-top: 5px; border-top-color: rgb(229, 229, 229); border-top-width: 1px; border-top-style: solid;']", namespaces=ns) if splitter: splitter = splitter[0] #outlook 2010 if splitter == splitter.getparent().getchildren()[0]: splitter = splitter.getparent() # depends on [control=['if'], data=['splitter']] # depends on [control=['if'], data=[]] else: #outlook 2003 splitter = html_message.xpath("//div/div[@class='MsoNormal' and @align='center' and @style='text-align:center']/font/span/hr[@size='3' and @width='100%' and @align='center' and @tabindex='-1']") if len(splitter): splitter = splitter[0] splitter = splitter.getparent().getparent() splitter = splitter.getparent().getparent() # depends on [control=['if'], data=[]] if len(splitter): parent = splitter.getparent() after_splitter = splitter.getnext() while after_splitter is not None: parent.remove(after_splitter) after_splitter = splitter.getnext() # depends on [control=['while'], data=['after_splitter']] parent.remove(splitter) return True # depends on [control=['if'], data=[]] return False
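An illustrative call; the function mutates the parsed tree in place and returns True only when an Outlook splitter was found and removed. The HTML snippet is a made-up Outlook 2013 style block matching the regex above:

import lxml.html

reply_html = (
    '<html><body>Hi!'
    '<div style="border:none;border-top:solid #E1E1E1 1.0pt;'
    'padding:3.0pt 0in 0in 0in">quoted thread...</div></body></html>')
tree = lxml.html.fromstring(reply_html)
if cut_microsoft_quote(tree):
    cleaned = lxml.html.tostring(tree)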
def setup(self): """ Do any setup work needed to run i3status modules """ for conf_name in self.py3_config["i3s_modules"]: module = I3statusModule(conf_name, self) self.i3modules[conf_name] = module if module.is_time_module: self.time_modules.append(module)
def function[setup, parameter[self]]: constant[ Do any setup work needed to run i3status modules ] for taget[name[conf_name]] in starred[call[name[self].py3_config][constant[i3s_modules]]] begin[:] variable[module] assign[=] call[name[I3statusModule], parameter[name[conf_name], name[self]]] call[name[self].i3modules][name[conf_name]] assign[=] name[module] if name[module].is_time_module begin[:] call[name[self].time_modules.append, parameter[name[module]]]
keyword[def] identifier[setup] ( identifier[self] ): literal[string] keyword[for] identifier[conf_name] keyword[in] identifier[self] . identifier[py3_config] [ literal[string] ]: identifier[module] = identifier[I3statusModule] ( identifier[conf_name] , identifier[self] ) identifier[self] . identifier[i3modules] [ identifier[conf_name] ]= identifier[module] keyword[if] identifier[module] . identifier[is_time_module] : identifier[self] . identifier[time_modules] . identifier[append] ( identifier[module] )
def setup(self): """ Do any setup work needed to run i3status modules """ for conf_name in self.py3_config['i3s_modules']: module = I3statusModule(conf_name, self) self.i3modules[conf_name] = module if module.is_time_module: self.time_modules.append(module) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['conf_name']]
def set_identities(self, identities): """Set identities in the disco#info object. Remove all existing identities from `self`. :Parameters: - `identities`: list of identities or identity property sequences in ``(name, category, type)`` order. :Types: - `identities`: sequence of `DiscoIdentity` or sequence of sequences """ for identity in self.identities: identity.remove() for identity in identities: try: self.add_identity(identity.name, identity.category, identity.type) except AttributeError: self.add_identity(*identity)
def function[set_identities, parameter[self, identities]]: constant[Set identities in the disco#info object. Remove all existing identities from `self`. :Parameters: - `identities`: list of identities or identity properties (jid,node,category,type,name). :Types: - `identities`: sequence of `DiscoIdentity` or sequence of sequences ] for taget[name[identity]] in starred[name[self].identities] begin[:] call[name[identity].remove, parameter[]] for taget[name[identity]] in starred[name[identities]] begin[:] <ast.Try object at 0x7da20c6c45b0>
keyword[def] identifier[set_identities] ( identifier[self] , identifier[identities] ): literal[string] keyword[for] identifier[identity] keyword[in] identifier[self] . identifier[identities] : identifier[identity] . identifier[remove] () keyword[for] identifier[identity] keyword[in] identifier[identities] : keyword[try] : identifier[self] . identifier[add_identity] ( identifier[identity] . identifier[name] , identifier[identity] . identifier[category] , identifier[identity] . identifier[type] ) keyword[except] identifier[AttributeError] : identifier[self] . identifier[add_identity] (* identifier[identity] )
def set_identities(self, identities): """Set identities in the disco#info object. Remove all existing identities from `self`. :Parameters: - `identities`: list of identities or identity properties (jid,node,category,type,name). :Types: - `identities`: sequence of `DiscoIdentity` or sequence of sequences """ for identity in self.identities: identity.remove() # depends on [control=['for'], data=['identity']] for identity in identities: try: self.add_identity(identity.name, identity.category, identity.type) # depends on [control=['try'], data=[]] except AttributeError: self.add_identity(*identity) # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['identity']]
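Both accepted shapes, inferred from the try/except fallback above: DiscoIdentity objects, or bare sequences unpacked straight into add_identity (so the assumed tuple order mirrors its (name, category, type) call):

# disco_info is assumed to be an existing disco#info object
disco_info.set_identities([('My Client', 'client', 'pc')])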
def settimeout(self, timeout): """ Set the timeout on the websocket. timeout: timeout in seconds. """ self.sock_opt.timeout = timeout if self.sock: self.sock.settimeout(timeout)
def function[settimeout, parameter[self, timeout]]: constant[ Set the timeout to the websocket. timeout: timeout time(second). ] name[self].sock_opt.timeout assign[=] name[timeout] if name[self].sock begin[:] call[name[self].sock.settimeout, parameter[name[timeout]]]
keyword[def] identifier[settimeout] ( identifier[self] , identifier[timeout] ): literal[string] identifier[self] . identifier[sock_opt] . identifier[timeout] = identifier[timeout] keyword[if] identifier[self] . identifier[sock] : identifier[self] . identifier[sock] . identifier[settimeout] ( identifier[timeout] )
def settimeout(self, timeout): """ Set the timeout to the websocket. timeout: timeout time(second). """ self.sock_opt.timeout = timeout if self.sock: self.sock.settimeout(timeout) # depends on [control=['if'], data=[]]
def set_mode(self, mode): """ :param mode: a str, one of [home, away, night] :return: nothing """ values = {"desired_state": {"mode": mode}} response = self.api_interface.set_device_state(self, values) self._update_state_from_response(response)
def function[set_mode, parameter[self, mode]]: constant[ :param mode: a str, one of [home, away, night] :return: nothing ] variable[values] assign[=] dictionary[[<ast.Constant object at 0x7da1b255e230>], [<ast.Dict object at 0x7da1b255e260>]] variable[response] assign[=] call[name[self].api_interface.set_device_state, parameter[name[self], name[values]]] call[name[self]._update_state_from_response, parameter[name[response]]]
keyword[def] identifier[set_mode] ( identifier[self] , identifier[mode] ): literal[string] identifier[values] ={ literal[string] :{ literal[string] : identifier[mode] }} identifier[response] = identifier[self] . identifier[api_interface] . identifier[set_device_state] ( identifier[self] , identifier[values] ) identifier[self] . identifier[_update_state_from_response] ( identifier[response] )
def set_mode(self, mode): """ :param mode: a str, one of [home, away, night] :return: nothing """ values = {'desired_state': {'mode': mode}} response = self.api_interface.set_device_state(self, values) self._update_state_from_response(response)
def make_multi_entry(plist, pkg_pyvers, ver_dict): """Generate Python interpreter version entries.""" for pyver in pkg_pyvers: pver = pyver[2] + "." + pyver[3:] plist.append("Python {0}: {1}".format(pver, ops_to_words(ver_dict[pyver])))
def function[make_multi_entry, parameter[plist, pkg_pyvers, ver_dict]]: constant[Generate Python interpreter version entries.] for taget[name[pyver]] in starred[name[pkg_pyvers]] begin[:] variable[pver] assign[=] binary_operation[binary_operation[call[name[pyver]][constant[2]] + constant[.]] + call[name[pyver]][<ast.Slice object at 0x7da1b02094b0>]] call[name[plist].append, parameter[call[constant[Python {0}: {1}].format, parameter[name[pver], call[name[ops_to_words], parameter[call[name[ver_dict]][name[pyver]]]]]]]]
keyword[def] identifier[make_multi_entry] ( identifier[plist] , identifier[pkg_pyvers] , identifier[ver_dict] ): literal[string] keyword[for] identifier[pyver] keyword[in] identifier[pkg_pyvers] : identifier[pver] = identifier[pyver] [ literal[int] ]+ literal[string] + identifier[pyver] [ literal[int] :] identifier[plist] . identifier[append] ( literal[string] . identifier[format] ( identifier[pver] , identifier[ops_to_words] ( identifier[ver_dict] [ identifier[pyver] ])))
def make_multi_entry(plist, pkg_pyvers, ver_dict): """Generate Python interpreter version entries.""" for pyver in pkg_pyvers: pver = pyver[2] + '.' + pyver[3:] plist.append('Python {0}: {1}'.format(pver, ops_to_words(ver_dict[pyver]))) # depends on [control=['for'], data=['pyver']]
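A worked example of the 'pyXY' tag slicing the function relies on:

pyver = 'py36'
pver = pyver[2] + '.' + pyver[3:]
assert pver == '3.6'   # 'py27' -> '2.7', 'py310' -> '3.10'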
def explode(self): """ Fill members with contactgroup_members :return: None """ # We do not want the same group to be exploded again and again, # so we tag it for tmp_cg in list(self.items.values()): tmp_cg.already_exploded = False for contactgroup in list(self.items.values()): if contactgroup.already_exploded: continue # get_contacts_by_explosion is a recursive function, so we tag each # group first so that we do not loop on membership cycles for tmp_cg in list(self.items.values()): tmp_cg.rec_tag = False contactgroup.get_contacts_by_explosion(self) # We clean the tags for tmp_cg in list(self.items.values()): if hasattr(tmp_cg, 'rec_tag'): del tmp_cg.rec_tag del tmp_cg.already_exploded
def function[explode, parameter[self]]: constant[ Fill members with contactgroup_members :return:None ] for taget[name[tmp_cg]] in starred[call[name[list], parameter[call[name[self].items.values, parameter[]]]]] begin[:] name[tmp_cg].already_exploded assign[=] constant[False] for taget[name[contactgroup]] in starred[call[name[list], parameter[call[name[self].items.values, parameter[]]]]] begin[:] if name[contactgroup].already_exploded begin[:] continue for taget[name[tmp_cg]] in starred[call[name[list], parameter[call[name[self].items.values, parameter[]]]]] begin[:] name[tmp_cg].rec_tag assign[=] constant[False] call[name[contactgroup].get_contacts_by_explosion, parameter[name[self]]] for taget[name[tmp_cg]] in starred[call[name[list], parameter[call[name[self].items.values, parameter[]]]]] begin[:] if call[name[hasattr], parameter[name[tmp_cg], constant[rec_tag]]] begin[:] <ast.Delete object at 0x7da18fe920e0> <ast.Delete object at 0x7da18fe90f10>
keyword[def] identifier[explode] ( identifier[self] ): literal[string] keyword[for] identifier[tmp_cg] keyword[in] identifier[list] ( identifier[self] . identifier[items] . identifier[values] ()): identifier[tmp_cg] . identifier[already_exploded] = keyword[False] keyword[for] identifier[contactgroup] keyword[in] identifier[list] ( identifier[self] . identifier[items] . identifier[values] ()): keyword[if] identifier[contactgroup] . identifier[already_exploded] : keyword[continue] keyword[for] identifier[tmp_cg] keyword[in] identifier[list] ( identifier[self] . identifier[items] . identifier[values] ()): identifier[tmp_cg] . identifier[rec_tag] = keyword[False] identifier[contactgroup] . identifier[get_contacts_by_explosion] ( identifier[self] ) keyword[for] identifier[tmp_cg] keyword[in] identifier[list] ( identifier[self] . identifier[items] . identifier[values] ()): keyword[if] identifier[hasattr] ( identifier[tmp_cg] , literal[string] ): keyword[del] identifier[tmp_cg] . identifier[rec_tag] keyword[del] identifier[tmp_cg] . identifier[already_exploded]
def explode(self): """ Fill members with contactgroup_members :return:None """ # We do not want a same hg to be explode again and again # so we tag it for tmp_cg in list(self.items.values()): tmp_cg.already_exploded = False # depends on [control=['for'], data=['tmp_cg']] for contactgroup in list(self.items.values()): if contactgroup.already_exploded: continue # depends on [control=['if'], data=[]] # get_contacts_by_explosion is a recursive # function, so we must tag hg so we do not loop for tmp_cg in list(self.items.values()): tmp_cg.rec_tag = False # depends on [control=['for'], data=['tmp_cg']] contactgroup.get_contacts_by_explosion(self) # depends on [control=['for'], data=['contactgroup']] # We clean the tags for tmp_cg in list(self.items.values()): if hasattr(tmp_cg, 'rec_tag'): del tmp_cg.rec_tag # depends on [control=['if'], data=[]] del tmp_cg.already_exploded # depends on [control=['for'], data=['tmp_cg']]
def create_dir(self, params, delete=False): """ Creates a subdirectory for the experiment and, if the delete flag is true, deletes any existing files in it. Then writes the current experiment.cfg file into the folder. """ # create experiment path and subdir fullpath = os.path.join(params['path'], params['name']) self.mkdir(fullpath) # delete old histories if --del flag is active if delete and os.path.exists(fullpath): os.system('rm %s/*' % fullpath) # write a config file for this single exp. in the folder self.write_config_file(params, fullpath)
def function[create_dir, parameter[self, params, delete]]: constant[ creates a subdirectory for the experiment, and deletes existing files, if the delete flag is true. then writes the current experiment.cfg file in the folder. ] variable[fullpath] assign[=] call[name[os].path.join, parameter[call[name[params]][constant[path]], call[name[params]][constant[name]]]] call[name[self].mkdir, parameter[name[fullpath]]] if <ast.BoolOp object at 0x7da1b088bf40> begin[:] call[name[os].system, parameter[binary_operation[constant[rm %s/*] <ast.Mod object at 0x7da2590d6920> name[fullpath]]]] call[name[self].write_config_file, parameter[name[params], name[fullpath]]]
keyword[def] identifier[create_dir] ( identifier[self] , identifier[params] , identifier[delete] = keyword[False] ): literal[string] identifier[fullpath] = identifier[os] . identifier[path] . identifier[join] ( identifier[params] [ literal[string] ], identifier[params] [ literal[string] ]) identifier[self] . identifier[mkdir] ( identifier[fullpath] ) keyword[if] identifier[delete] keyword[and] identifier[os] . identifier[path] . identifier[exists] ( identifier[fullpath] ): identifier[os] . identifier[system] ( literal[string] % identifier[fullpath] ) identifier[self] . identifier[write_config_file] ( identifier[params] , identifier[fullpath] )
def create_dir(self, params, delete=False): """ creates a subdirectory for the experiment, and deletes existing files, if the delete flag is true. then writes the current experiment.cfg file in the folder. """ # create experiment path and subdir fullpath = os.path.join(params['path'], params['name']) self.mkdir(fullpath) # delete old histories if --del flag is active if delete and os.path.exists(fullpath): os.system('rm %s/*' % fullpath) # depends on [control=['if'], data=[]] # write a config file for this single exp. in the folder self.write_config_file(params, fullpath)
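The rm shell-out works but is fragile for paths containing spaces; a standalone sketch of the same flow using shutil instead (a hypothetical helper, not part of the class above):

import os
import shutil

def create_experiment_dir(path, name, delete=False):
    # Same flow: build the experiment folder, optionally clearing it first.
    fullpath = os.path.join(path, name)
    if delete and os.path.isdir(fullpath):
        shutil.rmtree(fullpath)   # avoids the quoting pitfalls of os.system
    os.makedirs(fullpath, exist_ok=True)
    return fullpath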
def get_timer_event_definition(self, timerEventDefinition): """ Parse the timerEventDefinition node and return an instance of TimerEventDefinition This currently only supports the timeDate node for specifying an expiry time for the timer. """ timeDate = first(self.xpath('.//bpmn:timeDate')) return TimerEventDefinition( self.node.get('name', timeDate.text), self.parser.parse_condition( timeDate.text, None, None, None, None, self))
def function[get_timer_event_definition, parameter[self, timerEventDefinition]]: constant[ Parse the timerEventDefinition node and return an instance of TimerEventDefinition This currently only supports the timeDate node for specifying an expiry time for the timer. ] variable[timeDate] assign[=] call[name[first], parameter[call[name[self].xpath, parameter[constant[.//bpmn:timeDate]]]]] return[call[name[TimerEventDefinition], parameter[call[name[self].node.get, parameter[constant[name], name[timeDate].text]], call[name[self].parser.parse_condition, parameter[name[timeDate].text, constant[None], constant[None], constant[None], constant[None], name[self]]]]]]
keyword[def] identifier[get_timer_event_definition] ( identifier[self] , identifier[timerEventDefinition] ): literal[string] identifier[timeDate] = identifier[first] ( identifier[self] . identifier[xpath] ( literal[string] )) keyword[return] identifier[TimerEventDefinition] ( identifier[self] . identifier[node] . identifier[get] ( literal[string] , identifier[timeDate] . identifier[text] ), identifier[self] . identifier[parser] . identifier[parse_condition] ( identifier[timeDate] . identifier[text] , keyword[None] , keyword[None] , keyword[None] , keyword[None] , identifier[self] ))
def get_timer_event_definition(self, timerEventDefinition): """ Parse the timerEventDefinition node and return an instance of TimerEventDefinition This currently only supports the timeDate node for specifying an expiry time for the timer. """ timeDate = first(self.xpath('.//bpmn:timeDate')) return TimerEventDefinition(self.node.get('name', timeDate.text), self.parser.parse_condition(timeDate.text, None, None, None, None, self))
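The .//bpmn:timeDate XPath only resolves with a namespace map; a small lxml sketch of the same lookup (the namespace URI is the standard BPMN 2.0 MODEL one, assumed here):

from lxml import etree

BPMN_NS = {'bpmn': 'http://www.omg.org/spec/BPMN/20100524/MODEL'}
xml = (b'<definitions xmlns="http://www.omg.org/spec/BPMN/20100524/MODEL">'
       b'<timerEventDefinition><timeDate>2030-01-01T00:00:00</timeDate>'
       b'</timerEventDefinition></definitions>')
root = etree.fromstring(xml)
time_date = root.xpath('.//bpmn:timeDate', namespaces=BPMN_NS)[0]
print(time_date.text)  # 2030-01-01T00:00:00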
def has_symbol(self, symbol, as_of=None): """ Return True if the 'symbol' exists in this library AND the symbol isn't deleted in the specified as_of. It's possible for a deleted symbol to exist in older snapshots. Parameters ---------- symbol : `str` symbol name for the item as_of : `str` or int or `datetime.datetime` Return the data as it was as_of the point in time. `int` : specific version number `str` : snapshot name which contains the version `datetime.datetime` : the version of the data that existed as_of the requested point in time """ try: # Always use the primary for has_symbol, it's safer self._read_metadata(symbol, as_of=as_of, read_preference=ReadPreference.PRIMARY) return True except NoDataFoundException: return False
def function[has_symbol, parameter[self, symbol, as_of]]: constant[ Return True if the 'symbol' exists in this library AND the symbol isn't deleted in the specified as_of. It's possible for a deleted symbol to exist in older snapshots. Parameters ---------- symbol : `str` symbol name for the item as_of : `str` or int or `datetime.datetime` Return the data as it was as_of the point in time. `int` : specific version number `str` : snapshot name which contains the version `datetime.datetime` : the version of the data that existed as_of the requested point in time ] <ast.Try object at 0x7da204621300>
keyword[def] identifier[has_symbol] ( identifier[self] , identifier[symbol] , identifier[as_of] = keyword[None] ): literal[string] keyword[try] : identifier[self] . identifier[_read_metadata] ( identifier[symbol] , identifier[as_of] = identifier[as_of] , identifier[read_preference] = identifier[ReadPreference] . identifier[PRIMARY] ) keyword[return] keyword[True] keyword[except] identifier[NoDataFoundException] : keyword[return] keyword[False]
def has_symbol(self, symbol, as_of=None): """ Return True if the 'symbol' exists in this library AND the symbol isn't deleted in the specified as_of. It's possible for a deleted symbol to exist in older snapshots. Parameters ---------- symbol : `str` symbol name for the item as_of : `str` or int or `datetime.datetime` Return the data as it was as_of the point in time. `int` : specific version number `str` : snapshot name which contains the version `datetime.datetime` : the version of the data that existed as_of the requested point in time """ try: # Always use the primary for has_symbol, it's safer self._read_metadata(symbol, as_of=as_of, read_preference=ReadPreference.PRIMARY) return True # depends on [control=['try'], data=[]] except NoDataFoundException: return False # depends on [control=['except'], data=[]]
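The as_of parameter is polymorphic across three documented forms. A runnable sketch of how those forms can be told apart (illustrative only, not how the library resolves versions internally):

from datetime import datetime

def describe_as_of(as_of):
    # The three documented as_of forms, in the order they must be checked.
    if as_of is None:
        return 'latest version'
    if isinstance(as_of, int):
        return 'version number %d' % as_of
    if isinstance(as_of, datetime):
        return 'as of %s' % as_of.isoformat()
    return 'snapshot %r' % as_of

for as_of in (None, 7, 'month_end', datetime(2020, 1, 31)):
    print(describe_as_of(as_of))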
def delete_user(self, user_id):
        """Delete the specified user.

        :param str user_id: the ID of the user to delete (Required)
        :returns: void
        """
        api = self._get_api(iam.AccountAdminApi)
        api.delete_user(user_id)
        return
def function[delete_user, parameter[self, user_id]]: constant[Delete user specified user. :param str user_id: the ID of the user to delete (Required) :returns: void ] variable[api] assign[=] call[name[self]._get_api, parameter[name[iam].AccountAdminApi]] call[name[api].delete_user, parameter[name[user_id]]] return[None]
keyword[def] identifier[delete_user] ( identifier[self] , identifier[user_id] ): literal[string] identifier[api] = identifier[self] . identifier[_get_api] ( identifier[iam] . identifier[AccountAdminApi] ) identifier[api] . identifier[delete_user] ( identifier[user_id] ) keyword[return]
def delete_user(self, user_id): """Delete user specified user. :param str user_id: the ID of the user to delete (Required) :returns: void """ api = self._get_api(iam.AccountAdminApi) api.delete_user(user_id) return
def filter_from_mapping(self, mapping, backend=None): """ Return mappings that either exactly correspond to the given `mapping` tuple, or, if the second item of `mapping` is `None`, include mappings that only match the first item of `mapping` (useful to show all mappings for a given project). """ def mapping_filter(key_item): key, item = key_item return ( (mapping is None or item.mapping == mapping or (mapping[1] is None and item.mapping is not None and item.mapping[0] == mapping[0])) and (backend is None or item.backend == backend) ) items = [item for item in six.iteritems(self) if mapping_filter(item)] aliases = collections.OrderedDict( sorted(items, key=lambda alias: alias[1].mapping if alias[1] is not None else (0, 0)) ) return aliases
def function[filter_from_mapping, parameter[self, mapping, backend]]: constant[ Return mappings that either exactly correspond to the given `mapping` tuple, or, if the second item of `mapping` is `None`, include mappings that only match the first item of `mapping` (useful to show all mappings for a given project). ] def function[mapping_filter, parameter[key_item]]: <ast.Tuple object at 0x7da1b26aca90> assign[=] name[key_item] return[<ast.BoolOp object at 0x7da1b26af9a0>] variable[items] assign[=] <ast.ListComp object at 0x7da1b1913850> variable[aliases] assign[=] call[name[collections].OrderedDict, parameter[call[name[sorted], parameter[name[items]]]]] return[name[aliases]]
keyword[def] identifier[filter_from_mapping] ( identifier[self] , identifier[mapping] , identifier[backend] = keyword[None] ): literal[string] keyword[def] identifier[mapping_filter] ( identifier[key_item] ): identifier[key] , identifier[item] = identifier[key_item] keyword[return] ( ( identifier[mapping] keyword[is] keyword[None] keyword[or] identifier[item] . identifier[mapping] == identifier[mapping] keyword[or] ( identifier[mapping] [ literal[int] ] keyword[is] keyword[None] keyword[and] identifier[item] . identifier[mapping] keyword[is] keyword[not] keyword[None] keyword[and] identifier[item] . identifier[mapping] [ literal[int] ]== identifier[mapping] [ literal[int] ])) keyword[and] ( identifier[backend] keyword[is] keyword[None] keyword[or] identifier[item] . identifier[backend] == identifier[backend] ) ) identifier[items] =[ identifier[item] keyword[for] identifier[item] keyword[in] identifier[six] . identifier[iteritems] ( identifier[self] ) keyword[if] identifier[mapping_filter] ( identifier[item] )] identifier[aliases] = identifier[collections] . identifier[OrderedDict] ( identifier[sorted] ( identifier[items] , identifier[key] = keyword[lambda] identifier[alias] : identifier[alias] [ literal[int] ]. identifier[mapping] keyword[if] identifier[alias] [ literal[int] ] keyword[is] keyword[not] keyword[None] keyword[else] ( literal[int] , literal[int] )) ) keyword[return] identifier[aliases]
def filter_from_mapping(self, mapping, backend=None): """ Return mappings that either exactly correspond to the given `mapping` tuple, or, if the second item of `mapping` is `None`, include mappings that only match the first item of `mapping` (useful to show all mappings for a given project). """ def mapping_filter(key_item): (key, item) = key_item return (mapping is None or item.mapping == mapping or (mapping[1] is None and item.mapping is not None and (item.mapping[0] == mapping[0]))) and (backend is None or item.backend == backend) items = [item for item in six.iteritems(self) if mapping_filter(item)] aliases = collections.OrderedDict(sorted(items, key=lambda alias: alias[1].mapping if alias[1] is not None else (0, 0))) return aliases
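The project-wide wildcard (second element of mapping set to None) is the subtle part of the predicate; a runnable toy of the same logic:

from collections import namedtuple

Item = namedtuple('Item', 'mapping backend')
aliases = {
    'web': Item(('acme', 'website'), 'jira'),
    'api': Item(('acme', 'api'), 'jira'),
    'oss': Item(('other', 'lib'), 'github'),
}

def matches(item, mapping, backend=None):
    # Same predicate as above: exact match, or whole-project match
    # when the second element of `mapping` is None.
    return ((mapping is None or item.mapping == mapping
             or (mapping[1] is None and item.mapping is not None
                 and item.mapping[0] == mapping[0]))
            and (backend is None or item.backend == backend))

print([k for k, v in aliases.items() if matches(v, ('acme', None))])
# ['web', 'api']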
def staticfy(html_file, args=argparse.ArgumentParser()):
    """
    Staticfy method.

    Loop through each line of the file and replace the old links
    """
    # unpack arguments
    static_endpoint = args.static_endpoint or 'static'
    framework = args.framework or os.getenv('STATICFY_FRAMEWORK', 'flask')
    add_tags = args.add_tags or {}
    exc_tags = args.exc_tags or {}
    namespace = args.namespace or {}

    # default tags
    tags = {('img', 'src'), ('link', 'href'), ('script', 'src')}

    # generate additional_tags
    add_tags = {(tag, attr) for tag, attr in add_tags.items()}
    tags.update(add_tags)

    # remove tags if any were specified
    exc_tags = {(tag, attr) for tag, attr in exc_tags.items()}
    tags = tags - exc_tags

    # get elements we're interested in
    matches = get_elements(html_file, tags)

    # transform old links to new links
    transformed = transform(matches, framework, namespace, static_endpoint)

    return replace_lines(html_file, transformed)
def function[staticfy, parameter[html_file, args]]: constant[ Staticfy method. Loop through each line of the file and replaces the old links ] variable[static_endpoint] assign[=] <ast.BoolOp object at 0x7da18c4ccb50> variable[framework] assign[=] <ast.BoolOp object at 0x7da18c4cf9d0> variable[add_tags] assign[=] <ast.BoolOp object at 0x7da18c4cf850> variable[exc_tags] assign[=] <ast.BoolOp object at 0x7da18c4cd6f0> variable[namespace] assign[=] <ast.BoolOp object at 0x7da18dc04b20> variable[tags] assign[=] <ast.Set object at 0x7da18dc05b40> variable[add_tags] assign[=] <ast.SetComp object at 0x7da18dc05450> call[name[tags].update, parameter[name[add_tags]]] variable[exc_tags] assign[=] <ast.SetComp object at 0x7da18dc04d90> variable[tags] assign[=] binary_operation[name[tags] - name[exc_tags]] variable[matches] assign[=] call[name[get_elements], parameter[name[html_file], name[tags]]] variable[transformed] assign[=] call[name[transform], parameter[name[matches], name[framework], name[namespace], name[static_endpoint]]] return[call[name[replace_lines], parameter[name[html_file], name[transformed]]]]
keyword[def] identifier[staticfy] ( identifier[html_file] , identifier[args] = identifier[argparse] . identifier[ArgumentParser] ()): literal[string] identifier[static_endpoint] = identifier[args] . identifier[static_endpoint] keyword[or] literal[string] identifier[framework] = identifier[args] . identifier[framework] keyword[or] identifier[os] . identifier[getenv] ( literal[string] , literal[string] ) identifier[add_tags] = identifier[args] . identifier[add_tags] keyword[or] {} identifier[exc_tags] = identifier[args] . identifier[exc_tags] keyword[or] {} identifier[namespace] = identifier[args] . identifier[namespace] keyword[or] {} identifier[tags] ={( literal[string] , literal[string] ),( literal[string] , literal[string] ),( literal[string] , literal[string] )} identifier[add_tags] ={( identifier[tag] , identifier[attr] ) keyword[for] identifier[tag] , identifier[attr] keyword[in] identifier[add_tags] . identifier[items] ()} identifier[tags] . identifier[update] ( identifier[add_tags] ) identifier[exc_tags] ={( identifier[tag] , identifier[attr] ) keyword[for] identifier[tag] , identifier[attr] keyword[in] identifier[exc_tags] . identifier[items] ()} identifier[tags] = identifier[tags] - identifier[exc_tags] identifier[matches] = identifier[get_elements] ( identifier[html_file] , identifier[tags] ) identifier[transformed] = identifier[transform] ( identifier[matches] , identifier[framework] , identifier[namespace] , identifier[static_endpoint] ) keyword[return] identifier[replace_lines] ( identifier[html_file] , identifier[transformed] )
def staticfy(html_file, args=argparse.ArgumentParser()): """ Staticfy method. Loop through each line of the file and replaces the old links """ # unpack arguments static_endpoint = args.static_endpoint or 'static' framework = args.framework or os.getenv('STATICFY_FRAMEWORK', 'flask') add_tags = args.add_tags or {} exc_tags = args.exc_tags or {} namespace = args.namespace or {} # default tags tags = {('img', 'src'), ('link', 'href'), ('script', 'src')} # generate additional_tags add_tags = {(tag, attr) for (tag, attr) in add_tags.items()} tags.update(add_tags) # remove tags if any was specified exc_tags = {(tag, attr) for (tag, attr) in exc_tags.items()} tags = tags - exc_tags # get elements we're interested in matches = get_elements(html_file, tags) # transform old links to new links transformed = transform(matches, framework, namespace, static_endpoint) return replace_lines(html_file, transformed)
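The tag set is plain set algebra: defaults, plus additions, minus exclusions. A tiny runnable illustration with made-up add/exclude dicts:

tags = {('img', 'src'), ('link', 'href'), ('script', 'src')}
add_tags = {'video': 'src'}     # e.g. passed on the command line
exc_tags = {'script': 'src'}    # e.g. passed on the command line

tags |= {(tag, attr) for tag, attr in add_tags.items()}
tags -= {(tag, attr) for tag, attr in exc_tags.items()}
print(sorted(tags))  # [('img', 'src'), ('link', 'href'), ('video', 'src')]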
def do_alias(self, args: argparse.Namespace) -> None: """Manage aliases""" func = getattr(args, 'func', None) if func is not None: # Call whatever sub-command function was selected func(self, args) else: # No sub-command was provided, so call help self.do_help('alias')
def function[do_alias, parameter[self, args]]: constant[Manage aliases] variable[func] assign[=] call[name[getattr], parameter[name[args], constant[func], constant[None]]] if compare[name[func] is_not constant[None]] begin[:] call[name[func], parameter[name[self], name[args]]]
keyword[def] identifier[do_alias] ( identifier[self] , identifier[args] : identifier[argparse] . identifier[Namespace] )-> keyword[None] : literal[string] identifier[func] = identifier[getattr] ( identifier[args] , literal[string] , keyword[None] ) keyword[if] identifier[func] keyword[is] keyword[not] keyword[None] : identifier[func] ( identifier[self] , identifier[args] ) keyword[else] : identifier[self] . identifier[do_help] ( literal[string] )
def do_alias(self, args: argparse.Namespace) -> None: """Manage aliases""" func = getattr(args, 'func', None) if func is not None: # Call whatever sub-command function was selected func(self, args) # depends on [control=['if'], data=['func']] else: # No sub-command was provided, so call help self.do_help('alias')
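The getattr(args, 'func', None) dispatch assumes each subparser registered its handler via set_defaults(func=...); a self-contained illustration of that argparse pattern:

import argparse

parser = argparse.ArgumentParser(prog='alias')
subparsers = parser.add_subparsers()
create = subparsers.add_parser('create')
create.set_defaults(func=lambda args: print('creating alias'))

args = parser.parse_args(['create'])
func = getattr(args, 'func', None)
if func is not None:
    func(args)           # -> creating alias
else:
    parser.print_help()  # no sub-command selected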
def methods(self):
        """Iterate over all of the methods defined in this class and its parents.

        :returns: The methods defined on the class.
        :rtype: iterable(FunctionDef)
        """
        done = {}
        for astroid in itertools.chain(iter((self,)), self.ancestors()):
            for meth in astroid.mymethods():
                if meth.name in done:
                    continue
                done[meth.name] = None
                yield meth
def function[methods, parameter[self]]: constant[Iterate over all of the method defined in this class and its parents. :returns: The methods defined on the class. :rtype: iterable(FunctionDef) ] variable[done] assign[=] dictionary[[], []] for taget[name[astroid]] in starred[call[name[itertools].chain, parameter[call[name[iter], parameter[tuple[[<ast.Name object at 0x7da1b1e79180>]]]], call[name[self].ancestors, parameter[]]]]] begin[:] for taget[name[meth]] in starred[call[name[astroid].mymethods, parameter[]]] begin[:] if compare[name[meth].name in name[done]] begin[:] continue call[name[done]][name[meth].name] assign[=] constant[None] <ast.Yield object at 0x7da1b1e74790>
keyword[def] identifier[methods] ( identifier[self] ): literal[string] identifier[done] ={} keyword[for] identifier[astroid] keyword[in] identifier[itertools] . identifier[chain] ( identifier[iter] (( identifier[self] ,)), identifier[self] . identifier[ancestors] ()): keyword[for] identifier[meth] keyword[in] identifier[astroid] . identifier[mymethods] (): keyword[if] identifier[meth] . identifier[name] keyword[in] identifier[done] : keyword[continue] identifier[done] [ identifier[meth] . identifier[name] ]= keyword[None] keyword[yield] identifier[meth]
def methods(self): """Iterate over all of the method defined in this class and its parents. :returns: The methods defined on the class. :rtype: iterable(FunctionDef) """ done = {} for astroid in itertools.chain(iter((self,)), self.ancestors()): for meth in astroid.mymethods(): if meth.name in done: continue # depends on [control=['if'], data=[]] done[meth.name] = None yield meth # depends on [control=['for'], data=['meth']] # depends on [control=['for'], data=['astroid']]
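The done dict gives first-wins deduplication down the inheritance chain. The same idea on plain classes, without astroid:

import itertools

class Base:
    def ping(self): ...
    def close(self): ...

class Child(Base):
    def ping(self): ...   # overrides Base.ping

done = {}
for cls in itertools.chain((Child,), Child.__mro__[1:]):
    for name, obj in vars(cls).items():
        if callable(obj) and not name.startswith('__') and name not in done:
            done[name] = cls.__name__   # first definition wins
print(done)  # {'ping': 'Child', 'close': 'Base'}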
def add_channel(self, channel_name, datatype, channel_type, data_url, file_format, file_type, exceptions=None, resolution=None, windowrange=None, readonly=None): """ Arguments: channel_name (str): Channel Name is the specific name of a specific series of data. Standard naming convention is to do ImageTypeIterationNumber or NameSubProjectName. datatype (str): The data type is the storage method of data in the channel. It can be uint8, uint16, uint32, uint64, or float32. channel_type (str): The channel type is the kind of data being stored in the channel. It can be image, annotation, or timeseries. data_url (str): This url points to the root directory of the files. Dropbox (or any data requiring authentication to download such as private s3) is not an acceptable HTTP Server. See additional instructions in documentation online to format s3 properly so it is http accessible. file_format (str): File format refers to the overarching kind of data, as in slices (normal image data) or catmaid (tile-based). file_type (str): File type refers to the specific type of file that the data is stored in, as in, tiff, png, or tif. exceptions (int): Exceptions is an option to enable the possibility for annotations to contradict each other (assign different values to the same point). 1 corresponds to True, 0 corresponds to False. resolution (int): Resolution is the starting resolution of the data being uploaded to the channel. windowrange (int, int): Window range is the maximum and minimum pixel values for a particular image. This is used so that the image can be displayed in a readable way for viewing through RESTful calls readonly (int): This option allows the user to control if, after the initial data commit, the channel is read-only. Generally this is suggested with data that will be publicly viewable. Returns: None """ self.channels[channel_name] = [ channel_name.strip().replace(" ", ""), datatype, channel_type.lower(), data_url, file_format, file_type, exceptions, resolution, windowrange, readonly ]
def function[add_channel, parameter[self, channel_name, datatype, channel_type, data_url, file_format, file_type, exceptions, resolution, windowrange, readonly]]: constant[ Arguments: channel_name (str): Channel Name is the specific name of a specific series of data. Standard naming convention is to do ImageTypeIterationNumber or NameSubProjectName. datatype (str): The data type is the storage method of data in the channel. It can be uint8, uint16, uint32, uint64, or float32. channel_type (str): The channel type is the kind of data being stored in the channel. It can be image, annotation, or timeseries. data_url (str): This url points to the root directory of the files. Dropbox (or any data requiring authentication to download such as private s3) is not an acceptable HTTP Server. See additional instructions in documentation online to format s3 properly so it is http accessible. file_format (str): File format refers to the overarching kind of data, as in slices (normal image data) or catmaid (tile-based). file_type (str): File type refers to the specific type of file that the data is stored in, as in, tiff, png, or tif. exceptions (int): Exceptions is an option to enable the possibility for annotations to contradict each other (assign different values to the same point). 1 corresponds to True, 0 corresponds to False. resolution (int): Resolution is the starting resolution of the data being uploaded to the channel. windowrange (int, int): Window range is the maximum and minimum pixel values for a particular image. This is used so that the image can be displayed in a readable way for viewing through RESTful calls readonly (int): This option allows the user to control if, after the initial data commit, the channel is read-only. Generally this is suggested with data that will be publicly viewable. Returns: None ] call[name[self].channels][name[channel_name]] assign[=] list[[<ast.Call object at 0x7da1b020c8e0>, <ast.Name object at 0x7da1b020c400>, <ast.Call object at 0x7da1b020c4f0>, <ast.Name object at 0x7da1b020cf70>, <ast.Name object at 0x7da1b020de70>, <ast.Name object at 0x7da1b020e560>, <ast.Name object at 0x7da1b020e800>, <ast.Name object at 0x7da1b020e2c0>, <ast.Name object at 0x7da1b020c7c0>, <ast.Name object at 0x7da1b020c130>]]
keyword[def] identifier[add_channel] ( identifier[self] , identifier[channel_name] , identifier[datatype] , identifier[channel_type] , identifier[data_url] , identifier[file_format] , identifier[file_type] , identifier[exceptions] = keyword[None] , identifier[resolution] = keyword[None] , identifier[windowrange] = keyword[None] , identifier[readonly] = keyword[None] ): literal[string] identifier[self] . identifier[channels] [ identifier[channel_name] ]=[ identifier[channel_name] . identifier[strip] (). identifier[replace] ( literal[string] , literal[string] ), identifier[datatype] , identifier[channel_type] . identifier[lower] (), identifier[data_url] , identifier[file_format] , identifier[file_type] , identifier[exceptions] , identifier[resolution] , identifier[windowrange] , identifier[readonly] ]
def add_channel(self, channel_name, datatype, channel_type, data_url, file_format, file_type, exceptions=None, resolution=None, windowrange=None, readonly=None): """ Arguments: channel_name (str): Channel Name is the specific name of a specific series of data. Standard naming convention is to do ImageTypeIterationNumber or NameSubProjectName. datatype (str): The data type is the storage method of data in the channel. It can be uint8, uint16, uint32, uint64, or float32. channel_type (str): The channel type is the kind of data being stored in the channel. It can be image, annotation, or timeseries. data_url (str): This url points to the root directory of the files. Dropbox (or any data requiring authentication to download such as private s3) is not an acceptable HTTP Server. See additional instructions in documentation online to format s3 properly so it is http accessible. file_format (str): File format refers to the overarching kind of data, as in slices (normal image data) or catmaid (tile-based). file_type (str): File type refers to the specific type of file that the data is stored in, as in, tiff, png, or tif. exceptions (int): Exceptions is an option to enable the possibility for annotations to contradict each other (assign different values to the same point). 1 corresponds to True, 0 corresponds to False. resolution (int): Resolution is the starting resolution of the data being uploaded to the channel. windowrange (int, int): Window range is the maximum and minimum pixel values for a particular image. This is used so that the image can be displayed in a readable way for viewing through RESTful calls readonly (int): This option allows the user to control if, after the initial data commit, the channel is read-only. Generally this is suggested with data that will be publicly viewable. Returns: None """ self.channels[channel_name] = [channel_name.strip().replace(' ', ''), datatype, channel_type.lower(), data_url, file_format, file_type, exceptions, resolution, windowrange, readonly]
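A hedged usage sketch (all values made up) showing the list layout each channels entry ends up with, including the whitespace-stripped name in position zero:

channels = {}
channel_name = ' EM Image 1 '
channels[channel_name] = [
    channel_name.strip().replace(' ', ''),  # -> 'EMImage1'
    'uint8', 'image',
    'https://example.org/data/em/',  # http-accessible root, per the docstring
    'slices', 'tif',
    0,         # exceptions disabled
    0,         # starting resolution
    (0, 255),  # window range for uint8 display
    1,         # read-only after the initial commit
]
print(channels[channel_name][0])  # EMImage1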
def create_blueprint(endpoints): """Create Invenio-Deposit-UI blueprint. See: :data:`invenio_deposit.config.DEPOSIT_RECORDS_UI_ENDPOINTS`. :param endpoints: List of endpoints configuration. :returns: The configured blueprint. """ from invenio_records_ui.views import create_url_rule blueprint = Blueprint( 'invenio_deposit_ui', __name__, static_folder='../static', template_folder='../templates', url_prefix='', ) @blueprint.errorhandler(PIDDeletedError) def tombstone_errorhandler(error): """Render tombstone page.""" return render_template( current_app.config['DEPOSIT_UI_TOMBSTONE_TEMPLATE'], pid=error.pid, record=error.record or {}, ), 410 for endpoint, options in (endpoints or {}).items(): options = deepcopy(options) options.pop('jsonschema', None) options.pop('schemaform', None) blueprint.add_url_rule(**create_url_rule(endpoint, **options)) @blueprint.route('/deposit') @login_required def index(): """List user deposits.""" return render_template(current_app.config['DEPOSIT_UI_INDEX_TEMPLATE']) @blueprint.route('/deposit/new') @login_required def new(): """Create new deposit.""" deposit_type = request.values.get('type') return render_template( current_app.config['DEPOSIT_UI_NEW_TEMPLATE'], record={'_deposit': {'id': None}}, jsonschema=current_deposit.jsonschemas[deposit_type], schemaform=current_deposit.schemaforms[deposit_type], ) return blueprint
def function[create_blueprint, parameter[endpoints]]: constant[Create Invenio-Deposit-UI blueprint. See: :data:`invenio_deposit.config.DEPOSIT_RECORDS_UI_ENDPOINTS`. :param endpoints: List of endpoints configuration. :returns: The configured blueprint. ] from relative_module[invenio_records_ui.views] import module[create_url_rule] variable[blueprint] assign[=] call[name[Blueprint], parameter[constant[invenio_deposit_ui], name[__name__]]] def function[tombstone_errorhandler, parameter[error]]: constant[Render tombstone page.] return[tuple[[<ast.Call object at 0x7da1afe718d0>, <ast.Constant object at 0x7da1afe703d0>]]] for taget[tuple[[<ast.Name object at 0x7da1afe713f0>, <ast.Name object at 0x7da1afe73400>]]] in starred[call[<ast.BoolOp object at 0x7da1afe71450>.items, parameter[]]] begin[:] variable[options] assign[=] call[name[deepcopy], parameter[name[options]]] call[name[options].pop, parameter[constant[jsonschema], constant[None]]] call[name[options].pop, parameter[constant[schemaform], constant[None]]] call[name[blueprint].add_url_rule, parameter[]] def function[index, parameter[]]: constant[List user deposits.] return[call[name[render_template], parameter[call[name[current_app].config][constant[DEPOSIT_UI_INDEX_TEMPLATE]]]]] def function[new, parameter[]]: constant[Create new deposit.] variable[deposit_type] assign[=] call[name[request].values.get, parameter[constant[type]]] return[call[name[render_template], parameter[call[name[current_app].config][constant[DEPOSIT_UI_NEW_TEMPLATE]]]]] return[name[blueprint]]
keyword[def] identifier[create_blueprint] ( identifier[endpoints] ): literal[string] keyword[from] identifier[invenio_records_ui] . identifier[views] keyword[import] identifier[create_url_rule] identifier[blueprint] = identifier[Blueprint] ( literal[string] , identifier[__name__] , identifier[static_folder] = literal[string] , identifier[template_folder] = literal[string] , identifier[url_prefix] = literal[string] , ) @ identifier[blueprint] . identifier[errorhandler] ( identifier[PIDDeletedError] ) keyword[def] identifier[tombstone_errorhandler] ( identifier[error] ): literal[string] keyword[return] identifier[render_template] ( identifier[current_app] . identifier[config] [ literal[string] ], identifier[pid] = identifier[error] . identifier[pid] , identifier[record] = identifier[error] . identifier[record] keyword[or] {}, ), literal[int] keyword[for] identifier[endpoint] , identifier[options] keyword[in] ( identifier[endpoints] keyword[or] {}). identifier[items] (): identifier[options] = identifier[deepcopy] ( identifier[options] ) identifier[options] . identifier[pop] ( literal[string] , keyword[None] ) identifier[options] . identifier[pop] ( literal[string] , keyword[None] ) identifier[blueprint] . identifier[add_url_rule] (** identifier[create_url_rule] ( identifier[endpoint] ,** identifier[options] )) @ identifier[blueprint] . identifier[route] ( literal[string] ) @ identifier[login_required] keyword[def] identifier[index] (): literal[string] keyword[return] identifier[render_template] ( identifier[current_app] . identifier[config] [ literal[string] ]) @ identifier[blueprint] . identifier[route] ( literal[string] ) @ identifier[login_required] keyword[def] identifier[new] (): literal[string] identifier[deposit_type] = identifier[request] . identifier[values] . identifier[get] ( literal[string] ) keyword[return] identifier[render_template] ( identifier[current_app] . identifier[config] [ literal[string] ], identifier[record] ={ literal[string] :{ literal[string] : keyword[None] }}, identifier[jsonschema] = identifier[current_deposit] . identifier[jsonschemas] [ identifier[deposit_type] ], identifier[schemaform] = identifier[current_deposit] . identifier[schemaforms] [ identifier[deposit_type] ], ) keyword[return] identifier[blueprint]
def create_blueprint(endpoints): """Create Invenio-Deposit-UI blueprint. See: :data:`invenio_deposit.config.DEPOSIT_RECORDS_UI_ENDPOINTS`. :param endpoints: List of endpoints configuration. :returns: The configured blueprint. """ from invenio_records_ui.views import create_url_rule blueprint = Blueprint('invenio_deposit_ui', __name__, static_folder='../static', template_folder='../templates', url_prefix='') @blueprint.errorhandler(PIDDeletedError) def tombstone_errorhandler(error): """Render tombstone page.""" return (render_template(current_app.config['DEPOSIT_UI_TOMBSTONE_TEMPLATE'], pid=error.pid, record=error.record or {}), 410) for (endpoint, options) in (endpoints or {}).items(): options = deepcopy(options) options.pop('jsonschema', None) options.pop('schemaform', None) blueprint.add_url_rule(**create_url_rule(endpoint, **options)) # depends on [control=['for'], data=[]] @blueprint.route('/deposit') @login_required def index(): """List user deposits.""" return render_template(current_app.config['DEPOSIT_UI_INDEX_TEMPLATE']) @blueprint.route('/deposit/new') @login_required def new(): """Create new deposit.""" deposit_type = request.values.get('type') return render_template(current_app.config['DEPOSIT_UI_NEW_TEMPLATE'], record={'_deposit': {'id': None}}, jsonschema=current_deposit.jsonschemas[deposit_type], schemaform=current_deposit.schemaforms[deposit_type]) return blueprint
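The tombstone handler returns a (body, 410) tuple, which is how Flask attaches a status code to an error handler's response. A minimal runnable demonstration with a stand-in exception class:

from flask import Flask

app = Flask(__name__)

class Gone(Exception):
    pass

@app.errorhandler(Gone)
def tombstone(error):
    # (body, status) tuple, like the PIDDeletedError handler above
    return 'record deleted', 410

@app.route('/record')
def record():
    raise Gone()

with app.test_client() as client:
    print(client.get('/record').status_code)  # 410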
def get_specidentitem_percolator_data(item, xmlns): """Loop through SpecIdentificationItem children. Find percolator data by matching to a dict lookup. Return a dict containing percolator data""" percomap = {'{0}userParam'.format(xmlns): PERCO_HEADERMAP, } percodata = {} for child in item: try: percoscore = percomap[child.tag][child.attrib['name']] except KeyError: continue else: percodata[percoscore] = child.attrib['value'] outkeys = [y for x in list(percomap.values()) for y in list(x.values())] for key in outkeys: try: percodata[key] except KeyError: percodata[key] = 'NA' return percodata
def function[get_specidentitem_percolator_data, parameter[item, xmlns]]: constant[Loop through SpecIdentificationItem children. Find percolator data by matching to a dict lookup. Return a dict containing percolator data] variable[percomap] assign[=] dictionary[[<ast.Call object at 0x7da1b24ac160>], [<ast.Name object at 0x7da1b24af340>]] variable[percodata] assign[=] dictionary[[], []] for taget[name[child]] in starred[name[item]] begin[:] <ast.Try object at 0x7da1b24acc40> variable[outkeys] assign[=] <ast.ListComp object at 0x7da1b24ae170> for taget[name[key]] in starred[name[outkeys]] begin[:] <ast.Try object at 0x7da1b24ad960> return[name[percodata]]
keyword[def] identifier[get_specidentitem_percolator_data] ( identifier[item] , identifier[xmlns] ): literal[string] identifier[percomap] ={ literal[string] . identifier[format] ( identifier[xmlns] ): identifier[PERCO_HEADERMAP] ,} identifier[percodata] ={} keyword[for] identifier[child] keyword[in] identifier[item] : keyword[try] : identifier[percoscore] = identifier[percomap] [ identifier[child] . identifier[tag] ][ identifier[child] . identifier[attrib] [ literal[string] ]] keyword[except] identifier[KeyError] : keyword[continue] keyword[else] : identifier[percodata] [ identifier[percoscore] ]= identifier[child] . identifier[attrib] [ literal[string] ] identifier[outkeys] =[ identifier[y] keyword[for] identifier[x] keyword[in] identifier[list] ( identifier[percomap] . identifier[values] ()) keyword[for] identifier[y] keyword[in] identifier[list] ( identifier[x] . identifier[values] ())] keyword[for] identifier[key] keyword[in] identifier[outkeys] : keyword[try] : identifier[percodata] [ identifier[key] ] keyword[except] identifier[KeyError] : identifier[percodata] [ identifier[key] ]= literal[string] keyword[return] identifier[percodata]
def get_specidentitem_percolator_data(item, xmlns): """Loop through SpecIdentificationItem children. Find percolator data by matching to a dict lookup. Return a dict containing percolator data""" percomap = {'{0}userParam'.format(xmlns): PERCO_HEADERMAP} percodata = {} for child in item: try: percoscore = percomap[child.tag][child.attrib['name']] # depends on [control=['try'], data=[]] except KeyError: continue # depends on [control=['except'], data=[]] else: percodata[percoscore] = child.attrib['value'] # depends on [control=['for'], data=['child']] outkeys = [y for x in list(percomap.values()) for y in list(x.values())] for key in outkeys: try: percodata[key] # depends on [control=['try'], data=[]] except KeyError: percodata[key] = 'NA' # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['key']] return percodata
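A self-contained run of the same lookup pattern on a toy mzIdentML fragment. PERCO_HEADERMAP's real contents are not shown above, so the mapping here is a guess:

import xml.etree.ElementTree as ET

PERCO_HEADERMAP = {'percolator:score': 'svm-score'}  # assumed mapping
xmlns = '{http://psidev.info/psi/pi/mzIdentML/1.1}'
percomap = {'{0}userParam'.format(xmlns): PERCO_HEADERMAP}

item = ET.fromstring(
    '<SpectrumIdentificationItem '
    'xmlns="http://psidev.info/psi/pi/mzIdentML/1.1">'
    '<userParam name="percolator:score" value="0.93"/>'
    '<userParam name="unrelated" value="x"/>'
    '</SpectrumIdentificationItem>')

percodata = {}
for child in item:
    try:
        out = percomap[child.tag][child.attrib['name']]
    except KeyError:
        continue   # children outside the map are skipped
    percodata[out] = child.attrib['value']
print(percodata)  # {'svm-score': '0.93'}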
def parse_datetime(s: str) -> datetime.date: """Try to parse a datetime object from a standard datetime format or date format.""" for fmt in (CREATION_DATE_FMT, PUBLISHED_DATE_FMT, PUBLISHED_DATE_FMT_2): try: dt = datetime.strptime(s, fmt) except ValueError: pass else: return dt raise ValueError('Incorrect datetime format for {}'.format(s))
def function[parse_datetime, parameter[s]]: constant[Try to parse a datetime object from a standard datetime format or date format.] for taget[name[fmt]] in starred[tuple[[<ast.Name object at 0x7da207f99150>, <ast.Name object at 0x7da207f98fa0>, <ast.Name object at 0x7da207f9ac20>]]] begin[:] <ast.Try object at 0x7da207f9ab30> <ast.Raise object at 0x7da207f992a0>
keyword[def] identifier[parse_datetime] ( identifier[s] : identifier[str] )-> identifier[datetime] . identifier[date] : literal[string] keyword[for] identifier[fmt] keyword[in] ( identifier[CREATION_DATE_FMT] , identifier[PUBLISHED_DATE_FMT] , identifier[PUBLISHED_DATE_FMT_2] ): keyword[try] : identifier[dt] = identifier[datetime] . identifier[strptime] ( identifier[s] , identifier[fmt] ) keyword[except] identifier[ValueError] : keyword[pass] keyword[else] : keyword[return] identifier[dt] keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[s] ))
def parse_datetime(s: str) -> datetime.date: """Try to parse a datetime object from a standard datetime format or date format.""" for fmt in (CREATION_DATE_FMT, PUBLISHED_DATE_FMT, PUBLISHED_DATE_FMT_2): try: dt = datetime.strptime(s, fmt) # depends on [control=['try'], data=[]] except ValueError: pass # depends on [control=['except'], data=[]] else: return dt # depends on [control=['for'], data=['fmt']] raise ValueError('Incorrect datetime format for {}'.format(s))
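The three format constants are defined elsewhere in the module; the values below are plausible guesses, wired into the same try/except loop:

from datetime import datetime

# Hedged guesses at the module's constants (not shown in the source).
CREATION_DATE_FMT = '%Y-%m-%dT%H:%M:%S'
PUBLISHED_DATE_FMT = '%Y-%m-%d'
PUBLISHED_DATE_FMT_2 = '%d %B %Y'   # %B assumes English month names

for s in ('2021-03-04T10:30:00', '2021-03-04', '4 March 2021'):
    for fmt in (CREATION_DATE_FMT, PUBLISHED_DATE_FMT, PUBLISHED_DATE_FMT_2):
        try:
            print(datetime.strptime(s, fmt))
            break
        except ValueError:
            pass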
def getElementText(self, node, preserve_ws=None): """Return the text value of an xml element node. Leading and trailing whitespace is stripped from the value unless the preserve_ws flag is passed with a true value.""" result = [] for child in node.childNodes: nodetype = child.nodeType if nodetype == child.TEXT_NODE or \ nodetype == child.CDATA_SECTION_NODE: result.append(child.nodeValue) value = join(result, '') if preserve_ws is None: value = strip(value) return value
def function[getElementText, parameter[self, node, preserve_ws]]: constant[Return the text value of an xml element node. Leading and trailing whitespace is stripped from the value unless the preserve_ws flag is passed with a true value.] variable[result] assign[=] list[[]] for taget[name[child]] in starred[name[node].childNodes] begin[:] variable[nodetype] assign[=] name[child].nodeType if <ast.BoolOp object at 0x7da1b15f3b80> begin[:] call[name[result].append, parameter[name[child].nodeValue]] variable[value] assign[=] call[name[join], parameter[name[result], constant[]]] if compare[name[preserve_ws] is constant[None]] begin[:] variable[value] assign[=] call[name[strip], parameter[name[value]]] return[name[value]]
keyword[def] identifier[getElementText] ( identifier[self] , identifier[node] , identifier[preserve_ws] = keyword[None] ): literal[string] identifier[result] =[] keyword[for] identifier[child] keyword[in] identifier[node] . identifier[childNodes] : identifier[nodetype] = identifier[child] . identifier[nodeType] keyword[if] identifier[nodetype] == identifier[child] . identifier[TEXT_NODE] keyword[or] identifier[nodetype] == identifier[child] . identifier[CDATA_SECTION_NODE] : identifier[result] . identifier[append] ( identifier[child] . identifier[nodeValue] ) identifier[value] = identifier[join] ( identifier[result] , literal[string] ) keyword[if] identifier[preserve_ws] keyword[is] keyword[None] : identifier[value] = identifier[strip] ( identifier[value] ) keyword[return] identifier[value]
def getElementText(self, node, preserve_ws=None): """Return the text value of an xml element node. Leading and trailing whitespace is stripped from the value unless the preserve_ws flag is passed with a true value.""" result = [] for child in node.childNodes: nodetype = child.nodeType if nodetype == child.TEXT_NODE or nodetype == child.CDATA_SECTION_NODE: result.append(child.nodeValue) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['child']] value = join(result, '') if preserve_ws is None: value = strip(value) # depends on [control=['if'], data=[]] return value
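The join and strip calls here come from the Python 2 string module; an equivalent modern sketch of the same extraction using str methods and minidom:

from xml.dom.minidom import parseString

node = parseString('<a> hi <![CDATA[there]]> </a>').documentElement
parts = [child.nodeValue for child in node.childNodes
         if child.nodeType in (child.TEXT_NODE, child.CDATA_SECTION_NODE)]
print(repr(''.join(parts).strip()))  # 'hi there'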
def _connectWithContextFactory(ctxFactory, workbench): """Connect using the given context factory. Notifications go to the given workbench. """ endpoint = SSL4ClientEndpoint(reactor, "localhost", 4430, ctxFactory) splash = _Splash(u"Connecting", u"Connecting...") workbench.display(splash) d = endpoint.connect(Factory(workbench)) @d.addBoth def closeSplash(returnValue): workbench.undisplay() return returnValue @d.addErrback def notifyFailure(f): f.trap(ConnectError) d = alert(workbench, u"Couldn't connect", u"Connection failed! " "Check internet connection, or try again later.\n" "Error: {!r}".format(f.value)) return d.addCallback(lambda _result: reactor.stop()) return d
def function[_connectWithContextFactory, parameter[ctxFactory, workbench]]: constant[Connect using the given context factory. Notifications go to the given workbench. ] variable[endpoint] assign[=] call[name[SSL4ClientEndpoint], parameter[name[reactor], constant[localhost], constant[4430], name[ctxFactory]]] variable[splash] assign[=] call[name[_Splash], parameter[constant[Connecting], constant[Connecting...]]] call[name[workbench].display, parameter[name[splash]]] variable[d] assign[=] call[name[endpoint].connect, parameter[call[name[Factory], parameter[name[workbench]]]]] def function[closeSplash, parameter[returnValue]]: call[name[workbench].undisplay, parameter[]] return[name[returnValue]] def function[notifyFailure, parameter[f]]: call[name[f].trap, parameter[name[ConnectError]]] variable[d] assign[=] call[name[alert], parameter[name[workbench], constant[Couldn't connect], call[constant[Connection failed! Check internet connection, or try again later. Error: {!r}].format, parameter[name[f].value]]]] return[call[name[d].addCallback, parameter[<ast.Lambda object at 0x7da20e957190>]]] return[name[d]]
keyword[def] identifier[_connectWithContextFactory] ( identifier[ctxFactory] , identifier[workbench] ): literal[string] identifier[endpoint] = identifier[SSL4ClientEndpoint] ( identifier[reactor] , literal[string] , literal[int] , identifier[ctxFactory] ) identifier[splash] = identifier[_Splash] ( literal[string] , literal[string] ) identifier[workbench] . identifier[display] ( identifier[splash] ) identifier[d] = identifier[endpoint] . identifier[connect] ( identifier[Factory] ( identifier[workbench] )) @ identifier[d] . identifier[addBoth] keyword[def] identifier[closeSplash] ( identifier[returnValue] ): identifier[workbench] . identifier[undisplay] () keyword[return] identifier[returnValue] @ identifier[d] . identifier[addErrback] keyword[def] identifier[notifyFailure] ( identifier[f] ): identifier[f] . identifier[trap] ( identifier[ConnectError] ) identifier[d] = identifier[alert] ( identifier[workbench] , literal[string] , literal[string] literal[string] literal[string] . identifier[format] ( identifier[f] . identifier[value] )) keyword[return] identifier[d] . identifier[addCallback] ( keyword[lambda] identifier[_result] : identifier[reactor] . identifier[stop] ()) keyword[return] identifier[d]
def _connectWithContextFactory(ctxFactory, workbench): """Connect using the given context factory. Notifications go to the given workbench. """ endpoint = SSL4ClientEndpoint(reactor, 'localhost', 4430, ctxFactory) splash = _Splash(u'Connecting', u'Connecting...') workbench.display(splash) d = endpoint.connect(Factory(workbench)) @d.addBoth def closeSplash(returnValue): workbench.undisplay() return returnValue @d.addErrback def notifyFailure(f): f.trap(ConnectError) d = alert(workbench, u"Couldn't connect", u'Connection failed! Check internet connection, or try again later.\nError: {!r}'.format(f.value)) return d.addCallback(lambda _result: reactor.stop()) return d
def double_percent_options_to_metadata(options):
    """Parse double percent options"""
    matches = _PERCENT_CELL.findall('# %%' + options)

    # Fail safe when regexp matching fails #116
    # (occurs e.g. if square brackets are found in the title)
    if not matches:
        return {'title': options.strip()}

    matches = matches[0]

    # The fifth match is the JSON metadata
    if matches[4]:
        metadata = json_options_to_metadata(matches[4], add_brackets=False)
    else:
        metadata = {}

    # The third match is the cell type
    cell_type = matches[2]
    if cell_type:
        metadata['cell_type'] = cell_type[1:-1]

    # The second and fourth matches are the description
    title = [matches[i].strip() for i in [1, 3]]
    title = [part for part in title if part]
    if title:
        title = ' '.join(title)
        cell_depth = 0
        while title.startswith('%'):
            cell_depth += 1
            title = title[1:]

        if cell_depth:
            metadata['cell_depth'] = cell_depth
        metadata['title'] = title.strip()

    return metadata
def function[double_percent_options_to_metadata, parameter[options]]: constant[Parse double percent options] variable[matches] assign[=] call[name[_PERCENT_CELL].findall, parameter[binary_operation[constant[# %%] + name[options]]]] if <ast.UnaryOp object at 0x7da2054a7640> begin[:] return[dictionary[[<ast.Constant object at 0x7da2054a7c40>], [<ast.Call object at 0x7da2054a4160>]]] variable[matches] assign[=] call[name[matches]][constant[0]] if call[name[matches]][constant[4]] begin[:] variable[metadata] assign[=] call[name[json_options_to_metadata], parameter[call[name[matches]][constant[4]]]] variable[cell_type] assign[=] call[name[matches]][constant[2]] if name[cell_type] begin[:] call[name[metadata]][constant[cell_type]] assign[=] call[name[cell_type]][<ast.Slice object at 0x7da2054a6e90>] variable[title] assign[=] <ast.ListComp object at 0x7da2054a4af0> variable[title] assign[=] <ast.ListComp object at 0x7da2054a40a0> if name[title] begin[:] variable[title] assign[=] call[constant[ ].join, parameter[name[title]]] variable[cell_depth] assign[=] constant[0] while call[name[title].startswith, parameter[constant[%]]] begin[:] <ast.AugAssign object at 0x7da2054a4bb0> variable[title] assign[=] call[name[title]][<ast.Slice object at 0x7da2054a48e0>] if name[cell_depth] begin[:] call[name[metadata]][constant[cell_depth]] assign[=] name[cell_depth] call[name[metadata]][constant[title]] assign[=] call[name[title].strip, parameter[]] return[name[metadata]]
keyword[def] identifier[double_percent_options_to_metadata] ( identifier[options] ): literal[string] identifier[matches] = identifier[_PERCENT_CELL] . identifier[findall] ( literal[string] + identifier[options] ) keyword[if] keyword[not] identifier[matches] : keyword[return] { literal[string] : identifier[options] . identifier[strip] ()} identifier[matches] = identifier[matches] [ literal[int] ] keyword[if] identifier[matches] [ literal[int] ]: identifier[metadata] = identifier[json_options_to_metadata] ( identifier[matches] [ literal[int] ], identifier[add_brackets] = keyword[False] ) keyword[else] : identifier[metadata] ={} identifier[cell_type] = identifier[matches] [ literal[int] ] keyword[if] identifier[cell_type] : identifier[metadata] [ literal[string] ]= identifier[cell_type] [ literal[int] :- literal[int] ] identifier[title] =[ identifier[matches] [ identifier[i] ]. identifier[strip] () keyword[for] identifier[i] keyword[in] [ literal[int] , literal[int] ]] identifier[title] =[ identifier[part] keyword[for] identifier[part] keyword[in] identifier[title] keyword[if] identifier[part] ] keyword[if] identifier[title] : identifier[title] = literal[string] . identifier[join] ( identifier[title] ) identifier[cell_depth] = literal[int] keyword[while] identifier[title] . identifier[startswith] ( literal[string] ): identifier[cell_depth] += literal[int] identifier[title] = identifier[title] [ literal[int] :] keyword[if] identifier[cell_depth] : identifier[metadata] [ literal[string] ]= identifier[cell_depth] identifier[metadata] [ literal[string] ]= identifier[title] . identifier[strip] () keyword[return] identifier[metadata]
def double_percent_options_to_metadata(options): """Parse double percent options""" matches = _PERCENT_CELL.findall('# %%' + options) # Fail safe when regexp matching fails #116 # (occurs e.g. if square brackets are found in the title) if not matches: return {'title': options.strip()} # depends on [control=['if'], data=[]] matches = matches[0] # Fifth match are JSON metadata if matches[4]: metadata = json_options_to_metadata(matches[4], add_brackets=False) # depends on [control=['if'], data=[]] else: metadata = {} # Third match is cell type cell_type = matches[2] if cell_type: metadata['cell_type'] = cell_type[1:-1] # depends on [control=['if'], data=[]] # Second and fourth match are description title = [matches[i].strip() for i in [1, 3]] title = [part for part in title if part] if title: title = ' '.join(title) cell_depth = 0 while title.startswith('%'): cell_depth += 1 title = title[1:] # depends on [control=['while'], data=[]] if cell_depth: metadata['cell_depth'] = cell_depth # depends on [control=['if'], data=[]] metadata['title'] = title.strip() # depends on [control=['if'], data=[]] return metadata
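Cell depth is simply the count of extra leading percent signs on the title; a tiny runnable illustration:

title = '%% deeper sub-cell'
cell_depth = 0
while title.startswith('%'):
    cell_depth += 1
    title = title[1:]
print(cell_depth, repr(title.strip()))  # 2 'deeper sub-cell'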
def human2bytes(s: str) -> int: """ Modified from http://code.activestate.com/recipes/578019-bytes-to-human-human-to-bytes-converter/. Attempts to guess the string format based on default symbols set and return the corresponding bytes as an integer. When unable to recognize the format, :exc:`ValueError` is raised. >>> human2bytes('0 B') 0 >>> human2bytes('1 K') 1024 >>> human2bytes('1 M') 1048576 >>> human2bytes('1 Gi') 1073741824 >>> human2bytes('1 tera') 1099511627776 >>> human2bytes('0.5kilo') 512 >>> human2bytes('0.1 byte') 0 >>> human2bytes('1 k') # k is an alias for K 1024 >>> human2bytes('12 foo') Traceback (most recent call last): ... ValueError: can't interpret '12 foo' """ # noqa if not s: raise ValueError("Can't interpret {!r} as integer".format(s)) try: return int(s) except ValueError: pass init = s num = "" while s and s[0:1].isdigit() or s[0:1] == '.': num += s[0] s = s[1:] num = float(num) letter = s.strip() for name, sset in SYMBOLS.items(): if letter in sset: break else: if letter == 'k': # treat 'k' as an alias for 'K' as per https://en.wikipedia.org/wiki/Binary_prefix # noqa sset = SYMBOLS['customary'] letter = letter.upper() else: raise ValueError("can't interpret %r" % init) prefix = {sset[0]: 1} for i, s in enumerate(sset[1:]): prefix[s] = 1 << (i + 1) * 10 return int(num * prefix[letter])
def function[human2bytes, parameter[s]]: constant[ Modified from http://code.activestate.com/recipes/578019-bytes-to-human-human-to-bytes-converter/. Attempts to guess the string format based on default symbols set and return the corresponding bytes as an integer. When unable to recognize the format, :exc:`ValueError` is raised. >>> human2bytes('0 B') 0 >>> human2bytes('1 K') 1024 >>> human2bytes('1 M') 1048576 >>> human2bytes('1 Gi') 1073741824 >>> human2bytes('1 tera') 1099511627776 >>> human2bytes('0.5kilo') 512 >>> human2bytes('0.1 byte') 0 >>> human2bytes('1 k') # k is an alias for K 1024 >>> human2bytes('12 foo') Traceback (most recent call last): ... ValueError: can't interpret '12 foo' ] if <ast.UnaryOp object at 0x7da1b170b4f0> begin[:] <ast.Raise object at 0x7da1b170ad10> <ast.Try object at 0x7da1b170a8c0> variable[init] assign[=] name[s] variable[num] assign[=] constant[] while <ast.BoolOp object at 0x7da1b1708f10> begin[:] <ast.AugAssign object at 0x7da1b1709210> variable[s] assign[=] call[name[s]][<ast.Slice object at 0x7da1b1708e20>] variable[num] assign[=] call[name[float], parameter[name[num]]] variable[letter] assign[=] call[name[s].strip, parameter[]] for taget[tuple[[<ast.Name object at 0x7da1b1708910>, <ast.Name object at 0x7da1b17087f0>]]] in starred[call[name[SYMBOLS].items, parameter[]]] begin[:] if compare[name[letter] in name[sset]] begin[:] break variable[prefix] assign[=] dictionary[[<ast.Subscript object at 0x7da1b18e71f0>], [<ast.Constant object at 0x7da1b18e63e0>]] for taget[tuple[[<ast.Name object at 0x7da1b18e56c0>, <ast.Name object at 0x7da1b18e5a50>]]] in starred[call[name[enumerate], parameter[call[name[sset]][<ast.Slice object at 0x7da1b18e5c00>]]]] begin[:] call[name[prefix]][name[s]] assign[=] binary_operation[constant[1] <ast.LShift object at 0x7da2590d69e0> binary_operation[binary_operation[name[i] + constant[1]] * constant[10]]] return[call[name[int], parameter[binary_operation[name[num] * call[name[prefix]][name[letter]]]]]]
keyword[def] identifier[human2bytes] ( identifier[s] : identifier[str] )-> identifier[int] : literal[string] keyword[if] keyword[not] identifier[s] : keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[s] )) keyword[try] : keyword[return] identifier[int] ( identifier[s] ) keyword[except] identifier[ValueError] : keyword[pass] identifier[init] = identifier[s] identifier[num] = literal[string] keyword[while] identifier[s] keyword[and] identifier[s] [ literal[int] : literal[int] ]. identifier[isdigit] () keyword[or] identifier[s] [ literal[int] : literal[int] ]== literal[string] : identifier[num] += identifier[s] [ literal[int] ] identifier[s] = identifier[s] [ literal[int] :] identifier[num] = identifier[float] ( identifier[num] ) identifier[letter] = identifier[s] . identifier[strip] () keyword[for] identifier[name] , identifier[sset] keyword[in] identifier[SYMBOLS] . identifier[items] (): keyword[if] identifier[letter] keyword[in] identifier[sset] : keyword[break] keyword[else] : keyword[if] identifier[letter] == literal[string] : identifier[sset] = identifier[SYMBOLS] [ literal[string] ] identifier[letter] = identifier[letter] . identifier[upper] () keyword[else] : keyword[raise] identifier[ValueError] ( literal[string] % identifier[init] ) identifier[prefix] ={ identifier[sset] [ literal[int] ]: literal[int] } keyword[for] identifier[i] , identifier[s] keyword[in] identifier[enumerate] ( identifier[sset] [ literal[int] :]): identifier[prefix] [ identifier[s] ]= literal[int] <<( identifier[i] + literal[int] )* literal[int] keyword[return] identifier[int] ( identifier[num] * identifier[prefix] [ identifier[letter] ])
def human2bytes(s: str) -> int: """ Modified from http://code.activestate.com/recipes/578019-bytes-to-human-human-to-bytes-converter/. Attempts to guess the string format based on default symbols set and return the corresponding bytes as an integer. When unable to recognize the format, :exc:`ValueError` is raised. >>> human2bytes('0 B') 0 >>> human2bytes('1 K') 1024 >>> human2bytes('1 M') 1048576 >>> human2bytes('1 Gi') 1073741824 >>> human2bytes('1 tera') 1099511627776 >>> human2bytes('0.5kilo') 512 >>> human2bytes('0.1 byte') 0 >>> human2bytes('1 k') # k is an alias for K 1024 >>> human2bytes('12 foo') Traceback (most recent call last): ... ValueError: can't interpret '12 foo' """ # noqa if not s: raise ValueError("Can't interpret {!r} as integer".format(s)) # depends on [control=['if'], data=[]] try: return int(s) # depends on [control=['try'], data=[]] except ValueError: pass # depends on [control=['except'], data=[]] init = s num = '' while s and s[0:1].isdigit() or s[0:1] == '.': num += s[0] s = s[1:] # depends on [control=['while'], data=[]] num = float(num) letter = s.strip() for (name, sset) in SYMBOLS.items(): if letter in sset: break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] else: if letter == 'k': # treat 'k' as an alias for 'K' as per https://en.wikipedia.org/wiki/Binary_prefix # noqa sset = SYMBOLS['customary'] letter = letter.upper() # depends on [control=['if'], data=['letter']] else: raise ValueError("can't interpret %r" % init) prefix = {sset[0]: 1} for (i, s) in enumerate(sset[1:]): prefix[s] = 1 << (i + 1) * 10 # depends on [control=['for'], data=[]] return int(num * prefix[letter])
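SYMBOLS is defined elsewhere in the module; below is a reconstruction consistent with the doctests above ('K', 'Gi', 'tera', and 'kilo' all resolve to the expected values), plus the 1024-per-step prefix table the function builds:

# Hedged reconstruction of the symbol table, following the original recipe.
SYMBOLS = {
    'customary': ('B', 'K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y'),
    'customary_ext': ('byte', 'kilo', 'mega', 'giga', 'tera',
                      'peta', 'exa', 'zetta', 'iotta'),
    'iec': ('Bi', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi', 'Yi'),
}

prefix = {SYMBOLS['customary'][0]: 1}
for i, sym in enumerate(SYMBOLS['customary'][1:]):
    prefix[sym] = 1 << (i + 1) * 10   # each step is another factor of 1024
print(prefix['K'], prefix['G'])  # 1024 1073741824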
def _ostaunicode(src): # type: (str) -> bytes ''' Internal function to create an OSTA byte string from a source string. ''' if have_py_3: bytename = src else: bytename = src.decode('utf-8') # type: ignore try: enc = bytename.encode('latin-1') encbyte = b'\x08' except (UnicodeEncodeError, UnicodeDecodeError): enc = bytename.encode('utf-16_be') encbyte = b'\x10' return encbyte + enc
def function[_ostaunicode, parameter[src]]: constant[ Internal function to create an OSTA byte string from a source string. ] if name[have_py_3] begin[:] variable[bytename] assign[=] name[src] <ast.Try object at 0x7da1b0f0f790> return[binary_operation[name[encbyte] + name[enc]]]
keyword[def] identifier[_ostaunicode] ( identifier[src] ): literal[string] keyword[if] identifier[have_py_3] : identifier[bytename] = identifier[src] keyword[else] : identifier[bytename] = identifier[src] . identifier[decode] ( literal[string] ) keyword[try] : identifier[enc] = identifier[bytename] . identifier[encode] ( literal[string] ) identifier[encbyte] = literal[string] keyword[except] ( identifier[UnicodeEncodeError] , identifier[UnicodeDecodeError] ): identifier[enc] = identifier[bytename] . identifier[encode] ( literal[string] ) identifier[encbyte] = literal[string] keyword[return] identifier[encbyte] + identifier[enc]
def _ostaunicode(src): # type: (str) -> bytes '\n Internal function to create an OSTA byte string from a source string.\n ' if have_py_3: bytename = src # depends on [control=['if'], data=[]] else: bytename = src.decode('utf-8') # type: ignore try: enc = bytename.encode('latin-1') encbyte = b'\x08' # depends on [control=['try'], data=[]] except (UnicodeEncodeError, UnicodeDecodeError): enc = bytename.encode('utf-16_be') encbyte = b'\x10' # depends on [control=['except'], data=[]] return encbyte + enc
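The leading byte that _ostaunicode prepends is the OSTA compression ID: 8 when the name fits in Latin-1, 16 when it falls back to UTF-16 BE. A quick sanity check, assuming Python 3 so the have_py_3 branch assigns bytename = src:

assert _ostaunicode('hello') == b'\x08' + 'hello'.encode('latin-1')
assert _ostaunicode('snow\u2603')[0:1] == b'\x10'  # U+2603 is outside Latin-1, forcing UTF-16 BE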
def is_credential_valid(self, credentialID): """ Check if this credential ID is valid. """ cur = self.conn.cursor() cur.execute('SELECT * FROM credentials WHERE id=? LIMIT 1', [credentialID]) results = cur.fetchall() cur.close() return len(results) > 0
def function[is_credential_valid, parameter[self, credentialID]]: constant[ Check if this credential ID is valid. ] variable[cur] assign[=] call[name[self].conn.cursor, parameter[]] call[name[cur].execute, parameter[constant[SELECT * FROM credentials WHERE id=? LIMIT 1], list[[<ast.Name object at 0x7da18fe917b0>]]]] variable[results] assign[=] call[name[cur].fetchall, parameter[]] call[name[cur].close, parameter[]] return[compare[call[name[len], parameter[name[results]]] greater[>] constant[0]]]
keyword[def] identifier[is_credential_valid] ( identifier[self] , identifier[credentialID] ): literal[string] identifier[cur] = identifier[self] . identifier[conn] . identifier[cursor] () identifier[cur] . identifier[execute] ( literal[string] ,[ identifier[credentialID] ]) identifier[results] = identifier[cur] . identifier[fetchall] () identifier[cur] . identifier[close] () keyword[return] identifier[len] ( identifier[results] )> literal[int]
def is_credential_valid(self, credentialID): """ Check if this credential ID is valid. """ cur = self.conn.cursor() cur.execute('SELECT * FROM credentials WHERE id=? LIMIT 1', [credentialID]) results = cur.fetchall() cur.close() return len(results) > 0
def _send_command_to_servers(self, head, body): """Sends a command to all server nodes. Sending command to a server node will cause that server node to invoke ``KVStoreServer.controller`` to execute the command. This function returns after the command has been executed on all server nodes. Parameters ---------- head : int the head of the command. body : str the body of the command. """ check_call(_LIB.MXKVStoreSendCommmandToServers( self.handle, mx_uint(head), c_str(body)))
def function[_send_command_to_servers, parameter[self, head, body]]: constant[Sends a command to all server nodes. Sending command to a server node will cause that server node to invoke ``KVStoreServer.controller`` to execute the command. This function returns after the command has been executed on all server nodes. Parameters ---------- head : int the head of the command. body : str the body of the command. ] call[name[check_call], parameter[call[name[_LIB].MXKVStoreSendCommmandToServers, parameter[name[self].handle, call[name[mx_uint], parameter[name[head]]], call[name[c_str], parameter[name[body]]]]]]]
keyword[def] identifier[_send_command_to_servers] ( identifier[self] , identifier[head] , identifier[body] ): literal[string] identifier[check_call] ( identifier[_LIB] . identifier[MXKVStoreSendCommmandToServers] ( identifier[self] . identifier[handle] , identifier[mx_uint] ( identifier[head] ), identifier[c_str] ( identifier[body] )))
def _send_command_to_servers(self, head, body): """Sends a command to all server nodes. Sending command to a server node will cause that server node to invoke ``KVStoreServer.controller`` to execute the command. This function returns after the command has been executed on all server nodes. Parameters ---------- head : int the head of the command. body : str the body of the command. """ check_call(_LIB.MXKVStoreSendCommmandToServers(self.handle, mx_uint(head), c_str(body)))
def is_tablet_pad(self): """Macro to check if this event is a :class:`~libinput.event.TabletPadEvent`. """ if self in {type(self).TABLET_PAD_BUTTON, type(self).TABLET_PAD_RING, type(self).TABLET_PAD_STRIP}: return True else: return False
def function[is_tablet_pad, parameter[self]]: constant[Macro to check if this event is a :class:`~libinput.event.TabletPadEvent`. ] if compare[name[self] in <ast.Set object at 0x7da1b184bdc0>] begin[:] return[constant[True]]
keyword[def] identifier[is_tablet_pad] ( identifier[self] ): literal[string] keyword[if] identifier[self] keyword[in] { identifier[type] ( identifier[self] ). identifier[TABLET_PAD_BUTTON] , identifier[type] ( identifier[self] ). identifier[TABLET_PAD_RING] , identifier[type] ( identifier[self] ). identifier[TABLET_PAD_STRIP] }: keyword[return] keyword[True] keyword[else] : keyword[return] keyword[False]
def is_tablet_pad(self): """Macro to check if this event is a :class:`~libinput.event.TabletPadEvent`. """ if self in {type(self).TABLET_PAD_BUTTON, type(self).TABLET_PAD_RING, type(self).TABLET_PAD_STRIP}: return True # depends on [control=['if'], data=[]] else: return False
def nextfreeip(self): """ Method searches for the next free ip address in the scope object and returns it as an ipaddress address object. :return: """ allocated_ips = [ipaddress.ip_address(host['ip']) for host in self.hosts] for ip in self.netaddr: if str(ip).split('.')[-1] == '0': continue if ip not in allocated_ips: return ip
def function[nextfreeip, parameter[self]]: constant[ Method searches for the next free ip address in the scope object and returns it as an ipaddress address object. :return: ] variable[allocated_ips] assign[=] <ast.ListComp object at 0x7da20c76ebc0> for taget[name[ip]] in starred[name[self].netaddr] begin[:] if compare[call[call[call[name[str], parameter[name[ip]]].split, parameter[constant[.]]]][<ast.UnaryOp object at 0x7da20c76f760>] equal[==] constant[0]] begin[:] continue if compare[name[ip] <ast.NotIn object at 0x7da2590d7190> name[allocated_ips]] begin[:] return[name[ip]]
keyword[def] identifier[nextfreeip] ( identifier[self] ): literal[string] identifier[allocated_ips] =[ identifier[ipaddress] . identifier[ip_address] ( identifier[host] [ literal[string] ]) keyword[for] identifier[host] keyword[in] identifier[self] . identifier[hosts] ] keyword[for] identifier[ip] keyword[in] identifier[self] . identifier[netaddr] : keyword[if] identifier[str] ( identifier[ip] ). identifier[split] ( literal[string] )[- literal[int] ]== literal[string] : keyword[continue] keyword[if] identifier[ip] keyword[not] keyword[in] identifier[allocated_ips] : keyword[return] identifier[ip]
def nextfreeip(self): """ Method searches for the next free ip address in the scope object and returns it as an ipaddress address object. :return: """ allocated_ips = [ipaddress.ip_address(host['ip']) for host in self.hosts] for ip in self.netaddr: if str(ip).split('.')[-1] == '0': continue # depends on [control=['if'], data=[]] if ip not in allocated_ips: return ip # depends on [control=['if'], data=['ip']] # depends on [control=['for'], data=['ip']]
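For reference, the same scan works standalone with the stdlib ipaddress module; hosts and netaddr below are made-up stand-ins for the scope object's attributes:

import ipaddress

hosts = [{'ip': '192.168.0.1'}, {'ip': '192.168.0.2'}]  # illustrative data
netaddr = ipaddress.ip_network('192.168.0.0/29')
allocated_ips = [ipaddress.ip_address(host['ip']) for host in hosts]
for ip in netaddr:
    if str(ip).split('.')[-1] == '0':  # skip the network address
        continue
    if ip not in allocated_ips:
        print(ip)  # prints 192.168.0.3, the first free address
        break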
def deny_assignments(self): """Instance depends on the API version: * 2018-07-01-preview: :class:`DenyAssignmentsOperations<azure.mgmt.authorization.v2018_07_01_preview.operations.DenyAssignmentsOperations>` """ api_version = self._get_api_version('deny_assignments') if api_version == '2018-07-01-preview': from .v2018_07_01_preview.operations import DenyAssignmentsOperations as OperationClass else: raise NotImplementedError("APIVersion {} is not available".format(api_version)) return OperationClass(self._client, self.config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
def function[deny_assignments, parameter[self]]: constant[Instance depends on the API version: * 2018-07-01-preview: :class:`DenyAssignmentsOperations<azure.mgmt.authorization.v2018_07_01_preview.operations.DenyAssignmentsOperations>` ] variable[api_version] assign[=] call[name[self]._get_api_version, parameter[constant[deny_assignments]]] if compare[name[api_version] equal[==] constant[2018-07-01-preview]] begin[:] from relative_module[v2018_07_01_preview.operations] import module[DenyAssignmentsOperations] return[call[name[OperationClass], parameter[name[self]._client, name[self].config, call[name[Serializer], parameter[call[name[self]._models_dict, parameter[name[api_version]]]]], call[name[Deserializer], parameter[call[name[self]._models_dict, parameter[name[api_version]]]]]]]]
keyword[def] identifier[deny_assignments] ( identifier[self] ): literal[string] identifier[api_version] = identifier[self] . identifier[_get_api_version] ( literal[string] ) keyword[if] identifier[api_version] == literal[string] : keyword[from] . identifier[v2018_07_01_preview] . identifier[operations] keyword[import] identifier[DenyAssignmentsOperations] keyword[as] identifier[OperationClass] keyword[else] : keyword[raise] identifier[NotImplementedError] ( literal[string] . identifier[format] ( identifier[api_version] )) keyword[return] identifier[OperationClass] ( identifier[self] . identifier[_client] , identifier[self] . identifier[config] , identifier[Serializer] ( identifier[self] . identifier[_models_dict] ( identifier[api_version] )), identifier[Deserializer] ( identifier[self] . identifier[_models_dict] ( identifier[api_version] )))
def deny_assignments(self): """Instance depends on the API version: * 2018-07-01-preview: :class:`DenyAssignmentsOperations<azure.mgmt.authorization.v2018_07_01_preview.operations.DenyAssignmentsOperations>` """ api_version = self._get_api_version('deny_assignments') if api_version == '2018-07-01-preview': from .v2018_07_01_preview.operations import DenyAssignmentsOperations as OperationClass # depends on [control=['if'], data=[]] else: raise NotImplementedError('APIVersion {} is not available'.format(api_version)) return OperationClass(self._client, self.config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
def make_tmp_name(name): """Generates a tmp name for a file or dir. This is a tempname that sits in the same dir as `name`. If it exists on disk at context exit time, it is deleted. """ path, base = os.path.split(name) tmp_base = ".tmp-%s-%s" % (base, uuid4().hex) tmp_name = os.path.join(path, tmp_base) try: yield tmp_name finally: safe_remove(tmp_name)
def function[make_tmp_name, parameter[name]]: constant[Generates a tmp name for a file or dir. This is a tempname that sits in the same dir as `name`. If it exists on disk at context exit time, it is deleted. ] <ast.Tuple object at 0x7da18f813d00> assign[=] call[name[os].path.split, parameter[name[name]]] variable[tmp_base] assign[=] binary_operation[constant[.tmp-%s-%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da18f812080>, <ast.Attribute object at 0x7da18f810fd0>]]] variable[tmp_name] assign[=] call[name[os].path.join, parameter[name[path], name[tmp_base]]] <ast.Try object at 0x7da18f8118d0>
keyword[def] identifier[make_tmp_name] ( identifier[name] ): literal[string] identifier[path] , identifier[base] = identifier[os] . identifier[path] . identifier[split] ( identifier[name] ) identifier[tmp_base] = literal[string] %( identifier[base] , identifier[uuid4] (). identifier[hex] ) identifier[tmp_name] = identifier[os] . identifier[path] . identifier[join] ( identifier[path] , identifier[tmp_base] ) keyword[try] : keyword[yield] identifier[tmp_name] keyword[finally] : identifier[safe_remove] ( identifier[tmp_name] )
def make_tmp_name(name): """Generates a tmp name for a file or dir. This is a tempname that sits in the same dir as `name`. If it exists on disk at context exit time, it is deleted. """ (path, base) = os.path.split(name) tmp_base = '.tmp-%s-%s' % (base, uuid4().hex) tmp_name = os.path.join(path, tmp_base) try: yield tmp_name # depends on [control=['try'], data=[]] finally: safe_remove(tmp_name)
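The yield inside a try/finally implies make_tmp_name is meant to be used as a context manager (i.e. decorated with contextlib.contextmanager). A hypothetical write-then-rename usage; the target path and payload are purely illustrative:

import os

target = 'output.bin'  # assumed target file in the current directory
with make_tmp_name(target) as tmp_name:
    with open(tmp_name, 'wb') as f:
        f.write(b'payload')
    os.rename(tmp_name, target)  # publish atomically; safe_remove then finds nothing to delete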
def makescacoldesc(columnname, value, datamanagertype='', datamanagergroup='', options=0, maxlen=0, comment='', valuetype='', keywords={}): """Create description of a scalar column. A description for a scalar column can be created from a name for the column and a data value, which is used only to determine the type of the column. Note that a dict value is also possible. It is possible to create the column description in more detail by giving the data manager name, group, option, and comment as well. The data manager type tells which data manager (storage manager) is used to store the columns. The data manager type and group are explained in more detail in the `casacore Tables <../../casacore/doc/html/group__Tables__module.html>`_ documentation. It returns a dict with fields `name` and `desc` which can thereafter be used to build a table description using function :func:`maketabdesc`. `columname` Name of column `value` Example data value used to determine the column's data type. It is only used if argument `valuetype` is not given. `datamanagertype` Type of data manager which can be one of StandardStMan (default) or IncrementalStMan. The latter one can save disk space if many subsequent cells in the column will have the same value. `datamanagergroup` Data manager group. Only for the expert user. `options` Options. Need not be filled in. `maxlen` Maximum length of string values in a column. Default 0 means unlimited. `comment` Comment: informational for user. `valuetype` A string giving the column's data type. Possible data types are bool (or boolean), uchar (or byte), short, int (or integer), uint, float, double, complex, dcomplex, and string. 'keywords' A dict defining initial keywords for the column. For example:: scd1 = makescacoldesc("col2", "")) scd2 = makescacoldesc("col1", 1, "IncrementalStMan") td = maketabdesc([scd1, scd2]) This creates a table description consisting of an integer column `col1`, and a string column `col2`. `col1` uses the IncrementalStMan storage manager, while `col2` uses the default storage manager StandardStMan. """ vtype = valuetype if vtype == '': vtype = _value_type_name(value) rec2 = {'valueType': vtype, 'dataManagerType': datamanagertype, 'dataManagerGroup': datamanagergroup, 'option': options, 'maxlen': maxlen, 'comment': comment, 'keywords': keywords} return {'name': columnname, 'desc': rec2}
def function[makescacoldesc, parameter[columnname, value, datamanagertype, datamanagergroup, options, maxlen, comment, valuetype, keywords]]: constant[Create description of a scalar column. A description for a scalar column can be created from a name for the column and a data value, which is used only to determine the type of the column. Note that a dict value is also possible. It is possible to create the column description in more detail by giving the data manager name, group, option, and comment as well. The data manager type tells which data manager (storage manager) is used to store the columns. The data manager type and group are explained in more detail in the `casacore Tables <../../casacore/doc/html/group__Tables__module.html>`_ documentation. It returns a dict with fields `name` and `desc` which can thereafter be used to build a table description using function :func:`maketabdesc`. `columname` Name of column `value` Example data value used to determine the column's data type. It is only used if argument `valuetype` is not given. `datamanagertype` Type of data manager which can be one of StandardStMan (default) or IncrementalStMan. The latter one can save disk space if many subsequent cells in the column will have the same value. `datamanagergroup` Data manager group. Only for the expert user. `options` Options. Need not be filled in. `maxlen` Maximum length of string values in a column. Default 0 means unlimited. `comment` Comment: informational for user. `valuetype` A string giving the column's data type. Possible data types are bool (or boolean), uchar (or byte), short, int (or integer), uint, float, double, complex, dcomplex, and string. 'keywords' A dict defining initial keywords for the column. For example:: scd1 = makescacoldesc("col2", "")) scd2 = makescacoldesc("col1", 1, "IncrementalStMan") td = maketabdesc([scd1, scd2]) This creates a table description consisting of an integer column `col1`, and a string column `col2`. `col1` uses the IncrementalStMan storage manager, while `col2` uses the default storage manager StandardStMan. ] variable[vtype] assign[=] name[valuetype] if compare[name[vtype] equal[==] constant[]] begin[:] variable[vtype] assign[=] call[name[_value_type_name], parameter[name[value]]] variable[rec2] assign[=] dictionary[[<ast.Constant object at 0x7da18bcc8970>, <ast.Constant object at 0x7da18bcc9d50>, <ast.Constant object at 0x7da18bcca7d0>, <ast.Constant object at 0x7da18bccad70>, <ast.Constant object at 0x7da18bcca1a0>, <ast.Constant object at 0x7da18bccba60>, <ast.Constant object at 0x7da18bccbca0>], [<ast.Name object at 0x7da18bcc9030>, <ast.Name object at 0x7da18bccb070>, <ast.Name object at 0x7da18bcc8ca0>, <ast.Name object at 0x7da18bccbc70>, <ast.Name object at 0x7da18bccb010>, <ast.Name object at 0x7da1b0d51810>, <ast.Name object at 0x7da1b0d53970>]] return[dictionary[[<ast.Constant object at 0x7da1b0d52fe0>, <ast.Constant object at 0x7da1b0d53a00>], [<ast.Name object at 0x7da1b0d530d0>, <ast.Name object at 0x7da1b0d512a0>]]]
keyword[def] identifier[makescacoldesc] ( identifier[columnname] , identifier[value] , identifier[datamanagertype] = literal[string] , identifier[datamanagergroup] = literal[string] , identifier[options] = literal[int] , identifier[maxlen] = literal[int] , identifier[comment] = literal[string] , identifier[valuetype] = literal[string] , identifier[keywords] ={}): literal[string] identifier[vtype] = identifier[valuetype] keyword[if] identifier[vtype] == literal[string] : identifier[vtype] = identifier[_value_type_name] ( identifier[value] ) identifier[rec2] ={ literal[string] : identifier[vtype] , literal[string] : identifier[datamanagertype] , literal[string] : identifier[datamanagergroup] , literal[string] : identifier[options] , literal[string] : identifier[maxlen] , literal[string] : identifier[comment] , literal[string] : identifier[keywords] } keyword[return] { literal[string] : identifier[columnname] , literal[string] : identifier[rec2] }
def makescacoldesc(columnname, value, datamanagertype='', datamanagergroup='', options=0, maxlen=0, comment='', valuetype='', keywords={}): """Create description of a scalar column. A description for a scalar column can be created from a name for the column and a data value, which is used only to determine the type of the column. Note that a dict value is also possible. It is possible to create the column description in more detail by giving the data manager name, group, option, and comment as well. The data manager type tells which data manager (storage manager) is used to store the columns. The data manager type and group are explained in more detail in the `casacore Tables <../../casacore/doc/html/group__Tables__module.html>`_ documentation. It returns a dict with fields `name` and `desc` which can thereafter be used to build a table description using function :func:`maketabdesc`. `columname` Name of column `value` Example data value used to determine the column's data type. It is only used if argument `valuetype` is not given. `datamanagertype` Type of data manager which can be one of StandardStMan (default) or IncrementalStMan. The latter one can save disk space if many subsequent cells in the column will have the same value. `datamanagergroup` Data manager group. Only for the expert user. `options` Options. Need not be filled in. `maxlen` Maximum length of string values in a column. Default 0 means unlimited. `comment` Comment: informational for user. `valuetype` A string giving the column's data type. Possible data types are bool (or boolean), uchar (or byte), short, int (or integer), uint, float, double, complex, dcomplex, and string. 'keywords' A dict defining initial keywords for the column. For example:: scd1 = makescacoldesc("col2", "")) scd2 = makescacoldesc("col1", 1, "IncrementalStMan") td = maketabdesc([scd1, scd2]) This creates a table description consisting of an integer column `col1`, and a string column `col2`. `col1` uses the IncrementalStMan storage manager, while `col2` uses the default storage manager StandardStMan. """ vtype = valuetype if vtype == '': vtype = _value_type_name(value) # depends on [control=['if'], data=['vtype']] rec2 = {'valueType': vtype, 'dataManagerType': datamanagertype, 'dataManagerGroup': datamanagergroup, 'option': options, 'maxlen': maxlen, 'comment': comment, 'keywords': keywords} return {'name': columnname, 'desc': rec2}
def switch_to_output(self, value=False, **kwargs): """Switch the pin state to a digital output with the provided starting value (True/False for high or low, default is False/low). """ self.direction = digitalio.Direction.OUTPUT self.value = value
def function[switch_to_output, parameter[self, value]]: constant[Switch the pin state to a digital output with the provided starting value (True/False for high or low, default is False/low). ] name[self].direction assign[=] name[digitalio].Direction.OUTPUT name[self].value assign[=] name[value]
keyword[def] identifier[switch_to_output] ( identifier[self] , identifier[value] = keyword[False] ,** identifier[kwargs] ): literal[string] identifier[self] . identifier[direction] = identifier[digitalio] . identifier[Direction] . identifier[OUTPUT] identifier[self] . identifier[value] = identifier[value]
def switch_to_output(self, value=False, **kwargs): """Switch the pin state to a digital output with the provided starting value (True/False for high or low, default is False/low). """ self.direction = digitalio.Direction.OUTPUT self.value = value
def body_block_attribution(tag): "extract the attribution content for figures, tables, videos" attributions = [] if raw_parser.attrib(tag): for attrib_tag in raw_parser.attrib(tag): attributions.append(node_contents_str(attrib_tag)) if raw_parser.permissions(tag): # concatenate content from from the permissions tag for permissions_tag in raw_parser.permissions(tag): attrib_string = '' # add the copyright statement if found attrib_string = join_sentences(attrib_string, node_contents_str(raw_parser.copyright_statement(permissions_tag)), '.') # add the license paragraphs if raw_parser.licence_p(permissions_tag): for licence_p_tag in raw_parser.licence_p(permissions_tag): attrib_string = join_sentences(attrib_string, node_contents_str(licence_p_tag), '.') if attrib_string != '': attributions.append(attrib_string) return attributions
def function[body_block_attribution, parameter[tag]]: constant[extract the attribution content for figures, tables, videos] variable[attributions] assign[=] list[[]] if call[name[raw_parser].attrib, parameter[name[tag]]] begin[:] for taget[name[attrib_tag]] in starred[call[name[raw_parser].attrib, parameter[name[tag]]]] begin[:] call[name[attributions].append, parameter[call[name[node_contents_str], parameter[name[attrib_tag]]]]] if call[name[raw_parser].permissions, parameter[name[tag]]] begin[:] for taget[name[permissions_tag]] in starred[call[name[raw_parser].permissions, parameter[name[tag]]]] begin[:] variable[attrib_string] assign[=] constant[] variable[attrib_string] assign[=] call[name[join_sentences], parameter[name[attrib_string], call[name[node_contents_str], parameter[call[name[raw_parser].copyright_statement, parameter[name[permissions_tag]]]]], constant[.]]] if call[name[raw_parser].licence_p, parameter[name[permissions_tag]]] begin[:] for taget[name[licence_p_tag]] in starred[call[name[raw_parser].licence_p, parameter[name[permissions_tag]]]] begin[:] variable[attrib_string] assign[=] call[name[join_sentences], parameter[name[attrib_string], call[name[node_contents_str], parameter[name[licence_p_tag]]], constant[.]]] if compare[name[attrib_string] not_equal[!=] constant[]] begin[:] call[name[attributions].append, parameter[name[attrib_string]]] return[name[attributions]]
keyword[def] identifier[body_block_attribution] ( identifier[tag] ): literal[string] identifier[attributions] =[] keyword[if] identifier[raw_parser] . identifier[attrib] ( identifier[tag] ): keyword[for] identifier[attrib_tag] keyword[in] identifier[raw_parser] . identifier[attrib] ( identifier[tag] ): identifier[attributions] . identifier[append] ( identifier[node_contents_str] ( identifier[attrib_tag] )) keyword[if] identifier[raw_parser] . identifier[permissions] ( identifier[tag] ): keyword[for] identifier[permissions_tag] keyword[in] identifier[raw_parser] . identifier[permissions] ( identifier[tag] ): identifier[attrib_string] = literal[string] identifier[attrib_string] = identifier[join_sentences] ( identifier[attrib_string] , identifier[node_contents_str] ( identifier[raw_parser] . identifier[copyright_statement] ( identifier[permissions_tag] )), literal[string] ) keyword[if] identifier[raw_parser] . identifier[licence_p] ( identifier[permissions_tag] ): keyword[for] identifier[licence_p_tag] keyword[in] identifier[raw_parser] . identifier[licence_p] ( identifier[permissions_tag] ): identifier[attrib_string] = identifier[join_sentences] ( identifier[attrib_string] , identifier[node_contents_str] ( identifier[licence_p_tag] ), literal[string] ) keyword[if] identifier[attrib_string] != literal[string] : identifier[attributions] . identifier[append] ( identifier[attrib_string] ) keyword[return] identifier[attributions]
def body_block_attribution(tag): """extract the attribution content for figures, tables, videos""" attributions = [] if raw_parser.attrib(tag): for attrib_tag in raw_parser.attrib(tag): attributions.append(node_contents_str(attrib_tag)) # depends on [control=['for'], data=['attrib_tag']] # depends on [control=['if'], data=[]] if raw_parser.permissions(tag): # concatenate content from from the permissions tag for permissions_tag in raw_parser.permissions(tag): attrib_string = '' # add the copyright statement if found attrib_string = join_sentences(attrib_string, node_contents_str(raw_parser.copyright_statement(permissions_tag)), '.') # add the license paragraphs if raw_parser.licence_p(permissions_tag): for licence_p_tag in raw_parser.licence_p(permissions_tag): attrib_string = join_sentences(attrib_string, node_contents_str(licence_p_tag), '.') # depends on [control=['for'], data=['licence_p_tag']] # depends on [control=['if'], data=[]] if attrib_string != '': attributions.append(attrib_string) # depends on [control=['if'], data=['attrib_string']] # depends on [control=['for'], data=['permissions_tag']] # depends on [control=['if'], data=[]] return attributions
def _init_idxs_strpat(self, usr_hdrs): """List of indexes whose values will be strings.""" strpat = self.strpat_hdrs.keys() self.idxs_strpat = [ Idx for Hdr, Idx in self.hdr2idx.items() if Hdr in usr_hdrs and Hdr in strpat]
def function[_init_idxs_strpat, parameter[self, usr_hdrs]]: constant[List of indexes whose values will be strings.] variable[strpat] assign[=] call[name[self].strpat_hdrs.keys, parameter[]] name[self].idxs_strpat assign[=] <ast.ListComp object at 0x7da18f811c90>
keyword[def] identifier[_init_idxs_strpat] ( identifier[self] , identifier[usr_hdrs] ): literal[string] identifier[strpat] = identifier[self] . identifier[strpat_hdrs] . identifier[keys] () identifier[self] . identifier[idxs_strpat] =[ identifier[Idx] keyword[for] identifier[Hdr] , identifier[Idx] keyword[in] identifier[self] . identifier[hdr2idx] . identifier[items] () keyword[if] identifier[Hdr] keyword[in] identifier[usr_hdrs] keyword[and] identifier[Hdr] keyword[in] identifier[strpat] ]
def _init_idxs_strpat(self, usr_hdrs): """List of indexes whose values will be strings.""" strpat = self.strpat_hdrs.keys() self.idxs_strpat = [Idx for (Hdr, Idx) in self.hdr2idx.items() if Hdr in usr_hdrs and Hdr in strpat]
def AddAdapter(self, device_name, system_name): '''Convenience method to add a Bluetooth adapter You have to specify a device name which must be a valid part of an object path, e. g. "hci0", and an arbitrary system name (pretty hostname). Returns the new object path. ''' path = '/org/bluez/' + device_name adapter_properties = { 'UUIDs': dbus.Array([ # Reference: # http://git.kernel.org/cgit/bluetooth/bluez.git/tree/lib/uuid.h # PNP '00001200-0000-1000-8000-00805f9b34fb', # Generic Access Profile '00001800-0000-1000-8000-00805f9b34fb', # Generic Attribute Profile '00001801-0000-1000-8000-00805f9b34fb', # Audio/Video Remote Control Profile (remote) '0000110e-0000-1000-8000-00805f9b34fb', # Audio/Video Remote Control Profile (target) '0000110c-0000-1000-8000-00805f9b34fb', ], variant_level=1), 'Discoverable': dbus.Boolean(True, variant_level=1), 'Discovering': dbus.Boolean(True, variant_level=1), 'Pairable': dbus.Boolean(True, variant_level=1), 'Powered': dbus.Boolean(True, variant_level=1), 'Address': dbus.String('00:01:02:03:04:05', variant_level=1), 'Alias': dbus.String(system_name, variant_level=1), 'Modalias': dbus.String('usb:v1D6Bp0245d050A', variant_level=1), 'Name': dbus.String(system_name, variant_level=1), # Reference: # http://bluetooth-pentest.narod.ru/software/ # bluetooth_class_of_device-service_generator.html 'Class': dbus.UInt32(268, variant_level=1), # Computer, Laptop 'DiscoverableTimeout': dbus.UInt32(180, variant_level=1), 'PairableTimeout': dbus.UInt32(180, variant_level=1), } self.AddObject(path, ADAPTER_IFACE, # Properties adapter_properties, # Methods [ ('RemoveDevice', 'o', '', ''), ('StartDiscovery', '', '', ''), ('StopDiscovery', '', '', ''), ]) adapter = mockobject.objects[path] adapter.AddMethods(MEDIA_IFACE, [ ('RegisterEndpoint', 'oa{sv}', '', ''), ('UnregisterEndpoint', 'o', '', ''), ]) adapter.AddMethods(NETWORK_SERVER_IFACE, [ ('Register', 'ss', '', ''), ('Unregister', 's', '', ''), ]) manager = mockobject.objects['/'] manager.EmitSignal(OBJECT_MANAGER_IFACE, 'InterfacesAdded', 'oa{sa{sv}}', [ dbus.ObjectPath(path), {ADAPTER_IFACE: adapter_properties}, ]) return path
def function[AddAdapter, parameter[self, device_name, system_name]]: constant[Convenience method to add a Bluetooth adapter You have to specify a device name which must be a valid part of an object path, e. g. "hci0", and an arbitrary system name (pretty hostname). Returns the new object path. ] variable[path] assign[=] binary_operation[constant[/org/bluez/] + name[device_name]] variable[adapter_properties] assign[=] dictionary[[<ast.Constant object at 0x7da18f8130d0>, <ast.Constant object at 0x7da18f811450>, <ast.Constant object at 0x7da18f812c20>, <ast.Constant object at 0x7da18f8125c0>, <ast.Constant object at 0x7da18f810df0>, <ast.Constant object at 0x7da18f813fa0>, <ast.Constant object at 0x7da18f8106d0>, <ast.Constant object at 0x7da18f812e90>, <ast.Constant object at 0x7da18f811f60>, <ast.Constant object at 0x7da18f811b40>, <ast.Constant object at 0x7da18f811f90>, <ast.Constant object at 0x7da18f8116c0>], [<ast.Call object at 0x7da18f812fe0>, <ast.Call object at 0x7da18f812440>, <ast.Call object at 0x7da18f8112d0>, <ast.Call object at 0x7da18f8101f0>, <ast.Call object at 0x7da18f8103d0>, <ast.Call object at 0x7da18f811000>, <ast.Call object at 0x7da18f811990>, <ast.Call object at 0x7da18f8103a0>, <ast.Call object at 0x7da18f8114e0>, <ast.Call object at 0x7da18f812290>, <ast.Call object at 0x7da18f811b70>, <ast.Call object at 0x7da18f812b90>]] call[name[self].AddObject, parameter[name[path], name[ADAPTER_IFACE], name[adapter_properties], list[[<ast.Tuple object at 0x7da18f8129e0>, <ast.Tuple object at 0x7da18f8128f0>, <ast.Tuple object at 0x7da18f813370>]]]] variable[adapter] assign[=] call[name[mockobject].objects][name[path]] call[name[adapter].AddMethods, parameter[name[MEDIA_IFACE], list[[<ast.Tuple object at 0x7da18f811bd0>, <ast.Tuple object at 0x7da18f813580>]]]] call[name[adapter].AddMethods, parameter[name[NETWORK_SERVER_IFACE], list[[<ast.Tuple object at 0x7da18f810580>, <ast.Tuple object at 0x7da18f813250>]]]] variable[manager] assign[=] call[name[mockobject].objects][constant[/]] call[name[manager].EmitSignal, parameter[name[OBJECT_MANAGER_IFACE], constant[InterfacesAdded], constant[oa{sa{sv}}], list[[<ast.Call object at 0x7da18f8122f0>, <ast.Dict object at 0x7da20c76db70>]]]] return[name[path]]
keyword[def] identifier[AddAdapter] ( identifier[self] , identifier[device_name] , identifier[system_name] ): literal[string] identifier[path] = literal[string] + identifier[device_name] identifier[adapter_properties] ={ literal[string] : identifier[dbus] . identifier[Array] ([ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , ], identifier[variant_level] = literal[int] ), literal[string] : identifier[dbus] . identifier[Boolean] ( keyword[True] , identifier[variant_level] = literal[int] ), literal[string] : identifier[dbus] . identifier[Boolean] ( keyword[True] , identifier[variant_level] = literal[int] ), literal[string] : identifier[dbus] . identifier[Boolean] ( keyword[True] , identifier[variant_level] = literal[int] ), literal[string] : identifier[dbus] . identifier[Boolean] ( keyword[True] , identifier[variant_level] = literal[int] ), literal[string] : identifier[dbus] . identifier[String] ( literal[string] , identifier[variant_level] = literal[int] ), literal[string] : identifier[dbus] . identifier[String] ( identifier[system_name] , identifier[variant_level] = literal[int] ), literal[string] : identifier[dbus] . identifier[String] ( literal[string] , identifier[variant_level] = literal[int] ), literal[string] : identifier[dbus] . identifier[String] ( identifier[system_name] , identifier[variant_level] = literal[int] ), literal[string] : identifier[dbus] . identifier[UInt32] ( literal[int] , identifier[variant_level] = literal[int] ), literal[string] : identifier[dbus] . identifier[UInt32] ( literal[int] , identifier[variant_level] = literal[int] ), literal[string] : identifier[dbus] . identifier[UInt32] ( literal[int] , identifier[variant_level] = literal[int] ), } identifier[self] . identifier[AddObject] ( identifier[path] , identifier[ADAPTER_IFACE] , identifier[adapter_properties] , [ ( literal[string] , literal[string] , literal[string] , literal[string] ), ( literal[string] , literal[string] , literal[string] , literal[string] ), ( literal[string] , literal[string] , literal[string] , literal[string] ), ]) identifier[adapter] = identifier[mockobject] . identifier[objects] [ identifier[path] ] identifier[adapter] . identifier[AddMethods] ( identifier[MEDIA_IFACE] ,[ ( literal[string] , literal[string] , literal[string] , literal[string] ), ( literal[string] , literal[string] , literal[string] , literal[string] ), ]) identifier[adapter] . identifier[AddMethods] ( identifier[NETWORK_SERVER_IFACE] ,[ ( literal[string] , literal[string] , literal[string] , literal[string] ), ( literal[string] , literal[string] , literal[string] , literal[string] ), ]) identifier[manager] = identifier[mockobject] . identifier[objects] [ literal[string] ] identifier[manager] . identifier[EmitSignal] ( identifier[OBJECT_MANAGER_IFACE] , literal[string] , literal[string] ,[ identifier[dbus] . identifier[ObjectPath] ( identifier[path] ), { identifier[ADAPTER_IFACE] : identifier[adapter_properties] }, ]) keyword[return] identifier[path]
def AddAdapter(self, device_name, system_name): """Convenience method to add a Bluetooth adapter You have to specify a device name which must be a valid part of an object path, e. g. "hci0", and an arbitrary system name (pretty hostname). Returns the new object path. """ path = '/org/bluez/' + device_name # Reference: # http://git.kernel.org/cgit/bluetooth/bluez.git/tree/lib/uuid.h # PNP # Generic Access Profile # Generic Attribute Profile # Audio/Video Remote Control Profile (remote) # Audio/Video Remote Control Profile (target) # Reference: # http://bluetooth-pentest.narod.ru/software/ # bluetooth_class_of_device-service_generator.html # Computer, Laptop adapter_properties = {'UUIDs': dbus.Array(['00001200-0000-1000-8000-00805f9b34fb', '00001800-0000-1000-8000-00805f9b34fb', '00001801-0000-1000-8000-00805f9b34fb', '0000110e-0000-1000-8000-00805f9b34fb', '0000110c-0000-1000-8000-00805f9b34fb'], variant_level=1), 'Discoverable': dbus.Boolean(True, variant_level=1), 'Discovering': dbus.Boolean(True, variant_level=1), 'Pairable': dbus.Boolean(True, variant_level=1), 'Powered': dbus.Boolean(True, variant_level=1), 'Address': dbus.String('00:01:02:03:04:05', variant_level=1), 'Alias': dbus.String(system_name, variant_level=1), 'Modalias': dbus.String('usb:v1D6Bp0245d050A', variant_level=1), 'Name': dbus.String(system_name, variant_level=1), 'Class': dbus.UInt32(268, variant_level=1), 'DiscoverableTimeout': dbus.UInt32(180, variant_level=1), 'PairableTimeout': dbus.UInt32(180, variant_level=1)} # Properties # Methods self.AddObject(path, ADAPTER_IFACE, adapter_properties, [('RemoveDevice', 'o', '', ''), ('StartDiscovery', '', '', ''), ('StopDiscovery', '', '', '')]) adapter = mockobject.objects[path] adapter.AddMethods(MEDIA_IFACE, [('RegisterEndpoint', 'oa{sv}', '', ''), ('UnregisterEndpoint', 'o', '', '')]) adapter.AddMethods(NETWORK_SERVER_IFACE, [('Register', 'ss', '', ''), ('Unregister', 's', '', '')]) manager = mockobject.objects['/'] manager.EmitSignal(OBJECT_MANAGER_IFACE, 'InterfacesAdded', 'oa{sa{sv}}', [dbus.ObjectPath(path), {ADAPTER_IFACE: adapter_properties}]) return path
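A hedged usage sketch in the style of python-dbusmock tests; the object and template names here are assumptions, not taken from this file:

# inside a dbusmock.DBusTestCase, after spawning a bluez template, e.g.:
# (p_mock, obj_bluez) = self.spawn_server_template('bluez5', {})
path = obj_bluez.AddAdapter('hci0', 'my-computer')
assert path == '/org/bluez/hci0'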
def convert_double_to_two_registers(doubleValue): """ Convert 32 Bit Value to two 16 Bit Value to send as Modbus Registers doubleValue: Value to be converted return: 16 Bit Register values int[] """ myList = list() myList.append(int(doubleValue & 0x0000FFFF)) #Append Least Significant Word myList.append(int((doubleValue & 0xFFFF0000)>>16)) #Append Most Significant Word return myList
def function[convert_double_to_two_registers, parameter[doubleValue]]: constant[ Convert 32 Bit Value to two 16 Bit Value to send as Modbus Registers doubleValue: Value to be converted return: 16 Bit Register values int[] ] variable[myList] assign[=] call[name[list], parameter[]] call[name[myList].append, parameter[call[name[int], parameter[binary_operation[name[doubleValue] <ast.BitAnd object at 0x7da2590d6b60> constant[65535]]]]]] call[name[myList].append, parameter[call[name[int], parameter[binary_operation[binary_operation[name[doubleValue] <ast.BitAnd object at 0x7da2590d6b60> constant[4294901760]] <ast.RShift object at 0x7da2590d6a40> constant[16]]]]]] return[name[myList]]
keyword[def] identifier[convert_double_to_two_registers] ( identifier[doubleValue] ): literal[string] identifier[myList] = identifier[list] () identifier[myList] . identifier[append] ( identifier[int] ( identifier[doubleValue] & literal[int] )) identifier[myList] . identifier[append] ( identifier[int] (( identifier[doubleValue] & literal[int] )>> literal[int] )) keyword[return] identifier[myList]
def convert_double_to_two_registers(doubleValue): """ Convert 32 Bit Value to two 16 Bit Value to send as Modbus Registers doubleValue: Value to be converted return: 16 Bit Register values int[] """ myList = list() myList.append(int(doubleValue & 65535)) #Append Least Significant Word myList.append(int((doubleValue & 4294901760) >> 16)) #Append Most Significant Word return myList
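A quick check of the word split: the least significant 16 bits come first in the returned list, then the most significant 16 bits.

regs = convert_double_to_two_registers(0x12345678)
assert regs == [0x5678, 0x1234]  # [least significant word, most significant word]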
def download(self, *ids): """ Downloads the subtitles with the given ids. :param ids: The subtitles to download :return: Result instances :raises NotOKException """ bundles = sublists_of(ids, 20) # 20 files at once is an API restriction for bundle in bundles: download_response = self._rpc.DownloadSubtitles(self._token, bundle) assert_status(download_response) download_data = download_response.get('data') for item in download_data: subtitle_id = item['idsubtitlefile'] subtitle_data = item['data'] decompressed = decompress(subtitle_data) yield Result(subtitle_id, decompressed)
def function[download, parameter[self]]: constant[ Downloads the subtitles with the given ids. :param ids: The subtitles to download :return: Result instances :raises NotOKException ] variable[bundles] assign[=] call[name[sublists_of], parameter[name[ids], constant[20]]] for taget[name[bundle]] in starred[name[bundles]] begin[:] variable[download_response] assign[=] call[name[self]._rpc.DownloadSubtitles, parameter[name[self]._token, name[bundle]]] call[name[assert_status], parameter[name[download_response]]] variable[download_data] assign[=] call[name[download_response].get, parameter[constant[data]]] for taget[name[item]] in starred[name[download_data]] begin[:] variable[subtitle_id] assign[=] call[name[item]][constant[idsubtitlefile]] variable[subtitle_data] assign[=] call[name[item]][constant[data]] variable[decompressed] assign[=] call[name[decompress], parameter[name[subtitle_data]]] <ast.Yield object at 0x7da18f09dae0>
keyword[def] identifier[download] ( identifier[self] ,* identifier[ids] ): literal[string] identifier[bundles] = identifier[sublists_of] ( identifier[ids] , literal[int] ) keyword[for] identifier[bundle] keyword[in] identifier[bundles] : identifier[download_response] = identifier[self] . identifier[_rpc] . identifier[DownloadSubtitles] ( identifier[self] . identifier[_token] , identifier[bundle] ) identifier[assert_status] ( identifier[download_response] ) identifier[download_data] = identifier[download_response] . identifier[get] ( literal[string] ) keyword[for] identifier[item] keyword[in] identifier[download_data] : identifier[subtitle_id] = identifier[item] [ literal[string] ] identifier[subtitle_data] = identifier[item] [ literal[string] ] identifier[decompressed] = identifier[decompress] ( identifier[subtitle_data] ) keyword[yield] identifier[Result] ( identifier[subtitle_id] , identifier[decompressed] )
def download(self, *ids): """ Downloads the subtitles with the given ids. :param ids: The subtitles to download :return: Result instances :raises NotOKException """ bundles = sublists_of(ids, 20) # 20 files at once is an API restriction for bundle in bundles: download_response = self._rpc.DownloadSubtitles(self._token, bundle) assert_status(download_response) download_data = download_response.get('data') for item in download_data: subtitle_id = item['idsubtitlefile'] subtitle_data = item['data'] decompressed = decompress(subtitle_data) yield Result(subtitle_id, decompressed) # depends on [control=['for'], data=['item']] # depends on [control=['for'], data=['bundle']]
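Since download is a generator, subtitles stream out as each 20-id bundle is fetched and decompressed. A hypothetical consumer; the client variable and subtitle ids are illustrative, and the Result attribute names are assumed from its constructor arguments:

for result in client.download('1951894390', '1951894391'):
    with open('{}.srt'.format(result.subtitle_id), 'wb') as f:  # attribute names assumed
        f.write(result.data)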
def server_inspect_exception(self, req_event, rep_event, task_ctx, exc_info): """ Called when an exception has been raised in the code run by ZeroRPC """ # Hide the zerorpc internal frames for readability, for a REQ/REP or # REQ/STREAM server the frames to hide are: # - core.ServerBase._async_task # - core.Pattern*.process_call # - core.DecoratorBase.__call__ # # For a PUSH/PULL or PUB/SUB server the frame to hide is: # - core.Puller._receiver if self._hide_zerorpc_frames: traceback = exc_info[2] while traceback: zerorpc_frame = traceback.tb_frame zerorpc_frame.f_locals['__traceback_hide__'] = True frame_info = inspect.getframeinfo(zerorpc_frame) # Is there a better way than this (or looking up the filenames # or hardcoding the number of frames to skip) to know when we # are out of zerorpc? if frame_info.function == '__call__' \ or frame_info.function == '_receiver': break traceback = traceback.tb_next self._sentry_client.captureException( exc_info, extra=task_ctx )
def function[server_inspect_exception, parameter[self, req_event, rep_event, task_ctx, exc_info]]: constant[ Called when an exception has been raised in the code run by ZeroRPC ] if name[self]._hide_zerorpc_frames begin[:] variable[traceback] assign[=] call[name[exc_info]][constant[2]] while name[traceback] begin[:] variable[zerorpc_frame] assign[=] name[traceback].tb_frame call[name[zerorpc_frame].f_locals][constant[__traceback_hide__]] assign[=] constant[True] variable[frame_info] assign[=] call[name[inspect].getframeinfo, parameter[name[zerorpc_frame]]] if <ast.BoolOp object at 0x7da2045650c0> begin[:] break variable[traceback] assign[=] name[traceback].tb_next call[name[self]._sentry_client.captureException, parameter[name[exc_info]]]
keyword[def] identifier[server_inspect_exception] ( identifier[self] , identifier[req_event] , identifier[rep_event] , identifier[task_ctx] , identifier[exc_info] ): literal[string] keyword[if] identifier[self] . identifier[_hide_zerorpc_frames] : identifier[traceback] = identifier[exc_info] [ literal[int] ] keyword[while] identifier[traceback] : identifier[zerorpc_frame] = identifier[traceback] . identifier[tb_frame] identifier[zerorpc_frame] . identifier[f_locals] [ literal[string] ]= keyword[True] identifier[frame_info] = identifier[inspect] . identifier[getframeinfo] ( identifier[zerorpc_frame] ) keyword[if] identifier[frame_info] . identifier[function] == literal[string] keyword[or] identifier[frame_info] . identifier[function] == literal[string] : keyword[break] identifier[traceback] = identifier[traceback] . identifier[tb_next] identifier[self] . identifier[_sentry_client] . identifier[captureException] ( identifier[exc_info] , identifier[extra] = identifier[task_ctx] )
def server_inspect_exception(self, req_event, rep_event, task_ctx, exc_info): """ Called when an exception has been raised in the code run by ZeroRPC """ # Hide the zerorpc internal frames for readability, for a REQ/REP or # REQ/STREAM server the frames to hide are: # - core.ServerBase._async_task # - core.Pattern*.process_call # - core.DecoratorBase.__call__ # # For a PUSH/PULL or PUB/SUB server the frame to hide is: # - core.Puller._receiver if self._hide_zerorpc_frames: traceback = exc_info[2] while traceback: zerorpc_frame = traceback.tb_frame zerorpc_frame.f_locals['__traceback_hide__'] = True frame_info = inspect.getframeinfo(zerorpc_frame) # Is there a better way than this (or looking up the filenames # or hardcoding the number of frames to skip) to know when we # are out of zerorpc? if frame_info.function == '__call__' or frame_info.function == '_receiver': break # depends on [control=['if'], data=[]] traceback = traceback.tb_next # depends on [control=['while'], data=[]] # depends on [control=['if'], data=[]] self._sentry_client.captureException(exc_info, extra=task_ctx)
def webui_data_stores_saved_query_key(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") webui = ET.SubElement(config, "webui", xmlns="http://tail-f.com/ns/webui") data_stores = ET.SubElement(webui, "data-stores") saved_query = ET.SubElement(data_stores, "saved-query") key = ET.SubElement(saved_query, "key") key.text = kwargs.pop('key') callback = kwargs.pop('callback', self._callback) return callback(config)
def function[webui_data_stores_saved_query_key, parameter[self]]: constant[Auto Generated Code ] variable[config] assign[=] call[name[ET].Element, parameter[constant[config]]] variable[webui] assign[=] call[name[ET].SubElement, parameter[name[config], constant[webui]]] variable[data_stores] assign[=] call[name[ET].SubElement, parameter[name[webui], constant[data-stores]]] variable[saved_query] assign[=] call[name[ET].SubElement, parameter[name[data_stores], constant[saved-query]]] variable[key] assign[=] call[name[ET].SubElement, parameter[name[saved_query], constant[key]]] name[key].text assign[=] call[name[kwargs].pop, parameter[constant[key]]] variable[callback] assign[=] call[name[kwargs].pop, parameter[constant[callback], name[self]._callback]] return[call[name[callback], parameter[name[config]]]]
keyword[def] identifier[webui_data_stores_saved_query_key] ( identifier[self] ,** identifier[kwargs] ): literal[string] identifier[config] = identifier[ET] . identifier[Element] ( literal[string] ) identifier[webui] = identifier[ET] . identifier[SubElement] ( identifier[config] , literal[string] , identifier[xmlns] = literal[string] ) identifier[data_stores] = identifier[ET] . identifier[SubElement] ( identifier[webui] , literal[string] ) identifier[saved_query] = identifier[ET] . identifier[SubElement] ( identifier[data_stores] , literal[string] ) identifier[key] = identifier[ET] . identifier[SubElement] ( identifier[saved_query] , literal[string] ) identifier[key] . identifier[text] = identifier[kwargs] . identifier[pop] ( literal[string] ) identifier[callback] = identifier[kwargs] . identifier[pop] ( literal[string] , identifier[self] . identifier[_callback] ) keyword[return] identifier[callback] ( identifier[config] )
def webui_data_stores_saved_query_key(self, **kwargs): """Auto Generated Code """ config = ET.Element('config') webui = ET.SubElement(config, 'webui', xmlns='http://tail-f.com/ns/webui') data_stores = ET.SubElement(webui, 'data-stores') saved_query = ET.SubElement(data_stores, 'saved-query') key = ET.SubElement(saved_query, 'key') key.text = kwargs.pop('key') callback = kwargs.pop('callback', self._callback) return callback(config)
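For clarity, this is the document the builder above produces, reconstructed with the stdlib and a made-up key value:

import xml.etree.ElementTree as ET

config = ET.Element("config")
webui = ET.SubElement(config, "webui", xmlns="http://tail-f.com/ns/webui")
data_stores = ET.SubElement(webui, "data-stores")
saved_query = ET.SubElement(data_stores, "saved-query")
ET.SubElement(saved_query, "key").text = "my-query"  # 'my-query' is illustrative
print(ET.tostring(config).decode())
# <config><webui xmlns="http://tail-f.com/ns/webui"><data-stores><saved-query><key>my-query</key></saved-query></data-stores></webui></config>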
def get_content(path): """Get content of file.""" with codecs.open(abs_path(path), encoding='utf-8') as f: return f.read()
def function[get_content, parameter[path]]: constant[Get content of file.] with call[name[codecs].open, parameter[call[name[abs_path], parameter[name[path]]]]] begin[:] return[call[name[f].read, parameter[]]]
keyword[def] identifier[get_content] ( identifier[path] ): literal[string] keyword[with] identifier[codecs] . identifier[open] ( identifier[abs_path] ( identifier[path] ), identifier[encoding] = literal[string] ) keyword[as] identifier[f] : keyword[return] identifier[f] . identifier[read] ()
def get_content(path): """Get content of file.""" with codecs.open(abs_path(path), encoding='utf-8') as f: return f.read() # depends on [control=['with'], data=['f']]
def load(self, path, name): """Imports the specified ``fgic`` file from the hard disk. :param path: filedirectory from which the ``fgic`` file is read. :param name: filename, without file extension """ filename = name + '.fgic' filepath = aux.joinpath(path, filename) with zipfile.ZipFile(filepath, 'r') as containerZip: #Convert the zipfile data into a str object, necessary since #containerZip.read() returns a bytes object. jsonString = io.TextIOWrapper(containerZip.open('data'), encoding='utf-8' ).read() infoString = io.TextIOWrapper(containerZip.open('info'), encoding='utf-8' ).read() self.container = json.loads(jsonString, object_hook=Fgi.jsonHook) self.info.update(json.loads(infoString)) self._matrixTemplate = self.info['_matrixTemplate'] del self.info['_matrixTemplate']
def function[load, parameter[self, path, name]]: constant[ Imports the specified ``fgic`` file from the hard disk. :param path: filedirectory from which the ``fgic`` file is read. :param name: filename, without file extension ] variable[filename] assign[=] binary_operation[name[name] + constant[.fgic]] variable[filepath] assign[=] call[name[aux].joinpath, parameter[name[path], name[filename]]] with call[name[zipfile].ZipFile, parameter[name[filepath], constant[r]]] begin[:] variable[jsonString] assign[=] call[call[name[io].TextIOWrapper, parameter[call[name[containerZip].open, parameter[constant[data]]]]].read, parameter[]] variable[infoString] assign[=] call[call[name[io].TextIOWrapper, parameter[call[name[containerZip].open, parameter[constant[info]]]]].read, parameter[]] name[self].container assign[=] call[name[json].loads, parameter[name[jsonString]]] call[name[self].info.update, parameter[call[name[json].loads, parameter[name[infoString]]]]] name[self]._matrixTemplate assign[=] call[name[self].info][constant[_matrixTemplate]] <ast.Delete object at 0x7da20c6ab1c0>
keyword[def] identifier[load] ( identifier[self] , identifier[path] , identifier[name] ): literal[string] identifier[filename] = identifier[name] + literal[string] identifier[filepath] = identifier[aux] . identifier[joinpath] ( identifier[path] , identifier[filename] ) keyword[with] identifier[zipfile] . identifier[ZipFile] ( identifier[filepath] , literal[string] ) keyword[as] identifier[containerZip] : identifier[jsonString] = identifier[io] . identifier[TextIOWrapper] ( identifier[containerZip] . identifier[open] ( literal[string] ), identifier[encoding] = literal[string] ). identifier[read] () identifier[infoString] = identifier[io] . identifier[TextIOWrapper] ( identifier[containerZip] . identifier[open] ( literal[string] ), identifier[encoding] = literal[string] ). identifier[read] () identifier[self] . identifier[container] = identifier[json] . identifier[loads] ( identifier[jsonString] , identifier[object_hook] = identifier[Fgi] . identifier[jsonHook] ) identifier[self] . identifier[info] . identifier[update] ( identifier[json] . identifier[loads] ( identifier[infoString] )) identifier[self] . identifier[_matrixTemplate] = identifier[self] . identifier[info] [ literal[string] ] keyword[del] identifier[self] . identifier[info] [ literal[string] ]
def load(self, path, name): """Imports the specified ``fgic`` file from the hard disk. :param path: filedirectory from which the ``fgic`` file is read. :param name: filename, without file extension """ filename = name + '.fgic' filepath = aux.joinpath(path, filename) with zipfile.ZipFile(filepath, 'r') as containerZip: #Convert the zipfile data into a str object, necessary since #containerZip.read() returns a bytes object. jsonString = io.TextIOWrapper(containerZip.open('data'), encoding='utf-8').read() infoString = io.TextIOWrapper(containerZip.open('info'), encoding='utf-8').read() # depends on [control=['with'], data=['containerZip']] self.container = json.loads(jsonString, object_hook=Fgi.jsonHook) self.info.update(json.loads(infoString)) self._matrixTemplate = self.info['_matrixTemplate'] del self.info['_matrixTemplate']
def tar_files(self, path: Path) -> bytes: """ Returns a tar with the git repository. """ tarstream = BytesIO() tar = tarfile.TarFile(fileobj=tarstream, mode='w') tar.add(str(path), arcname="data", recursive=True) tar.close() return tarstream.getvalue()
def function[tar_files, parameter[self, path]]: constant[ Returns a tar with the git repository. ] variable[tarstream] assign[=] call[name[BytesIO], parameter[]] variable[tar] assign[=] call[name[tarfile].TarFile, parameter[]] call[name[tar].add, parameter[call[name[str], parameter[name[path]]]]] call[name[tar].close, parameter[]] return[call[name[tarstream].getvalue, parameter[]]]
keyword[def] identifier[tar_files] ( identifier[self] , identifier[path] : identifier[Path] )-> identifier[bytes] : literal[string] identifier[tarstream] = identifier[BytesIO] () identifier[tar] = identifier[tarfile] . identifier[TarFile] ( identifier[fileobj] = identifier[tarstream] , identifier[mode] = literal[string] ) identifier[tar] . identifier[add] ( identifier[str] ( identifier[path] ), identifier[arcname] = literal[string] , identifier[recursive] = keyword[True] ) identifier[tar] . identifier[close] () keyword[return] identifier[tarstream] . identifier[getvalue] ()
def tar_files(self, path: Path) -> bytes: """ Returns a tar with the git repository. """ tarstream = BytesIO() tar = tarfile.TarFile(fileobj=tarstream, mode='w') tar.add(str(path), arcname='data', recursive=True) tar.close() return tarstream.getvalue()
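A hypothetical round trip showing the returned bytes are a complete in-memory tar whose members sit under the 'data' arcname; the repo instance and path are illustrative:

import tarfile
from io import BytesIO
from pathlib import Path

blob = repo.tar_files(Path('/tmp/myrepo'))  # instance exposing this method assumed
with tarfile.open(fileobj=BytesIO(blob)) as tar:
    print(tar.getnames())  # e.g. ['data', 'data/README.md', ...]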
def undeploy_lambda_alb(self, lambda_name): """ The `zappa undeploy` functionality for ALB infrastructure. """ print("Undeploying ALB infrastructure...") # Locate and delete alb/lambda permissions try: # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/lambda.html#Lambda.Client.remove_permission self.lambda_client.remove_permission( FunctionName=lambda_name, StatementId=lambda_name ) except botocore.exceptions.ClientError as e: # pragma: no cover if "ResourceNotFoundException" in e.response["Error"]["Code"]: pass else: raise e # Locate and delete load balancer try: # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/elbv2.html#ElasticLoadBalancingv2.Client.describe_load_balancers response = self.elbv2_client.describe_load_balancers( Names=[lambda_name] ) if not(response["LoadBalancers"]) or len(response["LoadBalancers"]) > 1: raise EnvironmentError("Failure to locate/delete ALB named [{}]. Response was: {}".format(lambda_name, repr(response))) load_balancer_arn = response["LoadBalancers"][0]["LoadBalancerArn"] # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/elbv2.html#ElasticLoadBalancingv2.Client.describe_listeners response = self.elbv2_client.describe_listeners(LoadBalancerArn=load_balancer_arn) if not(response["Listeners"]): print('No listeners found.') elif len(response["Listeners"]) > 1: raise EnvironmentError("Failure to locate/delete listener for ALB named [{}]. Response was: {}".format(lambda_name, repr(response))) else: listener_arn = response["Listeners"][0]["ListenerArn"] # Remove the listener. This explicit deletion of the listener seems necessary to avoid ResourceInUseExceptions when deleting target groups. # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/elbv2.html#ElasticLoadBalancingv2.Client.delete_listener response = self.elbv2_client.delete_listener(ListenerArn=listener_arn) # Remove the load balancer and wait for completion # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/elbv2.html#ElasticLoadBalancingv2.Client.delete_load_balancer response = self.elbv2_client.delete_load_balancer(LoadBalancerArn=load_balancer_arn) waiter = self.elbv2_client.get_waiter('load_balancers_deleted') print('Waiting for load balancer [{}] to be deleted..'.format(lambda_name)) waiter.wait(LoadBalancerArns=[load_balancer_arn], WaiterConfig={"Delay": 3}) except botocore.exceptions.ClientError as e: # pragma: no cover print(e.response["Error"]["Code"]) if "LoadBalancerNotFound" in e.response["Error"]["Code"]: pass else: raise e # Locate and delete target group try: # Locate the lambda ARN # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/lambda.html#Lambda.Client.get_function response = self.lambda_client.get_function(FunctionName=lambda_name) lambda_arn = response["Configuration"]["FunctionArn"] # Locate the target group ARN # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/elbv2.html#ElasticLoadBalancingv2.Client.describe_target_groups response = self.elbv2_client.describe_target_groups(Names=[lambda_name]) if not(response["TargetGroups"]) or len(response["TargetGroups"]) > 1: raise EnvironmentError("Failure to locate/delete ALB target group named [{}]. Response was: {}".format(lambda_name, repr(response))) target_group_arn = response["TargetGroups"][0]["TargetGroupArn"] # Deregister targets and wait for completion self.elbv2_client.deregister_targets( TargetGroupArn=target_group_arn, Targets=[{"Id": lambda_arn}] ) waiter = self.elbv2_client.get_waiter('target_deregistered') print('Waiting for target [{}] to be deregistered...'.format(lambda_name)) waiter.wait( TargetGroupArn=target_group_arn, Targets=[{"Id": lambda_arn}], WaiterConfig={"Delay": 3} ) # Remove the target group # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/elbv2.html#ElasticLoadBalancingv2.Client.delete_target_group self.elbv2_client.delete_target_group(TargetGroupArn=target_group_arn) except botocore.exceptions.ClientError as e: # pragma: no cover print(e.response["Error"]["Code"]) if "TargetGroupNotFound" in e.response["Error"]["Code"]: pass else: raise e
def function[undeploy_lambda_alb, parameter[self, lambda_name]]: constant[ The `zappa undeploy` functionality for ALB infrastructure. ] call[name[print], parameter[constant[Undeploying ALB infrastructure...]]] <ast.Try object at 0x7da1b1fe5180> <ast.Try object at 0x7da1b1fe5690> <ast.Try object at 0x7da1b1fe64a0>
keyword[def] identifier[undeploy_lambda_alb] ( identifier[self] , identifier[lambda_name] ): literal[string]
identifier[print] ( literal[string] )
keyword[try] :
identifier[self] . identifier[lambda_client] . identifier[remove_permission] ( identifier[FunctionName] = identifier[lambda_name] , identifier[StatementId] = identifier[lambda_name] )
keyword[except] identifier[botocore] . identifier[exceptions] . identifier[ClientError] keyword[as] identifier[e] :
keyword[if] literal[string] keyword[in] identifier[e] . identifier[response] [ literal[string] ][ literal[string] ]: keyword[pass]
keyword[else] : keyword[raise] identifier[e]
keyword[try] :
identifier[response] = identifier[self] . identifier[elbv2_client] . identifier[describe_load_balancers] ( identifier[Names] =[ identifier[lambda_name] ] )
keyword[if] keyword[not] ( identifier[response] [ literal[string] ]) keyword[or] identifier[len] ( identifier[response] [ literal[string] ])> literal[int] : keyword[raise] identifier[EnvironmentError] ( literal[string] . identifier[format] ( identifier[lambda_name] , identifier[repr] ( identifier[response] )))
identifier[load_balancer_arn] = identifier[response] [ literal[string] ][ literal[int] ][ literal[string] ]
identifier[response] = identifier[self] . identifier[elbv2_client] . identifier[describe_listeners] ( identifier[LoadBalancerArn] = identifier[load_balancer_arn] )
keyword[if] keyword[not] ( identifier[response] [ literal[string] ]): identifier[print] ( literal[string] )
keyword[elif] identifier[len] ( identifier[response] [ literal[string] ])> literal[int] : keyword[raise] identifier[EnvironmentError] ( literal[string] . identifier[format] ( identifier[lambda_name] , identifier[repr] ( identifier[response] )))
keyword[else] : identifier[listener_arn] = identifier[response] [ literal[string] ][ literal[int] ][ literal[string] ]
identifier[response] = identifier[self] . identifier[elbv2_client] . identifier[delete_listener] ( identifier[ListenerArn] = identifier[listener_arn] )
identifier[response] = identifier[self] . identifier[elbv2_client] . identifier[delete_load_balancer] ( identifier[LoadBalancerArn] = identifier[load_balancer_arn] )
identifier[waiter] = identifier[self] . identifier[elbv2_client] . identifier[get_waiter] ( literal[string] )
identifier[print] ( literal[string] . identifier[format] ( identifier[lambda_name] ))
identifier[waiter] . identifier[wait] ( identifier[LoadBalancerArns] =[ identifier[load_balancer_arn] ], identifier[WaiterConfig] ={ literal[string] : literal[int] })
keyword[except] identifier[botocore] . identifier[exceptions] . identifier[ClientError] keyword[as] identifier[e] :
identifier[print] ( identifier[e] . identifier[response] [ literal[string] ][ literal[string] ])
keyword[if] literal[string] keyword[in] identifier[e] . identifier[response] [ literal[string] ][ literal[string] ]: keyword[pass]
keyword[else] : keyword[raise] identifier[e]
keyword[try] :
identifier[response] = identifier[self] . identifier[lambda_client] . identifier[get_function] ( identifier[FunctionName] = identifier[lambda_name] )
identifier[lambda_arn] = identifier[response] [ literal[string] ][ literal[string] ]
identifier[response] = identifier[self] . identifier[elbv2_client] . identifier[describe_target_groups] ( identifier[Names] =[ identifier[lambda_name] ])
keyword[if] keyword[not] ( identifier[response] [ literal[string] ]) keyword[or] identifier[len] ( identifier[response] [ literal[string] ])> literal[int] : keyword[raise] identifier[EnvironmentError] ( literal[string] . identifier[format] ( identifier[lambda_name] , identifier[repr] ( identifier[response] )))
identifier[target_group_arn] = identifier[response] [ literal[string] ][ literal[int] ][ literal[string] ]
identifier[self] . identifier[elbv2_client] . identifier[deregister_targets] ( identifier[TargetGroupArn] = identifier[target_group_arn] , identifier[Targets] =[{ literal[string] : identifier[lambda_arn] }] )
identifier[waiter] = identifier[self] . identifier[elbv2_client] . identifier[get_waiter] ( literal[string] )
identifier[print] ( literal[string] . identifier[format] ( identifier[lambda_name] ))
identifier[waiter] . identifier[wait] ( identifier[TargetGroupArn] = identifier[target_group_arn] , identifier[Targets] =[{ literal[string] : identifier[lambda_arn] }], identifier[WaiterConfig] ={ literal[string] : literal[int] } )
identifier[self] . identifier[elbv2_client] . identifier[delete_target_group] ( identifier[TargetGroupArn] = identifier[target_group_arn] )
keyword[except] identifier[botocore] . identifier[exceptions] . identifier[ClientError] keyword[as] identifier[e] :
identifier[print] ( identifier[e] . identifier[response] [ literal[string] ][ literal[string] ])
keyword[if] literal[string] keyword[in] identifier[e] . identifier[response] [ literal[string] ][ literal[string] ]: keyword[pass]
keyword[else] : keyword[raise] identifier[e]
def undeploy_lambda_alb(self, lambda_name):
    """
    The `zappa undeploy` functionality for ALB infrastructure.
    """
    print('Undeploying ALB infrastructure...')
    # Locate and delete alb/lambda permissions
    try:
        # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/lambda.html#Lambda.Client.remove_permission
        self.lambda_client.remove_permission(FunctionName=lambda_name, StatementId=lambda_name) # depends on [control=['try'], data=[]]
    except botocore.exceptions.ClientError as e: # pragma: no cover
        if 'ResourceNotFoundException' in e.response['Error']['Code']:
            pass # depends on [control=['if'], data=[]]
        else:
            raise e # depends on [control=['except'], data=['e']]
    # Locate and delete load balancer
    try:
        # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/elbv2.html#ElasticLoadBalancingv2.Client.describe_load_balancers
        response = self.elbv2_client.describe_load_balancers(Names=[lambda_name])
        if not response['LoadBalancers'] or len(response['LoadBalancers']) > 1:
            raise EnvironmentError('Failure to locate/delete ALB named [{}]. Response was: {}'.format(lambda_name, repr(response))) # depends on [control=['if'], data=[]]
        load_balancer_arn = response['LoadBalancers'][0]['LoadBalancerArn']
        # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/elbv2.html#ElasticLoadBalancingv2.Client.describe_listeners
        response = self.elbv2_client.describe_listeners(LoadBalancerArn=load_balancer_arn)
        if not response['Listeners']:
            print('No listeners found.') # depends on [control=['if'], data=[]]
        elif len(response['Listeners']) > 1:
            raise EnvironmentError('Failure to locate/delete listener for ALB named [{}]. Response was: {}'.format(lambda_name, repr(response))) # depends on [control=['if'], data=[]]
        else:
            listener_arn = response['Listeners'][0]['ListenerArn']
            # Remove the listener. This explicit deletion of the listener seems necessary to avoid ResourceInUseExceptions when deleting target groups.
            # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/elbv2.html#ElasticLoadBalancingv2.Client.delete_listener
            response = self.elbv2_client.delete_listener(ListenerArn=listener_arn)
        # Remove the load balancer and wait for completion
        # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/elbv2.html#ElasticLoadBalancingv2.Client.delete_load_balancer
        response = self.elbv2_client.delete_load_balancer(LoadBalancerArn=load_balancer_arn)
        waiter = self.elbv2_client.get_waiter('load_balancers_deleted')
        print('Waiting for load balancer [{}] to be deleted..'.format(lambda_name))
        waiter.wait(LoadBalancerArns=[load_balancer_arn], WaiterConfig={'Delay': 3}) # depends on [control=['try'], data=[]]
    except botocore.exceptions.ClientError as e: # pragma: no cover
        print(e.response['Error']['Code'])
        if 'LoadBalancerNotFound' in e.response['Error']['Code']:
            pass # depends on [control=['if'], data=[]]
        else:
            raise e # depends on [control=['except'], data=['e']]
    # Locate and delete target group
    try:
        # Locate the lambda ARN
        # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/lambda.html#Lambda.Client.get_function
        response = self.lambda_client.get_function(FunctionName=lambda_name)
        lambda_arn = response['Configuration']['FunctionArn']
        # Locate the target group ARN
        # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/elbv2.html#ElasticLoadBalancingv2.Client.describe_target_groups
        response = self.elbv2_client.describe_target_groups(Names=[lambda_name])
        if not response['TargetGroups'] or len(response['TargetGroups']) > 1:
            raise EnvironmentError('Failure to locate/delete ALB target group named [{}]. Response was: {}'.format(lambda_name, repr(response))) # depends on [control=['if'], data=[]]
        target_group_arn = response['TargetGroups'][0]['TargetGroupArn']
        # Deregister targets and wait for completion
        self.elbv2_client.deregister_targets(TargetGroupArn=target_group_arn, Targets=[{'Id': lambda_arn}])
        waiter = self.elbv2_client.get_waiter('target_deregistered')
        print('Waiting for target [{}] to be deregistered...'.format(lambda_name))
        waiter.wait(TargetGroupArn=target_group_arn, Targets=[{'Id': lambda_arn}], WaiterConfig={'Delay': 3})
        # Remove the target group
        # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/elbv2.html#ElasticLoadBalancingv2.Client.delete_target_group
        self.elbv2_client.delete_target_group(TargetGroupArn=target_group_arn) # depends on [control=['try'], data=[]]
    except botocore.exceptions.ClientError as e: # pragma: no cover
        print(e.response['Error']['Code'])
        if 'TargetGroupNotFound' in e.response['Error']['Code']:
            pass # depends on [control=['if'], data=[]]
        else:
            raise e # depends on [control=['except'], data=['e']]
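The recurring idiom in this function, treating a missing AWS resource as success while re-raising anything else, in isolation. The ClientError is constructed by hand so the sketch runs without AWS credentials; the error code and operation name are samples.

import botocore.exceptions

def delete_if_present(delete_call):
    # Run an AWS delete call, treating "already gone" as success.
    try:
        delete_call()
    except botocore.exceptions.ClientError as e:
        if "NotFound" in e.response["Error"]["Code"]:
            return  # resource was already deleted; nothing to undo
        raise

def already_gone():
    raise botocore.exceptions.ClientError(
        {"Error": {"Code": "LoadBalancerNotFound", "Message": ""}},
        "DeleteLoadBalancer",
    )

delete_if_present(already_gone)  # swallowed; any other error code would re-raise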
def dumpsItem(self, item, contentType=None, version=None):
    '''
    [OPTIONAL]
    Identical to :meth:`dumpItem`, except the serialized form is returned
    as a string representation. As documented in :meth:`dumpItem`, the
    return value can optionally be a three-element tuple of (contentType,
    version, data) if the provided content-type should be overridden or
    enhanced. The default implementation just wraps :meth:`dumpItem`.
    '''
    buf = six.StringIO()
    ret = self.dumpItem(item, buf, contentType, version)
    if ret is None:
        return buf.getvalue()
    return (ret[0], ret[1], buf.getvalue())
def function[dumpsItem, parameter[self, item, contentType, version]]: constant[ [OPTIONAL] Identical to :meth:`dumpItem`, except the serialized form is returned as a string representation. As documented in :meth:`dumpItem`, the return value can optionally be a three-element tuple of (contentType, version, data) if the provided content-type should be overridden or enhanced. The default implementation just wraps :meth:`dumpItem`. ] variable[buf] assign[=] call[name[six].StringIO, parameter[]] variable[ret] assign[=] call[name[self].dumpItem, parameter[name[item], name[buf], name[contentType], name[version]]] if compare[name[ret] is constant[None]] begin[:] return[call[name[buf].getvalue, parameter[]]] return[tuple[[<ast.Subscript object at 0x7da1affedba0>, <ast.Subscript object at 0x7da1affedc00>, <ast.Call object at 0x7da1affeda50>]]]
keyword[def] identifier[dumpsItem] ( identifier[self] , identifier[item] , identifier[contentType] = keyword[None] , identifier[version] = keyword[None] ): literal[string] identifier[buf] = identifier[six] . identifier[StringIO] () identifier[ret] = identifier[self] . identifier[dumpItem] ( identifier[item] , identifier[buf] , identifier[contentType] , identifier[version] ) keyword[if] identifier[ret] keyword[is] keyword[None] : keyword[return] identifier[buf] . identifier[getvalue] () keyword[return] ( identifier[ret] [ literal[int] ], identifier[ret] [ literal[int] ], identifier[buf] . identifier[getvalue] ())
def dumpsItem(self, item, contentType=None, version=None):
    """
    [OPTIONAL]
    Identical to :meth:`dumpItem`, except the serialized form is returned
    as a string representation. As documented in :meth:`dumpItem`, the
    return value can optionally be a three-element tuple of (contentType,
    version, data) if the provided content-type should be overridden or
    enhanced. The default implementation just wraps :meth:`dumpItem`.
    """
    buf = six.StringIO()
    ret = self.dumpItem(item, buf, contentType, version)
    if ret is None:
        return buf.getvalue() # depends on [control=['if'], data=[]]
    return (ret[0], ret[1], buf.getvalue())
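A runnable toy mirroring the wrapper: dumpItem writes into a caller-supplied stream and may return an override tuple prefix, and dumpsItem captures the stream into a string. The Serializer class and its JSON payload are illustrative stand-ins, not the original project's types.

import io
import json

class Serializer:
    def dumpItem(self, item, stream, contentType=None, version=None):
        # Serialize into the stream; returning None keeps the caller's
        # content type, per the contract described in the docstring above.
        json.dump(item, stream)
        return None

    def dumpsItem(self, item, contentType=None, version=None):
        buf = io.StringIO()
        ret = self.dumpItem(item, buf, contentType, version)
        if ret is None:
            return buf.getvalue()
        return (ret[0], ret[1], buf.getvalue())

print(Serializer().dumpsItem({"a": 1}))  # -> {"a": 1}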
def get_cluster(self, name): """Get cluster from kubeconfig.""" clusters = self.data['clusters'] for cluster in clusters: if cluster['name'] == name: return cluster raise KubeConfError("Cluster name not found.")
def function[get_cluster, parameter[self, name]]: constant[Get cluster from kubeconfig.] variable[clusters] assign[=] call[name[self].data][constant[clusters]] for taget[name[cluster]] in starred[name[clusters]] begin[:] if compare[call[name[cluster]][constant[name]] equal[==] name[name]] begin[:] return[name[cluster]] <ast.Raise object at 0x7da18c4cd540>
keyword[def] identifier[get_cluster] ( identifier[self] , identifier[name] ): literal[string] identifier[clusters] = identifier[self] . identifier[data] [ literal[string] ] keyword[for] identifier[cluster] keyword[in] identifier[clusters] : keyword[if] identifier[cluster] [ literal[string] ]== identifier[name] : keyword[return] identifier[cluster] keyword[raise] identifier[KubeConfError] ( literal[string] )
def get_cluster(self, name): """Get cluster from kubeconfig.""" clusters = self.data['clusters'] for cluster in clusters: if cluster['name'] == name: return cluster # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['cluster']] raise KubeConfError('Cluster name not found.')
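A self-contained stand-in exercising the lookup and its error path; the kubeconfig data and the exception class are minimal stubs shaped after the accesses above.

class KubeConfError(Exception):
    pass

class KubeConf:
    def __init__(self, data):
        self.data = data

    def get_cluster(self, name):
        # Linear scan over the kubeconfig's cluster entries by name.
        for cluster in self.data['clusters']:
            if cluster['name'] == name:
                return cluster
        raise KubeConfError("Cluster name not found.")

conf = KubeConf({'clusters': [
    {'name': 'prod', 'cluster': {'server': 'https://10.0.0.1:6443'}},
]})
print(conf.get_cluster('prod')['cluster']['server'])  # https://10.0.0.1:6443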
def on_bar_min1(self, tiny_bar):
    """Callback triggered once per minute."""
    bar = tiny_bar
    symbol = bar.symbol
    str_dt = bar.datetime.strftime("%Y%m%d %H:%M:%S")

    # Get the ArrayManager (vnpy) object holding the minute-bar data
    am = self.get_kl_min1_am(symbol)
    array_high = am.high
    array_low = am.low
    array_open = am.open
    array_close = am.close
    array_vol = am.volume

    n = 5
    ma_high = self.ema(array_high, n)
    ma_low = self.ema(array_low, n)
    ma_open = self.ema(array_open, n)
    ma_close = self.ema(array_close, n)
    ma_vol = self.ema(array_vol, n)

    str_log = "on_bar_min1 symbol=%s dt=%s ema(%s) open=%s high=%s close=%s low=%s vol=%s" % (
        symbol, str_dt, n, ma_open, ma_high, ma_close, ma_low, ma_vol)
    self.log(str_log)
def function[on_bar_min1, parameter[self, tiny_bar]]: constant[Callback triggered once per minute.] variable[bar] assign[=] name[tiny_bar] variable[symbol] assign[=] name[bar].symbol variable[str_dt] assign[=] call[name[bar].datetime.strftime, parameter[constant[%Y%m%d %H:%M:%S]]] variable[am] assign[=] call[name[self].get_kl_min1_am, parameter[name[symbol]]] variable[array_high] assign[=] name[am].high variable[array_low] assign[=] name[am].low variable[array_open] assign[=] name[am].open variable[array_close] assign[=] name[am].close variable[array_vol] assign[=] name[am].volume variable[n] assign[=] constant[5] variable[ma_high] assign[=] call[name[self].ema, parameter[name[array_high], name[n]]] variable[ma_low] assign[=] call[name[self].ema, parameter[name[array_low], name[n]]] variable[ma_open] assign[=] call[name[self].ema, parameter[name[array_open], name[n]]] variable[ma_close] assign[=] call[name[self].ema, parameter[name[array_close], name[n]]] variable[ma_vol] assign[=] call[name[self].ema, parameter[name[array_vol], name[n]]] variable[str_log] assign[=] binary_operation[constant[on_bar_min1 symbol=%s dt=%s ema(%s) open=%s high=%s close=%s low=%s vol=%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da18f810b50>, <ast.Name object at 0x7da18f8135b0>, <ast.Name object at 0x7da18f811a80>, <ast.Name object at 0x7da18f8124d0>, <ast.Name object at 0x7da18f58ee30>, <ast.Name object at 0x7da18f58f8b0>, <ast.Name object at 0x7da18f58eaa0>, <ast.Name object at 0x7da18f58ce80>]]] call[name[self].log, parameter[name[str_log]]]
keyword[def] identifier[on_bar_min1] ( identifier[self] , identifier[tiny_bar] ): literal[string] identifier[bar] = identifier[tiny_bar] identifier[symbol] = identifier[bar] . identifier[symbol] identifier[str_dt] = identifier[bar] . identifier[datetime] . identifier[strftime] ( literal[string] ) identifier[am] = identifier[self] . identifier[get_kl_min1_am] ( identifier[symbol] ) identifier[array_high] = identifier[am] . identifier[high] identifier[array_low] = identifier[am] . identifier[low] identifier[array_open] = identifier[am] . identifier[open] identifier[array_close] = identifier[am] . identifier[close] identifier[array_vol] = identifier[am] . identifier[volume] identifier[n] = literal[int] identifier[ma_high] = identifier[self] . identifier[ema] ( identifier[array_high] , identifier[n] ) identifier[ma_low] = identifier[self] . identifier[ema] ( identifier[array_low] , identifier[n] ) identifier[ma_open] = identifier[self] . identifier[ema] ( identifier[array_open] , identifier[n] ) identifier[ma_close] = identifier[self] . identifier[ema] ( identifier[array_close] , identifier[n] ) identifier[ma_vol] = identifier[self] . identifier[ema] ( identifier[array_vol] , identifier[n] ) identifier[str_log] = literal[string] %( identifier[symbol] , identifier[str_dt] , identifier[n] , identifier[ma_open] , identifier[ma_high] , identifier[ma_close] , identifier[ma_low] , identifier[ma_vol] ) identifier[self] . identifier[log] ( identifier[str_log] )
def on_bar_min1(self, tiny_bar):
    """Callback triggered once per minute.""" 
    bar = tiny_bar
    symbol = bar.symbol
    str_dt = bar.datetime.strftime('%Y%m%d %H:%M:%S')
    # Get the ArrayManager (vnpy) object holding the minute-bar data
    am = self.get_kl_min1_am(symbol)
    array_high = am.high
    array_low = am.low
    array_open = am.open
    array_close = am.close
    array_vol = am.volume
    n = 5
    ma_high = self.ema(array_high, n)
    ma_low = self.ema(array_low, n)
    ma_open = self.ema(array_open, n)
    ma_close = self.ema(array_close, n)
    ma_vol = self.ema(array_vol, n)
    str_log = 'on_bar_min1 symbol=%s dt=%s ema(%s) open=%s high=%s close=%s low=%s vol=%s' % (symbol, str_dt, n, ma_open, ma_high, ma_close, ma_low, ma_vol)
    self.log(str_log)
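The callback leans on an ema() helper it does not define here. A plausible scalar version follows, assuming the conventional smoothing factor alpha = 2/(n+1); the real project most likely delegates to a library such as talib over the ArrayManager's numpy arrays, so treat this as a sketch of the math only.

def ema(values, n):
    # Exponential moving average, seeded with the first observation.
    alpha = 2.0 / (n + 1)
    result = values[0]
    for v in values[1:]:
        result = alpha * v + (1 - alpha) * result
    return result

closes = [10.0, 10.5, 10.2, 10.8, 11.0]  # sample 1-minute closes
print(ema(closes, 5))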
def format_modified(self, modified, sep=" "):
    """Format modification date in UTC if it's not None.
    @param modified: modification date in UTC
    @type modified: datetime or None
    @return: formatted date or empty string
    @rtype: unicode
    """
    if modified is not None:
        return modified.strftime("%Y-%m-%d{0}%H:%M:%S.%fZ".format(sep))
    return u""
def function[format_modified, parameter[self, modified, sep]]: constant[Format modification date in UTC if it's not None. @param modified: modification date in UTC @type modified: datetime or None @return: formatted date or empty string @rtype: unicode ] if compare[name[modified] is_not constant[None]] begin[:] return[call[name[modified].strftime, parameter[call[constant[%Y-%m-%d{0}%H:%M:%S.%fZ].format, parameter[name[sep]]]]]] return[constant[]]
keyword[def] identifier[format_modified] ( identifier[self] , identifier[modified] , identifier[sep] = literal[string] ): literal[string] keyword[if] identifier[modified] keyword[is] keyword[not] keyword[None] : keyword[return] identifier[modified] . identifier[strftime] ( literal[string] . identifier[format] ( identifier[sep] )) keyword[return] literal[string]
def format_modified(self, modified, sep=' '):
    """Format modification date in UTC if it's not None.
    @param modified: modification date in UTC
    @type modified: datetime or None
    @return: formatted date or empty string
    @rtype: unicode
    """
    if modified is not None:
        return modified.strftime('%Y-%m-%d{0}%H:%M:%S.%fZ'.format(sep)) # depends on [control=['if'], data=['modified']]
    return u''
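Exercising the formatter standalone with both separators and the None fallback; the timestamp is a sample value.

from datetime import datetime

def format_modified(modified, sep=" "):
    if modified is not None:
        return modified.strftime("%Y-%m-%d{0}%H:%M:%S.%fZ".format(sep))
    return u""

dt = datetime(2024, 1, 2, 3, 4, 5, 678900)
print(format_modified(dt))          # 2024-01-02 03:04:05.678900Z
print(format_modified(dt, "T"))     # ISO-8601-style: 2024-01-02T03:04:05.678900Z
print(repr(format_modified(None troubleshooting := None) if False else format_modified(None)))  # ''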
def write_dicts_to_csv(self, dicts): """Saves .csv file with posts data :param dicts: Dictionaries with same values """ csv_headers = sorted(dicts[0].keys()) with open(self.path, "w") as out_file: # write to file dict_writer = csv.DictWriter( out_file, csv_headers, delimiter=",", quotechar="\"" ) dict_writer.writeheader() dict_writer.writerows(dicts)
def function[write_dicts_to_csv, parameter[self, dicts]]: constant[Saves .csv file with posts data :param dicts: Dictionaries with same values ] variable[csv_headers] assign[=] call[name[sorted], parameter[call[call[name[dicts]][constant[0]].keys, parameter[]]]] with call[name[open], parameter[name[self].path, constant[w]]] begin[:] variable[dict_writer] assign[=] call[name[csv].DictWriter, parameter[name[out_file], name[csv_headers]]] call[name[dict_writer].writeheader, parameter[]] call[name[dict_writer].writerows, parameter[name[dicts]]]
keyword[def] identifier[write_dicts_to_csv] ( identifier[self] , identifier[dicts] ): literal[string] identifier[csv_headers] = identifier[sorted] ( identifier[dicts] [ literal[int] ]. identifier[keys] ()) keyword[with] identifier[open] ( identifier[self] . identifier[path] , literal[string] ) keyword[as] identifier[out_file] : identifier[dict_writer] = identifier[csv] . identifier[DictWriter] ( identifier[out_file] , identifier[csv_headers] , identifier[delimiter] = literal[string] , identifier[quotechar] = literal[string] ) identifier[dict_writer] . identifier[writeheader] () identifier[dict_writer] . identifier[writerows] ( identifier[dicts] )
def write_dicts_to_csv(self, dicts): """Saves .csv file with posts data :param dicts: Dictionaries with same values """ csv_headers = sorted(dicts[0].keys()) with open(self.path, 'w') as out_file: # write to file dict_writer = csv.DictWriter(out_file, csv_headers, delimiter=',', quotechar='"') dict_writer.writeheader() dict_writer.writerows(dicts) # depends on [control=['with'], data=['out_file']]
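Driving the same DictWriter setup with two homogeneous rows. The header order comes from the sorted keys of the first dict, so every dict must share the same keys; newline='' is a small addition beyond the original, which avoids blank lines between rows on Windows.

import csv

dicts = [{"title": "first post", "likes": 3},
         {"title": "second post", "likes": 7}]
with open("posts.csv", "w", newline="") as out_file:
    writer = csv.DictWriter(out_file, sorted(dicts[0].keys()),
                            delimiter=",", quotechar='"')
    writer.writeheader()    # header row: likes,title
    writer.writerows(dicts)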
def _submit(self): '''submit a uservoice ticket. When we get here we should have: {'user_prompt_issue': 'I want to do the thing.', 'record_asciinema': '/tmp/helpme.93o__nt5.json', 'record_environment': ((1,1),(2,2)...(N,N))} Required Client Variables self.api_key self.api_secret self.subdomain self.email ''' # Step 0: Authenticate with uservoice API self.authenticate() title = "HelpMe UserVoice Ticket: %s" %(self.run_id) body = self.data['user_prompt_issue'] # Step 1: Environment envars = self.data.get('record_environment') if envars not in [None, '', []]: body += '\n\nEnvironment:\n' for envar in envars: body += ' - %s: %s\n' %(envar[0], envar[1]) # Step 2: Asciinema asciinema = self.data.get('record_asciinema') if asciinema not in [None, '']: url = upload_asciinema(asciinema) # If the upload is successful, add a link to it. if url is not None: body += "\n\nAsciinema Recording: %s" %url # Add other metadata about client body += "\ngenerated by HelpMe: https://vsoch.github.io/helpme/" # Submit the ticket! self.post_ticket(title, body)
def function[_submit, parameter[self]]: constant[submit a uservoice ticket. When we get here we should have: {'user_prompt_issue': 'I want to do the thing.', 'record_asciinema': '/tmp/helpme.93o__nt5.json', 'record_environment': ((1,1),(2,2)...(N,N))} Required Client Variables self.api_key self.api_secret self.subdomain self.email ] call[name[self].authenticate, parameter[]] variable[title] assign[=] binary_operation[constant[HelpMe UserVoice Ticket: %s] <ast.Mod object at 0x7da2590d6920> name[self].run_id] variable[body] assign[=] call[name[self].data][constant[user_prompt_issue]] variable[envars] assign[=] call[name[self].data.get, parameter[constant[record_environment]]] if compare[name[envars] <ast.NotIn object at 0x7da2590d7190> list[[<ast.Constant object at 0x7da18f812320>, <ast.Constant object at 0x7da18f810a60>, <ast.List object at 0x7da18f8130a0>]]] begin[:] <ast.AugAssign object at 0x7da18f8131c0> for taget[name[envar]] in starred[name[envars]] begin[:] <ast.AugAssign object at 0x7da204344370> variable[asciinema] assign[=] call[name[self].data.get, parameter[constant[record_asciinema]]] if compare[name[asciinema] <ast.NotIn object at 0x7da2590d7190> list[[<ast.Constant object at 0x7da204345c60>, <ast.Constant object at 0x7da204345240>]]] begin[:] variable[url] assign[=] call[name[upload_asciinema], parameter[name[asciinema]]] if compare[name[url] is_not constant[None]] begin[:] <ast.AugAssign object at 0x7da204346650> <ast.AugAssign object at 0x7da204347df0> call[name[self].post_ticket, parameter[name[title], name[body]]]
keyword[def] identifier[_submit] ( identifier[self] ): literal[string] identifier[self] . identifier[authenticate] () identifier[title] = literal[string] %( identifier[self] . identifier[run_id] ) identifier[body] = identifier[self] . identifier[data] [ literal[string] ] identifier[envars] = identifier[self] . identifier[data] . identifier[get] ( literal[string] ) keyword[if] identifier[envars] keyword[not] keyword[in] [ keyword[None] , literal[string] ,[]]: identifier[body] += literal[string] keyword[for] identifier[envar] keyword[in] identifier[envars] : identifier[body] += literal[string] %( identifier[envar] [ literal[int] ], identifier[envar] [ literal[int] ]) identifier[asciinema] = identifier[self] . identifier[data] . identifier[get] ( literal[string] ) keyword[if] identifier[asciinema] keyword[not] keyword[in] [ keyword[None] , literal[string] ]: identifier[url] = identifier[upload_asciinema] ( identifier[asciinema] ) keyword[if] identifier[url] keyword[is] keyword[not] keyword[None] : identifier[body] += literal[string] % identifier[url] identifier[body] += literal[string] identifier[self] . identifier[post_ticket] ( identifier[title] , identifier[body] )
def _submit(self): """submit a uservoice ticket. When we get here we should have: {'user_prompt_issue': 'I want to do the thing.', 'record_asciinema': '/tmp/helpme.93o__nt5.json', 'record_environment': ((1,1),(2,2)...(N,N))} Required Client Variables self.api_key self.api_secret self.subdomain self.email """ # Step 0: Authenticate with uservoice API self.authenticate() title = 'HelpMe UserVoice Ticket: %s' % self.run_id body = self.data['user_prompt_issue'] # Step 1: Environment envars = self.data.get('record_environment') if envars not in [None, '', []]: body += '\n\nEnvironment:\n' for envar in envars: body += ' - %s: %s\n' % (envar[0], envar[1]) # depends on [control=['for'], data=['envar']] # depends on [control=['if'], data=['envars']] # Step 2: Asciinema asciinema = self.data.get('record_asciinema') if asciinema not in [None, '']: url = upload_asciinema(asciinema) # If the upload is successful, add a link to it. if url is not None: body += '\n\nAsciinema Recording: %s' % url # depends on [control=['if'], data=['url']] # depends on [control=['if'], data=['asciinema']] # Add other metadata about client body += '\ngenerated by HelpMe: https://vsoch.github.io/helpme/' # Submit the ticket! self.post_ticket(title, body)
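The body-assembly loop in isolation: environment pairs become a bullet list appended to the issue text, matching the not-in-[None, '', []] guard above. The sample pairs and issue text are placeholders.

envars = (("USER", "alice"), ("TERM", "xterm-256color"))
body = "I want to do the thing."
if envars not in [None, '', []]:
    body += '\n\nEnvironment:\n'
    for envar in envars:
        body += ' - %s: %s\n' % (envar[0], envar[1])
print(body)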
def scan_system(): """ Scans /sys/class/leds looking for entries, then examining their .../device/uevent file to obtain unique hardware IDs corresponding to the associated hardware. This then allows us to associate InputDevice based controllers with sets of zero or more LEDs. The result is a dict from hardware address to a dict of name to filename, where the name is the name of the LED and the filename is the brightness control to which writing a value changes the LED state. At the same time, scans /sys/class/power_supply looking for battery entries and analysing them in the same way. Hardware IDs are, in order of preference, the HID_UNIQ address (corresponding to the physical MAC of an attached bluetooth or similar HID device), or the PHYS corresponding to other devices. This is the same logic as used in the evdev based scanning to group together input nodes for composite controllers (i.e. ones with motion sensors as well as physical axes). It is intended to produce the same results, so the LEDs for e.g. a PS4 controller will be keyed on the same physical address as that returned by :func:`approxeng.input.controllers.unique_name` for all the InputDevice instances associated with a given controller. :return: A dict containing available LEDs, keyed on physical device ID """ def find_device_hardware_id(uevent_file_path): hid_uniq = None phys = None for line in open(uevent_file_path, 'r').read().split('\n'): parts = line.split('=') if len(parts) == 2: name, value = parts value = value.replace('"', '') if name == 'HID_UNIQ' and value: hid_uniq = value elif name == 'PHYS' and value: phys = value.split('/')[0] if hid_uniq: return hid_uniq elif phys: return phys return None leds = {} for sub in ['/sys/class/leds/' + sub_dir for sub_dir in listdir('/sys/class/leds')]: led_name = sub.split(':')[-1] write_path = sub + '/brightness' device_id = find_device_hardware_id(sub + '/device/uevent') if device_id: if device_id not in leds: leds[device_id] = {} leds[device_id][led_name] = write_path power = {} for sub in ['/sys/class/power_supply/' + sub_dir for sub_dir in listdir('/sys/class/power_supply')]: read_path = sub + '/capacity' device_id = find_device_hardware_id(sub + '/device/uevent') if device_id: power[device_id] = read_path return {'leds': leds, 'power': power}
def function[scan_system, parameter[]]: constant[ Scans /sys/class/leds looking for entries, then examining their .../device/uevent file to obtain unique hardware IDs corresponding to the associated hardware. This then allows us to associate InputDevice based controllers with sets of zero or more LEDs. The result is a dict from hardware address to a dict of name to filename, where the name is the name of the LED and the filename is the brightness control to which writing a value changes the LED state. At the same time, scans /sys/class/power_supply looking for battery entries and analysing them in the same way. Hardware IDs are, in order of preference, the HID_UNIQ address (corresponding to the physical MAC of an attached bluetooth or similar HID device), or the PHYS corresponding to other devices. This is the same logic as used in the evdev based scanning to group together input nodes for composite controllers (i.e. ones with motion sensors as well as physical axes). It is intended to produce the same results, so the LEDs for e.g. a PS4 controller will be keyed on the same physical address as that returned by :func:`approxeng.input.controllers.unique_name` for all the InputDevice instances associated with a given controller. :return: A dict containing available LEDs, keyed on physical device ID ] def function[find_device_hardware_id, parameter[uevent_file_path]]: variable[hid_uniq] assign[=] constant[None] variable[phys] assign[=] constant[None] for taget[name[line]] in starred[call[call[call[name[open], parameter[name[uevent_file_path], constant[r]]].read, parameter[]].split, parameter[constant[ ]]]] begin[:] variable[parts] assign[=] call[name[line].split, parameter[constant[=]]] if compare[call[name[len], parameter[name[parts]]] equal[==] constant[2]] begin[:] <ast.Tuple object at 0x7da18f09e080> assign[=] name[parts] variable[value] assign[=] call[name[value].replace, parameter[constant["], constant[]]] if <ast.BoolOp object at 0x7da18f09d6c0> begin[:] variable[hid_uniq] assign[=] name[value] if name[hid_uniq] begin[:] return[name[hid_uniq]] return[constant[None]] variable[leds] assign[=] dictionary[[], []] for taget[name[sub]] in starred[<ast.ListComp object at 0x7da18f09c790>] begin[:] variable[led_name] assign[=] call[call[name[sub].split, parameter[constant[:]]]][<ast.UnaryOp object at 0x7da18f09c640>] variable[write_path] assign[=] binary_operation[name[sub] + constant[/brightness]] variable[device_id] assign[=] call[name[find_device_hardware_id], parameter[binary_operation[name[sub] + constant[/device/uevent]]]] if name[device_id] begin[:] if compare[name[device_id] <ast.NotIn object at 0x7da2590d7190> name[leds]] begin[:] call[name[leds]][name[device_id]] assign[=] dictionary[[], []] call[call[name[leds]][name[device_id]]][name[led_name]] assign[=] name[write_path] variable[power] assign[=] dictionary[[], []] for taget[name[sub]] in starred[<ast.ListComp object at 0x7da20c6c5390>] begin[:] variable[read_path] assign[=] binary_operation[name[sub] + constant[/capacity]] variable[device_id] assign[=] call[name[find_device_hardware_id], parameter[binary_operation[name[sub] + constant[/device/uevent]]]] if name[device_id] begin[:] call[name[power]][name[device_id]] assign[=] name[read_path] return[dictionary[[<ast.Constant object at 0x7da20c6c7310>, <ast.Constant object at 0x7da20c6c7cd0>], [<ast.Name object at 0x7da20c6c4e50>, <ast.Name object at 0x7da20c6c4100>]]]
keyword[def] identifier[scan_system] (): literal[string] keyword[def] identifier[find_device_hardware_id] ( identifier[uevent_file_path] ): identifier[hid_uniq] = keyword[None] identifier[phys] = keyword[None] keyword[for] identifier[line] keyword[in] identifier[open] ( identifier[uevent_file_path] , literal[string] ). identifier[read] (). identifier[split] ( literal[string] ): identifier[parts] = identifier[line] . identifier[split] ( literal[string] ) keyword[if] identifier[len] ( identifier[parts] )== literal[int] : identifier[name] , identifier[value] = identifier[parts] identifier[value] = identifier[value] . identifier[replace] ( literal[string] , literal[string] ) keyword[if] identifier[name] == literal[string] keyword[and] identifier[value] : identifier[hid_uniq] = identifier[value] keyword[elif] identifier[name] == literal[string] keyword[and] identifier[value] : identifier[phys] = identifier[value] . identifier[split] ( literal[string] )[ literal[int] ] keyword[if] identifier[hid_uniq] : keyword[return] identifier[hid_uniq] keyword[elif] identifier[phys] : keyword[return] identifier[phys] keyword[return] keyword[None] identifier[leds] ={} keyword[for] identifier[sub] keyword[in] [ literal[string] + identifier[sub_dir] keyword[for] identifier[sub_dir] keyword[in] identifier[listdir] ( literal[string] )]: identifier[led_name] = identifier[sub] . identifier[split] ( literal[string] )[- literal[int] ] identifier[write_path] = identifier[sub] + literal[string] identifier[device_id] = identifier[find_device_hardware_id] ( identifier[sub] + literal[string] ) keyword[if] identifier[device_id] : keyword[if] identifier[device_id] keyword[not] keyword[in] identifier[leds] : identifier[leds] [ identifier[device_id] ]={} identifier[leds] [ identifier[device_id] ][ identifier[led_name] ]= identifier[write_path] identifier[power] ={} keyword[for] identifier[sub] keyword[in] [ literal[string] + identifier[sub_dir] keyword[for] identifier[sub_dir] keyword[in] identifier[listdir] ( literal[string] )]: identifier[read_path] = identifier[sub] + literal[string] identifier[device_id] = identifier[find_device_hardware_id] ( identifier[sub] + literal[string] ) keyword[if] identifier[device_id] : identifier[power] [ identifier[device_id] ]= identifier[read_path] keyword[return] { literal[string] : identifier[leds] , literal[string] : identifier[power] }
def scan_system(): """ Scans /sys/class/leds looking for entries, then examining their .../device/uevent file to obtain unique hardware IDs corresponding to the associated hardware. This then allows us to associate InputDevice based controllers with sets of zero or more LEDs. The result is a dict from hardware address to a dict of name to filename, where the name is the name of the LED and the filename is the brightness control to which writing a value changes the LED state. At the same time, scans /sys/class/power_supply looking for battery entries and analysing them in the same way. Hardware IDs are, in order of preference, the HID_UNIQ address (corresponding to the physical MAC of an attached bluetooth or similar HID device), or the PHYS corresponding to other devices. This is the same logic as used in the evdev based scanning to group together input nodes for composite controllers (i.e. ones with motion sensors as well as physical axes). It is intended to produce the same results, so the LEDs for e.g. a PS4 controller will be keyed on the same physical address as that returned by :func:`approxeng.input.controllers.unique_name` for all the InputDevice instances associated with a given controller. :return: A dict containing available LEDs, keyed on physical device ID """ def find_device_hardware_id(uevent_file_path): hid_uniq = None phys = None for line in open(uevent_file_path, 'r').read().split('\n'): parts = line.split('=') if len(parts) == 2: (name, value) = parts value = value.replace('"', '') if name == 'HID_UNIQ' and value: hid_uniq = value # depends on [control=['if'], data=[]] elif name == 'PHYS' and value: phys = value.split('/')[0] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['line']] if hid_uniq: return hid_uniq # depends on [control=['if'], data=[]] elif phys: return phys # depends on [control=['if'], data=[]] return None leds = {} for sub in ['/sys/class/leds/' + sub_dir for sub_dir in listdir('/sys/class/leds')]: led_name = sub.split(':')[-1] write_path = sub + '/brightness' device_id = find_device_hardware_id(sub + '/device/uevent') if device_id: if device_id not in leds: leds[device_id] = {} # depends on [control=['if'], data=['device_id', 'leds']] leds[device_id][led_name] = write_path # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['sub']] power = {} for sub in ['/sys/class/power_supply/' + sub_dir for sub_dir in listdir('/sys/class/power_supply')]: read_path = sub + '/capacity' device_id = find_device_hardware_id(sub + '/device/uevent') if device_id: power[device_id] = read_path # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['sub']] return {'leds': leds, 'power': power}
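The inner ID-extraction logic, rewritten here to take a string so it runs without /sys access; the uevent payload is synthetic. It shows HID_UNIQ taking precedence over PHYS, and PHYS being truncated at the first slash.

def hardware_id(uevent_text):
    hid_uniq = None
    phys = None
    for line in uevent_text.split('\n'):
        parts = line.split('=')
        if len(parts) == 2:
            name, value = parts
            value = value.replace('"', '')
            if name == 'HID_UNIQ' and value:
                hid_uniq = value
            elif name == 'PHYS' and value:
                phys = value.split('/')[0]
    if hid_uniq:
        return hid_uniq
    return phys

sample = 'HID_NAME=Wireless Controller\nHID_UNIQ="aa:bb:cc:dd:ee:ff"\nPHYS="usb-0000:00:14.0-2/input0"'
print(hardware_id(sample))  # aa:bb:cc:dd:ee:ff (HID_UNIQ wins over PHYS)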
def followers(self, blogname, **kwargs): """ Gets the followers of the given blog :param limit: an int, the number of followers you want returned :param offset: an int, the follower to start at, for pagination. # Start at the 20th blog and get 20 more blogs. client.followers({'offset': 20, 'limit': 20}) :returns: A dict created from the JSON response """ url = "/v2/blog/{}/followers".format(blogname) return self.send_api_request("get", url, kwargs, ['limit', 'offset'])
def function[followers, parameter[self, blogname]]: constant[ Gets the followers of the given blog :param limit: an int, the number of followers you want returned :param offset: an int, the follower to start at, for pagination. # Start at the 20th blog and get 20 more blogs. client.followers({'offset': 20, 'limit': 20}) :returns: A dict created from the JSON response ] variable[url] assign[=] call[constant[/v2/blog/{}/followers].format, parameter[name[blogname]]] return[call[name[self].send_api_request, parameter[constant[get], name[url], name[kwargs], list[[<ast.Constant object at 0x7da2047e80a0>, <ast.Constant object at 0x7da2047e9c00>]]]]]
keyword[def] identifier[followers] ( identifier[self] , identifier[blogname] ,** identifier[kwargs] ): literal[string] identifier[url] = literal[string] . identifier[format] ( identifier[blogname] ) keyword[return] identifier[self] . identifier[send_api_request] ( literal[string] , identifier[url] , identifier[kwargs] ,[ literal[string] , literal[string] ])
def followers(self, blogname, **kwargs): """ Gets the followers of the given blog :param limit: an int, the number of followers you want returned :param offset: an int, the follower to start at, for pagination. # Start at the 20th blog and get 20 more blogs. client.followers({'offset': 20, 'limit': 20}) :returns: A dict created from the JSON response """ url = '/v2/blog/{}/followers'.format(blogname) return self.send_api_request('get', url, kwargs, ['limit', 'offset'])
def rule(self, text): """rule = identifier , "=" , expression , ";" ;""" self._attempting(text) return concatenation([ self.identifier, "=", self.expression, ";", ], ignore_whitespace=True)(text).retyped(TokenType.rule)
def function[rule, parameter[self, text]]: constant[rule = identifier , "=" , expression , ";" ;] call[name[self]._attempting, parameter[name[text]]] return[call[call[call[name[concatenation], parameter[list[[<ast.Attribute object at 0x7da1b013dcc0>, <ast.Constant object at 0x7da1b013fd00>, <ast.Attribute object at 0x7da1b013c490>, <ast.Constant object at 0x7da1b013cdf0>]]]], parameter[name[text]]].retyped, parameter[name[TokenType].rule]]]
keyword[def] identifier[rule] ( identifier[self] , identifier[text] ): literal[string] identifier[self] . identifier[_attempting] ( identifier[text] ) keyword[return] identifier[concatenation] ([ identifier[self] . identifier[identifier] , literal[string] , identifier[self] . identifier[expression] , literal[string] , ], identifier[ignore_whitespace] = keyword[True] )( identifier[text] ). identifier[retyped] ( identifier[TokenType] . identifier[rule] )
def rule(self, text): """rule = identifier , "=" , expression , ";" ;""" self._attempting(text) return concatenation([self.identifier, '=', self.expression, ';'], ignore_whitespace=True)(text).retyped(TokenType.rule)
def to_markdown(self): """Converts to markdown :return: item in markdown format """ if self.type == "text": return self.text elif self.type == "url" or self.type == "image": return "[" + self.text + "](" + self.attributes["ref"] + ")" elif self.type == "title": return "#" * int(self.attributes["size"]) + " " + self.text return None
def function[to_markdown, parameter[self]]: constant[Converts to markdown :return: item in markdown format ] if compare[name[self].type equal[==] constant[text]] begin[:] return[name[self].text] return[constant[None]]
keyword[def] identifier[to_markdown] ( identifier[self] ): literal[string] keyword[if] identifier[self] . identifier[type] == literal[string] : keyword[return] identifier[self] . identifier[text] keyword[elif] identifier[self] . identifier[type] == literal[string] keyword[or] identifier[self] . identifier[type] == literal[string] : keyword[return] literal[string] + identifier[self] . identifier[text] + literal[string] + identifier[self] . identifier[attributes] [ literal[string] ]+ literal[string] keyword[elif] identifier[self] . identifier[type] == literal[string] : keyword[return] literal[string] * identifier[int] ( identifier[self] . identifier[attributes] [ literal[string] ])+ literal[string] + identifier[self] . identifier[text] keyword[return] keyword[None]
def to_markdown(self): """Converts to markdown :return: item in markdown format """ if self.type == 'text': return self.text # depends on [control=['if'], data=[]] elif self.type == 'url' or self.type == 'image': return '[' + self.text + '](' + self.attributes['ref'] + ')' # depends on [control=['if'], data=[]] elif self.type == 'title': return '#' * int(self.attributes['size']) + ' ' + self.text # depends on [control=['if'], data=[]] return None
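A tiny stand-in item class with the three fields the method reads, exercising each branch; the class shape is inferred from the attribute accesses above.

class Item:
    def __init__(self, type_, text, attributes=None):
        self.type = type_
        self.text = text
        self.attributes = attributes or {}

    def to_markdown(self):
        if self.type == "text":
            return self.text
        elif self.type == "url" or self.type == "image":
            return "[" + self.text + "](" + self.attributes["ref"] + ")"
        elif self.type == "title":
            return "#" * int(self.attributes["size"]) + " " + self.text
        return None

print(Item("title", "Intro", {"size": "2"}).to_markdown())              # ## Intro
print(Item("url", "docs", {"ref": "https://example.com"}).to_markdown())  # [docs](https://example.com)
print(Item("video", "clip").to_markdown())                               # None: unsupported type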
def run_command(self, args=None, node_paths=None):
    """Returns a command that when executed will run an arbitrary command via the package manager."""
    return command_gen(
        self.tool_installations,
        self.name,
        args=args,
        node_paths=node_paths
    )
def function[run_command, parameter[self, args, node_paths]]: constant[Returns a command that when executed will run an arbitrary command via the package manager.] return[call[name[command_gen], parameter[name[self].tool_installations, name[self].name]]]
keyword[def] identifier[run_command] ( identifier[self] , identifier[args] = keyword[None] , identifier[node_paths] = keyword[None] ): literal[string] keyword[return] identifier[command_gen] ( identifier[self] . identifier[tool_installations] , identifier[self] . identifier[name] , identifier[args] = identifier[args] , identifier[node_paths] = identifier[node_paths] )
def run_command(self, args=None, node_paths=None):
    """Returns a command that when executed will run an arbitrary command via the package manager."""
    return command_gen(self.tool_installations, self.name, args=args, node_paths=node_paths)
def set_parallel_value_for_key(self, key, value): """ Set a globally available key and value that can be accessed from all the pabot processes. """ if self._remotelib: self._remotelib.run_keyword('set_parallel_value_for_key', [key, value], {}) else: _PabotLib.set_parallel_value_for_key(self, key, value)
def function[set_parallel_value_for_key, parameter[self, key, value]]: constant[ Set a globally available key and value that can be accessed from all the pabot processes. ] if name[self]._remotelib begin[:] call[name[self]._remotelib.run_keyword, parameter[constant[set_parallel_value_for_key], list[[<ast.Name object at 0x7da1b1600b80>, <ast.Name object at 0x7da1b1600310>]], dictionary[[], []]]]
keyword[def] identifier[set_parallel_value_for_key] ( identifier[self] , identifier[key] , identifier[value] ): literal[string] keyword[if] identifier[self] . identifier[_remotelib] : identifier[self] . identifier[_remotelib] . identifier[run_keyword] ( literal[string] , [ identifier[key] , identifier[value] ],{}) keyword[else] : identifier[_PabotLib] . identifier[set_parallel_value_for_key] ( identifier[self] , identifier[key] , identifier[value] )
def set_parallel_value_for_key(self, key, value): """ Set a globally available key and value that can be accessed from all the pabot processes. """ if self._remotelib: self._remotelib.run_keyword('set_parallel_value_for_key', [key, value], {}) # depends on [control=['if'], data=[]] else: _PabotLib.set_parallel_value_for_key(self, key, value)
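The remote-or-local dispatch in miniature: prefer the attached remote proxy, else fall back to the base-class implementation. The base class and its store are stubs standing in for pabot's internals.

class _PabotLib:
    def __init__(self):
        self._values = {}

    def set_parallel_value_for_key(self, key, value):
        self._values[key] = value

class PabotLib(_PabotLib):
    def __init__(self, remotelib=None):
        _PabotLib.__init__(self)
        self._remotelib = remotelib

    def set_parallel_value_for_key(self, key, value):
        if self._remotelib:
            # Forward to the shared remote server so all processes see it.
            self._remotelib.run_keyword(
                'set_parallel_value_for_key', [key, value], {})
        else:
            _PabotLib.set_parallel_value_for_key(self, key, value)

lib = PabotLib()  # no remote attached -> falls back to the local store
lib.set_parallel_value_for_key('token', 'abc')
print(lib._values)  # {'token': 'abc'}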
def encode(self, message): '''Encode a message when publishing.''' if not isinstance(message, dict): message = {'message': message} message['time'] = time.time() return json.dumps(message)
def function[encode, parameter[self, message]]: constant[Encode a message when publishing.] if <ast.UnaryOp object at 0x7da18c4cc370> begin[:] variable[message] assign[=] dictionary[[<ast.Constant object at 0x7da18c4cf850>], [<ast.Name object at 0x7da18c4cfca0>]] call[name[message]][constant[time]] assign[=] call[name[time].time, parameter[]] return[call[name[json].dumps, parameter[name[message]]]]
keyword[def] identifier[encode] ( identifier[self] , identifier[message] ): literal[string] keyword[if] keyword[not] identifier[isinstance] ( identifier[message] , identifier[dict] ): identifier[message] ={ literal[string] : identifier[message] } identifier[message] [ literal[string] ]= identifier[time] . identifier[time] () keyword[return] identifier[json] . identifier[dumps] ( identifier[message] )
def encode(self, message): """Encode a message when publishing.""" if not isinstance(message, dict): message = {'message': message} # depends on [control=['if'], data=[]] message['time'] = time.time() return json.dumps(message)
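Round-tripping both accepted shapes, a bare payload and a dict, to show the timestamp injection. Note that a dict argument is mutated in place, which the sketch below preserves.

import json
import time

def encode(message):
    if not isinstance(message, dict):
        message = {'message': message}
    message['time'] = time.time()
    return json.dumps(message)

print(encode("hello"))                                  # {"message": "hello", "time": ...}
print(json.loads(encode({"level": "warn"}))["level"])   # warn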
def read(self, input_stream, kmip_version=enums.KMIPVersion.KMIP_1_0): """ Read the data encoding the ProtocolVersion struct and decode it into its constituent parts. Args: input_stream (stream): A data stream containing encoded object data, supporting a read method; usually a BytearrayStream object. kmip_version (KMIPVersion): An enumeration defining the KMIP version with which the object will be decoded. Optional, defaults to KMIP 1.0. Raises: ValueError: Raised if either the major or minor protocol versions are missing from the encoding. """ super(ProtocolVersion, self).read( input_stream, kmip_version=kmip_version ) local_stream = utils.BytearrayStream(input_stream.read(self.length)) if self.is_tag_next(enums.Tags.PROTOCOL_VERSION_MAJOR, local_stream): self._major = primitives.Integer( tag=enums.Tags.PROTOCOL_VERSION_MAJOR ) self._major.read(local_stream, kmip_version=kmip_version) else: raise ValueError( "Invalid encoding missing the major protocol version number." ) if self.is_tag_next(enums.Tags.PROTOCOL_VERSION_MINOR, local_stream): self._minor = primitives.Integer( tag=enums.Tags.PROTOCOL_VERSION_MINOR ) self._minor.read(local_stream, kmip_version=kmip_version) else: raise ValueError( "Invalid encoding missing the minor protocol version number." ) self.is_oversized(local_stream)
def function[read, parameter[self, input_stream, kmip_version]]: constant[ Read the data encoding the ProtocolVersion struct and decode it into its constituent parts. Args: input_stream (stream): A data stream containing encoded object data, supporting a read method; usually a BytearrayStream object. kmip_version (KMIPVersion): An enumeration defining the KMIP version with which the object will be decoded. Optional, defaults to KMIP 1.0. Raises: ValueError: Raised if either the major or minor protocol versions are missing from the encoding. ] call[call[name[super], parameter[name[ProtocolVersion], name[self]]].read, parameter[name[input_stream]]] variable[local_stream] assign[=] call[name[utils].BytearrayStream, parameter[call[name[input_stream].read, parameter[name[self].length]]]] if call[name[self].is_tag_next, parameter[name[enums].Tags.PROTOCOL_VERSION_MAJOR, name[local_stream]]] begin[:] name[self]._major assign[=] call[name[primitives].Integer, parameter[]] call[name[self]._major.read, parameter[name[local_stream]]] if call[name[self].is_tag_next, parameter[name[enums].Tags.PROTOCOL_VERSION_MINOR, name[local_stream]]] begin[:] name[self]._minor assign[=] call[name[primitives].Integer, parameter[]] call[name[self]._minor.read, parameter[name[local_stream]]] call[name[self].is_oversized, parameter[name[local_stream]]]
keyword[def] identifier[read] ( identifier[self] , identifier[input_stream] , identifier[kmip_version] = identifier[enums] . identifier[KMIPVersion] . identifier[KMIP_1_0] ): literal[string] identifier[super] ( identifier[ProtocolVersion] , identifier[self] ). identifier[read] ( identifier[input_stream] , identifier[kmip_version] = identifier[kmip_version] ) identifier[local_stream] = identifier[utils] . identifier[BytearrayStream] ( identifier[input_stream] . identifier[read] ( identifier[self] . identifier[length] )) keyword[if] identifier[self] . identifier[is_tag_next] ( identifier[enums] . identifier[Tags] . identifier[PROTOCOL_VERSION_MAJOR] , identifier[local_stream] ): identifier[self] . identifier[_major] = identifier[primitives] . identifier[Integer] ( identifier[tag] = identifier[enums] . identifier[Tags] . identifier[PROTOCOL_VERSION_MAJOR] ) identifier[self] . identifier[_major] . identifier[read] ( identifier[local_stream] , identifier[kmip_version] = identifier[kmip_version] ) keyword[else] : keyword[raise] identifier[ValueError] ( literal[string] ) keyword[if] identifier[self] . identifier[is_tag_next] ( identifier[enums] . identifier[Tags] . identifier[PROTOCOL_VERSION_MINOR] , identifier[local_stream] ): identifier[self] . identifier[_minor] = identifier[primitives] . identifier[Integer] ( identifier[tag] = identifier[enums] . identifier[Tags] . identifier[PROTOCOL_VERSION_MINOR] ) identifier[self] . identifier[_minor] . identifier[read] ( identifier[local_stream] , identifier[kmip_version] = identifier[kmip_version] ) keyword[else] : keyword[raise] identifier[ValueError] ( literal[string] ) identifier[self] . identifier[is_oversized] ( identifier[local_stream] )
def read(self, input_stream, kmip_version=enums.KMIPVersion.KMIP_1_0): """ Read the data encoding the ProtocolVersion struct and decode it into its constituent parts. Args: input_stream (stream): A data stream containing encoded object data, supporting a read method; usually a BytearrayStream object. kmip_version (KMIPVersion): An enumeration defining the KMIP version with which the object will be decoded. Optional, defaults to KMIP 1.0. Raises: ValueError: Raised if either the major or minor protocol versions are missing from the encoding. """ super(ProtocolVersion, self).read(input_stream, kmip_version=kmip_version) local_stream = utils.BytearrayStream(input_stream.read(self.length)) if self.is_tag_next(enums.Tags.PROTOCOL_VERSION_MAJOR, local_stream): self._major = primitives.Integer(tag=enums.Tags.PROTOCOL_VERSION_MAJOR) self._major.read(local_stream, kmip_version=kmip_version) # depends on [control=['if'], data=[]] else: raise ValueError('Invalid encoding missing the major protocol version number.') if self.is_tag_next(enums.Tags.PROTOCOL_VERSION_MINOR, local_stream): self._minor = primitives.Integer(tag=enums.Tags.PROTOCOL_VERSION_MINOR) self._minor.read(local_stream, kmip_version=kmip_version) # depends on [control=['if'], data=[]] else: raise ValueError('Invalid encoding missing the minor protocol version number.') self.is_oversized(local_stream)
def from_string(cls, constraint): """ :param str constraint: The string representation of a constraint :rtype: :class:`MarathonConstraint` """ obj = constraint.split(':') marathon_constraint = cls.from_json(obj) if marathon_constraint: return marathon_constraint raise ValueError("Invalid string format. " "Expected `field:operator:value`")
def function[from_string, parameter[cls, constraint]]: constant[ :param str constraint: The string representation of a constraint :rtype: :class:`MarathonConstraint` ] variable[obj] assign[=] call[name[constraint].split, parameter[constant[:]]] variable[marathon_constraint] assign[=] call[name[cls].from_json, parameter[name[obj]]] if name[marathon_constraint] begin[:] return[name[marathon_constraint]] <ast.Raise object at 0x7da1b0f5b310>
keyword[def] identifier[from_string] ( identifier[cls] , identifier[constraint] ): literal[string] identifier[obj] = identifier[constraint] . identifier[split] ( literal[string] ) identifier[marathon_constraint] = identifier[cls] . identifier[from_json] ( identifier[obj] ) keyword[if] identifier[marathon_constraint] : keyword[return] identifier[marathon_constraint] keyword[raise] identifier[ValueError] ( literal[string] literal[string] )
def from_string(cls, constraint): """ :param str constraint: The string representation of a constraint :rtype: :class:`MarathonConstraint` """ obj = constraint.split(':') marathon_constraint = cls.from_json(obj) if marathon_constraint: return marathon_constraint # depends on [control=['if'], data=[]] raise ValueError('Invalid string format. Expected `field:operator:value`')
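Only the split is visible here; from_json presumably validates the resulting shape and returns a falsy value when it cannot, which triggers the ValueError. A quick look at what the split hands over, with sample Marathon-style constraints:

for constraint in ("hostname:UNIQUE", "rack_id:CLUSTER:rack-1", "bogus"):
    print(constraint.split(':'))
# ['hostname', 'UNIQUE']           -- operator-only form
# ['rack_id', 'CLUSTER', 'rack-1'] -- full field:operator:value form
# ['bogus']                        -- too short; assumed to fail in from_json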
def absent(self, name, rdtype=None): """Require that an owner name (and optionally an rdata type) does not exist as a prerequisite to the execution of the update.""" if isinstance(name, (str, unicode)): name = dns.name.from_text(name, None) if rdtype is None: rrset = self.find_rrset(self.answer, name, dns.rdataclass.NONE, dns.rdatatype.ANY, dns.rdatatype.NONE, None, True, True) else: if isinstance(rdtype, (str, unicode)): rdtype = dns.rdatatype.from_text(rdtype) rrset = self.find_rrset(self.answer, name, dns.rdataclass.NONE, rdtype, dns.rdatatype.NONE, None, True, True)
def function[absent, parameter[self, name, rdtype]]: constant[Require that an owner name (and optionally an rdata type) does not exist as a prerequisite to the execution of the update.] if call[name[isinstance], parameter[name[name], tuple[[<ast.Name object at 0x7da1b0805030>, <ast.Name object at 0x7da1b0805750>]]]] begin[:] variable[name] assign[=] call[name[dns].name.from_text, parameter[name[name], constant[None]]] if compare[name[rdtype] is constant[None]] begin[:] variable[rrset] assign[=] call[name[self].find_rrset, parameter[name[self].answer, name[name], name[dns].rdataclass.NONE, name[dns].rdatatype.ANY, name[dns].rdatatype.NONE, constant[None], constant[True], constant[True]]]
keyword[def] identifier[absent] ( identifier[self] , identifier[name] , identifier[rdtype] = keyword[None] ): literal[string] keyword[if] identifier[isinstance] ( identifier[name] ,( identifier[str] , identifier[unicode] )): identifier[name] = identifier[dns] . identifier[name] . identifier[from_text] ( identifier[name] , keyword[None] ) keyword[if] identifier[rdtype] keyword[is] keyword[None] : identifier[rrset] = identifier[self] . identifier[find_rrset] ( identifier[self] . identifier[answer] , identifier[name] , identifier[dns] . identifier[rdataclass] . identifier[NONE] , identifier[dns] . identifier[rdatatype] . identifier[ANY] , identifier[dns] . identifier[rdatatype] . identifier[NONE] , keyword[None] , keyword[True] , keyword[True] ) keyword[else] : keyword[if] identifier[isinstance] ( identifier[rdtype] ,( identifier[str] , identifier[unicode] )): identifier[rdtype] = identifier[dns] . identifier[rdatatype] . identifier[from_text] ( identifier[rdtype] ) identifier[rrset] = identifier[self] . identifier[find_rrset] ( identifier[self] . identifier[answer] , identifier[name] , identifier[dns] . identifier[rdataclass] . identifier[NONE] , identifier[rdtype] , identifier[dns] . identifier[rdatatype] . identifier[NONE] , keyword[None] , keyword[True] , keyword[True] )
def absent(self, name, rdtype=None): """Require that an owner name (and optionally an rdata type) does not exist as a prerequisite to the execution of the update.""" if isinstance(name, (str, unicode)): name = dns.name.from_text(name, None) # depends on [control=['if'], data=[]] if rdtype is None: rrset = self.find_rrset(self.answer, name, dns.rdataclass.NONE, dns.rdatatype.ANY, dns.rdatatype.NONE, None, True, True) # depends on [control=['if'], data=[]] else: if isinstance(rdtype, (str, unicode)): rdtype = dns.rdatatype.from_text(rdtype) # depends on [control=['if'], data=[]] rrset = self.find_rrset(self.answer, name, dns.rdataclass.NONE, rdtype, dns.rdatatype.NONE, None, True, True)
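The `unicode` checks mark this as Python-2-era dnspython; current releases take plain `str`. A hedged sketch of the prerequisite in a dynamic update, with placeholder zone and server values:

import dns.query
import dns.update

update = dns.update.Update("example.com")
update.absent("www")                      # prerequisite: owner name must not exist
update.add("www", 300, "A", "192.0.2.1")  # applied only if the prerequisite holds

# Placeholder primary server address; production updates are normally TSIG-signed.
response = dns.query.udp(update, "192.0.2.53", timeout=5)
print(response.rcode())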
def login(request, template="accounts/account_login.html", form_class=LoginForm, extra_context=None): """ Login form. """ form = form_class(request.POST or None) if request.method == "POST" and form.is_valid(): authenticated_user = form.save() info(request, _("Successfully logged in")) auth_login(request, authenticated_user) return login_redirect(request) context = {"form": form, "title": _("Log in")} context.update(extra_context or {}) return TemplateResponse(request, template, context)
def function[login, parameter[request, template, form_class, extra_context]]: constant[ Login form. ] variable[form] assign[=] call[name[form_class], parameter[<ast.BoolOp object at 0x7da18f09edd0>]] if <ast.BoolOp object at 0x7da18f09c9a0> begin[:] variable[authenticated_user] assign[=] call[name[form].save, parameter[]] call[name[info], parameter[name[request], call[name[_], parameter[constant[Successfully logged in]]]]] call[name[auth_login], parameter[name[request], name[authenticated_user]]] return[call[name[login_redirect], parameter[name[request]]]] variable[context] assign[=] dictionary[[<ast.Constant object at 0x7da204620f40>, <ast.Constant object at 0x7da204622a40>], [<ast.Name object at 0x7da204622980>, <ast.Call object at 0x7da204621f30>]] call[name[context].update, parameter[<ast.BoolOp object at 0x7da204621e40>]] return[call[name[TemplateResponse], parameter[name[request], name[template], name[context]]]]
keyword[def] identifier[login] ( identifier[request] , identifier[template] = literal[string] , identifier[form_class] = identifier[LoginForm] , identifier[extra_context] = keyword[None] ): literal[string] identifier[form] = identifier[form_class] ( identifier[request] . identifier[POST] keyword[or] keyword[None] ) keyword[if] identifier[request] . identifier[method] == literal[string] keyword[and] identifier[form] . identifier[is_valid] (): identifier[authenticated_user] = identifier[form] . identifier[save] () identifier[info] ( identifier[request] , identifier[_] ( literal[string] )) identifier[auth_login] ( identifier[request] , identifier[authenticated_user] ) keyword[return] identifier[login_redirect] ( identifier[request] ) identifier[context] ={ literal[string] : identifier[form] , literal[string] : identifier[_] ( literal[string] )} identifier[context] . identifier[update] ( identifier[extra_context] keyword[or] {}) keyword[return] identifier[TemplateResponse] ( identifier[request] , identifier[template] , identifier[context] )
def login(request, template='accounts/account_login.html', form_class=LoginForm, extra_context=None): """ Login form. """ form = form_class(request.POST or None) if request.method == 'POST' and form.is_valid(): authenticated_user = form.save() info(request, _('Successfully logged in')) auth_login(request, authenticated_user) return login_redirect(request) # depends on [control=['if'], data=[]] context = {'form': form, 'title': _('Log in')} context.update(extra_context or {}) return TemplateResponse(request, template, context)
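A hypothetical URLconf hookup for the view above on a modern Django, showing how the keyword arguments swap the template and extend (or override) the template context; the module path, template name, and context values are assumptions:

from django.urls import path

from accounts.views import login  # assumed module for the view above

urlpatterns = [
    path("login/", login, name="login"),  # default template and title
    path(
        "partner/login/",
        login,
        {"template": "partners/login.html",
         "extra_context": {"title": "Partner log in"}},  # overrides the default title
        name="partner-login",
    ),
]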
def _load( self, fn ): """Retrieve the notebook from the given file. :param fn: the file name""" # if file is empty, create an empty notebook if os.path.getsize(fn) == 0: self._description = None self._results = dict() self._pending = dict() else: # load the JSON object with open(fn, "r") as f: s = f.read() # parse back into appropriate variables j = json.loads(s) self._description = j['description'] self._pending = dict(j['pending']) self._results = j['results'] # perform any post-load patching self.patch()
def function[_load, parameter[self, fn]]: constant[Retrieve the notebook from the given file. :param fn: the file name] if compare[call[name[os].path.getsize, parameter[name[fn]]] equal[==] constant[0]] begin[:] name[self]._description assign[=] constant[None] name[self]._results assign[=] call[name[dict], parameter[]] name[self]._pending assign[=] call[name[dict], parameter[]] call[name[self].patch, parameter[]]
keyword[def] identifier[_load] ( identifier[self] , identifier[fn] ): literal[string] keyword[if] identifier[os] . identifier[path] . identifier[getsize] ( identifier[fn] )== literal[int] : identifier[self] . identifier[_description] = keyword[None] identifier[self] . identifier[_results] = identifier[dict] () identifier[self] . identifier[_pending] = identifier[dict] () keyword[else] : keyword[with] identifier[open] ( identifier[fn] , literal[string] ) keyword[as] identifier[f] : identifier[s] = identifier[f] . identifier[read] () identifier[j] = identifier[json] . identifier[loads] ( identifier[s] ) identifier[self] . identifier[_description] = identifier[j] [ literal[string] ] identifier[self] . identifier[_pending] = identifier[dict] ( identifier[j] [ literal[string] ]) identifier[self] . identifier[_results] = identifier[j] [ literal[string] ] identifier[self] . identifier[patch] ()
def _load(self, fn): """Retrieve the notebook from the given file. :param fn: the file name""" # if file is empty, create an empty notebook if os.path.getsize(fn) == 0: self._description = None self._results = dict() self._pending = dict() # depends on [control=['if'], data=[]] else: # load the JSON object with open(fn, 'r') as f: s = f.read() # parse back into appropriate variables j = json.loads(s) self._description = j['description'] self._pending = dict(j['pending']) self._results = j['results'] # perform any post-load patching self.patch() # depends on [control=['with'], data=['f']]
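The method implies a small on-disk contract: an empty file means an empty notebook; otherwise a JSON object with `description`, `pending`, and `results` keys, where `pending` is anything `dict()` can rebuild. A sketch of writing a compatible file, with illustrative values:

import json

snapshot = {
    "description": "k-sweep, run 3",   # becomes self._description
    "pending": [],                     # (key, job id) pairs; dict() rebuilds the mapping
    "results": {},                     # completed results keyed by parameter point
}
with open("notebook.json", "w") as f:
    f.write(json.dumps(snapshot))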
def from_pubkey(cls, pubkey, compressed=True, version=56, prefix=None): # Ensure this is a public key pubkey = PublicKey(pubkey, prefix=prefix or Prefix.prefix) if compressed: pubkey_plain = pubkey.compressed() else: pubkey_plain = pubkey.uncompressed() """ Derive address using ``RIPEMD160(SHA512(x))`` """ addressbin = ripemd160(hashlib.sha512(unhexlify(pubkey_plain)).hexdigest()) result = Base58(hexlify(addressbin).decode("ascii")) return cls(result, prefix=pubkey.prefix)
def function[from_pubkey, parameter[cls, pubkey, compressed, version, prefix]]: variable[pubkey] assign[=] call[name[PublicKey], parameter[name[pubkey]]] if name[compressed] begin[:] variable[pubkey_plain] assign[=] call[name[pubkey].compressed, parameter[]] constant[ Derive address using ``RIPEMD160(SHA512(x))`` ] variable[addressbin] assign[=] call[name[ripemd160], parameter[call[call[name[hashlib].sha512, parameter[call[name[unhexlify], parameter[name[pubkey_plain]]]]].hexdigest, parameter[]]]] variable[result] assign[=] call[name[Base58], parameter[call[call[name[hexlify], parameter[name[addressbin]]].decode, parameter[constant[ascii]]]]] return[call[name[cls], parameter[name[result]]]]
keyword[def] identifier[from_pubkey] ( identifier[cls] , identifier[pubkey] , identifier[compressed] = keyword[True] , identifier[version] = literal[int] , identifier[prefix] = keyword[None] ): identifier[pubkey] = identifier[PublicKey] ( identifier[pubkey] , identifier[prefix] = identifier[prefix] keyword[or] identifier[Prefix] . identifier[prefix] ) keyword[if] identifier[compressed] : identifier[pubkey_plain] = identifier[pubkey] . identifier[compressed] () keyword[else] : identifier[pubkey_plain] = identifier[pubkey] . identifier[uncompressed] () literal[string] identifier[addressbin] = identifier[ripemd160] ( identifier[hashlib] . identifier[sha512] ( identifier[unhexlify] ( identifier[pubkey_plain] )). identifier[hexdigest] ()) identifier[result] = identifier[Base58] ( identifier[hexlify] ( identifier[addressbin] ). identifier[decode] ( literal[string] )) keyword[return] identifier[cls] ( identifier[result] , identifier[prefix] = identifier[pubkey] . identifier[prefix] )
def from_pubkey(cls, pubkey, compressed=True, version=56, prefix=None): # Ensure this is a public key pubkey = PublicKey(pubkey, prefix=prefix or Prefix.prefix) if compressed: pubkey_plain = pubkey.compressed() # depends on [control=['if'], data=[]] else: pubkey_plain = pubkey.uncompressed() ' Derive address using ``RIPEMD160(SHA512(x))`` ' addressbin = ripemd160(hashlib.sha512(unhexlify(pubkey_plain)).hexdigest()) result = Base58(hexlify(addressbin).decode('ascii')) return cls(result, prefix=pubkey.prefix)
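A hedged round trip through the method above, assuming the graphenebase-style `Address`/`PublicKey` classes; the import path is an assumption and the key string is an example value, not a live account key:

from graphenebase.account import Address  # assumed location of the class above

pub = "GPH6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV"  # example key
addr = Address.from_pubkey(pub, compressed=True, prefix="GPH")

# Prints the Base58 form of RIPEMD160(SHA512(pubkey)); exact prefix
# handling is delegated to the Address/Base58 base classes.
print(addr)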
def locate_package_json(): """ Find and return the location of package.json. """ directory = settings.SYSTEMJS_PACKAGE_JSON_DIR if not directory: raise ImproperlyConfigured( "Could not locate 'package.json'. Set SYSTEMJS_PACKAGE_JSON_DIR " "to the directory that holds 'package.json'." ) path = os.path.join(directory, 'package.json') if not os.path.isfile(path): raise ImproperlyConfigured("'package.json' does not exist, tried looking in %s" % path) return path
def function[locate_package_json, parameter[]]: constant[ Find and return the location of package.json. ] variable[directory] assign[=] name[settings].SYSTEMJS_PACKAGE_JSON_DIR if <ast.UnaryOp object at 0x7da1b025fd30> begin[:] <ast.Raise object at 0x7da1b025d8d0> variable[path] assign[=] call[name[os].path.join, parameter[name[directory], constant[package.json]]] if <ast.UnaryOp object at 0x7da1b025d4b0> begin[:] <ast.Raise object at 0x7da1b025ed10> return[name[path]]
keyword[def] identifier[locate_package_json] (): literal[string] identifier[directory] = identifier[settings] . identifier[SYSTEMJS_PACKAGE_JSON_DIR] keyword[if] keyword[not] identifier[directory] : keyword[raise] identifier[ImproperlyConfigured] ( literal[string] literal[string] ) identifier[path] = identifier[os] . identifier[path] . identifier[join] ( identifier[directory] , literal[string] ) keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[isfile] ( identifier[path] ): keyword[raise] identifier[ImproperlyConfigured] ( literal[string] % identifier[path] ) keyword[return] identifier[path]
def locate_package_json(): """ Find and return the location of package.json. """ directory = settings.SYSTEMJS_PACKAGE_JSON_DIR if not directory: raise ImproperlyConfigured("Could not locate 'package.json'. Set SYSTEMJS_PACKAGE_JSON_DIR to the directory that holds 'package.json'.") # depends on [control=['if'], data=[]] path = os.path.join(directory, 'package.json') if not os.path.isfile(path): raise ImproperlyConfigured("'package.json' does not exist, tried looking in %s" % path) # depends on [control=['if'], data=[]] return path
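A hypothetical Django settings snippet that satisfies the lookup above; the directory value is illustrative. With it in place the helper returns `<dir>/package.json`, and it raises `ImproperlyConfigured` when the setting is empty or the file is missing:

# settings.py (illustrative)
import os

BASE_DIR = os.path.dirname(os.path.abspath(__file__))
SYSTEMJS_PACKAGE_JSON_DIR = BASE_DIR  # the directory that holds package.json

# elsewhere:
#   locate_package_json()  # -> os.path.join(BASE_DIR, "package.json")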