Dataset columns (string lengths, min / max):

    code             75 / 104k
    code_sememe      47 / 309k
    token_type       215 / 214k
    code_dependency  75 / 155k

Each row below repeats one Python function in these four representations: the original source (code), an AST-style semantic encoding (code_sememe), a token-role encoding (token_type), and a decompiled version annotated with control/data dependency comments (code_dependency).
def _issuer(self, entityid=None):
    """ Return an Issuer instance """
    if entityid:
        if isinstance(entityid, Issuer):
            return entityid
        else:
            return Issuer(text=entityid, format=NAMEID_FORMAT_ENTITY)
    else:
        return Issuer(text=self.config.entityid, format=NAMEID_FORMAT_ENTITY)
def function[_issuer, parameter[self, entityid]]: constant[ Return an Issuer instance ] if name[entityid] begin[:] if call[name[isinstance], parameter[name[entityid], name[Issuer]]] begin[:] return[name[entityid]]
keyword[def] identifier[_issuer] ( identifier[self] , identifier[entityid] = keyword[None] ): literal[string] keyword[if] identifier[entityid] : keyword[if] identifier[isinstance] ( identifier[entityid] , identifier[Issuer] ): keyword[return] identifier[entityid] keyword[else] : keyword[return] identifier[Issuer] ( identifier[text] = identifier[entityid] , identifier[format] = identifier[NAMEID_FORMAT_ENTITY] ) keyword[else] : keyword[return] identifier[Issuer] ( identifier[text] = identifier[self] . identifier[config] . identifier[entityid] , identifier[format] = identifier[NAMEID_FORMAT_ENTITY] )
def _issuer(self, entityid=None):
    """ Return an Issuer instance """
    if entityid:
        if isinstance(entityid, Issuer):
            return entityid # depends on [control=['if'], data=[]]
        else:
            return Issuer(text=entityid, format=NAMEID_FORMAT_ENTITY) # depends on [control=['if'], data=[]]
    else:
        return Issuer(text=self.config.entityid, format=NAMEID_FORMAT_ENTITY)
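Taken on its own, _issuer is a small argument-normalisation pattern: pass Issuer objects through, wrap bare entity IDs, fall back to the configured default. A self-contained sketch of the same idea, with a stub Issuer standing in for pysaml2's real class (the stub and the example entity IDs are illustrative assumptions):

    # Stub standing in for saml2.saml.Issuer; illustrative only.
    NAMEID_FORMAT_ENTITY = 'urn:oasis:names:tc:SAML:2.0:nameid-format:entity'

    class Issuer:
        def __init__(self, text, format):
            self.text = text
            self.format = format

    def make_issuer(entityid=None, default_entityid='https://idp.example.org'):
        # Pass ready-made Issuer objects through; wrap strings; else use the default.
        if entityid:
            if isinstance(entityid, Issuer):
                return entityid
            return Issuer(text=entityid, format=NAMEID_FORMAT_ENTITY)
        return Issuer(text=default_entityid, format=NAMEID_FORMAT_ENTITY)

    print(make_issuer('https://sp.example.com').text)  # https://sp.example.com
    print(make_issuer().text)                          # https://idp.example.org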
def R(self,*args,**kwargs):
    """
    NAME:
       R
    PURPOSE:
       return cylindrical radius at time t
    INPUT:
       t - (optional) time at which to get the radius
       ro= (Object-wide default) physical scale for distances to use to convert
       use_physical= use to override Object-wide default for using a physical scale for output
    OUTPUT:
       R(t)
    HISTORY:
       2010-09-21 - Written - Bovy (NYU)
    """
    thiso= self(*args,**kwargs)
    onet= (len(thiso.shape) == 1)
    if onet:
        return thiso[0]
    else:
        return thiso[0,:]
def function[R, parameter[self]]: constant[ NAME: R PURPOSE: return cylindrical radius at time t INPUT: t - (optional) time at which to get the radius ro= (Object-wide default) physical scale for distances to use to convert use_physical= use to override Object-wide default for using a physical scale for output OUTPUT: R(t) HISTORY: 2010-09-21 - Written - Bovy (NYU) ] variable[thiso] assign[=] call[name[self], parameter[<ast.Starred object at 0x7da1b0ec1f00>]] variable[onet] assign[=] compare[call[name[len], parameter[name[thiso].shape]] equal[==] constant[1]] if name[onet] begin[:] return[call[name[thiso]][constant[0]]]
keyword[def] identifier[R] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ): literal[string] identifier[thiso] = identifier[self] (* identifier[args] ,** identifier[kwargs] ) identifier[onet] =( identifier[len] ( identifier[thiso] . identifier[shape] )== literal[int] ) keyword[if] identifier[onet] : keyword[return] identifier[thiso] [ literal[int] ] keyword[else] : keyword[return] identifier[thiso] [ literal[int] ,:]
def R(self, *args, **kwargs):
    """
    NAME:
       R
    PURPOSE:
       return cylindrical radius at time t
    INPUT:
       t - (optional) time at which to get the radius
       ro= (Object-wide default) physical scale for distances to use to convert
       use_physical= use to override Object-wide default for using a physical scale for output
    OUTPUT:
       R(t)
    HISTORY:
       2010-09-21 - Written - Bovy (NYU)
    """
    thiso = self(*args, **kwargs)
    onet = len(thiso.shape) == 1
    if onet:
        return thiso[0] # depends on [control=['if'], data=[]]
    else:
        return thiso[0, :]
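R here is a method on galpy's Orbit objects: calling self(*args, **kwargs) evaluates the orbit at the requested time(s), and the first row of the result is the cylindrical radius. A short usage sketch, assuming galpy is installed (the initial conditions and times are made up):

    import numpy
    from galpy.orbit import Orbit
    from galpy.potential import MWPotential2014

    # Illustrative initial conditions: [R, vR, vT, z, vz, phi] in natural units.
    o = Orbit([1., 0.1, 1.1, 0., 0.1, 0.])
    ts = numpy.linspace(0., 10., 1001)
    o.integrate(ts, MWPotential2014)
    print(o.R(5.))  # cylindrical radius at t=5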
def generate_network(nl_model, handler, seed=1234, always_include_props=False, include_connections=True, include_inputs=True, base_dir=None):
    """
    Generate the network model as described in NeuroMLlite in a specific handler,
    e.g. NeuroMLHandler, PyNNHandler, etc.
    """

    pop_locations = {}
    cell_objects = {}
    synapse_objects = {}

    print_v("Starting net generation for %s%s..." % (nl_model.id, ' (base dir: %s)' % base_dir if base_dir else ''))
    rng = random.Random(seed)

    if nl_model.network_reader:
        exec('from neuromllite.%s import %s' % (nl_model.network_reader.type, nl_model.network_reader.type))
        exec('network_reader = %s()' % (nl_model.network_reader.type))
        network_reader.parameters = nl_model.network_reader.parameters
        network_reader.parse(handler)
        pop_locations = network_reader.get_locations()
    else:
        notes = "Generated network: %s" % nl_model.id
        notes += "\n    Generation seed: %i" % (seed)
        if nl_model.parameters:
            notes += "\n    NeuroMLlite parameters: "
            for p in nl_model.parameters:
                notes += "\n        %s = %s" % (p, nl_model.parameters[p])
        handler.handle_document_start(nl_model.id, notes)
        temperature = '%sdegC' % nl_model.temperature if nl_model.temperature else None
        handler.handle_network(nl_model.id, nl_model.notes, temperature=temperature)

    nml2_doc_temp = _extract_pynn_components_to_neuroml(nl_model)

    for c in nl_model.cells:
        if c.neuroml2_source_file:
            from pyneuroml import pynml
            nml2_doc = pynml.read_neuroml2_file(_locate_file(c.neuroml2_source_file, base_dir), include_includes=True)
            cell_objects[c.id] = nml2_doc.get_by_id(c.id)
        if c.pynn_cell:
            cell_objects[c.id] = nml2_doc_temp.get_by_id(c.id)

    for s in nl_model.synapses:
        if s.neuroml2_source_file:
            from pyneuroml import pynml
            nml2_doc = pynml.read_neuroml2_file(_locate_file(s.neuroml2_source_file, base_dir), include_includes=True)
            synapse_objects[s.id] = nml2_doc.get_by_id(s.id)
        if s.pynn_synapse:
            synapse_objects[s.id] = nml2_doc_temp.get_by_id(s.id)

    for p in nl_model.populations:
        size = evaluate(p.size, nl_model.parameters)
        properties = p.properties if p.properties else {}
        if p.random_layout:
            properties['region'] = p.random_layout.region
        if not p.random_layout and not p.single_location and not always_include_props:
            # If there are no positions (abstract network), and <property>
            # is added to <population>, jLems doesn't like it... (it has difficulty
            # interpreting pop0[0]/v, etc.)
            # So better not to give properties...
            properties = {}

        if p.notes:
            handler.handle_population(p.id, p.component, size, cell_objects[p.component] if p.component in cell_objects else None, properties=properties, notes=p.notes)
        else:
            handler.handle_population(p.id, p.component, size, cell_objects[p.component] if p.component in cell_objects else None, properties=properties)

        pop_locations[p.id] = np.zeros((size, 3))

        for i in range(size):
            if p.random_layout:
                region = nl_model.get_child(p.random_layout.region, 'regions')
                x = region.x + rng.random() * region.width
                y = region.y + rng.random() * region.height
                z = region.z + rng.random() * region.depth
                pop_locations[p.id][i] = (x, y, z)
                handler.handle_location(i, p.id, p.component, x, y, z)
            if p.single_location:
                loc = p.single_location.location
                x = loc.x
                y = loc.y
                z = loc.z
                pop_locations[p.id][i] = (x, y, z)
                handler.handle_location(i, p.id, p.component, x, y, z)

        if hasattr(handler, 'finalise_population'):
            handler.finalise_population(p.id)

    if include_connections:
        for p in nl_model.projections:
            type = p.type if p.type else 'projection'
            handler.handle_projection(p.id, p.presynaptic, p.postsynaptic, p.synapse, synapse_obj=synapse_objects[p.synapse] if p.synapse in synapse_objects else None, pre_synapse_obj=synapse_objects[p.pre_synapse] if p.pre_synapse in synapse_objects else None, type=type)

            delay = p.delay if p.delay else 0
            weight = p.weight if p.weight else 1
            conn_count = 0

            if p.random_connectivity:
                for pre_i in range(len(pop_locations[p.presynaptic])):
                    for post_i in range(len(pop_locations[p.postsynaptic])):
                        flip = rng.random()
                        #print("Is cell %i conn to %i, prob %s - %s"%(pre_i, post_i, flip, p.random_connectivity.probability))
                        if flip < p.random_connectivity.probability:
                            weight = evaluate(weight, nl_model.parameters)
                            delay = evaluate(delay, nl_model.parameters)
                            #print_v("Adding connection %i with weight: %s, delay: %s"%(conn_count, weight, delay))
                            handler.handle_connection(p.id, conn_count, p.presynaptic, p.postsynaptic, p.synapse, \
                                                      pre_i, \
                                                      post_i, \
                                                      preSegId=0, \
                                                      preFract=0.5, \
                                                      postSegId=0, \
                                                      postFract=0.5, \
                                                      delay=delay, \
                                                      weight=weight)
                            conn_count += 1

            if p.convergent_connectivity:
                for post_i in range(len(pop_locations[p.postsynaptic])):
                    for count in range(int(p.convergent_connectivity.num_per_post)):
                        found = False
                        while not found:
                            pre_i = int(rng.random()*len(pop_locations[p.presynaptic]))
                            if p.presynaptic==p.postsynaptic and pre_i==post_i:
                                found=False
                            else:
                                found=True
                        weight = evaluate(weight, nl_model.parameters)
                        delay = evaluate(delay, nl_model.parameters)
                        print_v("Adding connection %i (%i->%i; %i to %s of post) with weight: %s, delay: %s"%(conn_count, pre_i, post_i, count, p.convergent_connectivity.num_per_post, weight, delay))
                        handler.handle_connection(p.id, conn_count, p.presynaptic, p.postsynaptic, p.synapse, \
                                                  pre_i, \
                                                  post_i, \
                                                  preSegId=0, \
                                                  preFract=0.5, \
                                                  postSegId=0, \
                                                  postFract=0.5, \
                                                  delay=delay, \
                                                  weight=weight)
                        conn_count += 1

            elif p.one_to_one_connector:
                for i in range(min(len(pop_locations[p.presynaptic]), len(pop_locations[p.postsynaptic]))):
                    weight = evaluate(weight, nl_model.parameters)
                    delay = evaluate(delay, nl_model.parameters)
                    #print_v("Adding connection %i with weight: %s, delay: %s"%(conn_count, weight, delay))
                    handler.handle_connection(p.id, conn_count, p.presynaptic, p.postsynaptic, p.synapse, \
                                              i, \
                                              i, \
                                              preSegId=0, \
                                              preFract=0.5, \
                                              postSegId=0, \
                                              postFract=0.5, \
                                              delay=delay, \
                                              weight=weight)
                    conn_count += 1

            handler.finalise_projection(p.id, p.presynaptic, p.postsynaptic, p.synapse)

    if include_inputs:
        for input in nl_model.inputs:
            handler.handle_input_list(input.id, input.population, input.input_source, size=0, input_comp_obj=None)
            input_count = 0
            for i in range(len(pop_locations[input.population])):
                flip = rng.random()
                weight = input.weight if input.weight else 1
                if flip * 100. < input.percentage:
                    number_per_cell = evaluate(input.number_per_cell, nl_model.parameters) if input.number_per_cell else 1
                    for j in range(number_per_cell):
                        handler.handle_single_input(input.id, input_count, i, weight=evaluate(weight, nl_model.parameters))
                        input_count += 1
            handler.finalise_input_source(input.id)

    if hasattr(handler, 'finalise_document'):
        handler.finalise_document()
def function[generate_network, parameter[nl_model, handler, seed, always_include_props, include_connections, include_inputs, base_dir]]: constant[ Generate the network model as described in NeuroMLlite in a specific handler, e.g. NeuroMLHandler, PyNNHandler, etc. ] variable[pop_locations] assign[=] dictionary[[], []] variable[cell_objects] assign[=] dictionary[[], []] variable[synapse_objects] assign[=] dictionary[[], []] call[name[print_v], parameter[binary_operation[constant[Starting net generation for %s%s...] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da1b184b3d0>, <ast.IfExp object at 0x7da1b184b370>]]]]] variable[rng] assign[=] call[name[random].Random, parameter[name[seed]]] if name[nl_model].network_reader begin[:] call[name[exec], parameter[binary_operation[constant[from neuromllite.%s import %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da1b184af20>, <ast.Attribute object at 0x7da1b184ae90>]]]]] call[name[exec], parameter[binary_operation[constant[network_reader = %s()] <ast.Mod object at 0x7da2590d6920> name[nl_model].network_reader.type]]] name[network_reader].parameters assign[=] name[nl_model].network_reader.parameters call[name[network_reader].parse, parameter[name[handler]]] variable[pop_locations] assign[=] call[name[network_reader].get_locations, parameter[]] variable[nml2_doc_temp] assign[=] call[name[_extract_pynn_components_to_neuroml], parameter[name[nl_model]]] for taget[name[c]] in starred[name[nl_model].cells] begin[:] if name[c].neuroml2_source_file begin[:] from relative_module[pyneuroml] import module[pynml] variable[nml2_doc] assign[=] call[name[pynml].read_neuroml2_file, parameter[call[name[_locate_file], parameter[name[c].neuroml2_source_file, name[base_dir]]]]] call[name[cell_objects]][name[c].id] assign[=] call[name[nml2_doc].get_by_id, parameter[name[c].id]] if name[c].pynn_cell begin[:] call[name[cell_objects]][name[c].id] assign[=] call[name[nml2_doc_temp].get_by_id, parameter[name[c].id]] for taget[name[s]] in starred[name[nl_model].synapses] begin[:] if name[s].neuroml2_source_file begin[:] from relative_module[pyneuroml] import module[pynml] variable[nml2_doc] assign[=] call[name[pynml].read_neuroml2_file, parameter[call[name[_locate_file], parameter[name[s].neuroml2_source_file, name[base_dir]]]]] call[name[synapse_objects]][name[s].id] assign[=] call[name[nml2_doc].get_by_id, parameter[name[s].id]] if name[s].pynn_synapse begin[:] call[name[synapse_objects]][name[s].id] assign[=] call[name[nml2_doc_temp].get_by_id, parameter[name[s].id]] for taget[name[p]] in starred[name[nl_model].populations] begin[:] variable[size] assign[=] call[name[evaluate], parameter[name[p].size, name[nl_model].parameters]] variable[properties] assign[=] <ast.IfExp object at 0x7da1b1848970> if name[p].random_layout begin[:] call[name[properties]][constant[region]] assign[=] name[p].random_layout.region if <ast.BoolOp object at 0x7da1b1848640> begin[:] variable[properties] assign[=] dictionary[[], []] if name[p].notes begin[:] call[name[handler].handle_population, parameter[name[p].id, name[p].component, name[size], <ast.IfExp object at 0x7da1b18481c0>]] call[name[pop_locations]][name[p].id] assign[=] call[name[np].zeros, parameter[tuple[[<ast.Name object at 0x7da1b19cb4c0>, <ast.Constant object at 0x7da1b19cb790>]]]] for taget[name[i]] in starred[call[name[range], parameter[name[size]]]] begin[:] if name[p].random_layout begin[:] variable[region] assign[=] call[name[nl_model].get_child, 
parameter[name[p].random_layout.region, constant[regions]]] variable[x] assign[=] binary_operation[name[region].x + binary_operation[call[name[rng].random, parameter[]] * name[region].width]] variable[y] assign[=] binary_operation[name[region].y + binary_operation[call[name[rng].random, parameter[]] * name[region].height]] variable[z] assign[=] binary_operation[name[region].z + binary_operation[call[name[rng].random, parameter[]] * name[region].depth]] call[call[name[pop_locations]][name[p].id]][name[i]] assign[=] tuple[[<ast.Name object at 0x7da1b195c970>, <ast.Name object at 0x7da1b195c4c0>, <ast.Name object at 0x7da1b195ebc0>]] call[name[handler].handle_location, parameter[name[i], name[p].id, name[p].component, name[x], name[y], name[z]]] if name[p].single_location begin[:] variable[loc] assign[=] name[p].single_location.location variable[x] assign[=] name[loc].x variable[y] assign[=] name[loc].y variable[z] assign[=] name[loc].z call[call[name[pop_locations]][name[p].id]][name[i]] assign[=] tuple[[<ast.Name object at 0x7da1b195d480>, <ast.Name object at 0x7da1b195d8a0>, <ast.Name object at 0x7da1b195f8e0>]] call[name[handler].handle_location, parameter[name[i], name[p].id, name[p].component, name[x], name[y], name[z]]] if call[name[hasattr], parameter[name[handler], constant[finalise_population]]] begin[:] call[name[handler].finalise_population, parameter[name[p].id]] if name[include_connections] begin[:] for taget[name[p]] in starred[name[nl_model].projections] begin[:] variable[type] assign[=] <ast.IfExp object at 0x7da1b195e140> call[name[handler].handle_projection, parameter[name[p].id, name[p].presynaptic, name[p].postsynaptic, name[p].synapse]] variable[delay] assign[=] <ast.IfExp object at 0x7da1b195dde0> variable[weight] assign[=] <ast.IfExp object at 0x7da1b195f340> variable[conn_count] assign[=] constant[0] if name[p].random_connectivity begin[:] for taget[name[pre_i]] in starred[call[name[range], parameter[call[name[len], parameter[call[name[pop_locations]][name[p].presynaptic]]]]]] begin[:] for taget[name[post_i]] in starred[call[name[range], parameter[call[name[len], parameter[call[name[pop_locations]][name[p].postsynaptic]]]]]] begin[:] variable[flip] assign[=] call[name[rng].random, parameter[]] if compare[name[flip] less[<] name[p].random_connectivity.probability] begin[:] variable[weight] assign[=] call[name[evaluate], parameter[name[weight], name[nl_model].parameters]] variable[delay] assign[=] call[name[evaluate], parameter[name[delay], name[nl_model].parameters]] call[name[handler].handle_connection, parameter[name[p].id, name[conn_count], name[p].presynaptic, name[p].postsynaptic, name[p].synapse, name[pre_i], name[post_i]]] <ast.AugAssign object at 0x7da1b195feb0> if name[p].convergent_connectivity begin[:] for taget[name[post_i]] in starred[call[name[range], parameter[call[name[len], parameter[call[name[pop_locations]][name[p].postsynaptic]]]]]] begin[:] for taget[name[count]] in starred[call[name[range], parameter[call[name[int], parameter[name[p].convergent_connectivity.num_per_post]]]]] begin[:] variable[found] assign[=] constant[False] while <ast.UnaryOp object at 0x7da1b195c1f0> begin[:] variable[pre_i] assign[=] call[name[int], parameter[binary_operation[call[name[rng].random, parameter[]] * call[name[len], parameter[call[name[pop_locations]][name[p].presynaptic]]]]]] if <ast.BoolOp object at 0x7da1b195db40> begin[:] variable[found] assign[=] constant[False] variable[weight] assign[=] call[name[evaluate], parameter[name[weight], name[nl_model].parameters]] 
variable[delay] assign[=] call[name[evaluate], parameter[name[delay], name[nl_model].parameters]] call[name[print_v], parameter[binary_operation[constant[Adding connection %i (%i->%i; %i to %s of post) with weight: %s, delay: %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b1971810>, <ast.Name object at 0x7da1b19716c0>, <ast.Name object at 0x7da1b1970190>, <ast.Name object at 0x7da1b19713f0>, <ast.Attribute object at 0x7da1b1971840>, <ast.Name object at 0x7da1b1970760>, <ast.Name object at 0x7da1b19701c0>]]]]] call[name[handler].handle_connection, parameter[name[p].id, name[conn_count], name[p].presynaptic, name[p].postsynaptic, name[p].synapse, name[pre_i], name[post_i]]] <ast.AugAssign object at 0x7da1b1971f90> call[name[handler].finalise_projection, parameter[name[p].id, name[p].presynaptic, name[p].postsynaptic, name[p].synapse]] if name[include_inputs] begin[:] for taget[name[input]] in starred[name[nl_model].inputs] begin[:] call[name[handler].handle_input_list, parameter[name[input].id, name[input].population, name[input].input_source]] variable[input_count] assign[=] constant[0] for taget[name[i]] in starred[call[name[range], parameter[call[name[len], parameter[call[name[pop_locations]][name[input].population]]]]]] begin[:] variable[flip] assign[=] call[name[rng].random, parameter[]] variable[weight] assign[=] <ast.IfExp object at 0x7da1b1968b80> if compare[binary_operation[name[flip] * constant[100.0]] less[<] name[input].percentage] begin[:] variable[number_per_cell] assign[=] <ast.IfExp object at 0x7da1b196bf10> for taget[name[j]] in starred[call[name[range], parameter[name[number_per_cell]]]] begin[:] call[name[handler].handle_single_input, parameter[name[input].id, name[input_count], name[i]]] <ast.AugAssign object at 0x7da1b196beb0> call[name[handler].finalise_input_source, parameter[name[input].id]] if call[name[hasattr], parameter[name[handler], constant[finalise_document]]] begin[:] call[name[handler].finalise_document, parameter[]]
keyword[def] identifier[generate_network] ( identifier[nl_model] , identifier[handler] , identifier[seed] = literal[int] , identifier[always_include_props] = keyword[False] , identifier[include_connections] = keyword[True] , identifier[include_inputs] = keyword[True] , identifier[base_dir] = keyword[None] ): literal[string] identifier[pop_locations] ={} identifier[cell_objects] ={} identifier[synapse_objects] ={} identifier[print_v] ( literal[string] %( identifier[nl_model] . identifier[id] , literal[string] % identifier[base_dir] keyword[if] identifier[base_dir] keyword[else] literal[string] )) identifier[rng] = identifier[random] . identifier[Random] ( identifier[seed] ) keyword[if] identifier[nl_model] . identifier[network_reader] : identifier[exec] ( literal[string] %( identifier[nl_model] . identifier[network_reader] . identifier[type] , identifier[nl_model] . identifier[network_reader] . identifier[type] )) identifier[exec] ( literal[string] %( identifier[nl_model] . identifier[network_reader] . identifier[type] )) identifier[network_reader] . identifier[parameters] = identifier[nl_model] . identifier[network_reader] . identifier[parameters] identifier[network_reader] . identifier[parse] ( identifier[handler] ) identifier[pop_locations] = identifier[network_reader] . identifier[get_locations] () keyword[else] : identifier[notes] = literal[string] % identifier[nl_model] . identifier[id] identifier[notes] += literal[string] %( identifier[seed] ) keyword[if] identifier[nl_model] . identifier[parameters] : identifier[notes] += literal[string] keyword[for] identifier[p] keyword[in] identifier[nl_model] . identifier[parameters] : identifier[notes] += literal[string] %( identifier[p] , identifier[nl_model] . identifier[parameters] [ identifier[p] ]) identifier[handler] . identifier[handle_document_start] ( identifier[nl_model] . identifier[id] , identifier[notes] ) identifier[temperature] = literal[string] % identifier[nl_model] . identifier[temperature] keyword[if] identifier[nl_model] . identifier[temperature] keyword[else] keyword[None] identifier[handler] . identifier[handle_network] ( identifier[nl_model] . identifier[id] , identifier[nl_model] . identifier[notes] , identifier[temperature] = identifier[temperature] ) identifier[nml2_doc_temp] = identifier[_extract_pynn_components_to_neuroml] ( identifier[nl_model] ) keyword[for] identifier[c] keyword[in] identifier[nl_model] . identifier[cells] : keyword[if] identifier[c] . identifier[neuroml2_source_file] : keyword[from] identifier[pyneuroml] keyword[import] identifier[pynml] identifier[nml2_doc] = identifier[pynml] . identifier[read_neuroml2_file] ( identifier[_locate_file] ( identifier[c] . identifier[neuroml2_source_file] , identifier[base_dir] ), identifier[include_includes] = keyword[True] ) identifier[cell_objects] [ identifier[c] . identifier[id] ]= identifier[nml2_doc] . identifier[get_by_id] ( identifier[c] . identifier[id] ) keyword[if] identifier[c] . identifier[pynn_cell] : identifier[cell_objects] [ identifier[c] . identifier[id] ]= identifier[nml2_doc_temp] . identifier[get_by_id] ( identifier[c] . identifier[id] ) keyword[for] identifier[s] keyword[in] identifier[nl_model] . identifier[synapses] : keyword[if] identifier[s] . identifier[neuroml2_source_file] : keyword[from] identifier[pyneuroml] keyword[import] identifier[pynml] identifier[nml2_doc] = identifier[pynml] . identifier[read_neuroml2_file] ( identifier[_locate_file] ( identifier[s] . 
identifier[neuroml2_source_file] , identifier[base_dir] ), identifier[include_includes] = keyword[True] ) identifier[synapse_objects] [ identifier[s] . identifier[id] ]= identifier[nml2_doc] . identifier[get_by_id] ( identifier[s] . identifier[id] ) keyword[if] identifier[s] . identifier[pynn_synapse] : identifier[synapse_objects] [ identifier[s] . identifier[id] ]= identifier[nml2_doc_temp] . identifier[get_by_id] ( identifier[s] . identifier[id] ) keyword[for] identifier[p] keyword[in] identifier[nl_model] . identifier[populations] : identifier[size] = identifier[evaluate] ( identifier[p] . identifier[size] , identifier[nl_model] . identifier[parameters] ) identifier[properties] = identifier[p] . identifier[properties] keyword[if] identifier[p] . identifier[properties] keyword[else] {} keyword[if] identifier[p] . identifier[random_layout] : identifier[properties] [ literal[string] ]= identifier[p] . identifier[random_layout] . identifier[region] keyword[if] keyword[not] identifier[p] . identifier[random_layout] keyword[and] keyword[not] identifier[p] . identifier[single_location] keyword[and] keyword[not] identifier[always_include_props] : identifier[properties] ={} keyword[if] identifier[p] . identifier[notes] : identifier[handler] . identifier[handle_population] ( identifier[p] . identifier[id] , identifier[p] . identifier[component] , identifier[size] , identifier[cell_objects] [ identifier[p] . identifier[component] ] keyword[if] identifier[p] . identifier[component] keyword[in] identifier[cell_objects] keyword[else] keyword[None] , identifier[properties] = identifier[properties] , identifier[notes] = identifier[p] . identifier[notes] ) keyword[else] : identifier[handler] . identifier[handle_population] ( identifier[p] . identifier[id] , identifier[p] . identifier[component] , identifier[size] , identifier[cell_objects] [ identifier[p] . identifier[component] ] keyword[if] identifier[p] . identifier[component] keyword[in] identifier[cell_objects] keyword[else] keyword[None] , identifier[properties] = identifier[properties] ) identifier[pop_locations] [ identifier[p] . identifier[id] ]= identifier[np] . identifier[zeros] (( identifier[size] , literal[int] )) keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[size] ): keyword[if] identifier[p] . identifier[random_layout] : identifier[region] = identifier[nl_model] . identifier[get_child] ( identifier[p] . identifier[random_layout] . identifier[region] , literal[string] ) identifier[x] = identifier[region] . identifier[x] + identifier[rng] . identifier[random] ()* identifier[region] . identifier[width] identifier[y] = identifier[region] . identifier[y] + identifier[rng] . identifier[random] ()* identifier[region] . identifier[height] identifier[z] = identifier[region] . identifier[z] + identifier[rng] . identifier[random] ()* identifier[region] . identifier[depth] identifier[pop_locations] [ identifier[p] . identifier[id] ][ identifier[i] ]=( identifier[x] , identifier[y] , identifier[z] ) identifier[handler] . identifier[handle_location] ( identifier[i] , identifier[p] . identifier[id] , identifier[p] . identifier[component] , identifier[x] , identifier[y] , identifier[z] ) keyword[if] identifier[p] . identifier[single_location] : identifier[loc] = identifier[p] . identifier[single_location] . identifier[location] identifier[x] = identifier[loc] . identifier[x] identifier[y] = identifier[loc] . identifier[y] identifier[z] = identifier[loc] . identifier[z] identifier[pop_locations] [ identifier[p] . 
identifier[id] ][ identifier[i] ]=( identifier[x] , identifier[y] , identifier[z] ) identifier[handler] . identifier[handle_location] ( identifier[i] , identifier[p] . identifier[id] , identifier[p] . identifier[component] , identifier[x] , identifier[y] , identifier[z] ) keyword[if] identifier[hasattr] ( identifier[handler] , literal[string] ): identifier[handler] . identifier[finalise_population] ( identifier[p] . identifier[id] ) keyword[if] identifier[include_connections] : keyword[for] identifier[p] keyword[in] identifier[nl_model] . identifier[projections] : identifier[type] = identifier[p] . identifier[type] keyword[if] identifier[p] . identifier[type] keyword[else] literal[string] identifier[handler] . identifier[handle_projection] ( identifier[p] . identifier[id] , identifier[p] . identifier[presynaptic] , identifier[p] . identifier[postsynaptic] , identifier[p] . identifier[synapse] , identifier[synapse_obj] = identifier[synapse_objects] [ identifier[p] . identifier[synapse] ] keyword[if] identifier[p] . identifier[synapse] keyword[in] identifier[synapse_objects] keyword[else] keyword[None] , identifier[pre_synapse_obj] = identifier[synapse_objects] [ identifier[p] . identifier[pre_synapse] ] keyword[if] identifier[p] . identifier[pre_synapse] keyword[in] identifier[synapse_objects] keyword[else] keyword[None] , identifier[type] = identifier[type] ) identifier[delay] = identifier[p] . identifier[delay] keyword[if] identifier[p] . identifier[delay] keyword[else] literal[int] identifier[weight] = identifier[p] . identifier[weight] keyword[if] identifier[p] . identifier[weight] keyword[else] literal[int] identifier[conn_count] = literal[int] keyword[if] identifier[p] . identifier[random_connectivity] : keyword[for] identifier[pre_i] keyword[in] identifier[range] ( identifier[len] ( identifier[pop_locations] [ identifier[p] . identifier[presynaptic] ])): keyword[for] identifier[post_i] keyword[in] identifier[range] ( identifier[len] ( identifier[pop_locations] [ identifier[p] . identifier[postsynaptic] ])): identifier[flip] = identifier[rng] . identifier[random] () keyword[if] identifier[flip] < identifier[p] . identifier[random_connectivity] . identifier[probability] : identifier[weight] = identifier[evaluate] ( identifier[weight] , identifier[nl_model] . identifier[parameters] ) identifier[delay] = identifier[evaluate] ( identifier[delay] , identifier[nl_model] . identifier[parameters] ) identifier[handler] . identifier[handle_connection] ( identifier[p] . identifier[id] , identifier[conn_count] , identifier[p] . identifier[presynaptic] , identifier[p] . identifier[postsynaptic] , identifier[p] . identifier[synapse] , identifier[pre_i] , identifier[post_i] , identifier[preSegId] = literal[int] , identifier[preFract] = literal[int] , identifier[postSegId] = literal[int] , identifier[postFract] = literal[int] , identifier[delay] = identifier[delay] , identifier[weight] = identifier[weight] ) identifier[conn_count] += literal[int] keyword[if] identifier[p] . identifier[convergent_connectivity] : keyword[for] identifier[post_i] keyword[in] identifier[range] ( identifier[len] ( identifier[pop_locations] [ identifier[p] . identifier[postsynaptic] ])): keyword[for] identifier[count] keyword[in] identifier[range] ( identifier[int] ( identifier[p] . identifier[convergent_connectivity] . identifier[num_per_post] )): identifier[found] = keyword[False] keyword[while] keyword[not] identifier[found] : identifier[pre_i] = identifier[int] ( identifier[rng] . 
identifier[random] ()* identifier[len] ( identifier[pop_locations] [ identifier[p] . identifier[presynaptic] ])) keyword[if] identifier[p] . identifier[presynaptic] == identifier[p] . identifier[postsynaptic] keyword[and] identifier[pre_i] == identifier[post_i] : identifier[found] = keyword[False] keyword[else] : identifier[found] = keyword[True] identifier[weight] = identifier[evaluate] ( identifier[weight] , identifier[nl_model] . identifier[parameters] ) identifier[delay] = identifier[evaluate] ( identifier[delay] , identifier[nl_model] . identifier[parameters] ) identifier[print_v] ( literal[string] %( identifier[conn_count] , identifier[pre_i] , identifier[post_i] , identifier[count] , identifier[p] . identifier[convergent_connectivity] . identifier[num_per_post] , identifier[weight] , identifier[delay] )) identifier[handler] . identifier[handle_connection] ( identifier[p] . identifier[id] , identifier[conn_count] , identifier[p] . identifier[presynaptic] , identifier[p] . identifier[postsynaptic] , identifier[p] . identifier[synapse] , identifier[pre_i] , identifier[post_i] , identifier[preSegId] = literal[int] , identifier[preFract] = literal[int] , identifier[postSegId] = literal[int] , identifier[postFract] = literal[int] , identifier[delay] = identifier[delay] , identifier[weight] = identifier[weight] ) identifier[conn_count] += literal[int] keyword[elif] identifier[p] . identifier[one_to_one_connector] : keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[min] ( identifier[len] ( identifier[pop_locations] [ identifier[p] . identifier[presynaptic] ]), identifier[len] ( identifier[pop_locations] [ identifier[p] . identifier[postsynaptic] ]))): identifier[weight] = identifier[evaluate] ( identifier[weight] , identifier[nl_model] . identifier[parameters] ) identifier[delay] = identifier[evaluate] ( identifier[delay] , identifier[nl_model] . identifier[parameters] ) identifier[handler] . identifier[handle_connection] ( identifier[p] . identifier[id] , identifier[conn_count] , identifier[p] . identifier[presynaptic] , identifier[p] . identifier[postsynaptic] , identifier[p] . identifier[synapse] , identifier[i] , identifier[i] , identifier[preSegId] = literal[int] , identifier[preFract] = literal[int] , identifier[postSegId] = literal[int] , identifier[postFract] = literal[int] , identifier[delay] = identifier[delay] , identifier[weight] = identifier[weight] ) identifier[conn_count] += literal[int] identifier[handler] . identifier[finalise_projection] ( identifier[p] . identifier[id] , identifier[p] . identifier[presynaptic] , identifier[p] . identifier[postsynaptic] , identifier[p] . identifier[synapse] ) keyword[if] identifier[include_inputs] : keyword[for] identifier[input] keyword[in] identifier[nl_model] . identifier[inputs] : identifier[handler] . identifier[handle_input_list] ( identifier[input] . identifier[id] , identifier[input] . identifier[population] , identifier[input] . identifier[input_source] , identifier[size] = literal[int] , identifier[input_comp_obj] = keyword[None] ) identifier[input_count] = literal[int] keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[pop_locations] [ identifier[input] . identifier[population] ])): identifier[flip] = identifier[rng] . identifier[random] () identifier[weight] = identifier[input] . identifier[weight] keyword[if] identifier[input] . identifier[weight] keyword[else] literal[int] keyword[if] identifier[flip] * literal[int] < identifier[input] . 
identifier[percentage] : identifier[number_per_cell] = identifier[evaluate] ( identifier[input] . identifier[number_per_cell] , identifier[nl_model] . identifier[parameters] ) keyword[if] identifier[input] . identifier[number_per_cell] keyword[else] literal[int] keyword[for] identifier[j] keyword[in] identifier[range] ( identifier[number_per_cell] ): identifier[handler] . identifier[handle_single_input] ( identifier[input] . identifier[id] , identifier[input_count] , identifier[i] , identifier[weight] = identifier[evaluate] ( identifier[weight] , identifier[nl_model] . identifier[parameters] )) identifier[input_count] += literal[int] identifier[handler] . identifier[finalise_input_source] ( identifier[input] . identifier[id] ) keyword[if] identifier[hasattr] ( identifier[handler] , literal[string] ): identifier[handler] . identifier[finalise_document] ()
def generate_network(nl_model, handler, seed=1234, always_include_props=False, include_connections=True, include_inputs=True, base_dir=None):
    """
    Generate the network model as described in NeuroMLlite in a specific handler,
    e.g. NeuroMLHandler, PyNNHandler, etc.
    """
    pop_locations = {}
    cell_objects = {}
    synapse_objects = {}
    print_v('Starting net generation for %s%s...' % (nl_model.id, ' (base dir: %s)' % base_dir if base_dir else ''))
    rng = random.Random(seed)
    if nl_model.network_reader:
        exec('from neuromllite.%s import %s' % (nl_model.network_reader.type, nl_model.network_reader.type))
        exec('network_reader = %s()' % nl_model.network_reader.type)
        network_reader.parameters = nl_model.network_reader.parameters
        network_reader.parse(handler)
        pop_locations = network_reader.get_locations() # depends on [control=['if'], data=[]]
    else:
        notes = 'Generated network: %s' % nl_model.id
        notes += '\n    Generation seed: %i' % seed
        if nl_model.parameters:
            notes += '\n    NeuroMLlite parameters: '
            for p in nl_model.parameters:
                notes += '\n        %s = %s' % (p, nl_model.parameters[p]) # depends on [control=['for'], data=['p']] # depends on [control=['if'], data=[]]
        handler.handle_document_start(nl_model.id, notes)
        temperature = '%sdegC' % nl_model.temperature if nl_model.temperature else None
        handler.handle_network(nl_model.id, nl_model.notes, temperature=temperature)
    nml2_doc_temp = _extract_pynn_components_to_neuroml(nl_model)
    for c in nl_model.cells:
        if c.neuroml2_source_file:
            from pyneuroml import pynml
            nml2_doc = pynml.read_neuroml2_file(_locate_file(c.neuroml2_source_file, base_dir), include_includes=True)
            cell_objects[c.id] = nml2_doc.get_by_id(c.id) # depends on [control=['if'], data=[]]
        if c.pynn_cell:
            cell_objects[c.id] = nml2_doc_temp.get_by_id(c.id) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['c']]
    for s in nl_model.synapses:
        if s.neuroml2_source_file:
            from pyneuroml import pynml
            nml2_doc = pynml.read_neuroml2_file(_locate_file(s.neuroml2_source_file, base_dir), include_includes=True)
            synapse_objects[s.id] = nml2_doc.get_by_id(s.id) # depends on [control=['if'], data=[]]
        if s.pynn_synapse:
            synapse_objects[s.id] = nml2_doc_temp.get_by_id(s.id) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['s']]
    for p in nl_model.populations:
        size = evaluate(p.size, nl_model.parameters)
        properties = p.properties if p.properties else {}
        if p.random_layout:
            properties['region'] = p.random_layout.region # depends on [control=['if'], data=[]]
        if not p.random_layout and (not p.single_location) and (not always_include_props):
            # If there are no positions (abstract network), and <property>
            # is added to <population>, jLems doesn't like it... (it has difficulty
            # interpreting pop0[0]/v, etc.)
            # So better not to give properties...
            properties = {} # depends on [control=['if'], data=[]]
        if p.notes:
            handler.handle_population(p.id, p.component, size, cell_objects[p.component] if p.component in cell_objects else None, properties=properties, notes=p.notes) # depends on [control=['if'], data=[]]
        else:
            handler.handle_population(p.id, p.component, size, cell_objects[p.component] if p.component in cell_objects else None, properties=properties)
        pop_locations[p.id] = np.zeros((size, 3))
        for i in range(size):
            if p.random_layout:
                region = nl_model.get_child(p.random_layout.region, 'regions')
                x = region.x + rng.random() * region.width
                y = region.y + rng.random() * region.height
                z = region.z + rng.random() * region.depth
                pop_locations[p.id][i] = (x, y, z)
                handler.handle_location(i, p.id, p.component, x, y, z) # depends on [control=['if'], data=[]]
            if p.single_location:
                loc = p.single_location.location
                x = loc.x
                y = loc.y
                z = loc.z
                pop_locations[p.id][i] = (x, y, z)
                handler.handle_location(i, p.id, p.component, x, y, z) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']]
        if hasattr(handler, 'finalise_population'):
            handler.finalise_population(p.id) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['p']]
    if include_connections:
        for p in nl_model.projections:
            type = p.type if p.type else 'projection'
            handler.handle_projection(p.id, p.presynaptic, p.postsynaptic, p.synapse, synapse_obj=synapse_objects[p.synapse] if p.synapse in synapse_objects else None, pre_synapse_obj=synapse_objects[p.pre_synapse] if p.pre_synapse in synapse_objects else None, type=type)
            delay = p.delay if p.delay else 0
            weight = p.weight if p.weight else 1
            conn_count = 0
            if p.random_connectivity:
                for pre_i in range(len(pop_locations[p.presynaptic])):
                    for post_i in range(len(pop_locations[p.postsynaptic])):
                        flip = rng.random()
                        #print("Is cell %i conn to %i, prob %s - %s"%(pre_i, post_i, flip, p.random_connectivity.probability))
                        if flip < p.random_connectivity.probability:
                            weight = evaluate(weight, nl_model.parameters)
                            delay = evaluate(delay, nl_model.parameters)
                            #print_v("Adding connection %i with weight: %s, delay: %s"%(conn_count, weight, delay))
                            handler.handle_connection(p.id, conn_count, p.presynaptic, p.postsynaptic, p.synapse, pre_i, post_i, preSegId=0, preFract=0.5, postSegId=0, postFract=0.5, delay=delay, weight=weight)
                            conn_count += 1 # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['post_i']] # depends on [control=['for'], data=['pre_i']] # depends on [control=['if'], data=[]]
            if p.convergent_connectivity:
                for post_i in range(len(pop_locations[p.postsynaptic])):
                    for count in range(int(p.convergent_connectivity.num_per_post)):
                        found = False
                        while not found:
                            pre_i = int(rng.random() * len(pop_locations[p.presynaptic]))
                            if p.presynaptic == p.postsynaptic and pre_i == post_i:
                                found = False # depends on [control=['if'], data=[]]
                            else:
                                found = True # depends on [control=['while'], data=[]]
                        weight = evaluate(weight, nl_model.parameters)
                        delay = evaluate(delay, nl_model.parameters)
                        print_v('Adding connection %i (%i->%i; %i to %s of post) with weight: %s, delay: %s' % (conn_count, pre_i, post_i, count, p.convergent_connectivity.num_per_post, weight, delay))
                        handler.handle_connection(p.id, conn_count, p.presynaptic, p.postsynaptic, p.synapse, pre_i, post_i, preSegId=0, preFract=0.5, postSegId=0, postFract=0.5, delay=delay, weight=weight)
                        conn_count += 1 # depends on [control=['for'], data=['count']] # depends on [control=['for'], data=['post_i']] # depends on [control=['if'], data=[]]
            elif p.one_to_one_connector:
                for i in range(min(len(pop_locations[p.presynaptic]), len(pop_locations[p.postsynaptic]))):
                    weight = evaluate(weight, nl_model.parameters)
                    delay = evaluate(delay, nl_model.parameters)
                    #print_v("Adding connection %i with weight: %s, delay: %s"%(conn_count, weight, delay))
                    handler.handle_connection(p.id, conn_count, p.presynaptic, p.postsynaptic, p.synapse, i, i, preSegId=0, preFract=0.5, postSegId=0, postFract=0.5, delay=delay, weight=weight)
                    conn_count += 1 # depends on [control=['for'], data=['i']] # depends on [control=['if'], data=[]]
            handler.finalise_projection(p.id, p.presynaptic, p.postsynaptic, p.synapse) # depends on [control=['for'], data=['p']] # depends on [control=['if'], data=[]]
    if include_inputs:
        for input in nl_model.inputs:
            handler.handle_input_list(input.id, input.population, input.input_source, size=0, input_comp_obj=None)
            input_count = 0
            for i in range(len(pop_locations[input.population])):
                flip = rng.random()
                weight = input.weight if input.weight else 1
                if flip * 100.0 < input.percentage:
                    number_per_cell = evaluate(input.number_per_cell, nl_model.parameters) if input.number_per_cell else 1
                    for j in range(number_per_cell):
                        handler.handle_single_input(input.id, input_count, i, weight=evaluate(weight, nl_model.parameters))
                        input_count += 1 # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']]
            handler.finalise_input_source(input.id) # depends on [control=['for'], data=['input']] # depends on [control=['if'], data=[]]
    if hasattr(handler, 'finalise_document'):
        handler.finalise_document() # depends on [control=['if'], data=[]]
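A sketch of how a generator like this is typically driven. The import paths, handler class, and file name below are assumptions based on the neuromllite package layout, not verified against it:

    from neuromllite import Network, Cell, Population, RandomLayout, RectangularRegion
    from neuromllite.NetworkGenerator import generate_network
    from neuromllite.DefaultNetworkHandler import DefaultNetworkHandler

    net = Network(id='demo_net')
    net.cells.append(Cell(id='hh_cell', neuroml2_source_file='hh_cell.cell.nml'))  # hypothetical file
    r1 = RectangularRegion(id='r1', x=0, y=0, z=0, width=100, height=100, depth=100)
    net.regions.append(r1)
    net.populations.append(Population(id='pop0', size=10, component='hh_cell',
                                      random_layout=RandomLayout(region=r1.id)))

    # The handler receives callbacks (handle_population, handle_location, ...)
    # as the network is generated.
    generate_network(net, DefaultNetworkHandler(), seed=1234)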
def _check_pretrained_file_names(cls, pretrained_file_name):
    """Checks if a pre-trained token embedding file name is valid.

    Parameters
    ----------
    pretrained_file_name : str
        The pre-trained token embedding file.
    """
    embedding_name = cls.__name__.lower()
    if pretrained_file_name not in cls.pretrained_file_name_sha1:
        raise KeyError('Cannot find pretrained file %s for token embedding %s. Valid '
                       'pretrained files for embedding %s: %s' %
                       (pretrained_file_name, embedding_name, embedding_name,
                        ', '.join(cls.pretrained_file_name_sha1.keys())))
def function[_check_pretrained_file_names, parameter[cls, pretrained_file_name]]: constant[Checks if a pre-trained token embedding file name is valid. Parameters ---------- pretrained_file_name : str The pre-trained token embedding file. ] variable[embedding_name] assign[=] call[name[cls].__name__.lower, parameter[]] if compare[name[pretrained_file_name] <ast.NotIn object at 0x7da2590d7190> name[cls].pretrained_file_name_sha1] begin[:] <ast.Raise object at 0x7da1b2028130>
keyword[def] identifier[_check_pretrained_file_names] ( identifier[cls] , identifier[pretrained_file_name] ): literal[string] identifier[embedding_name] = identifier[cls] . identifier[__name__] . identifier[lower] () keyword[if] identifier[pretrained_file_name] keyword[not] keyword[in] identifier[cls] . identifier[pretrained_file_name_sha1] : keyword[raise] identifier[KeyError] ( literal[string] literal[string] % ( identifier[pretrained_file_name] , identifier[embedding_name] , identifier[embedding_name] , literal[string] . identifier[join] ( identifier[cls] . identifier[pretrained_file_name_sha1] . identifier[keys] ())))
def _check_pretrained_file_names(cls, pretrained_file_name):
    """Checks if a pre-trained token embedding file name is valid.

    Parameters
    ----------
    pretrained_file_name : str
        The pre-trained token embedding file.
    """
    embedding_name = cls.__name__.lower()
    if pretrained_file_name not in cls.pretrained_file_name_sha1:
        raise KeyError('Cannot find pretrained file %s for token embedding %s. Valid pretrained files for embedding %s: %s' % (pretrained_file_name, embedding_name, embedding_name, ', '.join(cls.pretrained_file_name_sha1.keys()))) # depends on [control=['if'], data=['pretrained_file_name']]
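The registry-lookup validation above is easy to exercise on its own. A minimal sketch with an invented embedding class and faked checksums:

    class GloVe:
        # Maps valid pretrained file names to SHA-1 checksums (values faked here).
        pretrained_file_name_sha1 = {'glove.6B.50d.txt': 'abc123',
                                     'glove.6B.100d.txt': 'def456'}

        @classmethod
        def check(cls, name):
            if name not in cls.pretrained_file_name_sha1:
                raise KeyError('Cannot find pretrained file %s for token embedding %s. '
                               'Valid pretrained files for embedding %s: %s'
                               % (name, cls.__name__.lower(), cls.__name__.lower(),
                                  ', '.join(cls.pretrained_file_name_sha1)))

    GloVe.check('glove.6B.50d.txt')      # passes silently
    # GloVe.check('glove.42B.300d.txt')  # would raise KeyError listing the valid names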
def deleteAllActivationKeys(server):
    '''
    Delete all activation keys from Spacewalk

    CLI Example:

    .. code-block:: bash

        salt-run spacewalk.deleteAllActivationKeys spacewalk01.domain.com
    '''
    try:
        client, key = _get_session(server)
    except Exception as exc:
        err_msg = 'Exception raised when connecting to spacewalk server ({0}): {1}'.format(server, exc)
        log.error(err_msg)
        return {'Error': err_msg}

    activation_keys = client.activationkey.listActivationKeys(key)
    deleted_keys = []
    failed_keys = []
    for aKey in activation_keys:
        if client.activationkey.delete(key, aKey['key']) == 1:
            deleted_keys.append(aKey['key'])
        else:
            failed_keys.append(aKey['key'])

    ret = {'deleted': deleted_keys}
    if failed_keys:
        ret['failed'] = failed_keys
    return ret
def function[deleteAllActivationKeys, parameter[server]]: constant[ Delete all activation keys from Spacewalk CLI Example: .. code-block:: bash salt-run spacewalk.deleteAllActivationKeys spacewalk01.domain.com ] <ast.Try object at 0x7da1b2003f10> variable[activation_keys] assign[=] call[name[client].activationkey.listActivationKeys, parameter[name[key]]] variable[deleted_keys] assign[=] list[[]] variable[failed_keys] assign[=] list[[]] for taget[name[aKey]] in starred[name[activation_keys]] begin[:] if compare[call[name[client].activationkey.delete, parameter[name[key], call[name[aKey]][constant[key]]]] equal[==] constant[1]] begin[:] call[name[deleted_keys].append, parameter[call[name[aKey]][constant[key]]]] variable[ret] assign[=] dictionary[[<ast.Constant object at 0x7da1b2003010>], [<ast.Name object at 0x7da1b2002ef0>]] if name[failed_keys] begin[:] call[name[ret]][constant[failed]] assign[=] name[failed_keys] return[name[ret]]
keyword[def] identifier[deleteAllActivationKeys] ( identifier[server] ): literal[string] keyword[try] : identifier[client] , identifier[key] = identifier[_get_session] ( identifier[server] ) keyword[except] identifier[Exception] keyword[as] identifier[exc] : identifier[err_msg] = literal[string] . identifier[format] ( identifier[server] , identifier[exc] ) identifier[log] . identifier[error] ( identifier[err_msg] ) keyword[return] { literal[string] : identifier[err_msg] } identifier[activation_keys] = identifier[client] . identifier[activationkey] . identifier[listActivationKeys] ( identifier[key] ) identifier[deleted_keys] =[] identifier[failed_keys] =[] keyword[for] identifier[aKey] keyword[in] identifier[activation_keys] : keyword[if] identifier[client] . identifier[activationkey] . identifier[delete] ( identifier[key] , identifier[aKey] [ literal[string] ])== literal[int] : identifier[deleted_keys] . identifier[append] ( identifier[aKey] [ literal[string] ]) keyword[else] : identifier[failed_keys] . identifier[append] ( identifier[aKey] [ literal[string] ]) identifier[ret] ={ literal[string] : identifier[deleted_keys] } keyword[if] identifier[failed_keys] : identifier[ret] [ literal[string] ]= identifier[failed_keys] keyword[return] identifier[ret]
def deleteAllActivationKeys(server):
    """
    Delete all activation keys from Spacewalk

    CLI Example:

    .. code-block:: bash

        salt-run spacewalk.deleteAllActivationKeys spacewalk01.domain.com
    """
    try:
        (client, key) = _get_session(server) # depends on [control=['try'], data=[]]
    except Exception as exc:
        err_msg = 'Exception raised when connecting to spacewalk server ({0}): {1}'.format(server, exc)
        log.error(err_msg)
        return {'Error': err_msg} # depends on [control=['except'], data=['exc']]
    activation_keys = client.activationkey.listActivationKeys(key)
    deleted_keys = []
    failed_keys = []
    for aKey in activation_keys:
        if client.activationkey.delete(key, aKey['key']) == 1:
            deleted_keys.append(aKey['key']) # depends on [control=['if'], data=[]]
        else:
            failed_keys.append(aKey['key']) # depends on [control=['for'], data=['aKey']]
    ret = {'deleted': deleted_keys}
    if failed_keys:
        ret['failed'] = failed_keys # depends on [control=['if'], data=[]]
    return ret
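The per-key success/failure bookkeeping in deleteAllActivationKeys can be exercised without a Spacewalk server. A sketch with a fake client where one delete fails (everything here is a stand-in):

    class FakeActivationKeyAPI:
        def listActivationKeys(self, session):
            return [{'key': '1-abc'}, {'key': '1-def'}, {'key': '1-ghi'}]

        def delete(self, session, akey):
            # Pretend deleting '1-def' fails.
            return 0 if akey == '1-def' else 1

    api = FakeActivationKeyAPI()
    deleted_keys, failed_keys = [], []
    for aKey in api.listActivationKeys('session'):
        if api.delete('session', aKey['key']) == 1:
            deleted_keys.append(aKey['key'])
        else:
            failed_keys.append(aKey['key'])

    ret = {'deleted': deleted_keys}
    if failed_keys:
        ret['failed'] = failed_keys
    print(ret)  # {'deleted': ['1-abc', '1-ghi'], 'failed': ['1-def']}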
def _MoveFileToLibrary(self, oldPath, newPath):
    """
    Move file from old file path to new file path. This follows certain conditions:

    - If file already exists at destination do rename inplace.
    - If file destination is on same file system and doesn't exist rename and move.
    - If source and destination are on different file systems do rename in-place,
      and if forceCopy is true copy to dest and move orig to archive directory.

    Parameters
    ----------
    oldPath : string
        Old file path.

    newPath : string
        New file path.

    Returns
    ----------
    boolean
        If old and new file paths are the same or if the new file path already
        exists this returns False. If file rename is skipped for any reason this
        returns None otherwise if rename completes okay it returns True.
    """
    if oldPath == newPath:
        return False

    goodlogging.Log.Info("RENAMER", "PROCESSING FILE: {0}".format(oldPath))

    if os.path.exists(newPath):
        goodlogging.Log.Info("RENAMER", "File skipped - file aleady exists in TV library at {0}".format(newPath))
        return False

    newDir = os.path.dirname(newPath)
    os.makedirs(newDir, exist_ok=True)

    try:
        os.rename(oldPath, newPath)
    except OSError as ex:
        if ex.errno is errno.EXDEV:
            goodlogging.Log.Info("RENAMER", "Simple rename failed - source and destination exist on different file systems")
            goodlogging.Log.Info("RENAMER", "Renaming file in-place")
            newFileName = os.path.basename(newPath)
            origFileDir = os.path.dirname(oldPath)
            renameFilePath = os.path.join(origFileDir, newFileName)

            if oldPath != renameFilePath:
                renameFilePath = util.CheckPathExists(renameFilePath)
                goodlogging.Log.Info("RENAMER", "Renaming from {0} to {1}".format(oldPath, renameFilePath))
            else:
                goodlogging.Log.Info("RENAMER", "File already has the correct name ({0})".format(newFileName))

            try:
                os.rename(oldPath, renameFilePath)
            except Exception as ex2:
                goodlogging.Log.Info("RENAMER", "File rename skipped - Exception ({0}): {1}".format(ex2.args[0], ex2.args[1]))
            else:
                if self._forceCopy is True:
                    goodlogging.Log.Info("RENAMER", "Copying file to new file system {0} to {1}".format(renameFilePath, newPath))

                    try:
                        shutil.copy2(renameFilePath, newPath)
                    except shutil.Error as ex3:
                        err = ex3.args[0]
                        goodlogging.Log.Info("RENAMER", "File copy failed - Shutil Error: {0}".format(err))
                    else:
                        util.ArchiveProcessedFile(renameFilePath, self._archiveDir)
                        return True
                else:
                    goodlogging.Log.Info("RENAMER", "File copy skipped - copying between file systems is disabled (enabling this functionality is slow)")
        else:
            goodlogging.Log.Info("RENAMER", "File rename skipped - Exception ({0}): {1}".format(ex.args[0], ex.args[1]))
    except Exception as ex:
        goodlogging.Log.Info("RENAMER", "File rename skipped - Exception ({0}): {1}".format(ex.args[0], ex.args[1]))
    else:
        goodlogging.Log.Info("RENAMER", "RENAME COMPLETE: {0}".format(newPath))
        return True
def function[_MoveFileToLibrary, parameter[self, oldPath, newPath]]: constant[ Move file from old file path to new file path. This follows certain conditions: - If file already exists at destination do rename inplace. - If file destination is on same file system and doesn't exist rename and move. - If source and destination are on different file systems do rename in-place, and if forceCopy is true copy to dest and move orig to archive directory. Parameters ---------- oldPath : string Old file path. newPath : string New file path. Returns ---------- boolean If old and new file paths are the same or if the new file path already exists this returns False. If file rename is skipped for any reason this returns None otherwise if rename completes okay it returns True. ] if compare[name[oldPath] equal[==] name[newPath]] begin[:] return[constant[False]] call[name[goodlogging].Log.Info, parameter[constant[RENAMER], call[constant[PROCESSING FILE: {0}].format, parameter[name[oldPath]]]]] if call[name[os].path.exists, parameter[name[newPath]]] begin[:] call[name[goodlogging].Log.Info, parameter[constant[RENAMER], call[constant[File skipped - file aleady exists in TV library at {0}].format, parameter[name[newPath]]]]] return[constant[False]] variable[newDir] assign[=] call[name[os].path.dirname, parameter[name[newPath]]] call[name[os].makedirs, parameter[name[newDir]]] <ast.Try object at 0x7da20c6aae90>
keyword[def] identifier[_MoveFileToLibrary] ( identifier[self] , identifier[oldPath] , identifier[newPath] ): literal[string] keyword[if] identifier[oldPath] == identifier[newPath] : keyword[return] keyword[False] identifier[goodlogging] . identifier[Log] . identifier[Info] ( literal[string] , literal[string] . identifier[format] ( identifier[oldPath] )) keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[newPath] ): identifier[goodlogging] . identifier[Log] . identifier[Info] ( literal[string] , literal[string] . identifier[format] ( identifier[newPath] )) keyword[return] keyword[False] identifier[newDir] = identifier[os] . identifier[path] . identifier[dirname] ( identifier[newPath] ) identifier[os] . identifier[makedirs] ( identifier[newDir] , identifier[exist_ok] = keyword[True] ) keyword[try] : identifier[os] . identifier[rename] ( identifier[oldPath] , identifier[newPath] ) keyword[except] identifier[OSError] keyword[as] identifier[ex] : keyword[if] identifier[ex] . identifier[errno] keyword[is] identifier[errno] . identifier[EXDEV] : identifier[goodlogging] . identifier[Log] . identifier[Info] ( literal[string] , literal[string] ) identifier[goodlogging] . identifier[Log] . identifier[Info] ( literal[string] , literal[string] ) identifier[newFileName] = identifier[os] . identifier[path] . identifier[basename] ( identifier[newPath] ) identifier[origFileDir] = identifier[os] . identifier[path] . identifier[dirname] ( identifier[oldPath] ) identifier[renameFilePath] = identifier[os] . identifier[path] . identifier[join] ( identifier[origFileDir] , identifier[newFileName] ) keyword[if] identifier[oldPath] != identifier[renameFilePath] : identifier[renameFilePath] = identifier[util] . identifier[CheckPathExists] ( identifier[renameFilePath] ) identifier[goodlogging] . identifier[Log] . identifier[Info] ( literal[string] , literal[string] . identifier[format] ( identifier[oldPath] , identifier[renameFilePath] )) keyword[else] : identifier[goodlogging] . identifier[Log] . identifier[Info] ( literal[string] , literal[string] . identifier[format] ( identifier[newFileName] )) keyword[try] : identifier[os] . identifier[rename] ( identifier[oldPath] , identifier[renameFilePath] ) keyword[except] identifier[Exception] keyword[as] identifier[ex2] : identifier[goodlogging] . identifier[Log] . identifier[Info] ( literal[string] , literal[string] . identifier[format] ( identifier[ex2] . identifier[args] [ literal[int] ], identifier[ex2] . identifier[args] [ literal[int] ])) keyword[else] : keyword[if] identifier[self] . identifier[_forceCopy] keyword[is] keyword[True] : identifier[goodlogging] . identifier[Log] . identifier[Info] ( literal[string] , literal[string] . identifier[format] ( identifier[renameFilePath] , identifier[newPath] )) keyword[try] : identifier[shutil] . identifier[copy2] ( identifier[renameFilePath] , identifier[newPath] ) keyword[except] identifier[shutil] . identifier[Error] keyword[as] identifier[ex3] : identifier[err] = identifier[ex3] . identifier[args] [ literal[int] ] identifier[goodlogging] . identifier[Log] . identifier[Info] ( literal[string] , literal[string] . identifier[format] ( identifier[err] )) keyword[else] : identifier[util] . identifier[ArchiveProcessedFile] ( identifier[renameFilePath] , identifier[self] . identifier[_archiveDir] ) keyword[return] keyword[True] keyword[else] : identifier[goodlogging] . identifier[Log] . identifier[Info] ( literal[string] , literal[string] ) keyword[else] : identifier[goodlogging] . 
identifier[Log] . identifier[Info] ( literal[string] , literal[string] . identifier[format] ( identifier[ex] . identifier[args] [ literal[int] ], identifier[ex] . identifier[args] [ literal[int] ])) keyword[except] identifier[Exception] keyword[as] identifier[ex] : identifier[goodlogging] . identifier[Log] . identifier[Info] ( literal[string] , literal[string] . identifier[format] ( identifier[ex] . identifier[args] [ literal[int] ], identifier[ex] . identifier[args] [ literal[int] ])) keyword[else] : identifier[goodlogging] . identifier[Log] . identifier[Info] ( literal[string] , literal[string] . identifier[format] ( identifier[newPath] )) keyword[return] keyword[True]
def _MoveFileToLibrary(self, oldPath, newPath):
    """
    Move file from old file path to new file path. This follows certain conditions:

    - If file already exists at destination do rename inplace.
    - If file destination is on same file system and doesn't exist rename and move.
    - If source and destination are on different file systems do rename in-place,
      and if forceCopy is true copy to dest and move orig to archive directory.

    Parameters
    ----------
    oldPath : string
        Old file path.

    newPath : string
        New file path.

    Returns
    ----------
    boolean
        If old and new file paths are the same or if the new file path already
        exists this returns False. If file rename is skipped for any reason this
        returns None otherwise if rename completes okay it returns True.
    """
    if oldPath == newPath:
        return False # depends on [control=['if'], data=[]]
    goodlogging.Log.Info('RENAMER', 'PROCESSING FILE: {0}'.format(oldPath))
    if os.path.exists(newPath):
        goodlogging.Log.Info('RENAMER', 'File skipped - file aleady exists in TV library at {0}'.format(newPath))
        return False # depends on [control=['if'], data=[]]
    newDir = os.path.dirname(newPath)
    os.makedirs(newDir, exist_ok=True)
    try:
        os.rename(oldPath, newPath) # depends on [control=['try'], data=[]]
    except OSError as ex:
        if ex.errno is errno.EXDEV:
            goodlogging.Log.Info('RENAMER', 'Simple rename failed - source and destination exist on different file systems')
            goodlogging.Log.Info('RENAMER', 'Renaming file in-place')
            newFileName = os.path.basename(newPath)
            origFileDir = os.path.dirname(oldPath)
            renameFilePath = os.path.join(origFileDir, newFileName)
            if oldPath != renameFilePath:
                renameFilePath = util.CheckPathExists(renameFilePath)
                goodlogging.Log.Info('RENAMER', 'Renaming from {0} to {1}'.format(oldPath, renameFilePath)) # depends on [control=['if'], data=['oldPath', 'renameFilePath']]
            else:
                goodlogging.Log.Info('RENAMER', 'File already has the correct name ({0})'.format(newFileName))
            try:
                os.rename(oldPath, renameFilePath) # depends on [control=['try'], data=[]]
            except Exception as ex2:
                goodlogging.Log.Info('RENAMER', 'File rename skipped - Exception ({0}): {1}'.format(ex2.args[0], ex2.args[1])) # depends on [control=['except'], data=['ex2']]
            else:
                if self._forceCopy is True:
                    goodlogging.Log.Info('RENAMER', 'Copying file to new file system {0} to {1}'.format(renameFilePath, newPath))
                    try:
                        shutil.copy2(renameFilePath, newPath) # depends on [control=['try'], data=[]]
                    except shutil.Error as ex3:
                        err = ex3.args[0]
                        goodlogging.Log.Info('RENAMER', 'File copy failed - Shutil Error: {0}'.format(err)) # depends on [control=['except'], data=['ex3']]
                    else:
                        util.ArchiveProcessedFile(renameFilePath, self._archiveDir)
                        return True # depends on [control=['if'], data=[]]
                else:
                    goodlogging.Log.Info('RENAMER', 'File copy skipped - copying between file systems is disabled (enabling this functionality is slow)') # depends on [control=['if'], data=[]]
        else:
            goodlogging.Log.Info('RENAMER', 'File rename skipped - Exception ({0}): {1}'.format(ex.args[0], ex.args[1])) # depends on [control=['except'], data=['ex']]
    except Exception as ex:
        goodlogging.Log.Info('RENAMER', 'File rename skipped - Exception ({0}): {1}'.format(ex.args[0], ex.args[1])) # depends on [control=['except'], data=['ex']]
    else:
        goodlogging.Log.Info('RENAMER', 'RENAME COMPLETE: {0}'.format(newPath))
        return True
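The interesting branch in _MoveFileToLibrary is errno.EXDEV: os.rename cannot move a file across file systems, so the code renames in place and optionally copies. A minimal sketch of that fallback in isolation (the archive step is simplified to copy-then-remove):

    import errno
    import os
    import shutil

    def move_across_filesystems(src, dst):
        # os.rename only works within one file system; EXDEV signals a cross-device move.
        try:
            os.rename(src, dst)
        except OSError as ex:
            if ex.errno == errno.EXDEV:
                shutil.copy2(src, dst)  # copy2 preserves timestamps and permissions
                os.remove(src)
            else:
                raise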
def update(taxids, conn, force_download, silent):
    """Update local UniProt database"""
    if not silent:
        click.secho("WARNING: Update is very time consuming and can take several "
                    "hours depending which organisms you are importing!", fg="yellow")

        if not taxids:
            click.echo("Please note that you can restrict import to organisms by "
                       "NCBI taxonomy IDs")
            click.echo("Example (human, mouse, rat):\n")
            click.secho("\tpyuniprot update --taxids 9606,10090,10116\n\n", fg="green")

    if taxids:
        taxids = [int(taxid.strip()) for taxid in taxids.strip().split(',') if re.search(r'^ *\d+ *$', taxid)]

    database.update(taxids=taxids, connection=conn, force_download=force_download, silent=silent)
def function[update, parameter[taxids, conn, force_download, silent]]: constant[Update local UniProt database] if <ast.UnaryOp object at 0x7da18fe93b80> begin[:] call[name[click].secho, parameter[constant[WARNING: Update is very time consuming and can take several hours depending which organisms you are importing!]]] if <ast.UnaryOp object at 0x7da18fe90f40> begin[:] call[name[click].echo, parameter[constant[Please note that you can restrict import to organisms by NCBI taxonomy IDs]]] call[name[click].echo, parameter[constant[Example (human, mouse, rat): ]]] call[name[click].secho, parameter[constant[ pyuniprot update --taxids 9606,10090,10116 ]]] if name[taxids] begin[:] variable[taxids] assign[=] <ast.ListComp object at 0x7da18fe90760> call[name[database].update, parameter[]]
keyword[def] identifier[update] ( identifier[taxids] , identifier[conn] , identifier[force_download] , identifier[silent] ): literal[string] keyword[if] keyword[not] identifier[silent] : identifier[click] . identifier[secho] ( literal[string] literal[string] , identifier[fg] = literal[string] ) keyword[if] keyword[not] identifier[taxids] : identifier[click] . identifier[echo] ( literal[string] literal[string] ) identifier[click] . identifier[echo] ( literal[string] ) identifier[click] . identifier[secho] ( literal[string] , identifier[fg] = literal[string] ) keyword[if] identifier[taxids] : identifier[taxids] =[ identifier[int] ( identifier[taxid] . identifier[strip] ()) keyword[for] identifier[taxid] keyword[in] identifier[taxids] . identifier[strip] (). identifier[split] ( literal[string] ) keyword[if] identifier[re] . identifier[search] ( literal[string] , identifier[taxid] )] identifier[database] . identifier[update] ( identifier[taxids] = identifier[taxids] , identifier[connection] = identifier[conn] , identifier[force_download] = identifier[force_download] , identifier[silent] = identifier[silent] )
def update(taxids, conn, force_download, silent): """Update local UniProt database""" if not silent: click.secho('WARNING: Update is very time consuming and can take several hours depending which organisms you are importing!', fg='yellow') if not taxids: click.echo('Please note that you can restrict import to organisms by NCBI taxonomy IDs') click.echo('Example (human, mouse, rat):\n') click.secho('\tpyuniprot update --taxids 9606,10090,10116\n\n', fg='green') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] if taxids: taxids = [int(taxid.strip()) for taxid in taxids.strip().split(',') if re.search('^ *\\d+ *$', taxid)] # depends on [control=['if'], data=[]] database.update(taxids=taxids, connection=conn, force_download=force_download, silent=silent)
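The taxid filtering above can be exercised on its own; a small sketch with a made-up input string:

import re

raw = ' 9606, 10090 , abc, 10116 '
taxids = [int(t.strip()) for t in raw.strip().split(',')
          if re.search(r'^ *\d+ *$', t)]
print(taxids)  # [9606, 10090, 10116]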
def markdown(random=random, length=10, *args, **kwargs): """ Produces a bunch of markdown text. >>> mock_random.seed(0) >>> markdown(random=mock_random, length=2) 'Nobody will **head** _to_ Mystery Studies Department **to** _buy_ a mighty poop.\\nNobody will **head** _to_ Mystery Studies Department **to** _buy_ a mighty poop.' """ def title_sentence(): return "\n" + "#"*random.randint(1,5) + " " + sentence(capitalize=True, random=random) def embellish(word): return random.choice([word, word, word, "**"+word+"**", "_"+word+"_"]) def randomly_markdownify(string): return " ".join([embellish(word) for word in string.split(" ")]) sentences = [] for i in range(0, length): sentences.append(random.choice([ title_sentence(), sentence(random=random), sentence(random=random), randomly_markdownify(sentence(random=random)) ])) return "\n".join(sentences)
def function[markdown, parameter[random, length]]: constant[ Produces a bunch of markdown text. >>> mock_random.seed(0) >>> markdown(random=mock_random, length=2) 'Nobody will **head** _to_ Mystery Studies Department **to** _buy_ a mighty poop.\nNobody will **head** _to_ Mystery Studies Department **to** _buy_ a mighty poop.' ] def function[title_sentence, parameter[]]: return[binary_operation[binary_operation[binary_operation[constant[ ] + binary_operation[constant[#] * call[name[random].randint, parameter[constant[1], constant[5]]]]] + constant[ ]] + call[name[sentence], parameter[]]]] def function[embellish, parameter[word]]: return[call[name[random].choice, parameter[list[[<ast.Name object at 0x7da1b0ca5630>, <ast.Name object at 0x7da1b0ca4ee0>, <ast.Name object at 0x7da1b0ca4670>, <ast.BinOp object at 0x7da1b0ca44f0>, <ast.BinOp object at 0x7da1b0ca4040>]]]]] def function[randomly_markdownify, parameter[string]]: return[call[constant[ ].join, parameter[<ast.ListComp object at 0x7da1b0ca5240>]]] variable[sentences] assign[=] list[[]] for taget[name[i]] in starred[call[name[range], parameter[constant[0], name[length]]]] begin[:] call[name[sentences].append, parameter[call[name[random].choice, parameter[list[[<ast.Call object at 0x7da204565ba0>, <ast.Call object at 0x7da2045646d0>, <ast.Call object at 0x7da204566e60>, <ast.Call object at 0x7da204566440>]]]]]] return[call[constant[ ].join, parameter[name[sentences]]]]
keyword[def] identifier[markdown] ( identifier[random] = identifier[random] , identifier[length] = literal[int] ,* identifier[args] ,** identifier[kwargs] ): literal[string] keyword[def] identifier[title_sentence] (): keyword[return] literal[string] + literal[string] * identifier[random] . identifier[randint] ( literal[int] , literal[int] )+ literal[string] + identifier[sentence] ( identifier[capitalize] = keyword[True] , identifier[random] = identifier[random] ) keyword[def] identifier[embellish] ( identifier[word] ): keyword[return] identifier[random] . identifier[choice] ([ identifier[word] , identifier[word] , identifier[word] , literal[string] + identifier[word] + literal[string] , literal[string] + identifier[word] + literal[string] ]) keyword[def] identifier[randomly_markdownify] ( identifier[string] ): keyword[return] literal[string] . identifier[join] ([ identifier[embellish] ( identifier[word] ) keyword[for] identifier[word] keyword[in] identifier[string] . identifier[split] ( literal[string] )]) identifier[sentences] =[] keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , identifier[length] ): identifier[sentences] . identifier[append] ( identifier[random] . identifier[choice] ([ identifier[title_sentence] (), identifier[sentence] ( identifier[random] = identifier[random] ), identifier[sentence] ( identifier[random] = identifier[random] ), identifier[randomly_markdownify] ( identifier[sentence] ( identifier[random] = identifier[random] )) ])) keyword[return] literal[string] . identifier[join] ( identifier[sentences] )
def markdown(random=random, length=10, *args, **kwargs): """ Produces a bunch of markdown text. >>> mock_random.seed(0) >>> markdown(random=mock_random, length=2) 'Nobody will **head** _to_ Mystery Studies Department **to** _buy_ a mighty poop.\\nNobody will **head** _to_ Mystery Studies Department **to** _buy_ a mighty poop.' """ def title_sentence(): return '\n' + '#' * random.randint(1, 5) + ' ' + sentence(capitalize=True, random=random) def embellish(word): return random.choice([word, word, word, '**' + word + '**', '_' + word + '_']) def randomly_markdownify(string): return ' '.join([embellish(word) for word in string.split(' ')]) sentences = [] for i in range(0, length): sentences.append(random.choice([title_sentence(), sentence(random=random), sentence(random=random), randomly_markdownify(sentence(random=random))])) # depends on [control=['for'], data=[]] return '\n'.join(sentences)
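markdown() relies on a sentence() helper defined elsewhere in its module, so it is not runnable in isolation; the word-wrapping trick itself can be sketched standalone:

import random

def embellish(word, rng=random):
    # Leave the word bare 3 times out of 5, otherwise add markdown emphasis.
    return rng.choice([word, word, word, '**' + word + '**', '_' + word + '_'])

rng = random.Random(0)
print(' '.join(embellish(w, rng) for w in 'a mighty poop'.split()))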
def _equally_weight_samples(samples, weights): """ Convert samples to be equally weighted. Samples are trimmed by discarding samples in accordance with a probability determined by the corresponding weight. This function has assumed you have normalised the weights properly. If in doubt, convert weights via: `weights /= weights.max()` Parameters ---------- samples: array-like Samples to trim. weights: array-like Weights to trim by. Returns ------- 1D numpy.array: Equally weighted sample array. `shape=(len(samples))` """ if len(weights) != len(samples): raise ValueError("len(weights) = %i != len(samples) = %i" % (len(weights), len(samples))) if numpy.logical_or(weights < 0, weights > 1).any(): raise ValueError("weights must have probability between 0 and 1") weights = numpy.array(weights) samples = numpy.array(samples) state = numpy.random.get_state() numpy.random.seed(1) n = len(weights) choices = numpy.random.rand(n) < weights new_samples = samples[choices] numpy.random.set_state(state) return new_samples.copy()
def function[_equally_weight_samples, parameter[samples, weights]]: constant[ Convert samples to be equally weighted. Samples are trimmed by discarding samples in accordance with a probability determined by the corresponding weight. This function has assumed you have normalised the weights properly. If in doubt, convert weights via: `weights /= weights.max()` Parameters ---------- samples: array-like Samples to trim. weights: array-like Weights to trim by. Returns ------- 1D numpy.array: Equally weighted sample array. `shape=(len(samples))` ] if compare[call[name[len], parameter[name[weights]]] not_equal[!=] call[name[len], parameter[name[samples]]]] begin[:] <ast.Raise object at 0x7da1b0a82470> if call[call[name[numpy].logical_or, parameter[compare[name[weights] less[<] constant[0]], compare[name[weights] greater[>] constant[1]]]].any, parameter[]] begin[:] <ast.Raise object at 0x7da1b0ad8490> variable[weights] assign[=] call[name[numpy].array, parameter[name[weights]]] variable[samples] assign[=] call[name[numpy].array, parameter[name[samples]]] variable[state] assign[=] call[name[numpy].random.get_state, parameter[]] call[name[numpy].random.seed, parameter[constant[1]]] variable[n] assign[=] call[name[len], parameter[name[weights]]] variable[choices] assign[=] compare[call[name[numpy].random.rand, parameter[name[n]]] less[<] name[weights]] variable[new_samples] assign[=] call[name[samples]][name[choices]] call[name[numpy].random.set_state, parameter[name[state]]] return[call[name[new_samples].copy, parameter[]]]
keyword[def] identifier[_equally_weight_samples] ( identifier[samples] , identifier[weights] ): literal[string] keyword[if] identifier[len] ( identifier[weights] )!= identifier[len] ( identifier[samples] ): keyword[raise] identifier[ValueError] ( literal[string] % ( identifier[len] ( identifier[weights] ), identifier[len] ( identifier[samples] ))) keyword[if] identifier[numpy] . identifier[logical_or] ( identifier[weights] < literal[int] , identifier[weights] > literal[int] ). identifier[any] (): keyword[raise] identifier[ValueError] ( literal[string] ) identifier[weights] = identifier[numpy] . identifier[array] ( identifier[weights] ) identifier[samples] = identifier[numpy] . identifier[array] ( identifier[samples] ) identifier[state] = identifier[numpy] . identifier[random] . identifier[get_state] () identifier[numpy] . identifier[random] . identifier[seed] ( literal[int] ) identifier[n] = identifier[len] ( identifier[weights] ) identifier[choices] = identifier[numpy] . identifier[random] . identifier[rand] ( identifier[n] )< identifier[weights] identifier[new_samples] = identifier[samples] [ identifier[choices] ] identifier[numpy] . identifier[random] . identifier[set_state] ( identifier[state] ) keyword[return] identifier[new_samples] . identifier[copy] ()
def _equally_weight_samples(samples, weights): """ Convert samples to be equally weighted. Samples are trimmed by discarding samples in accordance with a probability determined by the corresponding weight. This function has assumed you have normalised the weights properly. If in doubt, convert weights via: `weights /= weights.max()` Parameters ---------- samples: array-like Samples to trim. weights: array-like Weights to trim by. Returns ------- 1D numpy.array: Equally weighted sample array. `shape=(len(samples))` """ if len(weights) != len(samples): raise ValueError('len(weights) = %i != len(samples) = %i' % (len(weights), len(samples))) # depends on [control=['if'], data=[]] if numpy.logical_or(weights < 0, weights > 1).any(): raise ValueError('weights must have probability between 0 and 1') # depends on [control=['if'], data=[]] weights = numpy.array(weights) samples = numpy.array(samples) state = numpy.random.get_state() numpy.random.seed(1) n = len(weights) choices = numpy.random.rand(n) < weights new_samples = samples[choices] numpy.random.set_state(state) return new_samples.copy()
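A quick demonstration of the probabilistic trimming, assuming numpy is installed; weights are normalised via weights /= weights.max() as the docstring suggests:

import numpy

numpy.random.seed(1)
samples = numpy.arange(10)
weights = numpy.linspace(0.1, 1.0, 10)
weights = weights / weights.max()                 # probabilities in [0, 1]
keep = numpy.random.rand(len(weights)) < weights  # one coin flip per sample
print(samples[keep])                              # heavier samples survive more often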
def init_app(self, app): """ Register this extension with the flask app :param app: A flask application """ # Save this so we can use it later in the extension if not hasattr(app, 'extensions'): # pragma: no cover app.extensions = {} app.extensions['flask-jwt-simple'] = self # Set all the default configurations for this extension self._set_default_configuration_options(app) self._set_error_handler_callbacks(app) # Set propagate exceptions, so all of our error handlers properly # work in production app.config['PROPAGATE_EXCEPTIONS'] = True
def function[init_app, parameter[self, app]]: constant[ Register this extension with the flask app :param app: A flask application ] if <ast.UnaryOp object at 0x7da1b1037d00> begin[:] name[app].extensions assign[=] dictionary[[], []] call[name[app].extensions][constant[flask-jwt-simple]] assign[=] name[self] call[name[self]._set_default_configuration_options, parameter[name[app]]] call[name[self]._set_error_handler_callbacks, parameter[name[app]]] call[name[app].config][constant[PROPAGATE_EXCEPTIONS]] assign[=] constant[True]
keyword[def] identifier[init_app] ( identifier[self] , identifier[app] ): literal[string] keyword[if] keyword[not] identifier[hasattr] ( identifier[app] , literal[string] ): identifier[app] . identifier[extensions] ={} identifier[app] . identifier[extensions] [ literal[string] ]= identifier[self] identifier[self] . identifier[_set_default_configuration_options] ( identifier[app] ) identifier[self] . identifier[_set_error_handler_callbacks] ( identifier[app] ) identifier[app] . identifier[config] [ literal[string] ]= keyword[True]
def init_app(self, app): """ Register this extension with the flask app :param app: A flask application """ # Save this so we can use it later in the extension if not hasattr(app, 'extensions'): # pragma: no cover app.extensions = {} # depends on [control=['if'], data=[]] app.extensions['flask-jwt-simple'] = self # Set all the default configurations for this extension self._set_default_configuration_options(app) self._set_error_handler_callbacks(app) # Set propagate exceptions, so all of our error handlers properly # work in production app.config['PROPAGATE_EXCEPTIONS'] = True
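The init_app pattern can be demonstrated without Flask itself; a dependency-free sketch where FakeApp stands in for flask.Flask (which provides config and extensions for real):

class FakeApp:
    # Minimal stand-in for flask.Flask: just a config dict.
    def __init__(self):
        self.config = {}

class MyExtension:
    def init_app(self, app):
        if not hasattr(app, 'extensions'):
            app.extensions = {}
        app.extensions['my-extension'] = self
        app.config['PROPAGATE_EXCEPTIONS'] = True

app = FakeApp()
MyExtension().init_app(app)
print(app.extensions, app.config)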
def export_ply(filename, cutout, level=0):
    """
    Converts a dense annotation to a .PLY, using Marching Cubes (PyMCubes).

    Arguments:
        filename (str): The filename to write out to
        cutout (numpy.ndarray): The dense annotation
        level (int): The level at which to run mcubes

    Returns:
        boolean success
    """
    if ".ply" not in filename:
        filename = filename + ".ply"

    vs, fs = mcubes.marching_cubes(cutout, level)

    with open(filename, 'w') as fh:
        lines = [
            "ply",
            "format ascii 1.0",
            "comment generated by ndio",
            "element vertex " + str(len(vs)),
            "property float32 x",
            "property float32 y",
            "property float32 z",
            "element face " + str(len(fs)),
            "property list uint8 int32 vertex_index",
            "end_header"
        ]
        fh.write("\n".join(lines) + "\n")

        for v in vs:
            fh.write("{} {} {}\n".format(v[0], v[1], v[2]))

        for f in fs:
            fh.write("3 {} {} {}\n".format(f[0], f[1], f[2]))
def function[export_ply, parameter[filename, cutout, level]]: constant[ Converts a dense annotation to a .PLY, using Marching Cubes (PyMCubes). Arguments: filename (str): The filename to write out to cutout (numpy.ndarray): The dense annotation level (int): The level at which to run mcubes Returns: boolean success ] if compare[constant[.ply] <ast.NotIn object at 0x7da2590d7190> name[filename]] begin[:] variable[filename] assign[=] binary_operation[name[filename] + constant[.ply]] <ast.Tuple object at 0x7da1b02b86a0> assign[=] call[name[mcubes].marching_cubes, parameter[name[cutout], name[level]]] with call[name[open], parameter[name[filename], constant[w]]] begin[:] variable[lines] assign[=] list[[<ast.Constant object at 0x7da1b02b8a30>, <ast.Constant object at 0x7da1b0241000>, <ast.BinOp object at 0x7da1b0243430>, <ast.Constant object at 0x7da1b02420b0>, <ast.Constant object at 0x7da1b0243a30>, <ast.Constant object at 0x7da1b0242710>, <ast.BinOp object at 0x7da1b0241090>, <ast.Constant object at 0x7da1b0243760>, <ast.Constant object at 0x7da1b0243550>]] call[name[fh].writelines, parameter[name[lines]]] for taget[name[v]] in starred[name[vs]] begin[:] call[name[fh].write, parameter[call[constant[{} {} {}].format, parameter[call[name[v]][constant[0]], call[name[v]][constant[1]], call[name[v]][constant[2]]]]]] for taget[name[f]] in starred[name[fs]] begin[:] call[name[fh].write, parameter[call[constant[3 {} {} {}].format, parameter[call[name[f]][constant[0]], call[name[f]][constant[1]], call[name[f]][constant[2]]]]]]
keyword[def] identifier[export_ply] ( identifier[filename] , identifier[cutout] , identifier[level] = literal[int] ): literal[string] keyword[if] literal[string] keyword[not] keyword[in] identifier[filename] : identifier[filename] = identifier[filename] + literal[string] identifier[vs] , identifier[fs] = identifier[mcubes] . identifier[marching_cubes] ( identifier[cutout] , identifier[level] ) keyword[with] identifier[open] ( identifier[filename] , literal[string] ) keyword[as] identifier[fh] : identifier[lines] =[ literal[string] literal[string] , literal[string] , literal[string] + identifier[str] ( identifier[len] ( identifier[vs] )), literal[string] , literal[string] , literal[string] , literal[string] + identifier[str] ( identifier[len] ( identifier[fs] )), literal[string] , literal[string] ] identifier[fh] . identifier[writelines] ( identifier[lines] ) keyword[for] identifier[v] keyword[in] identifier[vs] : identifier[fh] . identifier[write] ( literal[string] . identifier[format] ( identifier[v] [ literal[int] ], identifier[v] [ literal[int] ], identifier[v] [ literal[int] ])) keyword[for] identifier[f] keyword[in] identifier[fs] : identifier[fh] . identifier[write] ( literal[string] . identifier[format] ( identifier[f] [ literal[int] ], identifier[f] [ literal[int] ], identifier[f] [ literal[int] ]))
def export_ply(filename, cutout, level=0):
    """
    Converts a dense annotation to a .PLY, using Marching Cubes (PyMCubes).

    Arguments:
        filename (str): The filename to write out to
        cutout (numpy.ndarray): The dense annotation
        level (int): The level at which to run mcubes

    Returns:
        boolean success
    """
    if '.ply' not in filename:
        filename = filename + '.ply' # depends on [control=['if'], data=['filename']]
    (vs, fs) = mcubes.marching_cubes(cutout, level)
    with open(filename, 'w') as fh:
        lines = ['ply', 'format ascii 1.0', 'comment generated by ndio', 'element vertex ' + str(len(vs)), 'property float32 x', 'property float32 y', 'property float32 z', 'element face ' + str(len(fs)), 'property list uint8 int32 vertex_index', 'end_header']
        fh.write('\n'.join(lines) + '\n')
        for v in vs:
            fh.write('{} {} {}\n'.format(v[0], v[1], v[2])) # depends on [control=['for'], data=['v']]
        for f in fs:
            fh.write('3 {} {} {}\n'.format(f[0], f[1], f[2])) # depends on [control=['for'], data=['f']] # depends on [control=['with'], data=['fh']]
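The ASCII PLY header must carry each keyword on its own line; a small standalone sketch building it for made-up vertex/face counts:

n_vertices, n_faces = 8, 12
header = '\n'.join([
    'ply',
    'format ascii 1.0',
    'element vertex ' + str(n_vertices),
    'property float32 x',
    'property float32 y',
    'property float32 z',
    'element face ' + str(n_faces),
    'property list uint8 int32 vertex_index',
    'end_header',
])
print(header)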
def located_error( original_error: Union[Exception, GraphQLError], nodes: Sequence["Node"], path: Sequence[Union[str, int]], ) -> GraphQLError: """Located GraphQL Error Given an arbitrary Error, presumably thrown while attempting to execute a GraphQL operation, produce a new GraphQLError aware of the location in the document responsible for the original Error. """ if original_error: # Note: this uses a brand-check to support GraphQL errors originating from # other contexts. try: if isinstance(original_error.path, list): # type: ignore return original_error # type: ignore except AttributeError: pass try: message = original_error.message # type: ignore except AttributeError: message = str(original_error) try: source = original_error.source # type: ignore except AttributeError: source = None try: positions = original_error.positions # type: ignore except AttributeError: positions = None try: nodes = original_error.nodes or nodes # type: ignore except AttributeError: pass return GraphQLError(message, nodes, source, positions, path, original_error)
def function[located_error, parameter[original_error, nodes, path]]: constant[Located GraphQL Error Given an arbitrary Error, presumably thrown while attempting to execute a GraphQL operation, produce a new GraphQLError aware of the location in the document responsible for the original Error. ] if name[original_error] begin[:] <ast.Try object at 0x7da1b1da0c10> <ast.Try object at 0x7da1b1d0ec20> <ast.Try object at 0x7da1b1d0eda0> <ast.Try object at 0x7da1b1d0d450> <ast.Try object at 0x7da1b1d0c340> return[call[name[GraphQLError], parameter[name[message], name[nodes], name[source], name[positions], name[path], name[original_error]]]]
keyword[def] identifier[located_error] ( identifier[original_error] : identifier[Union] [ identifier[Exception] , identifier[GraphQLError] ], identifier[nodes] : identifier[Sequence] [ literal[string] ], identifier[path] : identifier[Sequence] [ identifier[Union] [ identifier[str] , identifier[int] ]], )-> identifier[GraphQLError] : literal[string] keyword[if] identifier[original_error] : keyword[try] : keyword[if] identifier[isinstance] ( identifier[original_error] . identifier[path] , identifier[list] ): keyword[return] identifier[original_error] keyword[except] identifier[AttributeError] : keyword[pass] keyword[try] : identifier[message] = identifier[original_error] . identifier[message] keyword[except] identifier[AttributeError] : identifier[message] = identifier[str] ( identifier[original_error] ) keyword[try] : identifier[source] = identifier[original_error] . identifier[source] keyword[except] identifier[AttributeError] : identifier[source] = keyword[None] keyword[try] : identifier[positions] = identifier[original_error] . identifier[positions] keyword[except] identifier[AttributeError] : identifier[positions] = keyword[None] keyword[try] : identifier[nodes] = identifier[original_error] . identifier[nodes] keyword[or] identifier[nodes] keyword[except] identifier[AttributeError] : keyword[pass] keyword[return] identifier[GraphQLError] ( identifier[message] , identifier[nodes] , identifier[source] , identifier[positions] , identifier[path] , identifier[original_error] )
def located_error(original_error: Union[Exception, GraphQLError], nodes: Sequence['Node'], path: Sequence[Union[str, int]]) -> GraphQLError: """Located GraphQL Error Given an arbitrary Error, presumably thrown while attempting to execute a GraphQL operation, produce a new GraphQLError aware of the location in the document responsible for the original Error. """ if original_error: # Note: this uses a brand-check to support GraphQL errors originating from # other contexts. try: if isinstance(original_error.path, list): # type: ignore return original_error # type: ignore # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]] except AttributeError: pass # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]] try: message = original_error.message # type: ignore # depends on [control=['try'], data=[]] except AttributeError: message = str(original_error) # depends on [control=['except'], data=[]] try: source = original_error.source # type: ignore # depends on [control=['try'], data=[]] except AttributeError: source = None # depends on [control=['except'], data=[]] try: positions = original_error.positions # type: ignore # depends on [control=['try'], data=[]] except AttributeError: positions = None # depends on [control=['except'], data=[]] try: nodes = original_error.nodes or nodes # type: ignore # depends on [control=['try'], data=[]] except AttributeError: pass # depends on [control=['except'], data=[]] return GraphQLError(message, nodes, source, positions, path, original_error)
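The attribute probing in located_error can equivalently be written with getattr defaults; a minimal sketch on a plain exception:

def describe(err):
    # Pull optional GraphQL-style attributes off an arbitrary error object.
    message = getattr(err, 'message', None) or str(err)
    source = getattr(err, 'source', None)
    positions = getattr(err, 'positions', None)
    return message, source, positions

print(describe(ValueError('boom')))  # ('boom', None, None)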
def som_train(som_pointer, data, epochs, autostop):
    """!
    @brief Trains self-organized feature map (SOM) using CCORE pyclustering library.

    @param[in] data (list): Input data - list of points where each point is represented by a list of features, for example coordinates.
    @param[in] epochs (uint): Number of epochs for training.
    @param[in] autostop (bool): Automatic termination of the learning process when no adaptation occurs.

    @return (uint) Number of learning iterations.

    """

    pointer_data = package_builder(data, c_double).create()

    ccore = ccore_library.get()
    ccore.som_train.restype = c_size_t
    return ccore.som_train(som_pointer, pointer_data, c_uint(epochs), autostop)
def function[som_train, parameter[som_pointer, data, epochs, autostop]]: constant[! @brief Trains self-organized feature map (SOM) using CCORE pyclustering library. @param[in] data (list): Input data - list of points where each point is represented by list of features, for example coordinates. @param[in] epochs (uint): Number of epochs for training. @param[in] autostop (bool): Automatic termination of learining process when adaptation is not occurred. @return (uint) Number of learining iterations. ] variable[pointer_data] assign[=] call[call[name[package_builder], parameter[name[data], name[c_double]]].create, parameter[]] variable[ccore] assign[=] call[name[ccore_library].get, parameter[]] name[ccore].som_train.restype assign[=] name[c_size_t] return[call[name[ccore].som_train, parameter[name[som_pointer], name[pointer_data], call[name[c_uint], parameter[name[epochs]]], name[autostop]]]]
keyword[def] identifier[som_train] ( identifier[som_pointer] , identifier[data] , identifier[epochs] , identifier[autostop] ): literal[string] identifier[pointer_data] = identifier[package_builder] ( identifier[data] , identifier[c_double] ). identifier[create] () identifier[ccore] = identifier[ccore_library] . identifier[get] () identifier[ccore] . identifier[som_train] . identifier[restype] = identifier[c_size_t] keyword[return] identifier[ccore] . identifier[som_train] ( identifier[som_pointer] , identifier[pointer_data] , identifier[c_uint] ( identifier[epochs] ), identifier[autostop] )
def som_train(som_pointer, data, epochs, autostop):
    """!
    @brief Trains self-organized feature map (SOM) using CCORE pyclustering library.

    @param[in] data (list): Input data - list of points where each point is represented by a list of features, for example coordinates.
    @param[in] epochs (uint): Number of epochs for training.
    @param[in] autostop (bool): Automatic termination of the learning process when no adaptation occurs.

    @return (uint) Number of learning iterations.

    """
    pointer_data = package_builder(data, c_double).create()
    ccore = ccore_library.get()
    ccore.som_train.restype = c_size_t
    return ccore.som_train(som_pointer, pointer_data, c_uint(epochs), autostop)
def get_energy_management_properties(self):
    """
    Return the energy management properties of the CPC.

    The returned energy management properties are a subset of the
    properties of the CPC resource, and are also available as normal
    properties of the CPC resource. In that sense, this method provides
    no new data. However, because only a subset of the properties is
    returned, this method is faster than retrieving the complete set of
    CPC properties (e.g. via
    :meth:`~zhmcclient.BaseResource.pull_full_properties`).

    This method performs the HMC operation "Get CPC Energy Management
    Data", and returns only the energy management properties for this CPC
    from the operation result. Note that in non-ensemble mode of a CPC,
    the HMC operation result will only contain data for the CPC alone.

    It requires that the feature "Automate/advanced management suite"
    (FC 0020) is installed and enabled; otherwise it returns empty values
    for most properties.

    Authorization requirements:

    * Object-access permission to this CPC.

    Returns:

      dict: A dictionary of properties of the CPC that are related to energy
      management. For details, see section "Energy management related
      additional properties" in the data model for the CPC resource in the
      :term:`HMC API` book.

    Raises:

      :exc:`~zhmcclient.HTTPError`: See the HTTP status and reason codes of
        operation "Get CPC Energy Management Data" in the :term:`HMC API`
        book.
      :exc:`~zhmcclient.ParseError`: Also raised by this method when the
        JSON response could be parsed but contains inconsistent data.
      :exc:`~zhmcclient.AuthError`
      :exc:`~zhmcclient.ConnectionError`
    """
    result = self.manager.session.get(self.uri + '/energy-management-data')
    em_list = result['objects']
    if len(em_list) != 1:
        uris = [em_obj['object-uri'] for em_obj in em_list]
        raise ParseError("Energy management data returned for no resource "
                         "or for more than one resource: %r" % uris)
    em_cpc_obj = em_list[0]
    if em_cpc_obj['object-uri'] != self.uri:
        raise ParseError("Energy management data returned for an "
                         "unexpected resource: %r" %
                         em_cpc_obj['object-uri'])
    if em_cpc_obj['error-occurred']:
        raise ParseError("Errors occurred when retrieving energy "
                         "management data for CPC. Operation result: %r" %
                         result)
    cpc_props = em_cpc_obj['properties']
    return cpc_props
def function[get_energy_management_properties, parameter[self]]: constant[ Return the energy management properties of the CPC. The returned energy management properties are a subset of the properties of the CPC resource, and are also available as normal properties of the CPC resource. In so far, there is no new data provided by this method. However, because only a subset of the properties is returned, this method is faster than retrieving the complete set of CPC properties (e.g. via :meth:`~zhmcclient.BaseResource.pull_full_properties`). This method performs the HMC operation "Get CPC Energy Management Data", and returns only the energy management properties for this CPC from the operation result. Note that in non-ensemble mode of a CPC, the HMC operation result will only contain data for the CPC alone. It requires that the feature "Automate/advanced management suite" (FC 0020) is installed and enabled, and returns empty values for most properties, otherwise. Authorization requirements: * Object-access permission to this CPC. Returns: dict: A dictionary of properties of the CPC that are related to energy management. For details, see section "Energy management related additional properties" in the data model for the CPC resource in the :term:`HMC API` book. Raises: :exc:`~zhmcclient.HTTPError`: See the HTTP status and reason codes of operation "Get CPC Energy Management Data" in the :term:`HMC API` book. :exc:`~zhmcclient.ParseError`: Also raised by this method when the JSON response could be parsed but contains inconsistent data. :exc:`~zhmcclient.AuthError` :exc:`~zhmcclient.ConnectionError` ] variable[result] assign[=] call[name[self].manager.session.get, parameter[binary_operation[name[self].uri + constant[/energy-management-data]]]] variable[em_list] assign[=] call[name[result]][constant[objects]] if compare[call[name[len], parameter[name[em_list]]] not_equal[!=] constant[1]] begin[:] variable[uris] assign[=] <ast.ListComp object at 0x7da20c991750> <ast.Raise object at 0x7da20c993700> variable[em_cpc_obj] assign[=] call[name[em_list]][constant[0]] if compare[call[name[em_cpc_obj]][constant[object-uri]] not_equal[!=] name[self].uri] begin[:] <ast.Raise object at 0x7da20c990310> if call[name[em_cpc_obj]][constant[error-occurred]] begin[:] <ast.Raise object at 0x7da2041d8fd0> variable[cpc_props] assign[=] call[name[em_cpc_obj]][constant[properties]] return[name[cpc_props]]
keyword[def] identifier[get_energy_management_properties] ( identifier[self] ): literal[string] identifier[result] = identifier[self] . identifier[manager] . identifier[session] . identifier[get] ( identifier[self] . identifier[uri] + literal[string] ) identifier[em_list] = identifier[result] [ literal[string] ] keyword[if] identifier[len] ( identifier[em_list] )!= literal[int] : identifier[uris] =[ identifier[em_obj] [ literal[string] ] keyword[for] identifier[em_obj] keyword[in] identifier[em_list] ] keyword[raise] identifier[ParseError] ( literal[string] literal[string] % identifier[uris] ) identifier[em_cpc_obj] = identifier[em_list] [ literal[int] ] keyword[if] identifier[em_cpc_obj] [ literal[string] ]!= identifier[self] . identifier[uri] : keyword[raise] identifier[ParseError] ( literal[string] literal[string] % identifier[em_cpc_obj] [ literal[string] ]) keyword[if] identifier[em_cpc_obj] [ literal[string] ]: keyword[raise] identifier[ParseError] ( literal[string] literal[string] % identifier[result] ) identifier[cpc_props] = identifier[em_cpc_obj] [ literal[string] ] keyword[return] identifier[cpc_props]
def get_energy_management_properties(self): """ Return the energy management properties of the CPC. The returned energy management properties are a subset of the properties of the CPC resource, and are also available as normal properties of the CPC resource. In so far, there is no new data provided by this method. However, because only a subset of the properties is returned, this method is faster than retrieving the complete set of CPC properties (e.g. via :meth:`~zhmcclient.BaseResource.pull_full_properties`). This method performs the HMC operation "Get CPC Energy Management Data", and returns only the energy management properties for this CPC from the operation result. Note that in non-ensemble mode of a CPC, the HMC operation result will only contain data for the CPC alone. It requires that the feature "Automate/advanced management suite" (FC 0020) is installed and enabled, and returns empty values for most properties, otherwise. Authorization requirements: * Object-access permission to this CPC. Returns: dict: A dictionary of properties of the CPC that are related to energy management. For details, see section "Energy management related additional properties" in the data model for the CPC resource in the :term:`HMC API` book. Raises: :exc:`~zhmcclient.HTTPError`: See the HTTP status and reason codes of operation "Get CPC Energy Management Data" in the :term:`HMC API` book. :exc:`~zhmcclient.ParseError`: Also raised by this method when the JSON response could be parsed but contains inconsistent data. :exc:`~zhmcclient.AuthError` :exc:`~zhmcclient.ConnectionError` """ result = self.manager.session.get(self.uri + '/energy-management-data') em_list = result['objects'] if len(em_list) != 1: uris = [em_obj['object-uri'] for em_obj in em_list] raise ParseError('Energy management data returned for no resource or for more than one resource: %r' % uris) # depends on [control=['if'], data=[]] em_cpc_obj = em_list[0] if em_cpc_obj['object-uri'] != self.uri: raise ParseError('Energy management data returned for an unexpected resource: %r' % em_cpc_obj['object-uri']) # depends on [control=['if'], data=[]] if em_cpc_obj['error-occurred']: raise ParseError('Errors occurred when retrieving energy management data for CPC. Operation result: %r' % result) # depends on [control=['if'], data=[]] cpc_props = em_cpc_obj['properties'] return cpc_props
def _get_connection(self, conn_or_int_id):
    """Get the data for a connection by either conn_id or internal_id

    Args:
        conn_or_int_id (int, string): The external integer connection id
            or an internal string connection id

    Returns:
        dict: The context data associated with that connection or None
            if it cannot be found.

    Raises:
        ArgumentError: When the key is not found in the list of active
            connections or is invalid.
    """

    key = conn_or_int_id
    if isinstance(key, str):
        table = self._int_connections
    elif isinstance(key, int):
        table = self._connections
    else:
        return None

    try:
        data = table[key]
    except KeyError:
        return None

    return data
def function[_get_connection, parameter[self, conn_or_int_id]]: constant[Get the data for a connection by either conn_id or internal_id Args: conn_or_int_id (int, string): The external integer connection id or and internal string connection id Returns: dict: The context data associated with that connection or None if it cannot be found. Raises: ArgumentError: When the key is not found in the list of active connections or is invalid. ] variable[key] assign[=] name[conn_or_int_id] if call[name[isinstance], parameter[name[key], name[str]]] begin[:] variable[table] assign[=] name[self]._int_connections <ast.Try object at 0x7da18f811ea0> return[name[data]]
keyword[def] identifier[_get_connection] ( identifier[self] , identifier[conn_or_int_id] ): literal[string] identifier[key] = identifier[conn_or_int_id] keyword[if] identifier[isinstance] ( identifier[key] , identifier[str] ): identifier[table] = identifier[self] . identifier[_int_connections] keyword[elif] identifier[isinstance] ( identifier[key] , identifier[int] ): identifier[table] = identifier[self] . identifier[_connections] keyword[else] : keyword[return] keyword[None] keyword[try] : identifier[data] = identifier[table] [ identifier[key] ] keyword[except] identifier[KeyError] : keyword[return] keyword[None] keyword[return] identifier[data]
def _get_connection(self, conn_or_int_id):
    """Get the data for a connection by either conn_id or internal_id

    Args:
        conn_or_int_id (int, string): The external integer connection id
            or an internal string connection id

    Returns:
        dict: The context data associated with that connection or None
            if it cannot be found.

    Raises:
        ArgumentError: When the key is not found in the list of active
            connections or is invalid.
    """
    key = conn_or_int_id
    if isinstance(key, str):
        table = self._int_connections # depends on [control=['if'], data=[]]
    elif isinstance(key, int):
        table = self._connections # depends on [control=['if'], data=[]]
    else:
        return None
    try:
        data = table[key] # depends on [control=['try'], data=[]]
    except KeyError:
        return None # depends on [control=['except'], data=[]]
    return data
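The str-vs-int key dispatch can be exercised with made-up connection tables:

_int_connections = {'conn-abc': {'state': 'open'}}
_connections = {7: {'state': 'closed'}}

def lookup(key):
    # Route string keys to the internal-id table, ints to the external one.
    if isinstance(key, str):
        table = _int_connections
    elif isinstance(key, int):
        table = _connections
    else:
        return None
    return table.get(key)

print(lookup('conn-abc'), lookup(7), lookup(3.5))  # {'state': 'open'} {'state': 'closed'} None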
def status(self):
    """
    Check if the daemon is currently running.

    Requires procfs, so it will only work on POSIX-compliant OSes.
    """
    # Get the pid from the pidfile
    try:
        pf = open(self.pidfile, 'r')
        pid = int(pf.read().strip())
        pf.close()
    except IOError:
        pid = None

    if not pid:
        return False

    try:
        return os.path.exists("/proc/{0}".format(pid))
    except OSError:
        return False
def function[status, parameter[self]]: constant[ Check if the daemon is currently running. Requires procfs, so it will only work on POSIX compliant OS'. ] <ast.Try object at 0x7da1b16d7880> if <ast.UnaryOp object at 0x7da1b16d7190> begin[:] return[constant[False]] <ast.Try object at 0x7da1b16d4280>
keyword[def] identifier[status] ( identifier[self] ): literal[string] keyword[try] : identifier[pf] = identifier[file] ( identifier[self] . identifier[pidfile] , literal[string] ) identifier[pid] = identifier[int] ( identifier[pf] . identifier[read] (). identifier[strip] ()) identifier[pf] . identifier[close] () keyword[except] identifier[IOError] : identifier[pid] = keyword[None] keyword[if] keyword[not] identifier[pid] : keyword[return] keyword[False] keyword[try] : keyword[return] identifier[os] . identifier[path] . identifier[exists] ( literal[string] . identifier[format] ( identifier[pid] )) keyword[except] identifier[OSError] : keyword[return] keyword[False]
def status(self):
    """
    Check if the daemon is currently running.

    Requires procfs, so it will only work on POSIX-compliant OSes.
    """
    # Get the pid from the pidfile
    try:
        pf = open(self.pidfile, 'r')
        pid = int(pf.read().strip())
        pf.close() # depends on [control=['try'], data=[]]
    except IOError:
        pid = None # depends on [control=['except'], data=[]]
    if not pid:
        return False # depends on [control=['if'], data=[]]
    try:
        return os.path.exists('/proc/{0}'.format(pid)) # depends on [control=['try'], data=[]]
    except OSError:
        return False # depends on [control=['except'], data=[]]
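On systems with procfs the liveness test reduces to a path check; a sketch using the current process's own PID (prints True on Linux, False where /proc is absent):

import os

pid = os.getpid()
print(pid, os.path.exists('/proc/{0}'.format(pid)))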
def get_grouped_translations(instances, **kwargs):
    """
    Takes instances and returns grouped translations ready to be set in cache.
    """
    grouped_translations = collections.defaultdict(list)

    if not instances:
        return grouped_translations

    if not isinstance(instances, collections.abc.Iterable):
        instances = [instances]

    if isinstance(instances, QuerySet):
        model = instances.model
    else:
        model = instances[0]._meta.model

    instances_ids = []
    for instance in instances:
        instances_ids.append(instance.pk)
        if instance._meta.model != model:
            raise Exception(
                "You cannot use different model instances, only one authorized."
            )

    from .models import Translation
    from .mixins import ModelMixin

    decider = model._meta.linguist.get("decider", Translation)
    identifier = model._meta.linguist.get("identifier", None)
    chunks_length = kwargs.get("chunks_length", None)
    populate_missing = kwargs.get("populate_missing", True)

    if identifier is None:
        raise Exception('You must define Linguist "identifier" meta option')

    lookup = dict(identifier=identifier)
    for kwarg in ("field_names", "languages"):
        value = kwargs.get(kwarg, None)
        if value is not None:
            if not isinstance(value, (list, tuple)):
                value = [value]

            lookup["%s__in" % kwarg[:-1]] = value

    if chunks_length is not None:
        translations_qs = []

        for ids in utils.chunks(instances_ids, chunks_length):
            ids_lookup = copy.copy(lookup)
            ids_lookup["object_id__in"] = ids
            translations_qs.append(decider.objects.filter(**ids_lookup))

        translations = itertools.chain.from_iterable(translations_qs)
    else:
        lookup["object_id__in"] = instances_ids
        translations = decider.objects.filter(**lookup)

    for translation in translations:
        grouped_translations[translation.object_id].append(translation)

    return grouped_translations
def function[get_grouped_translations, parameter[instances]]: constant[ Takes instances and returns grouped translations ready to be set in cache. ] variable[grouped_translations] assign[=] call[name[collections].defaultdict, parameter[name[list]]] if <ast.UnaryOp object at 0x7da18c4cc7c0> begin[:] return[name[grouped_translations]] if <ast.UnaryOp object at 0x7da18c4ce680> begin[:] variable[instances] assign[=] list[[<ast.Name object at 0x7da18fe90280>]] if call[name[isinstance], parameter[name[instances], name[QuerySet]]] begin[:] variable[model] assign[=] name[instances].model variable[instances_ids] assign[=] list[[]] for taget[name[instance]] in starred[name[instances]] begin[:] call[name[instances_ids].append, parameter[name[instance].pk]] if compare[name[instance]._meta.model not_equal[!=] name[model]] begin[:] <ast.Raise object at 0x7da1b2847b80> from relative_module[models] import module[Translation] from relative_module[mixins] import module[ModelMixin] variable[decider] assign[=] call[name[model]._meta.linguist.get, parameter[constant[decider], name[Translation]]] variable[identifier] assign[=] call[name[model]._meta.linguist.get, parameter[constant[identifier], constant[None]]] variable[chunks_length] assign[=] call[name[kwargs].get, parameter[constant[chunks_length], constant[None]]] variable[populate_missing] assign[=] call[name[kwargs].get, parameter[constant[populate_missing], constant[True]]] if compare[name[identifier] is constant[None]] begin[:] <ast.Raise object at 0x7da18fe92950> variable[lookup] assign[=] call[name[dict], parameter[]] for taget[name[kwarg]] in starred[tuple[[<ast.Constant object at 0x7da18fe93490>, <ast.Constant object at 0x7da18fe91390>]]] begin[:] variable[value] assign[=] call[name[kwargs].get, parameter[name[kwarg], constant[None]]] if compare[name[value] is_not constant[None]] begin[:] if <ast.UnaryOp object at 0x7da18fe903a0> begin[:] variable[value] assign[=] list[[<ast.Name object at 0x7da18fe90be0>]] call[name[lookup]][binary_operation[constant[%s__in] <ast.Mod object at 0x7da2590d6920> call[name[kwarg]][<ast.Slice object at 0x7da18fe91750>]]] assign[=] name[value] if compare[name[chunks_length] is_not constant[None]] begin[:] variable[translations_qs] assign[=] list[[]] for taget[name[ids]] in starred[call[name[utils].chunks, parameter[name[instances_ids], name[chunks_length]]]] begin[:] variable[ids_lookup] assign[=] call[name[copy].copy, parameter[name[lookup]]] call[name[ids_lookup]][constant[object_id__in]] assign[=] name[ids] call[name[translations_qs].append, parameter[call[name[decider].objects.filter, parameter[]]]] variable[translations] assign[=] call[name[itertools].chain.from_iterable, parameter[name[translations_qs]]] for taget[name[translation]] in starred[name[translations]] begin[:] call[call[name[grouped_translations]][name[translation].object_id].append, parameter[name[translation]]] return[name[grouped_translations]]
keyword[def] identifier[get_grouped_translations] ( identifier[instances] ,** identifier[kwargs] ): literal[string] identifier[grouped_translations] = identifier[collections] . identifier[defaultdict] ( identifier[list] ) keyword[if] keyword[not] identifier[instances] : keyword[return] identifier[grouped_translations] keyword[if] keyword[not] identifier[isinstance] ( identifier[instances] , identifier[collections] . identifier[Iterable] ): identifier[instances] =[ identifier[instances] ] keyword[if] identifier[isinstance] ( identifier[instances] , identifier[QuerySet] ): identifier[model] = identifier[instances] . identifier[model] keyword[else] : identifier[model] = identifier[instances] [ literal[int] ]. identifier[_meta] . identifier[model] identifier[instances_ids] =[] keyword[for] identifier[instance] keyword[in] identifier[instances] : identifier[instances_ids] . identifier[append] ( identifier[instance] . identifier[pk] ) keyword[if] identifier[instance] . identifier[_meta] . identifier[model] != identifier[model] : keyword[raise] identifier[Exception] ( literal[string] ) keyword[from] . identifier[models] keyword[import] identifier[Translation] keyword[from] . identifier[mixins] keyword[import] identifier[ModelMixin] identifier[decider] = identifier[model] . identifier[_meta] . identifier[linguist] . identifier[get] ( literal[string] , identifier[Translation] ) identifier[identifier] = identifier[model] . identifier[_meta] . identifier[linguist] . identifier[get] ( literal[string] , keyword[None] ) identifier[chunks_length] = identifier[kwargs] . identifier[get] ( literal[string] , keyword[None] ) identifier[populate_missing] = identifier[kwargs] . identifier[get] ( literal[string] , keyword[True] ) keyword[if] identifier[identifier] keyword[is] keyword[None] : keyword[raise] identifier[Exception] ( literal[string] ) identifier[lookup] = identifier[dict] ( identifier[identifier] = identifier[identifier] ) keyword[for] identifier[kwarg] keyword[in] ( literal[string] , literal[string] ): identifier[value] = identifier[kwargs] . identifier[get] ( identifier[kwarg] , keyword[None] ) keyword[if] identifier[value] keyword[is] keyword[not] keyword[None] : keyword[if] keyword[not] identifier[isinstance] ( identifier[value] ,( identifier[list] , identifier[tuple] )): identifier[value] =[ identifier[value] ] identifier[lookup] [ literal[string] % identifier[kwarg] [:- literal[int] ]]= identifier[value] keyword[if] identifier[chunks_length] keyword[is] keyword[not] keyword[None] : identifier[translations_qs] =[] keyword[for] identifier[ids] keyword[in] identifier[utils] . identifier[chunks] ( identifier[instances_ids] , identifier[chunks_length] ): identifier[ids_lookup] = identifier[copy] . identifier[copy] ( identifier[lookup] ) identifier[ids_lookup] [ literal[string] ]= identifier[ids] identifier[translations_qs] . identifier[append] ( identifier[decider] . identifier[objects] . identifier[filter] (** identifier[ids_lookup] )) identifier[translations] = identifier[itertools] . identifier[chain] . identifier[from_iterable] ( identifier[translations_qs] ) keyword[else] : identifier[lookup] [ literal[string] ]= identifier[instances_ids] identifier[translations] = identifier[decider] . identifier[objects] . identifier[filter] (** identifier[lookup] ) keyword[for] identifier[translation] keyword[in] identifier[translations] : identifier[grouped_translations] [ identifier[translation] . identifier[object_id] ]. identifier[append] ( identifier[translation] ) keyword[return] identifier[grouped_translations]
def get_grouped_translations(instances, **kwargs): """ Takes instances and returns grouped translations ready to be set in cache. """ grouped_translations = collections.defaultdict(list) if not instances: return grouped_translations # depends on [control=['if'], data=[]] if not isinstance(instances, collections.Iterable): instances = [instances] # depends on [control=['if'], data=[]] if isinstance(instances, QuerySet): model = instances.model # depends on [control=['if'], data=[]] else: model = instances[0]._meta.model instances_ids = [] for instance in instances: instances_ids.append(instance.pk) if instance._meta.model != model: raise Exception('You cannot use different model instances, only one authorized.') # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['instance']] from .models import Translation from .mixins import ModelMixin decider = model._meta.linguist.get('decider', Translation) identifier = model._meta.linguist.get('identifier', None) chunks_length = kwargs.get('chunks_length', None) populate_missing = kwargs.get('populate_missing', True) if identifier is None: raise Exception('You must define Linguist "identifier" meta option') # depends on [control=['if'], data=[]] lookup = dict(identifier=identifier) for kwarg in ('field_names', 'languages'): value = kwargs.get(kwarg, None) if value is not None: if not isinstance(value, (list, tuple)): value = [value] # depends on [control=['if'], data=[]] lookup['%s__in' % kwarg[:-1]] = value # depends on [control=['if'], data=['value']] # depends on [control=['for'], data=['kwarg']] if chunks_length is not None: translations_qs = [] for ids in utils.chunks(instances_ids, chunks_length): ids_lookup = copy.copy(lookup) ids_lookup['object_id__in'] = ids translations_qs.append(decider.objects.filter(**ids_lookup)) # depends on [control=['for'], data=['ids']] translations = itertools.chain.from_iterable(translations_qs) # depends on [control=['if'], data=['chunks_length']] else: lookup['object_id__in'] = instances_ids translations = decider.objects.filter(**lookup) for translation in translations: grouped_translations[translation.object_id].append(translation) # depends on [control=['for'], data=['translation']] return grouped_translations
def __emulate_buttons(self, changed_buttons, timeval=None): """Make the button events use the Linux style format.""" events = [] for button in changed_buttons: code, value, ev_type = self.__map_button(button) event = self.create_event_object( ev_type, code, value, timeval=timeval) events.append(event) return events
def function[__emulate_buttons, parameter[self, changed_buttons, timeval]]: constant[Make the button events use the Linux style format.] variable[events] assign[=] list[[]] for taget[name[button]] in starred[name[changed_buttons]] begin[:] <ast.Tuple object at 0x7da1b084fb20> assign[=] call[name[self].__map_button, parameter[name[button]]] variable[event] assign[=] call[name[self].create_event_object, parameter[name[ev_type], name[code], name[value]]] call[name[events].append, parameter[name[event]]] return[name[events]]
keyword[def] identifier[__emulate_buttons] ( identifier[self] , identifier[changed_buttons] , identifier[timeval] = keyword[None] ): literal[string] identifier[events] =[] keyword[for] identifier[button] keyword[in] identifier[changed_buttons] : identifier[code] , identifier[value] , identifier[ev_type] = identifier[self] . identifier[__map_button] ( identifier[button] ) identifier[event] = identifier[self] . identifier[create_event_object] ( identifier[ev_type] , identifier[code] , identifier[value] , identifier[timeval] = identifier[timeval] ) identifier[events] . identifier[append] ( identifier[event] ) keyword[return] identifier[events]
def __emulate_buttons(self, changed_buttons, timeval=None): """Make the button events use the Linux style format.""" events = [] for button in changed_buttons: (code, value, ev_type) = self.__map_button(button) event = self.create_event_object(ev_type, code, value, timeval=timeval) events.append(event) # depends on [control=['for'], data=['button']] return events
def calculate_a(self): """ Calculate the client's public value A = g^a%N with the generated random number a :param {Long integer} a Randomly generated small A. :return {Long integer} Computed large A. """ big_a = pow(self.g, self.small_a_value, self.big_n) # safety check if (big_a % self.big_n) == 0: raise ValueError('Safety check for A failed') return big_a
def function[calculate_a, parameter[self]]: constant[ Calculate the client's public value A = g^a%N with the generated random number a :param {Long integer} a Randomly generated small A. :return {Long integer} Computed large A. ] variable[big_a] assign[=] call[name[pow], parameter[name[self].g, name[self].small_a_value, name[self].big_n]] if compare[binary_operation[name[big_a] <ast.Mod object at 0x7da2590d6920> name[self].big_n] equal[==] constant[0]] begin[:] <ast.Raise object at 0x7da1b1d3aec0> return[name[big_a]]
keyword[def] identifier[calculate_a] ( identifier[self] ): literal[string] identifier[big_a] = identifier[pow] ( identifier[self] . identifier[g] , identifier[self] . identifier[small_a_value] , identifier[self] . identifier[big_n] ) keyword[if] ( identifier[big_a] % identifier[self] . identifier[big_n] )== literal[int] : keyword[raise] identifier[ValueError] ( literal[string] ) keyword[return] identifier[big_a]
def calculate_a(self): """ Calculate the client's public value A = g^a%N with the generated random number a :param {Long integer} a Randomly generated small A. :return {Long integer} Computed large A. """ big_a = pow(self.g, self.small_a_value, self.big_n) # safety check if big_a % self.big_n == 0: raise ValueError('Safety check for A failed') # depends on [control=['if'], data=[]] return big_a
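With toy parameters the computation is plain modular exponentiation via Python's three-argument pow; real SRP uses a large safe prime N and a standard generator g:

g, N = 2, 2267   # toy values for illustration only
small_a = 123    # the client's random secret exponent
big_a = pow(g, small_a, N)
assert big_a % N != 0   # the same safety check as above
print(big_a)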
def writeto(fpath, to_write, aslines=False, verbose=None): r""" Writes (utf8) text to a file. Args: fpath (PathLike): file path to_write (str): text to write (must be unicode text) aslines (bool): if True to_write is assumed to be a list of lines verbose (bool): verbosity flag CommandLine: python -m ubelt.util_io writeto --verbose Example: >>> import ubelt as ub >>> dpath = ub.ensure_app_cache_dir('ubelt') >>> fpath = dpath + '/' + 'testwrite.txt' >>> if exists(fpath): >>> os.remove(fpath) >>> to_write = 'utf-8 symbols Δ, Й, ק, م, ๗, あ, 叶, 葉, and 말.' >>> writeto(fpath, to_write) >>> read_ = ub.readfrom(fpath) >>> print('read_ = ' + read_) >>> print('to_write = ' + to_write) >>> assert read_ == to_write Example: >>> import ubelt as ub >>> dpath = ub.ensure_app_cache_dir('ubelt') >>> fpath = dpath + '/' + 'testwrite2.txt' >>> if exists(fpath): >>> os.remove(fpath) >>> to_write = ['a\n', 'b\n', 'c\n', 'd\n'] >>> writeto(fpath, to_write, aslines=True) >>> read_ = ub.readfrom(fpath, aslines=True) >>> print('read_ = {}'.format(read_)) >>> print('to_write = {}'.format(to_write)) >>> assert read_ == to_write """ if verbose: print('Writing to text file: %r ' % (fpath,)) with open(fpath, 'wb') as file: if aslines: to_write = map(_ensure_bytes , to_write) file.writelines(to_write) else: # convert to bytes for writing bytes = _ensure_bytes(to_write) file.write(bytes)
def function[writeto, parameter[fpath, to_write, aslines, verbose]]: constant[ Writes (utf8) text to a file. Args: fpath (PathLike): file path to_write (str): text to write (must be unicode text) aslines (bool): if True to_write is assumed to be a list of lines verbose (bool): verbosity flag CommandLine: python -m ubelt.util_io writeto --verbose Example: >>> import ubelt as ub >>> dpath = ub.ensure_app_cache_dir('ubelt') >>> fpath = dpath + '/' + 'testwrite.txt' >>> if exists(fpath): >>> os.remove(fpath) >>> to_write = 'utf-8 symbols Δ, Й, ק, م, ๗, あ, 叶, 葉, and 말.' >>> writeto(fpath, to_write) >>> read_ = ub.readfrom(fpath) >>> print('read_ = ' + read_) >>> print('to_write = ' + to_write) >>> assert read_ == to_write Example: >>> import ubelt as ub >>> dpath = ub.ensure_app_cache_dir('ubelt') >>> fpath = dpath + '/' + 'testwrite2.txt' >>> if exists(fpath): >>> os.remove(fpath) >>> to_write = ['a\n', 'b\n', 'c\n', 'd\n'] >>> writeto(fpath, to_write, aslines=True) >>> read_ = ub.readfrom(fpath, aslines=True) >>> print('read_ = {}'.format(read_)) >>> print('to_write = {}'.format(to_write)) >>> assert read_ == to_write ] if name[verbose] begin[:] call[name[print], parameter[binary_operation[constant[Writing to text file: %r ] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da20c7945e0>]]]]] with call[name[open], parameter[name[fpath], constant[wb]]] begin[:] if name[aslines] begin[:] variable[to_write] assign[=] call[name[map], parameter[name[_ensure_bytes], name[to_write]]] call[name[file].writelines, parameter[name[to_write]]]
keyword[def] identifier[writeto] ( identifier[fpath] , identifier[to_write] , identifier[aslines] = keyword[False] , identifier[verbose] = keyword[None] ): literal[string] keyword[if] identifier[verbose] : identifier[print] ( literal[string] %( identifier[fpath] ,)) keyword[with] identifier[open] ( identifier[fpath] , literal[string] ) keyword[as] identifier[file] : keyword[if] identifier[aslines] : identifier[to_write] = identifier[map] ( identifier[_ensure_bytes] , identifier[to_write] ) identifier[file] . identifier[writelines] ( identifier[to_write] ) keyword[else] : identifier[bytes] = identifier[_ensure_bytes] ( identifier[to_write] ) identifier[file] . identifier[write] ( identifier[bytes] )
def writeto(fpath, to_write, aslines=False, verbose=None): """ Writes (utf8) text to a file. Args: fpath (PathLike): file path to_write (str): text to write (must be unicode text) aslines (bool): if True to_write is assumed to be a list of lines verbose (bool): verbosity flag CommandLine: python -m ubelt.util_io writeto --verbose Example: >>> import ubelt as ub >>> dpath = ub.ensure_app_cache_dir('ubelt') >>> fpath = dpath + '/' + 'testwrite.txt' >>> if exists(fpath): >>> os.remove(fpath) >>> to_write = 'utf-8 symbols Δ, Й, ק, م, ๗, あ, 叶, 葉, and 말.' >>> writeto(fpath, to_write) >>> read_ = ub.readfrom(fpath) >>> print('read_ = ' + read_) >>> print('to_write = ' + to_write) >>> assert read_ == to_write Example: >>> import ubelt as ub >>> dpath = ub.ensure_app_cache_dir('ubelt') >>> fpath = dpath + '/' + 'testwrite2.txt' >>> if exists(fpath): >>> os.remove(fpath) >>> to_write = ['a\\n', 'b\\n', 'c\\n', 'd\\n'] >>> writeto(fpath, to_write, aslines=True) >>> read_ = ub.readfrom(fpath, aslines=True) >>> print('read_ = {}'.format(read_)) >>> print('to_write = {}'.format(to_write)) >>> assert read_ == to_write """ if verbose: print('Writing to text file: %r ' % (fpath,)) # depends on [control=['if'], data=[]] with open(fpath, 'wb') as file: if aslines: to_write = map(_ensure_bytes, to_write) file.writelines(to_write) # depends on [control=['if'], data=[]] else: # convert to bytes for writing bytes = _ensure_bytes(to_write) file.write(bytes) # depends on [control=['with'], data=['file']]
def _create_file_if_needed(self): """Create an empty file if necessary. This method will not initialize the file. Instead it implements a simple version of "touch" to ensure the file has been created. """ if not os.path.exists(self._filename): old_umask = os.umask(0o177) try: open(self._filename, 'a+b').close() finally: os.umask(old_umask)
def function[_create_file_if_needed, parameter[self]]: constant[Create an empty file if necessary. This method will not initialize the file. Instead it implements a simple version of "touch" to ensure the file has been created. ] if <ast.UnaryOp object at 0x7da1b014d840> begin[:] variable[old_umask] assign[=] call[name[os].umask, parameter[constant[127]]] <ast.Try object at 0x7da1b014c100>
keyword[def] identifier[_create_file_if_needed] ( identifier[self] ): literal[string] keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[self] . identifier[_filename] ): identifier[old_umask] = identifier[os] . identifier[umask] ( literal[int] ) keyword[try] : identifier[open] ( identifier[self] . identifier[_filename] , literal[string] ). identifier[close] () keyword[finally] : identifier[os] . identifier[umask] ( identifier[old_umask] )
def _create_file_if_needed(self): """Create an empty file if necessary. This method will not initialize the file. Instead it implements a simple version of "touch" to ensure the file has been created. """ if not os.path.exists(self._filename): old_umask = os.umask(127) try: open(self._filename, 'a+b').close() # depends on [control=['try'], data=[]] finally: os.umask(old_umask) # depends on [control=['if'], data=[]]
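The umask of 0o177 is what locks down the new file: open() creates files with mode 0o666 by default, and 0o666 & ~0o177 leaves 0o600, i.e. owner read/write only. A quick check of that behaviour, using a hypothetical path:

import os
import tempfile

path = os.path.join(tempfile.gettempdir(), 'secret.bin')  # hypothetical path
old_umask = os.umask(0o177)    # mask group/other bits and the owner execute bit
try:
    open(path, 'a+b').close()  # created with 0o666 & ~0o177 == 0o600
finally:
    os.umask(old_umask)        # always restore the process-wide umask
print(oct(os.stat(path).st_mode & 0o777))  # '0o600' for a freshly created file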
def set_default_symbols(self): """Set self.symbols based on self.numbers and the periodic table.""" self.symbols = tuple(periodic[n].symbol for n in self.numbers)
def function[set_default_symbols, parameter[self]]: constant[Set self.symbols based on self.numbers and the periodic table.] name[self].symbols assign[=] call[name[tuple], parameter[<ast.GeneratorExp object at 0x7da20c6abb50>]]
keyword[def] identifier[set_default_symbols] ( identifier[self] ): literal[string] identifier[self] . identifier[symbols] = identifier[tuple] ( identifier[periodic] [ identifier[n] ]. identifier[symbol] keyword[for] identifier[n] keyword[in] identifier[self] . identifier[numbers] )
def set_default_symbols(self): """Set self.symbols based on self.numbers and the periodic table.""" self.symbols = tuple((periodic[n].symbol for n in self.numbers))
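Assuming periodic is the same atomic-number lookup table the class consults (e.g. molmod's periodic table), the comprehension simply maps numbers to element symbols:

numbers = (1, 6, 8)                                   # hydrogen, carbon, oxygen
symbols = tuple(periodic[n].symbol for n in numbers)
print(symbols)                                        # ('H', 'C', 'O')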
def run_radia_perchrom(job, bams, univ_options, radia_options, chrom):
    """
    Run RADIA call on a single chromosome in the input bams.

    :param dict bams: Dict of bam and bai for tumor DNA-Seq, normal DNA-Seq and tumor RNA-Seq
    :param dict univ_options: Dict of universal options used by almost all tools
    :param dict radia_options: Options specific to RADIA
    :param str chrom: Chromosome to process
    :return: fsID for the chromosome vcf
    :rtype: toil.fileStore.FileID
    """
    work_dir = os.getcwd()
    input_files = {
        'rna.bam': bams['tumor_rna'],
        'rna.bam.bai': bams['tumor_rnai'],
        'tumor.bam': bams['tumor_dna'],
        'tumor.bam.bai': bams['tumor_dnai'],
        'normal.bam': bams['normal_dna'],
        'normal.bam.bai': bams['normal_dnai'],
        'genome.fa.tar.gz': radia_options['genome_fasta'],
        'genome.fa.fai.tar.gz': radia_options['genome_fai']}
    input_files = get_files_from_filestore(job, input_files, work_dir, docker=False)
    for key in ('genome.fa', 'genome.fa.fai'):
        input_files[key] = untargz(input_files[key + '.tar.gz'], work_dir)
    input_files = {key: docker_path(path) for key, path in input_files.items()}

    radia_output = ''.join([work_dir, '/radia_', chrom, '.vcf'])
    radia_log = ''.join([work_dir, '/radia_', chrom, '_radia.log'])
    parameters = [univ_options['patient'],  # shortID
                  chrom,
                  '-n', input_files['normal.bam'],
                  '-t', input_files['tumor.bam'],
                  '-r', input_files['rna.bam'],
                  ''.join(['--rnaTumorFasta=', input_files['genome.fa']]),
                  '-f', input_files['genome.fa'],
                  '-o', docker_path(radia_output),
                  '-i', univ_options['ref'],
                  '-m', input_files['genome.fa'],
                  '-d', '[email protected]',
                  '-q', 'Illumina',
                  '--disease', 'CANCER',
                  '-l', 'INFO',
                  '-g', docker_path(radia_log)]
    docker_call(tool='radia', tool_parameters=parameters, work_dir=work_dir,
                dockerhub=univ_options['dockerhub'], tool_version=radia_options['version'])
    output_file = job.fileStore.writeGlobalFile(radia_output)
    job.fileStore.logToMaster('Ran radia on %s:%s successfully' % (univ_options['patient'], chrom))
    return output_file
def function[run_radia_perchrom, parameter[job, bams, univ_options, radia_options, chrom]]: constant[ Run RADIA call on a single chromosome in the input bams. :param dict bams: Dict of bam and bai for tumor DNA-Seq, normal DNA-Seq and tumor RNA-Seq :param dict univ_options: Dict of universal options used by almost all tools :param dict radia_options: Options specific to RADIA :param str chrom: Chromosome to process :return: fsID for the chromosome vcf :rtype: toil.fileStore.FileID ] variable[work_dir] assign[=] call[name[os].getcwd, parameter[]] variable[input_files] assign[=] dictionary[[<ast.Constant object at 0x7da1b25fbc10>, <ast.Constant object at 0x7da1b25fbbe0>, <ast.Constant object at 0x7da1b25fbbb0>, <ast.Constant object at 0x7da1b25fbb80>, <ast.Constant object at 0x7da1b25fbb50>, <ast.Constant object at 0x7da1b25fbb20>, <ast.Constant object at 0x7da1b25fbaf0>, <ast.Constant object at 0x7da1b25fbac0>], [<ast.Subscript object at 0x7da1b25fba90>, <ast.Subscript object at 0x7da1b25fba00>, <ast.Subscript object at 0x7da1b25fb970>, <ast.Subscript object at 0x7da1b25fb8e0>, <ast.Subscript object at 0x7da1b25fb850>, <ast.Subscript object at 0x7da1b25fb7c0>, <ast.Subscript object at 0x7da1b25fb730>, <ast.Subscript object at 0x7da1b25fb6a0>]] variable[input_files] assign[=] call[name[get_files_from_filestore], parameter[name[job], name[input_files], name[work_dir]]] for taget[name[key]] in starred[tuple[[<ast.Constant object at 0x7da1b25fb3d0>, <ast.Constant object at 0x7da1b25fb3a0>]]] begin[:] call[name[input_files]][name[key]] assign[=] call[name[untargz], parameter[call[name[input_files]][binary_operation[name[key] + constant[.tar.gz]]], name[work_dir]]] variable[input_files] assign[=] <ast.DictComp object at 0x7da1b25fabc0> variable[radia_output] assign[=] call[constant[].join, parameter[list[[<ast.Name object at 0x7da18eb55030>, <ast.Constant object at 0x7da18eb54af0>, <ast.Name object at 0x7da18eb559c0>, <ast.Constant object at 0x7da18eb565f0>]]]] variable[radia_log] assign[=] call[constant[].join, parameter[list[[<ast.Name object at 0x7da18eb56c20>, <ast.Constant object at 0x7da18eb54730>, <ast.Name object at 0x7da18eb54370>, <ast.Constant object at 0x7da18eb55390>]]]] variable[parameters] assign[=] list[[<ast.Subscript object at 0x7da18eb575b0>, <ast.Name object at 0x7da18eb56230>, <ast.Constant object at 0x7da18eb54a90>, <ast.Subscript object at 0x7da18eb55420>, <ast.Constant object at 0x7da18eb560e0>, <ast.Subscript object at 0x7da18eb546d0>, <ast.Constant object at 0x7da18eb56da0>, <ast.Subscript object at 0x7da18eb54250>, <ast.Call object at 0x7da18eb57910>, <ast.Constant object at 0x7da18eb57c10>, <ast.Subscript object at 0x7da18eb56260>, <ast.Constant object at 0x7da18eb565c0>, <ast.Call object at 0x7da18eb56650>, <ast.Constant object at 0x7da18eb57fd0>, <ast.Subscript object at 0x7da18eb57f70>, <ast.Constant object at 0x7da18eb572e0>, <ast.Subscript object at 0x7da18eb55e10>, <ast.Constant object at 0x7da18eb56710>, <ast.Constant object at 0x7da18eb546a0>, <ast.Constant object at 0x7da18eb55000>, <ast.Constant object at 0x7da18eb57b50>, <ast.Constant object at 0x7da18eb54430>, <ast.Constant object at 0x7da1b25fa080>, <ast.Constant object at 0x7da1b25fa0b0>, <ast.Constant object at 0x7da1b25fa0e0>, <ast.Constant object at 0x7da1b25fa110>, <ast.Call object at 0x7da1b25fa140>]] call[name[docker_call], parameter[]] variable[output_file] assign[=] call[name[job].fileStore.writeGlobalFile, parameter[name[radia_output]]] call[name[job].fileStore.logToMaster, parameter[binary_operation[constant[Ran radia on %s:%s successfully] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Subscript object at 0x7da1b25f9960>, <ast.Name object at 0x7da1b25f98d0>]]]]] return[name[output_file]]
keyword[def] identifier[run_radia_perchrom] ( identifier[job] , identifier[bams] , identifier[univ_options] , identifier[radia_options] , identifier[chrom] ): literal[string] identifier[work_dir] = identifier[os] . identifier[getcwd] () identifier[input_files] ={ literal[string] : identifier[bams] [ literal[string] ], literal[string] : identifier[bams] [ literal[string] ], literal[string] : identifier[bams] [ literal[string] ], literal[string] : identifier[bams] [ literal[string] ], literal[string] : identifier[bams] [ literal[string] ], literal[string] : identifier[bams] [ literal[string] ], literal[string] : identifier[radia_options] [ literal[string] ], literal[string] : identifier[radia_options] [ literal[string] ]} identifier[input_files] = identifier[get_files_from_filestore] ( identifier[job] , identifier[input_files] , identifier[work_dir] , identifier[docker] = keyword[False] ) keyword[for] identifier[key] keyword[in] ( literal[string] , literal[string] ): identifier[input_files] [ identifier[key] ]= identifier[untargz] ( identifier[input_files] [ identifier[key] + literal[string] ], identifier[work_dir] ) identifier[input_files] ={ identifier[key] : identifier[docker_path] ( identifier[path] ) keyword[for] identifier[key] , identifier[path] keyword[in] identifier[input_files] . identifier[items] ()} identifier[radia_output] = literal[string] . identifier[join] ([ identifier[work_dir] , literal[string] , identifier[chrom] , literal[string] ]) identifier[radia_log] = literal[string] . identifier[join] ([ identifier[work_dir] , literal[string] , identifier[chrom] , literal[string] ]) identifier[parameters] =[ identifier[univ_options] [ literal[string] ], identifier[chrom] , literal[string] , identifier[input_files] [ literal[string] ], literal[string] , identifier[input_files] [ literal[string] ], literal[string] , identifier[input_files] [ literal[string] ], literal[string] . identifier[join] ([ literal[string] , identifier[input_files] [ literal[string] ]]), literal[string] , identifier[input_files] [ literal[string] ], literal[string] , identifier[docker_path] ( identifier[radia_output] ), literal[string] , identifier[univ_options] [ literal[string] ], literal[string] , identifier[input_files] [ literal[string] ], literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , identifier[docker_path] ( identifier[radia_log] )] identifier[docker_call] ( identifier[tool] = literal[string] , identifier[tool_parameters] = identifier[parameters] , identifier[work_dir] = identifier[work_dir] , identifier[dockerhub] = identifier[univ_options] [ literal[string] ], identifier[tool_version] = identifier[radia_options] [ literal[string] ]) identifier[output_file] = identifier[job] . identifier[fileStore] . identifier[writeGlobalFile] ( identifier[radia_output] ) identifier[job] . identifier[fileStore] . identifier[logToMaster] ( literal[string] %( identifier[univ_options] [ literal[string] ], identifier[chrom] )) keyword[return] identifier[output_file]
def run_radia_perchrom(job, bams, univ_options, radia_options, chrom): """ Run RADIA call on a single chromosome in the input bams. :param dict bams: Dict of bam and bai for tumor DNA-Seq, normal DNA-Seq and tumor RNA-Seq :param dict univ_options: Dict of universal options used by almost all tools :param dict radia_options: Options specific to RADIA :param str chrom: Chromosome to process :return: fsID for the chromosome vcf :rtype: toil.fileStore.FileID """ work_dir = os.getcwd() input_files = {'rna.bam': bams['tumor_rna'], 'rna.bam.bai': bams['tumor_rnai'], 'tumor.bam': bams['tumor_dna'], 'tumor.bam.bai': bams['tumor_dnai'], 'normal.bam': bams['normal_dna'], 'normal.bam.bai': bams['normal_dnai'], 'genome.fa.tar.gz': radia_options['genome_fasta'], 'genome.fa.fai.tar.gz': radia_options['genome_fai']} input_files = get_files_from_filestore(job, input_files, work_dir, docker=False) for key in ('genome.fa', 'genome.fa.fai'): input_files[key] = untargz(input_files[key + '.tar.gz'], work_dir) # depends on [control=['for'], data=['key']] input_files = {key: docker_path(path) for (key, path) in input_files.items()} radia_output = ''.join([work_dir, '/radia_', chrom, '.vcf']) radia_log = ''.join([work_dir, '/radia_', chrom, '_radia.log']) # shortID parameters = [univ_options['patient'], chrom, '-n', input_files['normal.bam'], '-t', input_files['tumor.bam'], '-r', input_files['rna.bam'], ''.join(['--rnaTumorFasta=', input_files['genome.fa']]), '-f', input_files['genome.fa'], '-o', docker_path(radia_output), '-i', univ_options['ref'], '-m', input_files['genome.fa'], '-d', '[email protected]', '-q', 'Illumina', '--disease', 'CANCER', '-l', 'INFO', '-g', docker_path(radia_log)] docker_call(tool='radia', tool_parameters=parameters, work_dir=work_dir, dockerhub=univ_options['dockerhub'], tool_version=radia_options['version']) output_file = job.fileStore.writeGlobalFile(radia_output) job.fileStore.logToMaster('Ran radia on %s:%s successfully' % (univ_options['patient'], chrom)) return output_file
def rpc_is_name_zonefile_hash(self, name, zonefile_hash, **con_info): """ Was a zone file hash issued by a name? Return {'result': True/False} """ if not check_name(name) and not check_subdomain(name): return {'error': 'invalid name', 'http_status': 400} if not check_string(zonefile_hash, min_length=LENGTHS['value_hash']*2, max_length=LENGTHS['value_hash']*2, pattern=OP_HEX_PATTERN): return {'error': 'invalid zone file hash', 'http_status': 400} was_set = None if check_name(name): # on-chain name db = get_db_state(self.working_dir) was_set = db.is_name_zonefile_hash(name, zonefile_hash) db.close() else: # off-chain name was_set = is_subdomain_zonefile_hash(name, zonefile_hash) return self.success_response({'result': was_set})
def function[rpc_is_name_zonefile_hash, parameter[self, name, zonefile_hash]]: constant[ Was a zone file hash issued by a name? Return {'result': True/False} ] if <ast.BoolOp object at 0x7da18f00e950> begin[:] return[dictionary[[<ast.Constant object at 0x7da18f00f6d0>, <ast.Constant object at 0x7da18f00fe50>], [<ast.Constant object at 0x7da18f00d990>, <ast.Constant object at 0x7da18f00c910>]]] if <ast.UnaryOp object at 0x7da18f00c940> begin[:] return[dictionary[[<ast.Constant object at 0x7da18f00e230>, <ast.Constant object at 0x7da18f00faf0>], [<ast.Constant object at 0x7da18f00e410>, <ast.Constant object at 0x7da18f00e440>]]] variable[was_set] assign[=] constant[None] if call[name[check_name], parameter[name[name]]] begin[:] variable[db] assign[=] call[name[get_db_state], parameter[name[self].working_dir]] variable[was_set] assign[=] call[name[db].is_name_zonefile_hash, parameter[name[name], name[zonefile_hash]]] call[name[db].close, parameter[]] return[call[name[self].success_response, parameter[dictionary[[<ast.Constant object at 0x7da18f00f520>], [<ast.Name object at 0x7da18f00e560>]]]]]
keyword[def] identifier[rpc_is_name_zonefile_hash] ( identifier[self] , identifier[name] , identifier[zonefile_hash] ,** identifier[con_info] ): literal[string] keyword[if] keyword[not] identifier[check_name] ( identifier[name] ) keyword[and] keyword[not] identifier[check_subdomain] ( identifier[name] ): keyword[return] { literal[string] : literal[string] , literal[string] : literal[int] } keyword[if] keyword[not] identifier[check_string] ( identifier[zonefile_hash] , identifier[min_length] = identifier[LENGTHS] [ literal[string] ]* literal[int] , identifier[max_length] = identifier[LENGTHS] [ literal[string] ]* literal[int] , identifier[pattern] = identifier[OP_HEX_PATTERN] ): keyword[return] { literal[string] : literal[string] , literal[string] : literal[int] } identifier[was_set] = keyword[None] keyword[if] identifier[check_name] ( identifier[name] ): identifier[db] = identifier[get_db_state] ( identifier[self] . identifier[working_dir] ) identifier[was_set] = identifier[db] . identifier[is_name_zonefile_hash] ( identifier[name] , identifier[zonefile_hash] ) identifier[db] . identifier[close] () keyword[else] : identifier[was_set] = identifier[is_subdomain_zonefile_hash] ( identifier[name] , identifier[zonefile_hash] ) keyword[return] identifier[self] . identifier[success_response] ({ literal[string] : identifier[was_set] })
def rpc_is_name_zonefile_hash(self, name, zonefile_hash, **con_info): """ Was a zone file hash issued by a name? Return {'result': True/False} """ if not check_name(name) and (not check_subdomain(name)): return {'error': 'invalid name', 'http_status': 400} # depends on [control=['if'], data=[]] if not check_string(zonefile_hash, min_length=LENGTHS['value_hash'] * 2, max_length=LENGTHS['value_hash'] * 2, pattern=OP_HEX_PATTERN): return {'error': 'invalid zone file hash', 'http_status': 400} # depends on [control=['if'], data=[]] was_set = None if check_name(name): # on-chain name db = get_db_state(self.working_dir) was_set = db.is_name_zonefile_hash(name, zonefile_hash) db.close() # depends on [control=['if'], data=[]] else: # off-chain name was_set = is_subdomain_zonefile_hash(name, zonefile_hash) return self.success_response({'result': was_set})
def del_controller(self): """ Deletes the configured OpenFlow controller address. This method is corresponding to the following ovs-vsctl command:: $ ovs-vsctl del-controller <bridge> """ command = ovs_vsctl.VSCtlCommand('del-controller', [self.br_name]) self.run_command([command])
def function[del_controller, parameter[self]]: constant[ Deletes the configured OpenFlow controller address. This method is corresponding to the following ovs-vsctl command:: $ ovs-vsctl del-controller <bridge> ] variable[command] assign[=] call[name[ovs_vsctl].VSCtlCommand, parameter[constant[del-controller], list[[<ast.Attribute object at 0x7da1b1a346a0>]]]] call[name[self].run_command, parameter[list[[<ast.Name object at 0x7da1b1bace80>]]]]
keyword[def] identifier[del_controller] ( identifier[self] ): literal[string] identifier[command] = identifier[ovs_vsctl] . identifier[VSCtlCommand] ( literal[string] ,[ identifier[self] . identifier[br_name] ]) identifier[self] . identifier[run_command] ([ identifier[command] ])
def del_controller(self): """ Deletes the configured OpenFlow controller address. This method is corresponding to the following ovs-vsctl command:: $ ovs-vsctl del-controller <bridge> """ command = ovs_vsctl.VSCtlCommand('del-controller', [self.br_name]) self.run_command([command])
def _handle_status(self, msg): """ Reimplemented to refresh the namespacebrowser after kernel restarts """ state = msg['content'].get('execution_state', '') msg_type = msg['parent_header'].get('msg_type', '') if state == 'starting': # This is needed to show the time a kernel # has been alive in each console. self.ipyclient.t0 = time.monotonic() self.ipyclient.timer.timeout.connect(self.ipyclient.show_time) self.ipyclient.timer.start(1000) # This handles restarts when the kernel dies # unexpectedly if not self._kernel_is_starting: self._kernel_is_starting = True elif state == 'idle' and msg_type == 'shutdown_request': # This handles restarts asked by the user if self.namespacebrowser is not None: self.set_namespace_view_settings() self.refresh_namespacebrowser() self.ipyclient.t0 = time.monotonic() else: super(NamepaceBrowserWidget, self)._handle_status(msg)
def function[_handle_status, parameter[self, msg]]: constant[ Reimplemented to refresh the namespacebrowser after kernel restarts ] variable[state] assign[=] call[call[name[msg]][constant[content]].get, parameter[constant[execution_state], constant[]]] variable[msg_type] assign[=] call[call[name[msg]][constant[parent_header]].get, parameter[constant[msg_type], constant[]]] if compare[name[state] equal[==] constant[starting]] begin[:] name[self].ipyclient.t0 assign[=] call[name[time].monotonic, parameter[]] call[name[self].ipyclient.timer.timeout.connect, parameter[name[self].ipyclient.show_time]] call[name[self].ipyclient.timer.start, parameter[constant[1000]]] if <ast.UnaryOp object at 0x7da1b1fa1ff0> begin[:] name[self]._kernel_is_starting assign[=] constant[True]
keyword[def] identifier[_handle_status] ( identifier[self] , identifier[msg] ): literal[string] identifier[state] = identifier[msg] [ literal[string] ]. identifier[get] ( literal[string] , literal[string] ) identifier[msg_type] = identifier[msg] [ literal[string] ]. identifier[get] ( literal[string] , literal[string] ) keyword[if] identifier[state] == literal[string] : identifier[self] . identifier[ipyclient] . identifier[t0] = identifier[time] . identifier[monotonic] () identifier[self] . identifier[ipyclient] . identifier[timer] . identifier[timeout] . identifier[connect] ( identifier[self] . identifier[ipyclient] . identifier[show_time] ) identifier[self] . identifier[ipyclient] . identifier[timer] . identifier[start] ( literal[int] ) keyword[if] keyword[not] identifier[self] . identifier[_kernel_is_starting] : identifier[self] . identifier[_kernel_is_starting] = keyword[True] keyword[elif] identifier[state] == literal[string] keyword[and] identifier[msg_type] == literal[string] : keyword[if] identifier[self] . identifier[namespacebrowser] keyword[is] keyword[not] keyword[None] : identifier[self] . identifier[set_namespace_view_settings] () identifier[self] . identifier[refresh_namespacebrowser] () identifier[self] . identifier[ipyclient] . identifier[t0] = identifier[time] . identifier[monotonic] () keyword[else] : identifier[super] ( identifier[NamepaceBrowserWidget] , identifier[self] ). identifier[_handle_status] ( identifier[msg] )
def _handle_status(self, msg): """ Reimplemented to refresh the namespacebrowser after kernel restarts """ state = msg['content'].get('execution_state', '') msg_type = msg['parent_header'].get('msg_type', '') if state == 'starting': # This is needed to show the time a kernel # has been alive in each console. self.ipyclient.t0 = time.monotonic() self.ipyclient.timer.timeout.connect(self.ipyclient.show_time) self.ipyclient.timer.start(1000) # This handles restarts when the kernel dies # unexpectedly if not self._kernel_is_starting: self._kernel_is_starting = True # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] elif state == 'idle' and msg_type == 'shutdown_request': # This handles restarts asked by the user if self.namespacebrowser is not None: self.set_namespace_view_settings() self.refresh_namespacebrowser() # depends on [control=['if'], data=[]] self.ipyclient.t0 = time.monotonic() # depends on [control=['if'], data=[]] else: super(NamepaceBrowserWidget, self)._handle_status(msg)
def code(self): """the http status code to return to the client, by default, 200 if a body is present otherwise 204""" code = getattr(self, '_code', None) if not code: if self.has_body(): code = 200 else: code = 204 return code
def function[code, parameter[self]]: constant[the http status code to return to the client, by default, 200 if a body is present otherwise 204] variable[code] assign[=] call[name[getattr], parameter[name[self], constant[_code], constant[None]]] if <ast.UnaryOp object at 0x7da2049625c0> begin[:] if call[name[self].has_body, parameter[]] begin[:] variable[code] assign[=] constant[200] return[name[code]]
keyword[def] identifier[code] ( identifier[self] ): literal[string] identifier[code] = identifier[getattr] ( identifier[self] , literal[string] , keyword[None] ) keyword[if] keyword[not] identifier[code] : keyword[if] identifier[self] . identifier[has_body] (): identifier[code] = literal[int] keyword[else] : identifier[code] = literal[int] keyword[return] identifier[code]
def code(self): """the http status code to return to the client, by default, 200 if a body is present otherwise 204""" code = getattr(self, '_code', None) if not code: if self.has_body(): code = 200 # depends on [control=['if'], data=[]] else: code = 204 # depends on [control=['if'], data=[]] return code
def yiq_to_rgb(yiq): """ Convert a YIQ color representation to an RGB color representation. (y, i, q) :: y -> [0, 1] i -> [-0.5957, 0.5957] q -> [-0.5226, 0.5226] :param yiq: A tuple of three numeric values corresponding to the luma and chrominance. :return: RGB representation of the input YIQ value. :rtype: tuple """ y, i, q = yiq r = y + (0.956 * i) + (0.621 * q) g = y - (0.272 * i) - (0.647 * q) b = y - (1.108 * i) + (1.705 * q) r = 1 if r > 1 else max(0, r) g = 1 if g > 1 else max(0, g) b = 1 if b > 1 else max(0, b) return round(r * 255, 3), round(g * 255, 3), round(b * 255, 3)
def function[yiq_to_rgb, parameter[yiq]]: constant[ Convert a YIQ color representation to an RGB color representation. (y, i, q) :: y -> [0, 1] i -> [-0.5957, 0.5957] q -> [-0.5226, 0.5226] :param yiq: A tuple of three numeric values corresponding to the luma and chrominance. :return: RGB representation of the input YIQ value. :rtype: tuple ] <ast.Tuple object at 0x7da18f720d90> assign[=] name[yiq] variable[r] assign[=] binary_operation[binary_operation[name[y] + binary_operation[constant[0.956] * name[i]]] + binary_operation[constant[0.621] * name[q]]] variable[g] assign[=] binary_operation[binary_operation[name[y] - binary_operation[constant[0.272] * name[i]]] - binary_operation[constant[0.647] * name[q]]] variable[b] assign[=] binary_operation[binary_operation[name[y] - binary_operation[constant[1.108] * name[i]]] + binary_operation[constant[1.705] * name[q]]] variable[r] assign[=] <ast.IfExp object at 0x7da18f722170> variable[g] assign[=] <ast.IfExp object at 0x7da18f722bf0> variable[b] assign[=] <ast.IfExp object at 0x7da18f721450> return[tuple[[<ast.Call object at 0x7da18f7212a0>, <ast.Call object at 0x7da18f722380>, <ast.Call object at 0x7da18f722620>]]]
keyword[def] identifier[yiq_to_rgb] ( identifier[yiq] ): literal[string] identifier[y] , identifier[i] , identifier[q] = identifier[yiq] identifier[r] = identifier[y] +( literal[int] * identifier[i] )+( literal[int] * identifier[q] ) identifier[g] = identifier[y] -( literal[int] * identifier[i] )-( literal[int] * identifier[q] ) identifier[b] = identifier[y] -( literal[int] * identifier[i] )+( literal[int] * identifier[q] ) identifier[r] = literal[int] keyword[if] identifier[r] > literal[int] keyword[else] identifier[max] ( literal[int] , identifier[r] ) identifier[g] = literal[int] keyword[if] identifier[g] > literal[int] keyword[else] identifier[max] ( literal[int] , identifier[g] ) identifier[b] = literal[int] keyword[if] identifier[b] > literal[int] keyword[else] identifier[max] ( literal[int] , identifier[b] ) keyword[return] identifier[round] ( identifier[r] * literal[int] , literal[int] ), identifier[round] ( identifier[g] * literal[int] , literal[int] ), identifier[round] ( identifier[b] * literal[int] , literal[int] )
def yiq_to_rgb(yiq): """ Convert a YIQ color representation to an RGB color representation. (y, i, q) :: y -> [0, 1] i -> [-0.5957, 0.5957] q -> [-0.5226, 0.5226] :param yiq: A tuple of three numeric values corresponding to the luma and chrominance. :return: RGB representation of the input YIQ value. :rtype: tuple """ (y, i, q) = yiq r = y + 0.956 * i + 0.621 * q g = y - 0.272 * i - 0.647 * q b = y - 1.108 * i + 1.705 * q r = 1 if r > 1 else max(0, r) g = 1 if g > 1 else max(0, g) b = 1 if b > 1 else max(0, b) return (round(r * 255, 3), round(g * 255, 3), round(b * 255, 3))
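A quick sanity check: under the FCC YIQ coefficients, pure red corresponds to (y, i, q) ≈ (0.299, 0.596, 0.211), so converting that back should give roughly (255, 0, 0); the small residuals come from the 3-decimal inverse-matrix entries, and the slightly negative blue channel clamps to 0:

print(yiq_to_rgb((0.299, 0.596, 0.211)))
# (254.951, 0.095, 0) — i.e. red, up to rounding of the matrix coefficients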
def consumer_partitions_for_topic(consumer, topic): """Returns a list of all TopicPartitions for a given topic. Arguments: consumer: an initialized KafkaConsumer topic: a topic name to fetch TopicPartitions for :returns: list(TopicPartition): A list of TopicPartitions that belong to the given topic """ topic_partitions = [] partitions = consumer.partitions_for_topic(topic) if partitions is not None: for partition in partitions: topic_partitions.append(TopicPartition(topic, partition)) else: logging.error( "No partitions found for topic {}. Maybe it doesn't exist?".format(topic), ) return topic_partitions
def function[consumer_partitions_for_topic, parameter[consumer, topic]]: constant[Returns a list of all TopicPartitions for a given topic. Arguments: consumer: an initialized KafkaConsumer topic: a topic name to fetch TopicPartitions for :returns: list(TopicPartition): A list of TopicPartitions that belong to the given topic ] variable[topic_partitions] assign[=] list[[]] variable[partitions] assign[=] call[name[consumer].partitions_for_topic, parameter[name[topic]]] if compare[name[partitions] is_not constant[None]] begin[:] for taget[name[partition]] in starred[name[partitions]] begin[:] call[name[topic_partitions].append, parameter[call[name[TopicPartition], parameter[name[topic], name[partition]]]]] return[name[topic_partitions]]
keyword[def] identifier[consumer_partitions_for_topic] ( identifier[consumer] , identifier[topic] ): literal[string] identifier[topic_partitions] =[] identifier[partitions] = identifier[consumer] . identifier[partitions_for_topic] ( identifier[topic] ) keyword[if] identifier[partitions] keyword[is] keyword[not] keyword[None] : keyword[for] identifier[partition] keyword[in] identifier[partitions] : identifier[topic_partitions] . identifier[append] ( identifier[TopicPartition] ( identifier[topic] , identifier[partition] )) keyword[else] : identifier[logging] . identifier[error] ( literal[string] . identifier[format] ( identifier[topic] ), ) keyword[return] identifier[topic_partitions]
def consumer_partitions_for_topic(consumer, topic): """Returns a list of all TopicPartitions for a given topic. Arguments: consumer: an initialized KafkaConsumer topic: a topic name to fetch TopicPartitions for :returns: list(TopicPartition): A list of TopicPartitions that belong to the given topic """ topic_partitions = [] partitions = consumer.partitions_for_topic(topic) if partitions is not None: for partition in partitions: topic_partitions.append(TopicPartition(topic, partition)) # depends on [control=['for'], data=['partition']] # depends on [control=['if'], data=['partitions']] else: logging.error("No partitions found for topic {}. Maybe it doesn't exist?".format(topic)) return topic_partitions
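A brief usage sketch with kafka-python, which provides both KafkaConsumer and the TopicPartition named tuple; the broker address and topic name below are placeholders:

from kafka import KafkaConsumer

consumer = KafkaConsumer(bootstrap_servers='localhost:9092')  # hypothetical broker
tps = consumer_partitions_for_topic(consumer, 'my-topic')     # hypothetical topic
# e.g. [TopicPartition(topic='my-topic', partition=0),
#       TopicPartition(topic='my-topic', partition=1)]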
def apply_smoothing(self, smooth_fwhm): """Set self._smooth_fwhm and then smooth the data. See boyle.nifti.smooth.smooth_imgs. Returns ------- the smoothed data deepcopied. """ if smooth_fwhm <= 0: return old_smooth_fwhm = self._smooth_fwhm self._smooth_fwhm = smooth_fwhm try: data = self.get_data(smoothed=True, masked=True, safe_copy=True) except ValueError as ve: self._smooth_fwhm = old_smooth_fwhm raise else: self._smooth_fwhm = smooth_fwhm return data
def function[apply_smoothing, parameter[self, smooth_fwhm]]: constant[Set self._smooth_fwhm and then smooth the data. See boyle.nifti.smooth.smooth_imgs. Returns ------- the smoothed data deepcopied. ] if compare[name[smooth_fwhm] less_or_equal[<=] constant[0]] begin[:] return[None] variable[old_smooth_fwhm] assign[=] name[self]._smooth_fwhm name[self]._smooth_fwhm assign[=] name[smooth_fwhm] <ast.Try object at 0x7da1afe0e230>
keyword[def] identifier[apply_smoothing] ( identifier[self] , identifier[smooth_fwhm] ): literal[string] keyword[if] identifier[smooth_fwhm] <= literal[int] : keyword[return] identifier[old_smooth_fwhm] = identifier[self] . identifier[_smooth_fwhm] identifier[self] . identifier[_smooth_fwhm] = identifier[smooth_fwhm] keyword[try] : identifier[data] = identifier[self] . identifier[get_data] ( identifier[smoothed] = keyword[True] , identifier[masked] = keyword[True] , identifier[safe_copy] = keyword[True] ) keyword[except] identifier[ValueError] keyword[as] identifier[ve] : identifier[self] . identifier[_smooth_fwhm] = identifier[old_smooth_fwhm] keyword[raise] keyword[else] : identifier[self] . identifier[_smooth_fwhm] = identifier[smooth_fwhm] keyword[return] identifier[data]
def apply_smoothing(self, smooth_fwhm): """Set self._smooth_fwhm and then smooth the data. See boyle.nifti.smooth.smooth_imgs. Returns ------- the smoothed data deepcopied. """ if smooth_fwhm <= 0: return # depends on [control=['if'], data=[]] old_smooth_fwhm = self._smooth_fwhm self._smooth_fwhm = smooth_fwhm try: data = self.get_data(smoothed=True, masked=True, safe_copy=True) # depends on [control=['try'], data=[]] except ValueError as ve: self._smooth_fwhm = old_smooth_fwhm raise # depends on [control=['except'], data=[]] else: self._smooth_fwhm = smooth_fwhm return data
def path(self, which=None): """Extend ``nailgun.entity_mixins.Entity.path``. The format of the returned path depends on the value of ``which``: download_debug_certificate /organizations/<id>/download_debug_certificate subscriptions /organizations/<id>/subscriptions subscriptions/upload /organizations/<id>/subscriptions/upload subscriptions/delete_manifest /organizations/<id>/subscriptions/delete_manifest subscriptions/refresh_manifest /organizations/<id>/subscriptions/refresh_manifest sync_plans /organizations/<id>/sync_plans Otherwise, call ``super``. """ if which in ( 'download_debug_certificate', 'subscriptions', 'subscriptions/delete_manifest', 'subscriptions/manifest_history', 'subscriptions/refresh_manifest', 'subscriptions/upload', 'sync_plans', ): return '{0}/{1}'.format( super(Organization, self).path(which='self'), which ) return super(Organization, self).path(which)
def function[path, parameter[self, which]]: constant[Extend ``nailgun.entity_mixins.Entity.path``. The format of the returned path depends on the value of ``which``: download_debug_certificate /organizations/<id>/download_debug_certificate subscriptions /organizations/<id>/subscriptions subscriptions/upload /organizations/<id>/subscriptions/upload subscriptions/delete_manifest /organizations/<id>/subscriptions/delete_manifest subscriptions/refresh_manifest /organizations/<id>/subscriptions/refresh_manifest sync_plans /organizations/<id>/sync_plans Otherwise, call ``super``. ] if compare[name[which] in tuple[[<ast.Constant object at 0x7da18bcca7d0>, <ast.Constant object at 0x7da18bcc9840>, <ast.Constant object at 0x7da18bcc9b70>, <ast.Constant object at 0x7da18bccbf10>, <ast.Constant object at 0x7da18bccab00>, <ast.Constant object at 0x7da18bcc9300>, <ast.Constant object at 0x7da18bccbc10>]]] begin[:] return[call[constant[{0}/{1}].format, parameter[call[call[name[super], parameter[name[Organization], name[self]]].path, parameter[]], name[which]]]] return[call[call[name[super], parameter[name[Organization], name[self]]].path, parameter[name[which]]]]
keyword[def] identifier[path] ( identifier[self] , identifier[which] = keyword[None] ): literal[string] keyword[if] identifier[which] keyword[in] ( literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , ): keyword[return] literal[string] . identifier[format] ( identifier[super] ( identifier[Organization] , identifier[self] ). identifier[path] ( identifier[which] = literal[string] ), identifier[which] ) keyword[return] identifier[super] ( identifier[Organization] , identifier[self] ). identifier[path] ( identifier[which] )
def path(self, which=None): """Extend ``nailgun.entity_mixins.Entity.path``. The format of the returned path depends on the value of ``which``: download_debug_certificate /organizations/<id>/download_debug_certificate subscriptions /organizations/<id>/subscriptions subscriptions/upload /organizations/<id>/subscriptions/upload subscriptions/delete_manifest /organizations/<id>/subscriptions/delete_manifest subscriptions/refresh_manifest /organizations/<id>/subscriptions/refresh_manifest sync_plans /organizations/<id>/sync_plans Otherwise, call ``super``. """ if which in ('download_debug_certificate', 'subscriptions', 'subscriptions/delete_manifest', 'subscriptions/manifest_history', 'subscriptions/refresh_manifest', 'subscriptions/upload', 'sync_plans'): return '{0}/{1}'.format(super(Organization, self).path(which='self'), which) # depends on [control=['if'], data=['which']] return super(Organization, self).path(which)
def add_indicator(self, indicator_data): """Add an indicator to Batch Job. .. code-block:: javascript { "type": "File", "rating": 5.00, "confidence": 50, "summary": "53c3609411c83f363e051d455ade78a7 : 57a49b478310e4313c54c0fee46e4d70a73dd580 : db31cb2a748b7e0046d8c97a32a7eb4efde32a0593e5dbd58e07a3b4ae6bf3d7", "associatedGroups": [ { "groupXid": "e336e2dd-5dfb-48cd-a33a-f8809e83e904" } ], "attribute": [{ "type": "Source", "displayed": true, "value": "Malware Analysis provided by external AMA." }], "fileOccurrence": [{ "fileName": "drop1.exe", "date": "2017-03-03T18:00:00-06:00" }], "tag": [{ "name": "China" }], "xid": "e336e2dd-5dfb-48cd-a33a-f8809e83e904:170139" } Args: indicator_data (dict): The Full Indicator data including attributes, labels, tags, and associations. """ if indicator_data.get('type') not in ['Address', 'EmailAddress', 'File', 'Host', 'URL']: # for custom indicator types the valueX fields are required. # using the summary we can build the values index = 1 for value in self._indicator_values(indicator_data.get('summary')): indicator_data['value{}'.format(index)] = value index += 1 if indicator_data.get('type') == 'File': # convert custom field name to the appropriate value for batch v2 size = indicator_data.pop('size', None) if size is not None: indicator_data['intValue1'] = size if indicator_data.get('type') == 'Host': # convert custom field name to the appropriate value for batch v2 dns_active = indicator_data.pop('dnsActive', None) if dns_active is not None: indicator_data['flag1'] = dns_active whois_active = indicator_data.pop('whoisActive', None) if whois_active is not None: indicator_data['flag2'] = whois_active return self._indicator(indicator_data)
def function[add_indicator, parameter[self, indicator_data]]: constant[Add an indicator to Batch Job. .. code-block:: javascript { "type": "File", "rating": 5.00, "confidence": 50, "summary": "53c3609411c83f363e051d455ade78a7 : 57a49b478310e4313c54c0fee46e4d70a73dd580 : db31cb2a748b7e0046d8c97a32a7eb4efde32a0593e5dbd58e07a3b4ae6bf3d7", "associatedGroups": [ { "groupXid": "e336e2dd-5dfb-48cd-a33a-f8809e83e904" } ], "attribute": [{ "type": "Source", "displayed": true, "value": "Malware Analysis provided by external AMA." }], "fileOccurrence": [{ "fileName": "drop1.exe", "date": "2017-03-03T18:00:00-06:00" }], "tag": [{ "name": "China" }], "xid": "e336e2dd-5dfb-48cd-a33a-f8809e83e904:170139" } Args: indicator_data (dict): The Full Indicator data including attributes, labels, tags, and associations. ] if compare[call[name[indicator_data].get, parameter[constant[type]]] <ast.NotIn object at 0x7da2590d7190> list[[<ast.Constant object at 0x7da1b0cef310>, <ast.Constant object at 0x7da1b0cefca0>, <ast.Constant object at 0x7da1b0cef4c0>, <ast.Constant object at 0x7da1b0cee0e0>, <ast.Constant object at 0x7da1b0cee110>]]] begin[:] variable[index] assign[=] constant[1] for taget[name[value]] in starred[call[name[self]._indicator_values, parameter[call[name[indicator_data].get, parameter[constant[summary]]]]]] begin[:] call[name[indicator_data]][call[constant[value{}].format, parameter[name[index]]]] assign[=] name[value] <ast.AugAssign object at 0x7da1b2345600> if compare[call[name[indicator_data].get, parameter[constant[type]]] equal[==] constant[File]] begin[:] variable[size] assign[=] call[name[indicator_data].pop, parameter[constant[size], constant[None]]] if compare[name[size] is_not constant[None]] begin[:] call[name[indicator_data]][constant[intValue1]] assign[=] name[size] if compare[call[name[indicator_data].get, parameter[constant[type]]] equal[==] constant[Host]] begin[:] variable[dns_active] assign[=] call[name[indicator_data].pop, parameter[constant[dnsActive], constant[None]]] if compare[name[dns_active] is_not constant[None]] begin[:] call[name[indicator_data]][constant[flag1]] assign[=] name[dns_active] variable[whois_active] assign[=] call[name[indicator_data].pop, parameter[constant[whoisActive], constant[None]]] if compare[name[whois_active] is_not constant[None]] begin[:] call[name[indicator_data]][constant[flag2]] assign[=] name[whois_active] return[call[name[self]._indicator, parameter[name[indicator_data]]]]
keyword[def] identifier[add_indicator] ( identifier[self] , identifier[indicator_data] ): literal[string] keyword[if] identifier[indicator_data] . identifier[get] ( literal[string] ) keyword[not] keyword[in] [ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ]: identifier[index] = literal[int] keyword[for] identifier[value] keyword[in] identifier[self] . identifier[_indicator_values] ( identifier[indicator_data] . identifier[get] ( literal[string] )): identifier[indicator_data] [ literal[string] . identifier[format] ( identifier[index] )]= identifier[value] identifier[index] += literal[int] keyword[if] identifier[indicator_data] . identifier[get] ( literal[string] )== literal[string] : identifier[size] = identifier[indicator_data] . identifier[pop] ( literal[string] , keyword[None] ) keyword[if] identifier[size] keyword[is] keyword[not] keyword[None] : identifier[indicator_data] [ literal[string] ]= identifier[size] keyword[if] identifier[indicator_data] . identifier[get] ( literal[string] )== literal[string] : identifier[dns_active] = identifier[indicator_data] . identifier[pop] ( literal[string] , keyword[None] ) keyword[if] identifier[dns_active] keyword[is] keyword[not] keyword[None] : identifier[indicator_data] [ literal[string] ]= identifier[dns_active] identifier[whois_active] = identifier[indicator_data] . identifier[pop] ( literal[string] , keyword[None] ) keyword[if] identifier[whois_active] keyword[is] keyword[not] keyword[None] : identifier[indicator_data] [ literal[string] ]= identifier[whois_active] keyword[return] identifier[self] . identifier[_indicator] ( identifier[indicator_data] )
def add_indicator(self, indicator_data): """Add an indicator to Batch Job. .. code-block:: javascript { "type": "File", "rating": 5.00, "confidence": 50, "summary": "53c3609411c83f363e051d455ade78a7 : 57a49b478310e4313c54c0fee46e4d70a73dd580 : db31cb2a748b7e0046d8c97a32a7eb4efde32a0593e5dbd58e07a3b4ae6bf3d7", "associatedGroups": [ { "groupXid": "e336e2dd-5dfb-48cd-a33a-f8809e83e904" } ], "attribute": [{ "type": "Source", "displayed": true, "value": "Malware Analysis provided by external AMA." }], "fileOccurrence": [{ "fileName": "drop1.exe", "date": "2017-03-03T18:00:00-06:00" }], "tag": [{ "name": "China" }], "xid": "e336e2dd-5dfb-48cd-a33a-f8809e83e904:170139" } Args: indicator_data (dict): The Full Indicator data including attributes, labels, tags, and associations. """ if indicator_data.get('type') not in ['Address', 'EmailAddress', 'File', 'Host', 'URL']: # for custom indicator types the valueX fields are required. # using the summary we can build the values index = 1 for value in self._indicator_values(indicator_data.get('summary')): indicator_data['value{}'.format(index)] = value index += 1 # depends on [control=['for'], data=['value']] # depends on [control=['if'], data=[]] if indicator_data.get('type') == 'File': # convert custom field name to the appropriate value for batch v2 size = indicator_data.pop('size', None) if size is not None: indicator_data['intValue1'] = size # depends on [control=['if'], data=['size']] # depends on [control=['if'], data=[]] if indicator_data.get('type') == 'Host': # convert custom field name to the appropriate value for batch v2 dns_active = indicator_data.pop('dnsActive', None) if dns_active is not None: indicator_data['flag1'] = dns_active # depends on [control=['if'], data=['dns_active']] whois_active = indicator_data.pop('whoisActive', None) if whois_active is not None: indicator_data['flag2'] = whois_active # depends on [control=['if'], data=['whois_active']] # depends on [control=['if'], data=[]] return self._indicator(indicator_data)
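For instance, the Host branch pops the custom field names and re-adds them under the flagN keys expected by batch v2; `batch` below stands for any instance exposing this method:

host = {
    'type': 'Host',
    'summary': 'bad.example.com',  # hypothetical indicator value
    'dnsActive': True,             # popped and re-added as 'flag1'
    'whoisActive': False,          # popped and re-added as 'flag2'
}
batch.add_indicator(host)
# host now carries {'flag1': True, 'flag2': False} in place of the custom keys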
def descendants(self): """Recursively return every dataset below current item.""" for i in self.current_item.items: self.move_to(i) if i.type == TYPE_COLLECTION: for c in self.children: yield c else: yield i self.move_up()
def function[descendants, parameter[self]]: constant[Recursively return every dataset below current item.] for taget[name[i]] in starred[name[self].current_item.items] begin[:] call[name[self].move_to, parameter[name[i]]] if compare[name[i].type equal[==] name[TYPE_COLLECTION]] begin[:] for taget[name[c]] in starred[name[self].children] begin[:] <ast.Yield object at 0x7da1b271dea0> call[name[self].move_up, parameter[]]
keyword[def] identifier[descendants] ( identifier[self] ): literal[string] keyword[for] identifier[i] keyword[in] identifier[self] . identifier[current_item] . identifier[items] : identifier[self] . identifier[move_to] ( identifier[i] ) keyword[if] identifier[i] . identifier[type] == identifier[TYPE_COLLECTION] : keyword[for] identifier[c] keyword[in] identifier[self] . identifier[children] : keyword[yield] identifier[c] keyword[else] : keyword[yield] identifier[i] identifier[self] . identifier[move_up] ()
def descendants(self): """Recursively return every dataset below current item.""" for i in self.current_item.items: self.move_to(i) if i.type == TYPE_COLLECTION: for c in self.children: yield c # depends on [control=['for'], data=['c']] # depends on [control=['if'], data=[]] else: yield i self.move_up() # depends on [control=['for'], data=['i']]
def _make_rank(dist_obj, n, mu, sigma, crit=0.5, upper=10000, xtol=1): """ Make rank distribution using both ppf and brute force. Setting crit = 1 is equivalent to just using the ppf Parameters ---------- {0} """ qs = (np.arange(1, n + 1) - 0.5) / n rank = np.empty(len(qs)) brute_ppf = lambda val, prob: prob - dist_obj.cdf(val, mu, sigma) qs_less = qs <= crit ind = np.sum(qs_less) # Use ppf if qs are below crit rank[qs_less] = dist_obj.ppf(qs[qs_less], mu, sigma) # Use brute force if they are above for i, tq in enumerate(qs[~qs_less]): j = ind + i try: # TODO: Use an adaptable lower bound to increase speed rank[j] = np.abs(np.ceil(optim.brentq(brute_ppf, -1, upper, args=(tq,), xtol=xtol))) except ValueError: # If it is above the upper bound set all remaining values # to the previous value rank[j:] = np.repeat(rank[j - 1], len(rank[j:])) break return rank
def function[_make_rank, parameter[dist_obj, n, mu, sigma, crit, upper, xtol]]: constant[ Make rank distribution using both ppf and brute force. Setting crit = 1 is equivalent to just using the ppf Parameters ---------- {0} ] variable[qs] assign[=] binary_operation[binary_operation[call[name[np].arange, parameter[constant[1], binary_operation[name[n] + constant[1]]]] - constant[0.5]] / name[n]] variable[rank] assign[=] call[name[np].empty, parameter[call[name[len], parameter[name[qs]]]]] variable[brute_ppf] assign[=] <ast.Lambda object at 0x7da1b287aa40> variable[qs_less] assign[=] compare[name[qs] less_or_equal[<=] name[crit]] variable[ind] assign[=] call[name[np].sum, parameter[name[qs_less]]] call[name[rank]][name[qs_less]] assign[=] call[name[dist_obj].ppf, parameter[call[name[qs]][name[qs_less]], name[mu], name[sigma]]] for taget[tuple[[<ast.Name object at 0x7da1b2776980>, <ast.Name object at 0x7da1b27744c0>]]] in starred[call[name[enumerate], parameter[call[name[qs]][<ast.UnaryOp object at 0x7da1b277c070>]]]] begin[:] variable[j] assign[=] binary_operation[name[ind] + name[i]] <ast.Try object at 0x7da1b277c6d0> return[name[rank]]
keyword[def] identifier[_make_rank] ( identifier[dist_obj] , identifier[n] , identifier[mu] , identifier[sigma] , identifier[crit] = literal[int] , identifier[upper] = literal[int] , identifier[xtol] = literal[int] ): literal[string] identifier[qs] =( identifier[np] . identifier[arange] ( literal[int] , identifier[n] + literal[int] )- literal[int] )/ identifier[n] identifier[rank] = identifier[np] . identifier[empty] ( identifier[len] ( identifier[qs] )) identifier[brute_ppf] = keyword[lambda] identifier[val] , identifier[prob] : identifier[prob] - identifier[dist_obj] . identifier[cdf] ( identifier[val] , identifier[mu] , identifier[sigma] ) identifier[qs_less] = identifier[qs] <= identifier[crit] identifier[ind] = identifier[np] . identifier[sum] ( identifier[qs_less] ) identifier[rank] [ identifier[qs_less] ]= identifier[dist_obj] . identifier[ppf] ( identifier[qs] [ identifier[qs_less] ], identifier[mu] , identifier[sigma] ) keyword[for] identifier[i] , identifier[tq] keyword[in] identifier[enumerate] ( identifier[qs] [~ identifier[qs_less] ]): identifier[j] = identifier[ind] + identifier[i] keyword[try] : identifier[rank] [ identifier[j] ]= identifier[np] . identifier[abs] ( identifier[np] . identifier[ceil] ( identifier[optim] . identifier[brentq] ( identifier[brute_ppf] ,- literal[int] , identifier[upper] , identifier[args] =( identifier[tq] ,), identifier[xtol] = identifier[xtol] ))) keyword[except] identifier[ValueError] : identifier[rank] [ identifier[j] :]= identifier[np] . identifier[repeat] ( identifier[rank] [ identifier[j] - literal[int] ], identifier[len] ( identifier[rank] [ identifier[j] :])) keyword[break] keyword[return] identifier[rank]
def _make_rank(dist_obj, n, mu, sigma, crit=0.5, upper=10000, xtol=1): """ Make rank distribution using both ppf and brute force. Setting crit = 1 is equivalent to just using the ppf Parameters ---------- {0} """ qs = (np.arange(1, n + 1) - 0.5) / n rank = np.empty(len(qs)) brute_ppf = lambda val, prob: prob - dist_obj.cdf(val, mu, sigma) qs_less = qs <= crit ind = np.sum(qs_less) # Use ppf if qs are below crit rank[qs_less] = dist_obj.ppf(qs[qs_less], mu, sigma) # Use brute force if they are above for (i, tq) in enumerate(qs[~qs_less]): j = ind + i try: # TODO: Use an adaptable lower bound to increase speed rank[j] = np.abs(np.ceil(optim.brentq(brute_ppf, -1, upper, args=(tq,), xtol=xtol))) # depends on [control=['try'], data=[]] except ValueError: # If it is above the upper bound set all remaining values # to the previous value rank[j:] = np.repeat(rank[j - 1], len(rank[j:])) break # depends on [control=['except'], data=[]] # depends on [control=['for'], data=[]] return rank
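A sketch of how it might be called, assuming dist_obj follows the scipy-style positional convention cdf(val, mu, sigma) / ppf(q, mu, sigma); scipy.stats.norm fits, with mu and sigma read as loc and scale:

import scipy.stats as stats

# n = 4 gives midpoint quantiles qs = [0.125, 0.375, 0.625, 0.875]; with
# crit = 0.5 the first two come straight from ppf, the last two from brentq
# root-finding on prob - cdf(x) followed by ceil.
rank = _make_rank(stats.norm, n=4, mu=10, sigma=2)
# roughly [7.699, 9.363, 11.0, 13.0]; the coarse default xtol=1 can shift
# the ceil'd brute-force ranks by one.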
def robust_backtrack(self): """Estimate step size L by computing a linesearch that guarantees that F <= Q according to the robust FISTA backtracking strategy in :cite:`florea-2017-robust`. This also updates all the supporting variables. """ self.L *= self.L_gamma_d maxiter = self.L_maxiter iterBTrack = 0 linesearch = 1 self.store_Yprev() while linesearch and iterBTrack < maxiter: t = float(1. + np.sqrt(1. + 4. * self.L * self.Tk)) / (2. * self.L) T = self.Tk + t y = (self.Tk * self.var_xprv() + t * self.ZZ) / T self.update_var_y(y) gradY = self.proximal_step() # Given Y(f), L, this updates X(f) f = self.obfn_f(self.var_x()) Dxy = self.eval_Dxy() Q = self.obfn_f(self.var_y()) + \ self.eval_linear_approx(Dxy, gradY) + \ (self.L / 2.) * np.linalg.norm(Dxy.flatten(), 2)**2 if f <= Q: linesearch = 0 else: self.L *= self.L_gamma_u iterBTrack += 1 self.Tk = T self.ZZ += (t * self.L * (self.var_x() - self.var_y())) self.F = f self.Q = Q self.iterBTrack = iterBTrack
def function[robust_backtrack, parameter[self]]: constant[Estimate step size L by computing a linesearch that guarantees that F <= Q according to the robust FISTA backtracking strategy in :cite:`florea-2017-robust`. This also updates all the supporting variables. ] <ast.AugAssign object at 0x7da1b06c5bd0> variable[maxiter] assign[=] name[self].L_maxiter variable[iterBTrack] assign[=] constant[0] variable[linesearch] assign[=] constant[1] call[name[self].store_Yprev, parameter[]] while <ast.BoolOp object at 0x7da1b06c61a0> begin[:] variable[t] assign[=] binary_operation[call[name[float], parameter[binary_operation[constant[1.0] + call[name[np].sqrt, parameter[binary_operation[constant[1.0] + binary_operation[binary_operation[constant[4.0] * name[self].L] * name[self].Tk]]]]]]] / binary_operation[constant[2.0] * name[self].L]] variable[T] assign[=] binary_operation[name[self].Tk + name[t]] variable[y] assign[=] binary_operation[binary_operation[binary_operation[name[self].Tk * call[name[self].var_xprv, parameter[]]] + binary_operation[name[t] * name[self].ZZ]] / name[T]] call[name[self].update_var_y, parameter[name[y]]] variable[gradY] assign[=] call[name[self].proximal_step, parameter[]] variable[f] assign[=] call[name[self].obfn_f, parameter[call[name[self].var_x, parameter[]]]] variable[Dxy] assign[=] call[name[self].eval_Dxy, parameter[]] variable[Q] assign[=] binary_operation[binary_operation[call[name[self].obfn_f, parameter[call[name[self].var_y, parameter[]]]] + call[name[self].eval_linear_approx, parameter[name[Dxy], name[gradY]]]] + binary_operation[binary_operation[name[self].L / constant[2.0]] * binary_operation[call[name[np].linalg.norm, parameter[call[name[Dxy].flatten, parameter[]], constant[2]]] ** constant[2]]]] if compare[name[f] less_or_equal[<=] name[Q]] begin[:] variable[linesearch] assign[=] constant[0] <ast.AugAssign object at 0x7da1b06c5ae0> name[self].Tk assign[=] name[T] <ast.AugAssign object at 0x7da1b06c5c60> name[self].F assign[=] name[f] name[self].Q assign[=] name[Q] name[self].iterBTrack assign[=] name[iterBTrack]
keyword[def] identifier[robust_backtrack] ( identifier[self] ): literal[string] identifier[self] . identifier[L] *= identifier[self] . identifier[L_gamma_d] identifier[maxiter] = identifier[self] . identifier[L_maxiter] identifier[iterBTrack] = literal[int] identifier[linesearch] = literal[int] identifier[self] . identifier[store_Yprev] () keyword[while] identifier[linesearch] keyword[and] identifier[iterBTrack] < identifier[maxiter] : identifier[t] = identifier[float] ( literal[int] + identifier[np] . identifier[sqrt] ( literal[int] + literal[int] * identifier[self] . identifier[L] * identifier[self] . identifier[Tk] ))/( literal[int] * identifier[self] . identifier[L] ) identifier[T] = identifier[self] . identifier[Tk] + identifier[t] identifier[y] =( identifier[self] . identifier[Tk] * identifier[self] . identifier[var_xprv] ()+ identifier[t] * identifier[self] . identifier[ZZ] )/ identifier[T] identifier[self] . identifier[update_var_y] ( identifier[y] ) identifier[gradY] = identifier[self] . identifier[proximal_step] () identifier[f] = identifier[self] . identifier[obfn_f] ( identifier[self] . identifier[var_x] ()) identifier[Dxy] = identifier[self] . identifier[eval_Dxy] () identifier[Q] = identifier[self] . identifier[obfn_f] ( identifier[self] . identifier[var_y] ())+ identifier[self] . identifier[eval_linear_approx] ( identifier[Dxy] , identifier[gradY] )+( identifier[self] . identifier[L] / literal[int] )* identifier[np] . identifier[linalg] . identifier[norm] ( identifier[Dxy] . identifier[flatten] (), literal[int] )** literal[int] keyword[if] identifier[f] <= identifier[Q] : identifier[linesearch] = literal[int] keyword[else] : identifier[self] . identifier[L] *= identifier[self] . identifier[L_gamma_u] identifier[iterBTrack] += literal[int] identifier[self] . identifier[Tk] = identifier[T] identifier[self] . identifier[ZZ] +=( identifier[t] * identifier[self] . identifier[L] *( identifier[self] . identifier[var_x] ()- identifier[self] . identifier[var_y] ())) identifier[self] . identifier[F] = identifier[f] identifier[self] . identifier[Q] = identifier[Q] identifier[self] . identifier[iterBTrack] = identifier[iterBTrack]
def robust_backtrack(self): """Estimate step size L by computing a linesearch that guarantees that F <= Q according to the robust FISTA backtracking strategy in :cite:`florea-2017-robust`. This also updates all the supporting variables. """ self.L *= self.L_gamma_d maxiter = self.L_maxiter iterBTrack = 0 linesearch = 1 self.store_Yprev() while linesearch and iterBTrack < maxiter: t = float(1.0 + np.sqrt(1.0 + 4.0 * self.L * self.Tk)) / (2.0 * self.L) T = self.Tk + t y = (self.Tk * self.var_xprv() + t * self.ZZ) / T self.update_var_y(y) gradY = self.proximal_step() # Given Y(f), L, this updates X(f) f = self.obfn_f(self.var_x()) Dxy = self.eval_Dxy() Q = self.obfn_f(self.var_y()) + self.eval_linear_approx(Dxy, gradY) + self.L / 2.0 * np.linalg.norm(Dxy.flatten(), 2) ** 2 if f <= Q: linesearch = 0 # depends on [control=['if'], data=[]] else: self.L *= self.L_gamma_u iterBTrack += 1 # depends on [control=['while'], data=[]] self.Tk = T self.ZZ += t * self.L * (self.var_x() - self.var_y()) self.F = f self.Q = Q self.iterBTrack = iterBTrack
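The step-size arithmetic inside this backtracking loop is easy to sanity-check in isolation. Below is a minimal sketch with scalar stand-ins for the solver state (the values of L, Tk, xprv and ZZ are placeholders, not taken from any real run); it reproduces the FISTA-style update t = (1 + sqrt(1 + 4*L*Tk)) / (2*L) and the weighted interpolation point y used above.

import numpy as np

# Hypothetical scalar stand-ins for the solver state.
L, Tk = 2.0, 1.0
xprv, ZZ = 0.5, 1.5   # previous iterate and the accumulated Z sequence

t = float(1.0 + np.sqrt(1.0 + 4.0 * L * Tk)) / (2.0 * L)
T = Tk + t
y = (Tk * xprv + t * ZZ) / T   # convex combination of xprv and ZZ
print(t, T, y)                 # 1.0 2.0 1.0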
def _sparse_blockify(tuples, dtype=None):
    """ return an array of blocks that potentially have different dtypes (and
    are sparse)
    """
    new_blocks = []
    for i, names, array in tuples:
        # coerce the values to a sparse representation where applicable
        array = _maybe_to_sparse(array)
        # one block per column, placed at its original position
        block = make_block(array, placement=[i])
        new_blocks.append(block)

    return new_blocks
def function[_sparse_blockify, parameter[tuples, dtype]]: constant[ return an array of blocks that potentially have different dtypes (and are sparse) ] variable[new_blocks] assign[=] list[[]] for taget[tuple[[<ast.Name object at 0x7da18f00ea40>, <ast.Name object at 0x7da18f00fb20>, <ast.Name object at 0x7da18f00dff0>]]] in starred[name[tuples]] begin[:] variable[array] assign[=] call[name[_maybe_to_sparse], parameter[name[array]]] variable[block] assign[=] call[name[make_block], parameter[name[array]]] call[name[new_blocks].append, parameter[name[block]]] return[name[new_blocks]]
keyword[def] identifier[_sparse_blockify] ( identifier[tuples] , identifier[dtype] = keyword[None] ): literal[string] identifier[new_blocks] =[] keyword[for] identifier[i] , identifier[names] , identifier[array] keyword[in] identifier[tuples] : identifier[array] = identifier[_maybe_to_sparse] ( identifier[array] ) identifier[block] = identifier[make_block] ( identifier[array] , identifier[placement] =[ identifier[i] ]) identifier[new_blocks] . identifier[append] ( identifier[block] ) keyword[return] identifier[new_blocks]
def _sparse_blockify(tuples, dtype=None): """ return an array of blocks that potentially have different dtypes (and are sparse) """ new_blocks = [] for (i, names, array) in tuples: array = _maybe_to_sparse(array) block = make_block(array, placement=[i]) new_blocks.append(block) # depends on [control=['for'], data=[]] return new_blocks
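make_block and _maybe_to_sparse are pandas internals, so a self-contained illustration has to stub them out. The sketch below (both stand-ins are hypothetical simplifications, not the real pandas code) shows the shape of the loop: each (position, name, array) tuple is converted and wrapped into one block keyed by its column position.

# Minimal stand-ins for the pandas internals used above (assumptions).
def _maybe_to_sparse(array):
    return array   # a real implementation may convert to a sparse array

def make_block(array, placement):
    return {"values": array, "placement": placement}

def _sparse_blockify(tuples, dtype=None):
    new_blocks = []
    for i, name, array in tuples:
        array = _maybe_to_sparse(array)
        new_blocks.append(make_block(array, placement=[i]))
    return new_blocks

print(_sparse_blockify([(0, "a", [1, 0, 2]), (1, "b", [0, 0, 3])]))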
def histogram(self, counts, bin_edges, linestyle='solid'): """Plot a polar histogram. The user needs to supply the histogram. This method only plots the results. You can use NumPy's histogram function. :param counts: array containing the count values. :param bin_edges: array containing the bin edges in degrees (or radians). :param linestyle: the line style used to connect the data points. May be None, or any line style accepted by TikZ (e.g. solid, dashed, dotted, thick, or even combinations like "red,thick,dashed"). Example:: >>> plot = artist.PolarPlot() >>> x = np.random.uniform(0, 360, size=1000) >>> n, bins = np.histogram(x, bins=np.linspace(0, 360, 37)) >>> plot.histogram(n, bins) """ if len(bin_edges) - 1 != len(counts): raise RuntimeError( 'The length of bin_edges should be length of counts + 1') x = [] y = [] if self.use_radians: circle = 2 * np.pi else: circle = 360. step = circle / 1800. for i in range(len(bin_edges) - 1): for bin_edge in np.arange(bin_edges[i], bin_edges[i + 1], step=step): x.append(bin_edge) y.append(counts[i]) x.append(bin_edges[i + 1]) y.append(counts[i]) # If last edge is same as first bin edge, connect the ends. if bin_edges[-1] % circle == bin_edges[0] % circle: x.append(bin_edges[0]) y.append(counts[0]) self.plot(x, y, mark=None, linestyle=linestyle)
def function[histogram, parameter[self, counts, bin_edges, linestyle]]: constant[Plot a polar histogram. The user needs to supply the histogram. This method only plots the results. You can use NumPy's histogram function. :param counts: array containing the count values. :param bin_edges: array containing the bin edges in degrees (or radians). :param linestyle: the line style used to connect the data points. May be None, or any line style accepted by TikZ (e.g. solid, dashed, dotted, thick, or even combinations like "red,thick,dashed"). Example:: >>> plot = artist.PolarPlot() >>> x = np.random.uniform(0, 360, size=1000) >>> n, bins = np.histogram(x, bins=np.linspace(0, 360, 37)) >>> plot.histogram(n, bins) ] if compare[binary_operation[call[name[len], parameter[name[bin_edges]]] - constant[1]] not_equal[!=] call[name[len], parameter[name[counts]]]] begin[:] <ast.Raise object at 0x7da1b2368e80> variable[x] assign[=] list[[]] variable[y] assign[=] list[[]] if name[self].use_radians begin[:] variable[circle] assign[=] binary_operation[constant[2] * name[np].pi] variable[step] assign[=] binary_operation[name[circle] / constant[1800.0]] for taget[name[i]] in starred[call[name[range], parameter[binary_operation[call[name[len], parameter[name[bin_edges]]] - constant[1]]]]] begin[:] for taget[name[bin_edge]] in starred[call[name[np].arange, parameter[call[name[bin_edges]][name[i]], call[name[bin_edges]][binary_operation[name[i] + constant[1]]]]]] begin[:] call[name[x].append, parameter[name[bin_edge]]] call[name[y].append, parameter[call[name[counts]][name[i]]]] call[name[x].append, parameter[call[name[bin_edges]][binary_operation[name[i] + constant[1]]]]] call[name[y].append, parameter[call[name[counts]][name[i]]]] if compare[binary_operation[call[name[bin_edges]][<ast.UnaryOp object at 0x7da1b236ab60>] <ast.Mod object at 0x7da2590d6920> name[circle]] equal[==] binary_operation[call[name[bin_edges]][constant[0]] <ast.Mod object at 0x7da2590d6920> name[circle]]] begin[:] call[name[x].append, parameter[call[name[bin_edges]][constant[0]]]] call[name[y].append, parameter[call[name[counts]][constant[0]]]] call[name[self].plot, parameter[name[x], name[y]]]
keyword[def] identifier[histogram] ( identifier[self] , identifier[counts] , identifier[bin_edges] , identifier[linestyle] = literal[string] ): literal[string] keyword[if] identifier[len] ( identifier[bin_edges] )- literal[int] != identifier[len] ( identifier[counts] ): keyword[raise] identifier[RuntimeError] ( literal[string] ) identifier[x] =[] identifier[y] =[] keyword[if] identifier[self] . identifier[use_radians] : identifier[circle] = literal[int] * identifier[np] . identifier[pi] keyword[else] : identifier[circle] = literal[int] identifier[step] = identifier[circle] / literal[int] keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[bin_edges] )- literal[int] ): keyword[for] identifier[bin_edge] keyword[in] identifier[np] . identifier[arange] ( identifier[bin_edges] [ identifier[i] ], identifier[bin_edges] [ identifier[i] + literal[int] ], identifier[step] = identifier[step] ): identifier[x] . identifier[append] ( identifier[bin_edge] ) identifier[y] . identifier[append] ( identifier[counts] [ identifier[i] ]) identifier[x] . identifier[append] ( identifier[bin_edges] [ identifier[i] + literal[int] ]) identifier[y] . identifier[append] ( identifier[counts] [ identifier[i] ]) keyword[if] identifier[bin_edges] [- literal[int] ]% identifier[circle] == identifier[bin_edges] [ literal[int] ]% identifier[circle] : identifier[x] . identifier[append] ( identifier[bin_edges] [ literal[int] ]) identifier[y] . identifier[append] ( identifier[counts] [ literal[int] ]) identifier[self] . identifier[plot] ( identifier[x] , identifier[y] , identifier[mark] = keyword[None] , identifier[linestyle] = identifier[linestyle] )
def histogram(self, counts, bin_edges, linestyle='solid'): """Plot a polar histogram. The user needs to supply the histogram. This method only plots the results. You can use NumPy's histogram function. :param counts: array containing the count values. :param bin_edges: array containing the bin edges in degrees (or radians). :param linestyle: the line style used to connect the data points. May be None, or any line style accepted by TikZ (e.g. solid, dashed, dotted, thick, or even combinations like "red,thick,dashed"). Example:: >>> plot = artist.PolarPlot() >>> x = np.random.uniform(0, 360, size=1000) >>> n, bins = np.histogram(x, bins=np.linspace(0, 360, 37)) >>> plot.histogram(n, bins) """ if len(bin_edges) - 1 != len(counts): raise RuntimeError('The length of bin_edges should be length of counts + 1') # depends on [control=['if'], data=[]] x = [] y = [] if self.use_radians: circle = 2 * np.pi # depends on [control=['if'], data=[]] else: circle = 360.0 step = circle / 1800.0 for i in range(len(bin_edges) - 1): for bin_edge in np.arange(bin_edges[i], bin_edges[i + 1], step=step): x.append(bin_edge) y.append(counts[i]) # depends on [control=['for'], data=['bin_edge']] x.append(bin_edges[i + 1]) y.append(counts[i]) # depends on [control=['for'], data=['i']] # If last edge is same as first bin edge, connect the ends. if bin_edges[-1] % circle == bin_edges[0] % circle: x.append(bin_edges[0]) y.append(counts[0]) # depends on [control=['if'], data=[]] self.plot(x, y, mark=None, linestyle=linestyle)
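The densification loop — stepping through each bin at circle/1800 resolution so the polar line follows the arc rather than a chord — runs fine without any plotting backend. A minimal sketch in degrees, reusing the same append logic (the counts and edges are made-up sample data):

import numpy as np

counts = np.array([3, 5, 2])
bin_edges = np.array([0.0, 120.0, 240.0, 360.0])
step = 360.0 / 1800.0   # same resolution the method uses for degrees

x, y = [], []
for i in range(len(bin_edges) - 1):
    for edge in np.arange(bin_edges[i], bin_edges[i + 1], step=step):
        x.append(edge)
        y.append(counts[i])
    x.append(bin_edges[i + 1])
    y.append(counts[i])
# first and last edges coincide modulo 360, so close the curve
x.append(bin_edges[0])
y.append(counts[0])
print(len(x), y[:3])   # ~1800 densified points, constant height per bin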
def save_items(self, rows=None, verbose=False):
    """
    Return a dictionary of row data for selected rows:
    {1: {col1: val1, col2: val2}, ...}
    If a list of row numbers isn't provided, get data for all.
    """
    if not rows:
        # no explicit selection: default to every row in the grid
        rows = list(range(self.GetNumberRows()))
    cols = list(range(self.GetNumberCols()))
    data = {}
    for row in rows:
        data[row] = {}
        for col in cols:
            col_name = self.GetColLabelValue(col)
            if verbose:
                print(col_name, ":", self.GetCellValue(row, col))
            data[row][col_name] = self.GetCellValue(row, col)
    return data
def function[save_items, parameter[self, rows, verbose]]: constant[ Return a dictionary of row data for selected rows: {1: {col1: val1, col2: val2}, ...} If a list of row numbers isn't provided, get data for all. ] if name[rows] begin[:] variable[rows] assign[=] name[rows] variable[cols] assign[=] call[name[list], parameter[call[name[range], parameter[call[name[self].GetNumberCols, parameter[]]]]]] variable[data] assign[=] dictionary[[], []] for taget[name[row]] in starred[name[rows]] begin[:] call[name[data]][name[row]] assign[=] dictionary[[], []] for taget[name[col]] in starred[name[cols]] begin[:] variable[col_name] assign[=] call[name[self].GetColLabelValue, parameter[name[col]]] if name[verbose] begin[:] call[name[print], parameter[name[col_name], constant[:], call[name[self].GetCellValue, parameter[name[row], name[col]]]]] call[call[name[data]][name[row]]][name[col_name]] assign[=] call[name[self].GetCellValue, parameter[name[row], name[col]]] return[name[data]]
keyword[def] identifier[save_items] ( identifier[self] , identifier[rows] = keyword[None] , identifier[verbose] = keyword[False] ): literal[string] keyword[if] identifier[rows] : identifier[rows] = identifier[rows] keyword[else] : identifier[rows] = identifier[list] ( identifier[range] ( identifier[self] . identifier[GetNumberRows] ())) identifier[cols] = identifier[list] ( identifier[range] ( identifier[self] . identifier[GetNumberCols] ())) identifier[data] ={} keyword[for] identifier[row] keyword[in] identifier[rows] : identifier[data] [ identifier[row] ]={} keyword[for] identifier[col] keyword[in] identifier[cols] : identifier[col_name] = identifier[self] . identifier[GetColLabelValue] ( identifier[col] ) keyword[if] identifier[verbose] : identifier[print] ( identifier[col_name] , literal[string] , identifier[self] . identifier[GetCellValue] ( identifier[row] , identifier[col] )) identifier[data] [ identifier[row] ][ identifier[col_name] ]= identifier[self] . identifier[GetCellValue] ( identifier[row] , identifier[col] ) keyword[return] identifier[data]
def save_items(self, rows=None, verbose=False): """ Return a dictionary of row data for selected rows: {1: {col1: val1, col2: val2}, ...} If a list of row numbers isn't provided, get data for all. """ if rows: rows = rows # depends on [control=['if'], data=[]] else: rows = list(range(self.GetNumberRows())) cols = list(range(self.GetNumberCols())) data = {} for row in rows: data[row] = {} for col in cols: col_name = self.GetColLabelValue(col) if verbose: print(col_name, ':', self.GetCellValue(row, col)) # depends on [control=['if'], data=[]] data[row][col_name] = self.GetCellValue(row, col) # depends on [control=['for'], data=['col']] # depends on [control=['for'], data=['row']] return data
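The method only touches four wx.grid accessors (GetNumberRows, GetNumberCols, GetColLabelValue, GetCellValue), so a tiny stand-in grid is enough to see the returned shape; FakeGrid below is a hypothetical mock, not a real wx class.

class FakeGrid:
    """Hypothetical stand-in exposing the four accessors used above."""
    labels = ["name", "age"]
    cells = [["ada", "36"], ["alan", "41"]]

    def GetNumberRows(self): return len(self.cells)
    def GetNumberCols(self): return len(self.labels)
    def GetColLabelValue(self, col): return self.labels[col]
    def GetCellValue(self, row, col): return self.cells[row][col]

grid = FakeGrid()
data = {row: {grid.GetColLabelValue(c): grid.GetCellValue(row, c)
              for c in range(grid.GetNumberCols())}
        for row in range(grid.GetNumberRows())}
print(data)   # {0: {'name': 'ada', 'age': '36'}, 1: {'name': 'alan', 'age': '41'}}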
def _isCompatible(self, other, reporter): """ This is the environment implementation of :meth:`BaseGuideline.isCompatible`. Subclasses may override this method. """ guideline1 = self guideline2 = other # guideline names if guideline1.name != guideline2.name: reporter.nameDifference = True reporter.warning = True
def function[_isCompatible, parameter[self, other, reporter]]: constant[ This is the environment implementation of :meth:`BaseGuideline.isCompatible`. Subclasses may override this method. ] variable[guideline1] assign[=] name[self] variable[guideline2] assign[=] name[other] if compare[name[guideline1].name not_equal[!=] name[guideline2].name] begin[:] name[reporter].nameDifference assign[=] constant[True] name[reporter].warning assign[=] constant[True]
keyword[def] identifier[_isCompatible] ( identifier[self] , identifier[other] , identifier[reporter] ): literal[string] identifier[guideline1] = identifier[self] identifier[guideline2] = identifier[other] keyword[if] identifier[guideline1] . identifier[name] != identifier[guideline2] . identifier[name] : identifier[reporter] . identifier[nameDifference] = keyword[True] identifier[reporter] . identifier[warning] = keyword[True]
def _isCompatible(self, other, reporter): """ This is the environment implementation of :meth:`BaseGuideline.isCompatible`. Subclasses may override this method. """ guideline1 = self guideline2 = other # guideline names if guideline1.name != guideline2.name: reporter.nameDifference = True reporter.warning = True # depends on [control=['if'], data=[]]
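Because the check only reads .name from both objects and sets two flags on the reporter, plain namespaces suffice to exercise it; the guideline names below are made up for illustration.

from types import SimpleNamespace

g1 = SimpleNamespace(name="ascender")
g2 = SimpleNamespace(name="descender")
reporter = SimpleNamespace(nameDifference=False, warning=False)

if g1.name != g2.name:
    reporter.nameDifference = True
    reporter.warning = True
print(reporter.nameDifference, reporter.warning)   # True True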
def initialize(self, id=None, text=None): self.id = none_or(id, int) """ Contributing user's identifier : int | None """ self.text = none_or(text, str) """ Username or IP address of the user at the time of the edit : str | None """
def function[initialize, parameter[self, id, text]]: name[self].id assign[=] call[name[none_or], parameter[name[id], name[int]]] constant[ Contributing user's identifier : int | None ] name[self].text assign[=] call[name[none_or], parameter[name[text], name[str]]] constant[ Username or IP address of the user at the time of the edit : str | None ]
keyword[def] identifier[initialize] ( identifier[self] , identifier[id] = keyword[None] , identifier[text] = keyword[None] ): identifier[self] . identifier[id] = identifier[none_or] ( identifier[id] , identifier[int] ) literal[string] identifier[self] . identifier[text] = identifier[none_or] ( identifier[text] , identifier[str] ) literal[string]
def initialize(self, id=None, text=None): self.id = none_or(id, int) "\n Contributing user's identifier : int | None\n " self.text = none_or(text, str) '\n Username or IP address of the user at the time of the edit : str | None\n '
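none_or is imported from elsewhere in the package; its evident contract is "apply the converter unless the value is None". A minimal sketch of that contract (an assumption about the helper, not its actual source):

def none_or(value, converter):
    # Hypothetical reimplementation: pass None through, convert otherwise.
    return None if value is None else converter(value)

print(none_or("42", int))    # 42
print(none_or(None, int))    # None
print(none_or(1234, str))    # '1234'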
def p_factor_unary_operators(self, p): """ term : SUB factor | ADD factor """ p[0] = p[2] if p[1] == '-': p[0] = Instruction('-x', context={'x': p[0]})
def function[p_factor_unary_operators, parameter[self, p]]: constant[ term : SUB factor | ADD factor ] call[name[p]][constant[0]] assign[=] call[name[p]][constant[2]] if compare[call[name[p]][constant[1]] equal[==] constant[-]] begin[:] call[name[p]][constant[0]] assign[=] call[name[Instruction], parameter[constant[-x]]]
keyword[def] identifier[p_factor_unary_operators] ( identifier[self] , identifier[p] ): literal[string] identifier[p] [ literal[int] ]= identifier[p] [ literal[int] ] keyword[if] identifier[p] [ literal[int] ]== literal[string] : identifier[p] [ literal[int] ]= identifier[Instruction] ( literal[string] , identifier[context] ={ literal[string] : identifier[p] [ literal[int] ]})
def p_factor_unary_operators(self, p): """ term : SUB factor | ADD factor """ p[0] = p[2] if p[1] == '-': p[0] = Instruction('-x', context={'x': p[0]}) # depends on [control=['if'], data=[]]
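In a PLY grammar action, p[1] and p[2] hold the matched symbols, so unary plus passes the operand through while unary minus wraps it. The standalone sketch below fakes the parser slice and uses a trivial Instruction stand-in (the real class is project-specific):

class Instruction:
    # Hypothetical stand-in: just records the expression and its context.
    def __init__(self, expr, context):
        self.expr, self.context = expr, context
    def __repr__(self):
        return "Instruction(%r, %r)" % (self.expr, self.context)

p = [None, "-", 7]   # mimics the parser slice for "SUB factor"
p[0] = p[2]
if p[1] == "-":
    p[0] = Instruction("-x", context={"x": p[0]})
print(p[0])   # Instruction('-x', {'x': 7})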
def _get_column(in_file, out_file, column, data=None):
    """Subset one column from a file
    """
    with file_transaction(data, out_file) as tx_out_file:
        with open(in_file) as in_handle:
            with open(tx_out_file, 'w') as out_handle:
                for line in in_handle:
                    cols = line.strip().split("\t")
                    # the header line mentions eff_count -> skip it
                    if line.find("eff_count") > 0:
                        continue
                    number = cols[column]
                    if column == 7:
                        # this column holds floats; emit rounded integers
                        number = int(round(float(number), 0))
                    out_handle.write("%s\t%s\n" % (cols[1], number))
    return out_file
def function[_get_column, parameter[in_file, out_file, column, data]]: constant[Subset one column from a file ] with call[name[file_transaction], parameter[name[data], name[out_file]]] begin[:] with call[name[open], parameter[name[in_file]]] begin[:] with call[name[open], parameter[name[tx_out_file], constant[w]]] begin[:] for taget[name[line]] in starred[name[in_handle]] begin[:] variable[cols] assign[=] call[call[name[line].strip, parameter[]].split, parameter[constant[ ]]] if compare[call[name[line].find, parameter[constant[eff_count]]] greater[>] constant[0]] begin[:] continue variable[number] assign[=] call[name[cols]][name[column]] if compare[name[column] equal[==] constant[7]] begin[:] variable[number] assign[=] call[name[int], parameter[call[name[round], parameter[call[name[float], parameter[name[number]]], constant[0]]]]] call[name[out_handle].write, parameter[binary_operation[constant[%s %s ] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Subscript object at 0x7da18f09fe20>, <ast.Name object at 0x7da18f09cd90>]]]]] return[name[out_file]]
keyword[def] identifier[_get_column] ( identifier[in_file] , identifier[out_file] , identifier[column] , identifier[data] = keyword[None] ): literal[string] keyword[with] identifier[file_transaction] ( identifier[data] , identifier[out_file] ) keyword[as] identifier[tx_out_file] : keyword[with] identifier[open] ( identifier[in_file] ) keyword[as] identifier[in_handle] : keyword[with] identifier[open] ( identifier[tx_out_file] , literal[string] ) keyword[as] identifier[out_handle] : keyword[for] identifier[line] keyword[in] identifier[in_handle] : identifier[cols] = identifier[line] . identifier[strip] (). identifier[split] ( literal[string] ) keyword[if] identifier[line] . identifier[find] ( literal[string] )> literal[int] : keyword[continue] identifier[number] = identifier[cols] [ identifier[column] ] keyword[if] identifier[column] == literal[int] : identifier[number] = identifier[int] ( identifier[round] ( identifier[float] ( identifier[number] ), literal[int] )) identifier[out_handle] . identifier[write] ( literal[string] %( identifier[cols] [ literal[int] ], identifier[number] )) keyword[return] identifier[out_file]
def _get_column(in_file, out_file, column, data=None): """Subset one column from a file """ with file_transaction(data, out_file) as tx_out_file: with open(in_file) as in_handle: with open(tx_out_file, 'w') as out_handle: for line in in_handle: cols = line.strip().split('\t') if line.find('eff_count') > 0: continue # depends on [control=['if'], data=[]] number = cols[column] if column == 7: number = int(round(float(number), 0)) # depends on [control=['if'], data=[]] out_handle.write('%s\t%s\n' % (cols[1], number)) # depends on [control=['for'], data=['line']] # depends on [control=['with'], data=['out_handle']] # depends on [control=['with'], data=['open', 'in_handle']] # depends on [control=['with'], data=['tx_out_file']] return out_file
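Stripped of the file_transaction bookkeeping, the core is a per-line column extraction with a header skip and an integer-rounding special case for column 7. A minimal sketch over in-memory lines (the sample rows are fabricated for illustration):

lines = [
    "id\tname\teff_count\n",           # header mentions eff_count -> skipped
    "r1\tgeneA\t3\t4\t5\t6\t7\t8.6\n",
]
column = 7
out = []
for line in lines:
    cols = line.strip().split("\t")
    if line.find("eff_count") > 0:
        continue
    number = cols[column]
    if column == 7:
        number = int(round(float(number), 0))   # 8.6 -> 9
    out.append("%s\t%s" % (cols[1], number))
print(out)   # ['geneA\t9']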
def check_package_data(dist, attr, value): """Verify that value is a dictionary of package names to glob lists""" if isinstance(value, dict): for k, v in value.items(): if not isinstance(k, str): break try: iter(v) except TypeError: break else: return raise DistutilsSetupError( attr + " must be a dictionary mapping package names to lists of " "wildcard patterns" )
def function[check_package_data, parameter[dist, attr, value]]: constant[Verify that value is a dictionary of package names to glob lists] if call[name[isinstance], parameter[name[value], name[dict]]] begin[:] for taget[tuple[[<ast.Name object at 0x7da1b1b14700>, <ast.Name object at 0x7da1b1b15ed0>]]] in starred[call[name[value].items, parameter[]]] begin[:] if <ast.UnaryOp object at 0x7da1b1b14670> begin[:] break <ast.Try object at 0x7da1b1b14910> <ast.Raise object at 0x7da1b1b17c10>
keyword[def] identifier[check_package_data] ( identifier[dist] , identifier[attr] , identifier[value] ): literal[string] keyword[if] identifier[isinstance] ( identifier[value] , identifier[dict] ): keyword[for] identifier[k] , identifier[v] keyword[in] identifier[value] . identifier[items] (): keyword[if] keyword[not] identifier[isinstance] ( identifier[k] , identifier[str] ): keyword[break] keyword[try] : identifier[iter] ( identifier[v] ) keyword[except] identifier[TypeError] : keyword[break] keyword[else] : keyword[return] keyword[raise] identifier[DistutilsSetupError] ( identifier[attr] + literal[string] literal[string] )
def check_package_data(dist, attr, value): """Verify that value is a dictionary of package names to glob lists""" if isinstance(value, dict): for (k, v) in value.items(): if not isinstance(k, str): break # depends on [control=['if'], data=[]] try: iter(v) # depends on [control=['try'], data=[]] except TypeError: break # depends on [control=['except'], data=[]] # depends on [control=['for'], data=[]] else: return # depends on [control=['if'], data=[]] raise DistutilsSetupError(attr + ' must be a dictionary mapping package names to lists of wildcard patterns')
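A usage sketch, assuming the validator above is in scope; the first call passes silently, the second raises because the value is not a dict of package names at all.

from distutils.errors import DistutilsSetupError  # shimmed by setuptools on newer Pythons

# Valid: package names map to iterables of glob patterns.
check_package_data(None, "package_data", {"mypkg": ["*.dat", "tpl/*.html"]})

# Invalid: the value is a list, not a dict.
try:
    check_package_data(None, "package_data", ["*.dat"])
except DistutilsSetupError as err:
    print(err)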
def validate(self, name, value): """Validate a cookie attribute with an appropriate validator. The value comes in already parsed (for example, an expires value should be a datetime). Called automatically when an attribute value is set. """ validator = self.attribute_validators.get(name, None) if validator: return True if validator(value) else False return True
def function[validate, parameter[self, name, value]]: constant[Validate a cookie attribute with an appropriate validator. The value comes in already parsed (for example, an expires value should be a datetime). Called automatically when an attribute value is set. ] variable[validator] assign[=] call[name[self].attribute_validators.get, parameter[name[name], constant[None]]] if name[validator] begin[:] return[<ast.IfExp object at 0x7da18f00f400>] return[constant[True]]
keyword[def] identifier[validate] ( identifier[self] , identifier[name] , identifier[value] ): literal[string] identifier[validator] = identifier[self] . identifier[attribute_validators] . identifier[get] ( identifier[name] , keyword[None] ) keyword[if] identifier[validator] : keyword[return] keyword[True] keyword[if] identifier[validator] ( identifier[value] ) keyword[else] keyword[False] keyword[return] keyword[True]
def validate(self, name, value): """Validate a cookie attribute with an appropriate validator. The value comes in already parsed (for example, an expires value should be a datetime). Called automatically when an attribute value is set. """ validator = self.attribute_validators.get(name, None) if validator: return True if validator(value) else False # depends on [control=['if'], data=[]] return True
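The method treats a missing validator as "accept anything" and coerces the validator's result to a strict boolean. A minimal standalone sketch with a hypothetical validator registry:

from datetime import datetime

attribute_validators = {
    "max_age": lambda v: isinstance(v, int) and v >= 0,
    "expires": lambda v: isinstance(v, datetime),
}

def validate(name, value):
    validator = attribute_validators.get(name)
    return bool(validator(value)) if validator else True

print(validate("max_age", 3600))           # True
print(validate("max_age", -1))             # False
print(validate("unknown_attribute", "x"))  # True (no validator registered)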
def generate(num_nodes, num_edges, directed=False, weight_range=(1, 1)): """ Create a random graph. @type num_nodes: number @param num_nodes: Number of nodes. @type num_edges: number @param num_edges: Number of edges. @type directed: bool @param directed: Whether the generated graph should be directed or not. @type weight_range: tuple @param weight_range: tuple of two integers as lower and upper limits on randomly generated weights (uniform distribution). """ # Graph creation if directed: random_graph = digraph() else: random_graph = graph() # Nodes nodes = range(num_nodes) random_graph.add_nodes(nodes) # Build a list of all possible edges edges = [] edges_append = edges.append for x in nodes: for y in nodes: if ((directed and x != y) or (x > y)): edges_append((x, y)) # Randomize the list shuffle(edges) # Add edges to the graph min_wt = min(weight_range) max_wt = max(weight_range) for i in range(num_edges): each = edges[i] random_graph.add_edge((each[0], each[1]), wt = randint(min_wt, max_wt)) return random_graph
def function[generate, parameter[num_nodes, num_edges, directed, weight_range]]: constant[ Create a random graph. @type num_nodes: number @param num_nodes: Number of nodes. @type num_edges: number @param num_edges: Number of edges. @type directed: bool @param directed: Whether the generated graph should be directed or not. @type weight_range: tuple @param weight_range: tuple of two integers as lower and upper limits on randomly generated weights (uniform distribution). ] if name[directed] begin[:] variable[random_graph] assign[=] call[name[digraph], parameter[]] variable[nodes] assign[=] call[name[range], parameter[name[num_nodes]]] call[name[random_graph].add_nodes, parameter[name[nodes]]] variable[edges] assign[=] list[[]] variable[edges_append] assign[=] name[edges].append for taget[name[x]] in starred[name[nodes]] begin[:] for taget[name[y]] in starred[name[nodes]] begin[:] if <ast.BoolOp object at 0x7da2045675e0> begin[:] call[name[edges_append], parameter[tuple[[<ast.Name object at 0x7da2045667a0>, <ast.Name object at 0x7da2045656f0>]]]] call[name[shuffle], parameter[name[edges]]] variable[min_wt] assign[=] call[name[min], parameter[name[weight_range]]] variable[max_wt] assign[=] call[name[max], parameter[name[weight_range]]] for taget[name[i]] in starred[call[name[range], parameter[name[num_edges]]]] begin[:] variable[each] assign[=] call[name[edges]][name[i]] call[name[random_graph].add_edge, parameter[tuple[[<ast.Subscript object at 0x7da204564340>, <ast.Subscript object at 0x7da204567d60>]]]] return[name[random_graph]]
keyword[def] identifier[generate] ( identifier[num_nodes] , identifier[num_edges] , identifier[directed] = keyword[False] , identifier[weight_range] =( literal[int] , literal[int] )): literal[string] keyword[if] identifier[directed] : identifier[random_graph] = identifier[digraph] () keyword[else] : identifier[random_graph] = identifier[graph] () identifier[nodes] = identifier[range] ( identifier[num_nodes] ) identifier[random_graph] . identifier[add_nodes] ( identifier[nodes] ) identifier[edges] =[] identifier[edges_append] = identifier[edges] . identifier[append] keyword[for] identifier[x] keyword[in] identifier[nodes] : keyword[for] identifier[y] keyword[in] identifier[nodes] : keyword[if] (( identifier[directed] keyword[and] identifier[x] != identifier[y] ) keyword[or] ( identifier[x] > identifier[y] )): identifier[edges_append] (( identifier[x] , identifier[y] )) identifier[shuffle] ( identifier[edges] ) identifier[min_wt] = identifier[min] ( identifier[weight_range] ) identifier[max_wt] = identifier[max] ( identifier[weight_range] ) keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[num_edges] ): identifier[each] = identifier[edges] [ identifier[i] ] identifier[random_graph] . identifier[add_edge] (( identifier[each] [ literal[int] ], identifier[each] [ literal[int] ]), identifier[wt] = identifier[randint] ( identifier[min_wt] , identifier[max_wt] )) keyword[return] identifier[random_graph]
def generate(num_nodes, num_edges, directed=False, weight_range=(1, 1)): """ Create a random graph. @type num_nodes: number @param num_nodes: Number of nodes. @type num_edges: number @param num_edges: Number of edges. @type directed: bool @param directed: Whether the generated graph should be directed or not. @type weight_range: tuple @param weight_range: tuple of two integers as lower and upper limits on randomly generated weights (uniform distribution). """ # Graph creation if directed: random_graph = digraph() # depends on [control=['if'], data=[]] else: random_graph = graph() # Nodes nodes = range(num_nodes) random_graph.add_nodes(nodes) # Build a list of all possible edges edges = [] edges_append = edges.append for x in nodes: for y in nodes: if directed and x != y or x > y: edges_append((x, y)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['y']] # depends on [control=['for'], data=['x']] # Randomize the list shuffle(edges) # Add edges to the graph min_wt = min(weight_range) max_wt = max(weight_range) for i in range(num_edges): each = edges[i] random_graph.add_edge((each[0], each[1]), wt=randint(min_wt, max_wt)) # depends on [control=['for'], data=['i']] return random_graph
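The graph container comes from the python-graph library, but the sampling idea — enumerate every admissible (x, y) pair, shuffle, keep the first num_edges — is library-independent. A minimal sketch with plain tuples (note how x > y yields each undirected pair exactly once):

from random import shuffle, randint

num_nodes, num_edges, directed = 5, 4, False
nodes = range(num_nodes)

edges = [(x, y) for x in nodes for y in nodes
         if (directed and x != y) or (x > y)]
shuffle(edges)

weighted = [(e, randint(1, 1)) for e in edges[:num_edges]]
print(weighted)   # four distinct undirected edges, each with weight 1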
def run(configobj=None): """ TEAL interface for the `acsccd` function. """ acsccd(configobj['input'], exec_path=configobj['exec_path'], time_stamps=configobj['time_stamps'], verbose=configobj['verbose'], quiet=configobj['quiet'] #, #dqicorr=configobj['dqicorr'], #atodcorr=configobj['atodcorr'], #blevcorr=configobj['blevcorr'], #biascorr=configobj['biascorr'] )
def function[run, parameter[configobj]]: constant[ TEAL interface for the `acsccd` function. ] call[name[acsccd], parameter[call[name[configobj]][constant[input]]]]
keyword[def] identifier[run] ( identifier[configobj] = keyword[None] ): literal[string] identifier[acsccd] ( identifier[configobj] [ literal[string] ], identifier[exec_path] = identifier[configobj] [ literal[string] ], identifier[time_stamps] = identifier[configobj] [ literal[string] ], identifier[verbose] = identifier[configobj] [ literal[string] ], identifier[quiet] = identifier[configobj] [ literal[string] ] )
def run(configobj=None): """ TEAL interface for the `acsccd` function. """ #, #dqicorr=configobj['dqicorr'], #atodcorr=configobj['atodcorr'], #blevcorr=configobj['blevcorr'], #biascorr=configobj['biascorr'] acsccd(configobj['input'], exec_path=configobj['exec_path'], time_stamps=configobj['time_stamps'], verbose=configobj['verbose'], quiet=configobj['quiet'])
def parallel_check(vec1, vec2): """Checks whether two vectors are parallel OR anti-parallel. Vectors must be of the same dimension. Parameters ---------- vec1 length-R |npfloat_| -- First vector to compare vec2 length-R |npfloat_| -- Second vector to compare Returns ------- par |bool| -- |True| if (anti-)parallel to within :data:`opan.const.PRM.NON_PARALLEL_TOL` degrees. |False| otherwise. """ # Imports from ..const import PRM import numpy as np # Initialize False par = False # Shape check for n,v in enumerate([vec1, vec2]): if not len(v.shape) == 1: raise ValueError("Bad shape for vector #{0}".format(n)) ## end if ## next v,n if not vec1.shape[0] == vec2.shape[0]: raise ValueError("Vector length mismatch") ## end if # Check for (anti-)parallel character and return angle = vec_angle(vec1, vec2) if min([abs(angle), abs(angle - 180.)]) < PRM.NON_PARALLEL_TOL: par = True ## end if return par
def function[parallel_check, parameter[vec1, vec2]]: constant[Checks whether two vectors are parallel OR anti-parallel. Vectors must be of the same dimension. Parameters ---------- vec1 length-R |npfloat_| -- First vector to compare vec2 length-R |npfloat_| -- Second vector to compare Returns ------- par |bool| -- |True| if (anti-)parallel to within :data:`opan.const.PRM.NON_PARALLEL_TOL` degrees. |False| otherwise. ] from relative_module[const] import module[PRM] import module[numpy] as alias[np] variable[par] assign[=] constant[False] for taget[tuple[[<ast.Name object at 0x7da1b257f7f0>, <ast.Name object at 0x7da1b257de10>]]] in starred[call[name[enumerate], parameter[list[[<ast.Name object at 0x7da1b257dc30>, <ast.Name object at 0x7da1b257c580>]]]]] begin[:] if <ast.UnaryOp object at 0x7da1b257c370> begin[:] <ast.Raise object at 0x7da1b257d8d0> if <ast.UnaryOp object at 0x7da1b257c3a0> begin[:] <ast.Raise object at 0x7da1b257cd60> variable[angle] assign[=] call[name[vec_angle], parameter[name[vec1], name[vec2]]] if compare[call[name[min], parameter[list[[<ast.Call object at 0x7da1b257f670>, <ast.Call object at 0x7da1b257caf0>]]]] less[<] name[PRM].NON_PARALLEL_TOL] begin[:] variable[par] assign[=] constant[True] return[name[par]]
keyword[def] identifier[parallel_check] ( identifier[vec1] , identifier[vec2] ): literal[string] keyword[from] .. identifier[const] keyword[import] identifier[PRM] keyword[import] identifier[numpy] keyword[as] identifier[np] identifier[par] = keyword[False] keyword[for] identifier[n] , identifier[v] keyword[in] identifier[enumerate] ([ identifier[vec1] , identifier[vec2] ]): keyword[if] keyword[not] identifier[len] ( identifier[v] . identifier[shape] )== literal[int] : keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[n] )) keyword[if] keyword[not] identifier[vec1] . identifier[shape] [ literal[int] ]== identifier[vec2] . identifier[shape] [ literal[int] ]: keyword[raise] identifier[ValueError] ( literal[string] ) identifier[angle] = identifier[vec_angle] ( identifier[vec1] , identifier[vec2] ) keyword[if] identifier[min] ([ identifier[abs] ( identifier[angle] ), identifier[abs] ( identifier[angle] - literal[int] )])< identifier[PRM] . identifier[NON_PARALLEL_TOL] : identifier[par] = keyword[True] keyword[return] identifier[par]
def parallel_check(vec1, vec2): """Checks whether two vectors are parallel OR anti-parallel. Vectors must be of the same dimension. Parameters ---------- vec1 length-R |npfloat_| -- First vector to compare vec2 length-R |npfloat_| -- Second vector to compare Returns ------- par |bool| -- |True| if (anti-)parallel to within :data:`opan.const.PRM.NON_PARALLEL_TOL` degrees. |False| otherwise. """ # Imports from ..const import PRM import numpy as np # Initialize False par = False # Shape check for (n, v) in enumerate([vec1, vec2]): if not len(v.shape) == 1: raise ValueError('Bad shape for vector #{0}'.format(n)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] ## end if ## next v,n if not vec1.shape[0] == vec2.shape[0]: raise ValueError('Vector length mismatch') # depends on [control=['if'], data=[]] ## end if # Check for (anti-)parallel character and return angle = vec_angle(vec1, vec2) if min([abs(angle), abs(angle - 180.0)]) < PRM.NON_PARALLEL_TOL: par = True # depends on [control=['if'], data=[]] ## end if return par
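vec_angle is a sibling helper in the same module; for a self-contained check the sketch below assumes it returns the angle between the vectors in degrees (computed here from the normalized dot product) and uses a made-up tolerance:

import numpy as np

NON_PARALLEL_TOL = 1e-3   # hypothetical tolerance, in degrees

def vec_angle(v1, v2):
    # Assumed behavior of the sibling helper: angle in degrees.
    cosang = np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2))
    return np.degrees(np.arccos(np.clip(cosang, -1.0, 1.0)))

def is_parallel(v1, v2):
    angle = vec_angle(v1, v2)
    return min(abs(angle), abs(angle - 180.0)) < NON_PARALLEL_TOL

a = np.array([1.0, 2.0, 3.0])
print(is_parallel(a, 2.0 * a))                    # True  (parallel)
print(is_parallel(a, -a))                         # True  (anti-parallel)
print(is_parallel(a, np.array([1.0, 0.0, 0.0])))  # False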
def disconnect(self, sid, namespace): """Register a client disconnect from a namespace.""" if namespace not in self.rooms: return rooms = [] for room_name, room in six.iteritems(self.rooms[namespace].copy()): if sid in room: rooms.append(room_name) for room in rooms: self.leave_room(sid, namespace, room) if sid in self.callbacks and namespace in self.callbacks[sid]: del self.callbacks[sid][namespace] if len(self.callbacks[sid]) == 0: del self.callbacks[sid] if namespace in self.pending_disconnect and \ sid in self.pending_disconnect[namespace]: self.pending_disconnect[namespace].remove(sid) if len(self.pending_disconnect[namespace]) == 0: del self.pending_disconnect[namespace]
def function[disconnect, parameter[self, sid, namespace]]: constant[Register a client disconnect from a namespace.] if compare[name[namespace] <ast.NotIn object at 0x7da2590d7190> name[self].rooms] begin[:] return[None] variable[rooms] assign[=] list[[]] for taget[tuple[[<ast.Name object at 0x7da18ede5120>, <ast.Name object at 0x7da18ede6080>]]] in starred[call[name[six].iteritems, parameter[call[call[name[self].rooms][name[namespace]].copy, parameter[]]]]] begin[:] if compare[name[sid] in name[room]] begin[:] call[name[rooms].append, parameter[name[room_name]]] for taget[name[room]] in starred[name[rooms]] begin[:] call[name[self].leave_room, parameter[name[sid], name[namespace], name[room]]] if <ast.BoolOp object at 0x7da1b1cb1c60> begin[:] <ast.Delete object at 0x7da1b1cb1210> if compare[call[name[len], parameter[call[name[self].callbacks][name[sid]]]] equal[==] constant[0]] begin[:] <ast.Delete object at 0x7da18ede7130> if <ast.BoolOp object at 0x7da18ede6b30> begin[:] call[call[name[self].pending_disconnect][name[namespace]].remove, parameter[name[sid]]] if compare[call[name[len], parameter[call[name[self].pending_disconnect][name[namespace]]]] equal[==] constant[0]] begin[:] <ast.Delete object at 0x7da18ede5660>
keyword[def] identifier[disconnect] ( identifier[self] , identifier[sid] , identifier[namespace] ): literal[string] keyword[if] identifier[namespace] keyword[not] keyword[in] identifier[self] . identifier[rooms] : keyword[return] identifier[rooms] =[] keyword[for] identifier[room_name] , identifier[room] keyword[in] identifier[six] . identifier[iteritems] ( identifier[self] . identifier[rooms] [ identifier[namespace] ]. identifier[copy] ()): keyword[if] identifier[sid] keyword[in] identifier[room] : identifier[rooms] . identifier[append] ( identifier[room_name] ) keyword[for] identifier[room] keyword[in] identifier[rooms] : identifier[self] . identifier[leave_room] ( identifier[sid] , identifier[namespace] , identifier[room] ) keyword[if] identifier[sid] keyword[in] identifier[self] . identifier[callbacks] keyword[and] identifier[namespace] keyword[in] identifier[self] . identifier[callbacks] [ identifier[sid] ]: keyword[del] identifier[self] . identifier[callbacks] [ identifier[sid] ][ identifier[namespace] ] keyword[if] identifier[len] ( identifier[self] . identifier[callbacks] [ identifier[sid] ])== literal[int] : keyword[del] identifier[self] . identifier[callbacks] [ identifier[sid] ] keyword[if] identifier[namespace] keyword[in] identifier[self] . identifier[pending_disconnect] keyword[and] identifier[sid] keyword[in] identifier[self] . identifier[pending_disconnect] [ identifier[namespace] ]: identifier[self] . identifier[pending_disconnect] [ identifier[namespace] ]. identifier[remove] ( identifier[sid] ) keyword[if] identifier[len] ( identifier[self] . identifier[pending_disconnect] [ identifier[namespace] ])== literal[int] : keyword[del] identifier[self] . identifier[pending_disconnect] [ identifier[namespace] ]
def disconnect(self, sid, namespace): """Register a client disconnect from a namespace.""" if namespace not in self.rooms: return # depends on [control=['if'], data=[]] rooms = [] for (room_name, room) in six.iteritems(self.rooms[namespace].copy()): if sid in room: rooms.append(room_name) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] for room in rooms: self.leave_room(sid, namespace, room) # depends on [control=['for'], data=['room']] if sid in self.callbacks and namespace in self.callbacks[sid]: del self.callbacks[sid][namespace] if len(self.callbacks[sid]) == 0: del self.callbacks[sid] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] if namespace in self.pending_disconnect and sid in self.pending_disconnect[namespace]: self.pending_disconnect[namespace].remove(sid) if len(self.pending_disconnect[namespace]) == 0: del self.pending_disconnect[namespace] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
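The first half of the method is plain room bookkeeping: scan a copy of the namespace's rooms and collect those containing the sid. A standalone sketch of that scan (the dict layout below is an assumption about the manager's internal structure; plain .items() replaces six.iteritems for brevity):

rooms = {"/chat": {"lobby": {"sid1": True, "sid2": True},
                   "game":  {"sid1": True}}}
sid, namespace = "sid1", "/chat"

# Mirror of the first loop: collect every room this sid occupies.
to_leave = [name for name, members in rooms[namespace].copy().items()
            if sid in members]
print(sorted(to_leave))   # ['game', 'lobby']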
def queue_put_stoppable(self, q, obj):
    """Put obj into the queue, but give up when the thread is stopped."""
    while not self.stopped():
        try:
            q.put(obj, timeout=5)
            break
        except queue.Full:
            # queue still full after 5s: re-check the stop flag and retry
            pass
def function[queue_put_stoppable, parameter[self, q, obj]]: constant[ Put obj to queue, but will give up when the thread is stopped] while <ast.UnaryOp object at 0x7da18f00d510> begin[:] <ast.Try object at 0x7da18f00c100>
keyword[def] identifier[queue_put_stoppable] ( identifier[self] , identifier[q] , identifier[obj] ): literal[string] keyword[while] keyword[not] identifier[self] . identifier[stopped] (): keyword[try] : identifier[q] . identifier[put] ( identifier[obj] , identifier[timeout] = literal[int] ) keyword[break] keyword[except] identifier[queue] . identifier[Full] : keyword[pass]
def queue_put_stoppable(self, q, obj): """ Put obj to queue, but will give up when the thread is stopped""" while not self.stopped(): try: q.put(obj, timeout=5) break # depends on [control=['try'], data=[]] except queue.Full: pass # depends on [control=['except'], data=[]] # depends on [control=['while'], data=[]]
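The pattern pairs a stoppable thread with a bounded queue: keep retrying put with a timeout and bail out once the stop flag is set. A minimal self-contained sketch (StoppableWorker is a hypothetical stand-in for the surrounding class):

import queue
import threading

class StoppableWorker(threading.Thread):
    """Minimal sketch of the assumed surrounding interface."""
    def __init__(self):
        super().__init__()
        self._stop_evt = threading.Event()

    def stopped(self):
        return self._stop_evt.is_set()

    def stop(self):
        self._stop_evt.set()

    def queue_put_stoppable(self, q, obj):
        while not self.stopped():
            try:
                q.put(obj, timeout=5)
                break
            except queue.Full:
                pass

q = queue.Queue(maxsize=1)
w = StoppableWorker()
w.queue_put_stoppable(q, "item")   # succeeds immediately, queue has room
print(q.get())                     # 'item'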
def moment1(self):
    """The first time-delay-weighted statistical moment of the
    instantaneous unit hydrograph."""
    delays, response = self.delay_response_series
    return statstools.calc_mean_time(delays, response)
def function[moment1, parameter[self]]: constant[The first time delay weighted statistical moment of the instantaneous unit hydrograph.] <ast.Tuple object at 0x7da20e960a90> assign[=] name[self].delay_response_series return[call[name[statstools].calc_mean_time, parameter[name[delays], name[response]]]]
keyword[def] identifier[moment1] ( identifier[self] ): literal[string] identifier[delays] , identifier[response] = identifier[self] . identifier[delay_response_series] keyword[return] identifier[statstools] . identifier[calc_mean_time] ( identifier[delays] , identifier[response] )
def moment1(self): """The first time delay weighted statistical moment of the instantaneous unit hydrograph.""" (delays, response) = self.delay_response_series return statstools.calc_mean_time(delays, response)
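statstools.calc_mean_time presumably computes the response-weighted average of the delays, i.e. sum(d*r) / sum(r); a quick numeric check of that first moment (the formula is the assumption, the numbers are made up):

import numpy as np

delays = np.array([0.0, 1.0, 2.0, 3.0])
response = np.array([0.1, 0.4, 0.4, 0.1])

# First moment: response-weighted mean of the delays
# (assumed to match statstools.calc_mean_time).
moment1 = np.sum(delays * response) / np.sum(response)
print(moment1)   # 1.5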
def convert_pad(node, **kwargs): """Map MXNet's pad operator attributes to onnx's Pad operator and return the created node. """ name, input_nodes, attrs = get_inputs(node, kwargs) mxnet_pad_width = convert_string_to_list(attrs.get("pad_width")) onnx_pad_width = transform_padding(mxnet_pad_width) pad_mode = attrs.get("mode") if pad_mode == "constant": pad_value = float(attrs.get("constant_value")) \ if "constant_value" in attrs else 0.0 node = onnx.helper.make_node( 'Pad', inputs=input_nodes, outputs=[name], mode='constant', value=pad_value, pads=onnx_pad_width, name=name ) else: node = onnx.helper.make_node( 'Pad', inputs=input_nodes, outputs=[name], mode=pad_mode, pads=onnx_pad_width, name=name ) return [node]
def function[convert_pad, parameter[node]]: constant[Map MXNet's pad operator attributes to onnx's Pad operator and return the created node. ] <ast.Tuple object at 0x7da20c991510> assign[=] call[name[get_inputs], parameter[name[node], name[kwargs]]] variable[mxnet_pad_width] assign[=] call[name[convert_string_to_list], parameter[call[name[attrs].get, parameter[constant[pad_width]]]]] variable[onnx_pad_width] assign[=] call[name[transform_padding], parameter[name[mxnet_pad_width]]] variable[pad_mode] assign[=] call[name[attrs].get, parameter[constant[mode]]] if compare[name[pad_mode] equal[==] constant[constant]] begin[:] variable[pad_value] assign[=] <ast.IfExp object at 0x7da20c992290> variable[node] assign[=] call[name[onnx].helper.make_node, parameter[constant[Pad]]] return[list[[<ast.Name object at 0x7da20c993400>]]]
keyword[def] identifier[convert_pad] ( identifier[node] ,** identifier[kwargs] ): literal[string] identifier[name] , identifier[input_nodes] , identifier[attrs] = identifier[get_inputs] ( identifier[node] , identifier[kwargs] ) identifier[mxnet_pad_width] = identifier[convert_string_to_list] ( identifier[attrs] . identifier[get] ( literal[string] )) identifier[onnx_pad_width] = identifier[transform_padding] ( identifier[mxnet_pad_width] ) identifier[pad_mode] = identifier[attrs] . identifier[get] ( literal[string] ) keyword[if] identifier[pad_mode] == literal[string] : identifier[pad_value] = identifier[float] ( identifier[attrs] . identifier[get] ( literal[string] )) keyword[if] literal[string] keyword[in] identifier[attrs] keyword[else] literal[int] identifier[node] = identifier[onnx] . identifier[helper] . identifier[make_node] ( literal[string] , identifier[inputs] = identifier[input_nodes] , identifier[outputs] =[ identifier[name] ], identifier[mode] = literal[string] , identifier[value] = identifier[pad_value] , identifier[pads] = identifier[onnx_pad_width] , identifier[name] = identifier[name] ) keyword[else] : identifier[node] = identifier[onnx] . identifier[helper] . identifier[make_node] ( literal[string] , identifier[inputs] = identifier[input_nodes] , identifier[outputs] =[ identifier[name] ], identifier[mode] = identifier[pad_mode] , identifier[pads] = identifier[onnx_pad_width] , identifier[name] = identifier[name] ) keyword[return] [ identifier[node] ]
def convert_pad(node, **kwargs): """Map MXNet's pad operator attributes to onnx's Pad operator and return the created node. """ (name, input_nodes, attrs) = get_inputs(node, kwargs) mxnet_pad_width = convert_string_to_list(attrs.get('pad_width')) onnx_pad_width = transform_padding(mxnet_pad_width) pad_mode = attrs.get('mode') if pad_mode == 'constant': pad_value = float(attrs.get('constant_value')) if 'constant_value' in attrs else 0.0 node = onnx.helper.make_node('Pad', inputs=input_nodes, outputs=[name], mode='constant', value=pad_value, pads=onnx_pad_width, name=name) # depends on [control=['if'], data=[]] else: node = onnx.helper.make_node('Pad', inputs=input_nodes, outputs=[name], mode=pad_mode, pads=onnx_pad_width, name=name) return [node]
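The interesting translation step is transform_padding: MXNet interleaves (begin, end) per axis while ONNX's Pad wants all begins first, then all ends. A sketch of that reordering (a plausible reimplementation of the helper, not its actual source):

def transform_padding(pad_width):
    # Hypothetical reimplementation: MXNet interleaves (begin, end) per
    # axis; ONNX expects [x1_begin, x2_begin, ..., x1_end, x2_end, ...].
    begins = pad_width[0::2]
    ends = pad_width[1::2]
    return list(begins) + list(ends)

# NCHW tensor, pad H and W by 1 on each side:
mxnet_pad_width = [0, 0, 0, 0, 1, 1, 1, 1]
print(transform_padding(mxnet_pad_width))   # [0, 0, 1, 1, 0, 0, 1, 1]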
def binary_n(total_N, min_n=50): """ Creates a list of values by successively halving the total length total_N until the resulting value is less than min_n. Non-integer results are rounded down. Args: total_N (int): total length Kwargs: min_n (int): minimal length after division Returns: list of integers: total_N/2, total_N/4, total_N/8, ... until total_N/2^i < min_n """ max_exp = np.log2(1.0 * total_N / min_n) max_exp = int(np.floor(max_exp)) return [int(np.floor(1.0 * total_N / (2**i))) for i in range(1, max_exp + 1)]
def function[binary_n, parameter[total_N, min_n]]: constant[ Creates a list of values by successively halving the total length total_N until the resulting value is less than min_n. Non-integer results are rounded down. Args: total_N (int): total length Kwargs: min_n (int): minimal length after division Returns: list of integers: total_N/2, total_N/4, total_N/8, ... until total_N/2^i < min_n ] variable[max_exp] assign[=] call[name[np].log2, parameter[binary_operation[binary_operation[constant[1.0] * name[total_N]] / name[min_n]]]] variable[max_exp] assign[=] call[name[int], parameter[call[name[np].floor, parameter[name[max_exp]]]]] return[<ast.ListComp object at 0x7da20c9900d0>]
keyword[def] identifier[binary_n] ( identifier[total_N] , identifier[min_n] = literal[int] ): literal[string] identifier[max_exp] = identifier[np] . identifier[log2] ( literal[int] * identifier[total_N] / identifier[min_n] ) identifier[max_exp] = identifier[int] ( identifier[np] . identifier[floor] ( identifier[max_exp] )) keyword[return] [ identifier[int] ( identifier[np] . identifier[floor] ( literal[int] * identifier[total_N] /( literal[int] ** identifier[i] ))) keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , identifier[max_exp] + literal[int] )]
def binary_n(total_N, min_n=50): """ Creates a list of values by successively halving the total length total_N until the resulting value is less than min_n. Non-integer results are rounded down. Args: total_N (int): total length Kwargs: min_n (int): minimal length after division Returns: list of integers: total_N/2, total_N/4, total_N/8, ... until total_N/2^i < min_n """ max_exp = np.log2(1.0 * total_N / min_n) max_exp = int(np.floor(max_exp)) return [int(np.floor(1.0 * total_N / 2 ** i)) for i in range(1, max_exp + 1)]
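A quick worked example: with total_N=1000 and min_n=50, max_exp = floor(log2(20)) = 4, so the halving stops at 1000/2^4. The definition is repeated so the snippet runs standalone:

import numpy as np

def binary_n(total_N, min_n=50):
    max_exp = int(np.floor(np.log2(1.0 * total_N / min_n)))
    return [int(np.floor(1.0 * total_N / 2 ** i))
            for i in range(1, max_exp + 1)]

print(binary_n(1000))        # [500, 250, 125, 62]
print(binary_n(1000, 200))   # [500, 250]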
def move(self, source, dest): """Move contents from one path to another relative to the Group. Parameters ---------- source : string Name or path to a Zarr object to move. dest : string New name or path of the Zarr object. """ source = self._item_path(source) dest = self._item_path(dest) # Check that source exists. if not (contains_array(self._store, source) or contains_group(self._store, source)): raise ValueError('The source, "%s", does not exist.' % source) if contains_array(self._store, dest) or contains_group(self._store, dest): raise ValueError('The dest, "%s", already exists.' % dest) # Ensure groups needed for `dest` exist. if "/" in dest: self.require_group("/" + dest.rsplit("/", 1)[0]) self._write_op(self._move_nosync, source, dest)
def function[move, parameter[self, source, dest]]: constant[Move contents from one path to another relative to the Group. Parameters ---------- source : string Name or path to a Zarr object to move. dest : string New name or path of the Zarr object. ] variable[source] assign[=] call[name[self]._item_path, parameter[name[source]]] variable[dest] assign[=] call[name[self]._item_path, parameter[name[dest]]] if <ast.UnaryOp object at 0x7da1b19eeb30> begin[:] <ast.Raise object at 0x7da1b19ed120> if <ast.BoolOp object at 0x7da1b19edc90> begin[:] <ast.Raise object at 0x7da1b19822c0> if compare[constant[/] in name[dest]] begin[:] call[name[self].require_group, parameter[binary_operation[constant[/] + call[call[name[dest].rsplit, parameter[constant[/], constant[1]]]][constant[0]]]]] call[name[self]._write_op, parameter[name[self]._move_nosync, name[source], name[dest]]]
keyword[def] identifier[move] ( identifier[self] , identifier[source] , identifier[dest] ): literal[string] identifier[source] = identifier[self] . identifier[_item_path] ( identifier[source] ) identifier[dest] = identifier[self] . identifier[_item_path] ( identifier[dest] ) keyword[if] keyword[not] ( identifier[contains_array] ( identifier[self] . identifier[_store] , identifier[source] ) keyword[or] identifier[contains_group] ( identifier[self] . identifier[_store] , identifier[source] )): keyword[raise] identifier[ValueError] ( literal[string] % identifier[source] ) keyword[if] identifier[contains_array] ( identifier[self] . identifier[_store] , identifier[dest] ) keyword[or] identifier[contains_group] ( identifier[self] . identifier[_store] , identifier[dest] ): keyword[raise] identifier[ValueError] ( literal[string] % identifier[dest] ) keyword[if] literal[string] keyword[in] identifier[dest] : identifier[self] . identifier[require_group] ( literal[string] + identifier[dest] . identifier[rsplit] ( literal[string] , literal[int] )[ literal[int] ]) identifier[self] . identifier[_write_op] ( identifier[self] . identifier[_move_nosync] , identifier[source] , identifier[dest] )
def move(self, source, dest): """Move contents from one path to another relative to the Group. Parameters ---------- source : string Name or path to a Zarr object to move. dest : string New name or path of the Zarr object. """ source = self._item_path(source) dest = self._item_path(dest) # Check that source exists. if not (contains_array(self._store, source) or contains_group(self._store, source)): raise ValueError('The source, "%s", does not exist.' % source) # depends on [control=['if'], data=[]] if contains_array(self._store, dest) or contains_group(self._store, dest): raise ValueError('The dest, "%s", already exists.' % dest) # depends on [control=['if'], data=[]] # Ensure groups needed for `dest` exist. if '/' in dest: self.require_group('/' + dest.rsplit('/', 1)[0]) # depends on [control=['if'], data=['dest']] self._write_op(self._move_nosync, source, dest)
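A hedged usage sketch against the zarr v2 in-memory API (paths and shapes are placeholders): moving an array creates the destination's parent group on demand, exactly as the require_group call above implies.

import zarr

root = zarr.group()   # in-memory store
root.create_group("raw")
root.create_dataset("raw/img", shape=(4, 4), dtype="i4")

root.move("raw/img", "processed/img")   # 'processed' is created as needed
print("img" in root["processed"])       # True
print("img" in root["raw"])             # False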
def report(self, item_id, report_format="json"):
    """Retrieves the specified report for the analyzed item, referenced by item_id.

    Available formats include: json.

    :type item_id: str
    :param item_id: File ID number
    :type report_format: str
    :param report_format: Return format

    :rtype: dict
    :return: Dictionary of parsed JSON data, or the raw response content for
             other formats or when JSON parsing fails.
    """
    if report_format == "html":
        return "Report Unavailable"

    # grab an analysis id from the submission id.
    response = self._request("/analysis/sample/{sample_id}".format(sample_id=item_id),
                             headers=self.headers)

    try:
        # the highest score is probably the most interesting.
        # vmray uses this internally with sample_highest_vti_score so this
        # seems like a safe assumption.
        analysis_id = 0
        top_score = -1
        for analysis in response.json()['data']:
            if analysis['analysis_vti_score'] > top_score:
                top_score = analysis['analysis_vti_score']
                analysis_id = analysis['analysis_id']
    except (ValueError, KeyError) as e:
        raise sandboxapi.SandboxError(e)

    # assume report format json.
    response = self._request("/analysis/{analysis_id}/archive/logs/summary.json".format(analysis_id=analysis_id),
                             headers=self.headers)

    # if response is JSON, return it as an object.
    try:
        return response.json()
    except ValueError:
        pass

    # otherwise, return the raw content.
    return response.content
def function[report, parameter[self, item_id, report_format]]: constant[Retrieves the specified report for the analyzed item, referenced by item_id. Available formats include: json. :type item_id: str :param item_id: File ID number :type report_format: str :param report_format: Return format :rtype: dict :return: Dictionary representing the JSON parsed data or raw, for other formats / JSON parsing failure. ] if compare[name[report_format] equal[==] constant[html]] begin[:] return[constant[Report Unavailable]] variable[response] assign[=] call[name[self]._request, parameter[call[constant[/analysis/sample/{sample_id}].format, parameter[]]]] <ast.Try object at 0x7da1b26aec80> variable[response] assign[=] call[name[self]._request, parameter[call[constant[/analysis/{analysis_id}/archive/logs/summary.json].format, parameter[]]]] <ast.Try object at 0x7da1b26afeb0> return[name[response].content]
keyword[def] identifier[report] ( identifier[self] , identifier[item_id] , identifier[report_format] = literal[string] ): literal[string] keyword[if] identifier[report_format] == literal[string] : keyword[return] literal[string] identifier[response] = identifier[self] . identifier[_request] ( literal[string] . identifier[format] ( identifier[sample_id] = identifier[item_id] ), identifier[headers] = identifier[self] . identifier[headers] ) keyword[try] : identifier[analysis_id] = literal[int] identifier[top_score] =- literal[int] keyword[for] identifier[analysis] keyword[in] identifier[response] . identifier[json] ()[ literal[string] ]: keyword[if] identifier[analysis] [ literal[string] ]> identifier[top_score] : identifier[top_score] = identifier[analysis] [ literal[string] ] identifier[analysis_id] = identifier[analysis] [ literal[string] ] keyword[except] ( identifier[ValueError] , identifier[KeyError] ) keyword[as] identifier[e] : keyword[raise] identifier[sandboxapi] . identifier[SandboxError] ( identifier[e] ) identifier[response] = identifier[self] . identifier[_request] ( literal[string] . identifier[format] ( identifier[analysis_id] = identifier[analysis_id] ), identifier[headers] = identifier[self] . identifier[headers] ) keyword[try] : keyword[return] identifier[response] . identifier[json] () keyword[except] identifier[ValueError] : keyword[pass] keyword[return] identifier[response] . identifier[content]
def report(self, item_id, report_format='json'): """Retrieves the specified report for the analyzed item, referenced by item_id. Available formats include: json. :type item_id: str :param item_id: File ID number :type report_format: str :param report_format: Return format :rtype: dict :return: Dictionary representing the JSON parsed data or raw, for other formats / JSON parsing failure. """ if report_format == 'html': return 'Report Unavailable' # depends on [control=['if'], data=[]] # grab an analysis id from the submission id. response = self._request('/analysis/sample/{sample_id}'.format(sample_id=item_id), headers=self.headers) try: # the highest score is probably the most interesting. # vmray uses this internally with sample_highest_vti_score so this seems like a safe assumption. analysis_id = 0 top_score = -1 for analysis in response.json()['data']: if analysis['analysis_vti_score'] > top_score: top_score = analysis['analysis_vti_score'] analysis_id = analysis['analysis_id'] # depends on [control=['if'], data=['top_score']] # depends on [control=['for'], data=['analysis']] # depends on [control=['try'], data=[]] except (ValueError, KeyError) as e: raise sandboxapi.SandboxError(e) # depends on [control=['except'], data=['e']] # assume report format json. response = self._request('/analysis/{analysis_id}/archive/logs/summary.json'.format(analysis_id=analysis_id), headers=self.headers) # if response is JSON, return it as an object. try: return response.json() # depends on [control=['try'], data=[]] except ValueError: pass # depends on [control=['except'], data=[]] # otherwise, return the raw content. return response.content
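The selection policy — among all analyses for a sample, keep the one with the highest analysis_vti_score — is easy to exercise on a canned payload (the response data below is fabricated purely for illustration):

payload = {
    "data": [
        {"analysis_id": 11, "analysis_vti_score": 20},
        {"analysis_id": 12, "analysis_vti_score": 85},
        {"analysis_id": 13, "analysis_vti_score": 40},
    ]
}

analysis_id, top_score = 0, -1
for analysis in payload["data"]:
    if analysis["analysis_vti_score"] > top_score:
        top_score = analysis["analysis_vti_score"]
        analysis_id = analysis["analysis_id"]
print(analysis_id, top_score)   # 12 85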
def _to_tuple(self, _list): """ Recursively converts lists to tuples """ result = list() for l in _list: if isinstance(l, list): result.append(tuple(self._to_tuple(l))) else: result.append(l) return tuple(result)
def function[_to_tuple, parameter[self, _list]]: constant[ Recursively converts lists to tuples ] variable[result] assign[=] call[name[list], parameter[]] for taget[name[l]] in starred[name[_list]] begin[:] if call[name[isinstance], parameter[name[l], name[list]]] begin[:] call[name[result].append, parameter[call[name[tuple], parameter[call[name[self]._to_tuple, parameter[name[l]]]]]]] return[call[name[tuple], parameter[name[result]]]]
keyword[def] identifier[_to_tuple] ( identifier[self] , identifier[_list] ): literal[string] identifier[result] = identifier[list] () keyword[for] identifier[l] keyword[in] identifier[_list] : keyword[if] identifier[isinstance] ( identifier[l] , identifier[list] ): identifier[result] . identifier[append] ( identifier[tuple] ( identifier[self] . identifier[_to_tuple] ( identifier[l] ))) keyword[else] : identifier[result] . identifier[append] ( identifier[l] ) keyword[return] identifier[tuple] ( identifier[result] )
def _to_tuple(self, _list): """ Recursively converts lists to tuples """ result = list() for l in _list: if isinstance(l, list): result.append(tuple(self._to_tuple(l))) # depends on [control=['if'], data=[]] else: result.append(l) # depends on [control=['for'], data=['l']] return tuple(result)
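A usage sketch of the recursive conversion above, rewritten as a free function so it runs without the enclosing class; the redundant outer tuple() call on recursive results is dropped because the helper already returns a tuple:

def to_tuple(lst):
    # Recursively convert nested lists into nested tuples.
    return tuple(to_tuple(x) if isinstance(x, list) else x for x in lst)

print(to_tuple([1, [2, [3, 4]], 5]))  # -> (1, (2, (3, 4)), 5)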
def provideObjectToInfer(self, inferenceConfig):
    """
    Returns the sensations in a canonical format to be sent to an experiment.

    The input inferenceConfig should be a dict with the following form:
    {
      "numSteps": 2                  # number of sensations for each column
      "pairs": {
        0: [(1, 2), (2, 2)]          # sensations for cortical column 0
        1: [(2, 2), (1, 1)]          # sensations for cortical column 1
      }
      # The following are optional
      "noiseLevel": 0.05             # noise to add to feature sensations
      "locationNoise": 0.23          # noise to add to location signal
      "includeRandomLocation": True  # Swap in a random location SDR
      "numAmbiguousLocations": 2     # Number of additional random locations to
                                     # union together in location input
    }

    The pairs of indices can be modified for custom inference:
    - a tuple instead of an index indicates that the union of designated
      patterns is being sensed (either in location or feature)
    - -1 as an index indicates that the input is empty for a feature, and is
      random for a location (since an empty location makes the layer 4 burst
      for now)

    The returned format is a list of sensations, each sensation being a
    mapping from cortical column index to a pair of SDR's (one location and
    one feature).

    Parameters:
    ----------------------------
    @param   inferenceConfig (dict)
             Inference spec for experiment (cf. above for format)
    """
    numSteps = inferenceConfig.get("numSteps", len(inferenceConfig["pairs"][0]))

    # some checks
    if numSteps == 0:
      raise ValueError("No inference steps were provided")
    for col in xrange(self.numColumns):
      if len(inferenceConfig["pairs"][col]) != numSteps:
        raise ValueError("Incompatible numSteps and actual inference steps")

    sensationSteps = []
    for step in xrange(numSteps):
      pairs = [
        inferenceConfig["pairs"][col][step] for col in xrange(self.numColumns)
      ]
      sdrPairs = self._getSDRPairs(
        pairs,
        noise=inferenceConfig.get("noiseLevel", None),
        locationNoise=inferenceConfig.get("locationNoise", None),
        includeRandomLocation=inferenceConfig.get("includeRandomLocation", False),
        numAmbiguousLocations=inferenceConfig.get("numAmbiguousLocations", 0))

      sensationSteps.append(sdrPairs)

    self._checkObjectToInfer(sensationSteps)
    return sensationSteps
def function[provideObjectToInfer, parameter[self, inferenceConfig]]: constant[ Returns the sensations in a canonical format to be sent to an experiment. The input inferenceConfig should be a dict with the following form: { "numSteps": 2 # number of sensations for each column "pairs": { 0: [(1, 2), (2, 2)] # sensations for cortical column 0 1: [(2, 2), (1, 1)] # sensations for cortical column 1 } # The following are optional "noiseLevel": 0.05 # noise to add to feature sensations "locationNoise": 0.23 # noise to add to location signal "includeRandomLocation": True # Swap in a random location SDR "numAmbiguousLocations": 2 # Number of additional random locations to # union together in location input } The pairs of indices can be modified for custom inference: - a tuple instead of an index indicates that the union of designated patterns are being sensed (either in location or feature) - -1 as an index indicates that the input is empty for a feature, and is random for a location (since an empty location makes the layer 4 burst for now) The returned format is a a lists of sensations, each sensation being a mapping from cortical column index to a pair of SDR's (one location and one feature). Parameters: ---------------------------- @param inferenceConfig (dict) Inference spec for experiment (cf above for format) ] variable[numSteps] assign[=] call[name[inferenceConfig].get, parameter[constant[numSteps], call[name[len], parameter[call[call[name[inferenceConfig]][constant[pairs]]][constant[0]]]]]] if compare[name[numSteps] equal[==] constant[0]] begin[:] <ast.Raise object at 0x7da1b08d7010> for taget[name[col]] in starred[call[name[xrange], parameter[name[self].numColumns]]] begin[:] if compare[call[name[len], parameter[call[call[name[inferenceConfig]][constant[pairs]]][name[col]]]] not_equal[!=] name[numSteps]] begin[:] <ast.Raise object at 0x7da1b08be020> variable[sensationSteps] assign[=] list[[]] for taget[name[step]] in starred[call[name[xrange], parameter[name[numSteps]]]] begin[:] variable[pairs] assign[=] <ast.ListComp object at 0x7da1b08bc970> variable[sdrPairs] assign[=] call[name[self]._getSDRPairs, parameter[name[pairs]]] call[name[sensationSteps].append, parameter[name[sdrPairs]]] call[name[self]._checkObjectToInfer, parameter[name[sensationSteps]]] return[name[sensationSteps]]
keyword[def] identifier[provideObjectToInfer] ( identifier[self] , identifier[inferenceConfig] ): literal[string] identifier[numSteps] = identifier[inferenceConfig] . identifier[get] ( literal[string] , identifier[len] ( identifier[inferenceConfig] [ literal[string] ][ literal[int] ])) keyword[if] identifier[numSteps] == literal[int] : keyword[raise] identifier[ValueError] ( literal[string] ) keyword[for] identifier[col] keyword[in] identifier[xrange] ( identifier[self] . identifier[numColumns] ): keyword[if] identifier[len] ( identifier[inferenceConfig] [ literal[string] ][ identifier[col] ])!= identifier[numSteps] : keyword[raise] identifier[ValueError] ( literal[string] ) identifier[sensationSteps] =[] keyword[for] identifier[step] keyword[in] identifier[xrange] ( identifier[numSteps] ): identifier[pairs] =[ identifier[inferenceConfig] [ literal[string] ][ identifier[col] ][ identifier[step] ] keyword[for] identifier[col] keyword[in] identifier[xrange] ( identifier[self] . identifier[numColumns] ) ] identifier[sdrPairs] = identifier[self] . identifier[_getSDRPairs] ( identifier[pairs] , identifier[noise] = identifier[inferenceConfig] . identifier[get] ( literal[string] , keyword[None] ), identifier[locationNoise] = identifier[inferenceConfig] . identifier[get] ( literal[string] , keyword[None] ), identifier[includeRandomLocation] = identifier[inferenceConfig] . identifier[get] ( literal[string] , keyword[False] ), identifier[numAmbiguousLocations] = identifier[inferenceConfig] . identifier[get] ( literal[string] , literal[int] )) identifier[sensationSteps] . identifier[append] ( identifier[sdrPairs] ) identifier[self] . identifier[_checkObjectToInfer] ( identifier[sensationSteps] ) keyword[return] identifier[sensationSteps]
def provideObjectToInfer(self, inferenceConfig):
    """
    Returns the sensations in a canonical format to be sent to an experiment.

    The input inferenceConfig should be a dict with the following form:
    {
      "numSteps": 2                  # number of sensations for each column
      "pairs": {
        0: [(1, 2), (2, 2)]          # sensations for cortical column 0
        1: [(2, 2), (1, 1)]          # sensations for cortical column 1
      }
      # The following are optional
      "noiseLevel": 0.05             # noise to add to feature sensations
      "locationNoise": 0.23          # noise to add to location signal
      "includeRandomLocation": True  # Swap in a random location SDR
      "numAmbiguousLocations": 2     # Number of additional random locations to
                                     # union together in location input
    }

    The pairs of indices can be modified for custom inference:
    - a tuple instead of an index indicates that the union of designated
      patterns is being sensed (either in location or feature)
    - -1 as an index indicates that the input is empty for a feature, and is
      random for a location (since an empty location makes the layer 4 burst
      for now)

    The returned format is a list of sensations, each sensation being a
    mapping from cortical column index to a pair of SDR's (one location and
    one feature).

    Parameters:
    ----------------------------
    @param   inferenceConfig (dict)
             Inference spec for experiment (cf. above for format)
    """
    numSteps = inferenceConfig.get('numSteps', len(inferenceConfig['pairs'][0]))
    # some checks
    if numSteps == 0:
        raise ValueError('No inference steps were provided') # depends on [control=['if'], data=[]]
    for col in xrange(self.numColumns):
        if len(inferenceConfig['pairs'][col]) != numSteps:
            raise ValueError('Incompatible numSteps and actual inference steps') # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['col']]
    sensationSteps = []
    for step in xrange(numSteps):
        pairs = [inferenceConfig['pairs'][col][step] for col in xrange(self.numColumns)]
        sdrPairs = self._getSDRPairs(pairs, noise=inferenceConfig.get('noiseLevel', None), locationNoise=inferenceConfig.get('locationNoise', None), includeRandomLocation=inferenceConfig.get('includeRandomLocation', False), numAmbiguousLocations=inferenceConfig.get('numAmbiguousLocations', 0))
        sensationSteps.append(sdrPairs) # depends on [control=['for'], data=['step']]
    self._checkObjectToInfer(sensationSteps)
    return sensationSteps
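A hypothetical inference spec matching the documented format; the values are illustrative only, and exp stands for whatever experiment object exposes provideObjectToInfer:

inference_config = {
    "numSteps": 2,
    "pairs": {
        0: [(1, 2), (2, 2)],  # cortical column 0
        1: [(2, 2), (1, 1)],  # cortical column 1
    },
    "noiseLevel": 0.05,       # optional
}
# sensations = exp.provideObjectToInfer(inference_config)
# -> one {column: (locationSDR, featureSDR)} mapping per sensation step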
def dafgda(handle, begin, end): """ Read the double precision data bounded by two addresses within a DAF. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/dafgda_c.html :param handle: Handle of a DAF. :type handle: int :param begin: Initial address within file. :type begin: int :param end: Final address within file. :type end: int :return: Data contained between begin and end. :rtype: Array of floats """ handle = ctypes.c_int(handle) data = stypes.emptyDoubleVector(abs(end - begin)) begin = ctypes.c_int(begin) end = ctypes.c_int(end) libspice.dafgda_c(handle, begin, end, data) return stypes.cVectorToPython(data)
def function[dafgda, parameter[handle, begin, end]]: constant[ Read the double precision data bounded by two addresses within a DAF. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/dafgda_c.html :param handle: Handle of a DAF. :type handle: int :param begin: Initial address within file. :type begin: int :param end: Final address within file. :type end: int :return: Data contained between begin and end. :rtype: Array of floats ] variable[handle] assign[=] call[name[ctypes].c_int, parameter[name[handle]]] variable[data] assign[=] call[name[stypes].emptyDoubleVector, parameter[call[name[abs], parameter[binary_operation[name[end] - name[begin]]]]]] variable[begin] assign[=] call[name[ctypes].c_int, parameter[name[begin]]] variable[end] assign[=] call[name[ctypes].c_int, parameter[name[end]]] call[name[libspice].dafgda_c, parameter[name[handle], name[begin], name[end], name[data]]] return[call[name[stypes].cVectorToPython, parameter[name[data]]]]
keyword[def] identifier[dafgda] ( identifier[handle] , identifier[begin] , identifier[end] ): literal[string] identifier[handle] = identifier[ctypes] . identifier[c_int] ( identifier[handle] ) identifier[data] = identifier[stypes] . identifier[emptyDoubleVector] ( identifier[abs] ( identifier[end] - identifier[begin] )) identifier[begin] = identifier[ctypes] . identifier[c_int] ( identifier[begin] ) identifier[end] = identifier[ctypes] . identifier[c_int] ( identifier[end] ) identifier[libspice] . identifier[dafgda_c] ( identifier[handle] , identifier[begin] , identifier[end] , identifier[data] ) keyword[return] identifier[stypes] . identifier[cVectorToPython] ( identifier[data] )
def dafgda(handle, begin, end): """ Read the double precision data bounded by two addresses within a DAF. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/dafgda_c.html :param handle: Handle of a DAF. :type handle: int :param begin: Initial address within file. :type begin: int :param end: Final address within file. :type end: int :return: Data contained between begin and end. :rtype: Array of floats """ handle = ctypes.c_int(handle) data = stypes.emptyDoubleVector(abs(end - begin)) begin = ctypes.c_int(begin) end = ctypes.c_int(end) libspice.dafgda_c(handle, begin, end, data) return stypes.cVectorToPython(data)
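A hedged usage sketch, assuming the wrapper above is exposed as spiceypy.dafgda and that "de430.bsp" (a hypothetical path) is a readable DAF file; in practice begin/end come from a segment summary search rather than fixed constants:

import spiceypy

handle = spiceypy.dafopr("de430.bsp")  # open the DAF read-only
# 385 and 512 are placeholder addresses; real values come from a summary
# search (dafbfs/daffna/dafgs), not hard-coded constants.
data = spiceypy.dafgda(handle, 385, 512)
spiceypy.dafcls(handle)                # always release the handle
print(len(data))                       # number of doubles read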
def JMS_to_array(C, sectors=None):
    """For a dictionary with JMS Wilson coefficients, return a dictionary
    of arrays."""
    if sectors is None:
        wc_keys = wcxf.Basis['WET', 'JMS'].all_wcs
    else:
        try:
            wc_keys = [k for s in sectors for k in wcxf.Basis['WET', 'JMS'].sectors[s]]
        except KeyError:
            print(sectors)
            raise  # re-raise: wc_keys would otherwise be left unbound below
    # fill in zeros for missing coefficients
    C_complete = {k: C.get(k, 0) for k in wc_keys}
    Ca = _scalar2array(C_complete)
    for k in Ca:
        if k in C_symm_keys[5]:
            Ca[k] = _symm_herm(Ca[k])
        if k in C_symm_keys[41]:
            Ca[k] = _symm_current(Ca[k])
        if k in C_symm_keys[4]:
            Ca[k] = _symm_herm(_symm_current(Ca[k]))
        if k in C_symm_keys[9]:
            Ca[k] = _antisymm_12(Ca[k])
    return Ca
def function[JMS_to_array, parameter[C, sectors]]: constant[For a dictionary with JMS Wilson coefficients, return a dictionary of arrays.] if compare[name[sectors] is constant[None]] begin[:] variable[wc_keys] assign[=] call[name[wcxf].Basis][tuple[[<ast.Constant object at 0x7da20e9b33a0>, <ast.Constant object at 0x7da20e9b0100>]]].all_wcs variable[C_complete] assign[=] <ast.DictComp object at 0x7da1b1add1e0> variable[Ca] assign[=] call[name[_scalar2array], parameter[name[C_complete]]] for taget[name[k]] in starred[name[Ca]] begin[:] if compare[name[k] in call[name[C_symm_keys]][constant[5]]] begin[:] call[name[Ca]][name[k]] assign[=] call[name[_symm_herm], parameter[call[name[Ca]][name[k]]]] if compare[name[k] in call[name[C_symm_keys]][constant[41]]] begin[:] call[name[Ca]][name[k]] assign[=] call[name[_symm_current], parameter[call[name[Ca]][name[k]]]] if compare[name[k] in call[name[C_symm_keys]][constant[4]]] begin[:] call[name[Ca]][name[k]] assign[=] call[name[_symm_herm], parameter[call[name[_symm_current], parameter[call[name[Ca]][name[k]]]]]] if compare[name[k] in call[name[C_symm_keys]][constant[9]]] begin[:] call[name[Ca]][name[k]] assign[=] call[name[_antisymm_12], parameter[call[name[Ca]][name[k]]]] return[name[Ca]]
keyword[def] identifier[JMS_to_array] ( identifier[C] , identifier[sectors] = keyword[None] ): literal[string] keyword[if] identifier[sectors] keyword[is] keyword[None] : identifier[wc_keys] = identifier[wcxf] . identifier[Basis] [ literal[string] , literal[string] ]. identifier[all_wcs] keyword[else] : keyword[try] : identifier[wc_keys] =[ identifier[k] keyword[for] identifier[s] keyword[in] identifier[sectors] keyword[for] identifier[k] keyword[in] identifier[wcxf] . identifier[Basis] [ literal[string] , literal[string] ]. identifier[sectors] [ identifier[s] ]] keyword[except] identifier[KeyError] : identifier[print] ( identifier[sectors] ) identifier[C_complete] ={ identifier[k] : identifier[C] . identifier[get] ( identifier[k] , literal[int] ) keyword[for] identifier[k] keyword[in] identifier[wc_keys] } identifier[Ca] = identifier[_scalar2array] ( identifier[C_complete] ) keyword[for] identifier[k] keyword[in] identifier[Ca] : keyword[if] identifier[k] keyword[in] identifier[C_symm_keys] [ literal[int] ]: identifier[Ca] [ identifier[k] ]= identifier[_symm_herm] ( identifier[Ca] [ identifier[k] ]) keyword[if] identifier[k] keyword[in] identifier[C_symm_keys] [ literal[int] ]: identifier[Ca] [ identifier[k] ]= identifier[_symm_current] ( identifier[Ca] [ identifier[k] ]) keyword[if] identifier[k] keyword[in] identifier[C_symm_keys] [ literal[int] ]: identifier[Ca] [ identifier[k] ]= identifier[_symm_herm] ( identifier[_symm_current] ( identifier[Ca] [ identifier[k] ])) keyword[if] identifier[k] keyword[in] identifier[C_symm_keys] [ literal[int] ]: identifier[Ca] [ identifier[k] ]= identifier[_antisymm_12] ( identifier[Ca] [ identifier[k] ]) keyword[return] identifier[Ca]
def JMS_to_array(C, sectors=None): """For a dictionary with JMS Wilson coefficients, return a dictionary of arrays.""" if sectors is None: wc_keys = wcxf.Basis['WET', 'JMS'].all_wcs # depends on [control=['if'], data=[]] else: try: wc_keys = [k for s in sectors for k in wcxf.Basis['WET', 'JMS'].sectors[s]] # depends on [control=['try'], data=[]] except KeyError: print(sectors) raise # re-raise: wc_keys would otherwise be left unbound below # depends on [control=['except'], data=[]] # fill in zeros for missing coefficients C_complete = {k: C.get(k, 0) for k in wc_keys} Ca = _scalar2array(C_complete) for k in Ca: if k in C_symm_keys[5]: Ca[k] = _symm_herm(Ca[k]) # depends on [control=['if'], data=['k']] if k in C_symm_keys[41]: Ca[k] = _symm_current(Ca[k]) # depends on [control=['if'], data=['k']] if k in C_symm_keys[4]: Ca[k] = _symm_herm(_symm_current(Ca[k])) # depends on [control=['if'], data=['k']] if k in C_symm_keys[9]: Ca[k] = _antisymm_12(Ca[k]) # depends on [control=['if'], data=['k']] # depends on [control=['for'], data=['k']] return Ca
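The zero-filling step is the part most easily shown in isolation; a minimal sketch with hypothetical coefficient names — unspecified coefficients default to 0:

wc_keys = ["VedLL_1111", "VedLL_1122", "VedLL_1212"]  # hypothetical keys
C = {"VedLL_1122": 0.3 + 0.1j}
C_complete = {k: C.get(k, 0) for k in wc_keys}
print(C_complete)  # missing coefficients appear as 0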
def import_from_dicts(data, samples=None, *args, **kwargs): """Import data from an iterable of dicts The algorithm will use the `samples` first `dict`s to determine the field names (if `samples` is `None` all `dict`s will be used). """ data = iter(data) cached_rows, headers = [], [] for index, row in enumerate(data, start=1): cached_rows.append(row) for key in row.keys(): if key not in headers: headers.append(key) if samples and index == samples: break data_rows = ( [row.get(header, None) for header in headers] for row in chain(cached_rows, data) ) kwargs["samples"] = samples meta = {"imported_from": "dicts"} return create_table(chain([headers], data_rows), meta=meta, *args, **kwargs)
def function[import_from_dicts, parameter[data, samples]]: constant[Import data from a iterable of dicts The algorithm will use the `samples` first `dict`s to determine the field names (if `samples` is `None` all `dict`s will be used). ] variable[data] assign[=] call[name[iter], parameter[name[data]]] <ast.Tuple object at 0x7da1b16bed40> assign[=] tuple[[<ast.List object at 0x7da1b16bfb20>, <ast.List object at 0x7da1b16bf130>]] for taget[tuple[[<ast.Name object at 0x7da1b16bd960>, <ast.Name object at 0x7da1b16bee00>]]] in starred[call[name[enumerate], parameter[name[data]]]] begin[:] call[name[cached_rows].append, parameter[name[row]]] for taget[name[key]] in starred[call[name[row].keys, parameter[]]] begin[:] if compare[name[key] <ast.NotIn object at 0x7da2590d7190> name[headers]] begin[:] call[name[headers].append, parameter[name[key]]] if <ast.BoolOp object at 0x7da1b16bcb50> begin[:] break variable[data_rows] assign[=] <ast.GeneratorExp object at 0x7da1b16bc550> call[name[kwargs]][constant[samples]] assign[=] name[samples] variable[meta] assign[=] dictionary[[<ast.Constant object at 0x7da1b16faa40>], [<ast.Constant object at 0x7da1b16fb430>]] return[call[name[create_table], parameter[call[name[chain], parameter[list[[<ast.Name object at 0x7da1b16f8c40>]], name[data_rows]]], <ast.Starred object at 0x7da1b16f99c0>]]]
keyword[def] identifier[import_from_dicts] ( identifier[data] , identifier[samples] = keyword[None] ,* identifier[args] ,** identifier[kwargs] ): literal[string] identifier[data] = identifier[iter] ( identifier[data] ) identifier[cached_rows] , identifier[headers] =[],[] keyword[for] identifier[index] , identifier[row] keyword[in] identifier[enumerate] ( identifier[data] , identifier[start] = literal[int] ): identifier[cached_rows] . identifier[append] ( identifier[row] ) keyword[for] identifier[key] keyword[in] identifier[row] . identifier[keys] (): keyword[if] identifier[key] keyword[not] keyword[in] identifier[headers] : identifier[headers] . identifier[append] ( identifier[key] ) keyword[if] identifier[samples] keyword[and] identifier[index] == identifier[samples] : keyword[break] identifier[data_rows] =( [ identifier[row] . identifier[get] ( identifier[header] , keyword[None] ) keyword[for] identifier[header] keyword[in] identifier[headers] ] keyword[for] identifier[row] keyword[in] identifier[chain] ( identifier[cached_rows] , identifier[data] ) ) identifier[kwargs] [ literal[string] ]= identifier[samples] identifier[meta] ={ literal[string] : literal[string] } keyword[return] identifier[create_table] ( identifier[chain] ([ identifier[headers] ], identifier[data_rows] ), identifier[meta] = identifier[meta] ,* identifier[args] ,** identifier[kwargs] )
def import_from_dicts(data, samples=None, *args, **kwargs): """Import data from an iterable of dicts The algorithm will use the `samples` first `dict`s to determine the field names (if `samples` is `None` all `dict`s will be used). """ data = iter(data) (cached_rows, headers) = ([], []) for (index, row) in enumerate(data, start=1): cached_rows.append(row) for key in row.keys(): if key not in headers: headers.append(key) # depends on [control=['if'], data=['key', 'headers']] # depends on [control=['for'], data=['key']] if samples and index == samples: break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] data_rows = ([row.get(header, None) for header in headers] for row in chain(cached_rows, data)) kwargs['samples'] = samples meta = {'imported_from': 'dicts'} return create_table(chain([headers], data_rows), *args, meta=meta, **kwargs)
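A self-contained sketch of the header scan above: field names are collected in first-seen order across rows, so later rows can contribute new columns and missing cells come back as None:

rows = [{"a": 1}, {"b": 2, "a": 3}, {"c": 4}]
headers = []
for row in rows:
    for key in row:
        if key not in headers:
            headers.append(key)
print(headers)                                      # ['a', 'b', 'c']
print([[r.get(h) for h in headers] for r in rows])
# [[1, None, None], [3, 2, None], [None, None, 4]]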
def BooleanTake(input_vertex: vertex_constructor_param_types, index: Collection[int], label: Optional[str]=None) -> Vertex: """ A vertex that extracts a scalar at a given index :param input_vertex: the input vertex to extract from :param index: the index to extract at :param label: optional label for the new vertex """ return Boolean(context.jvm_view().BooleanTakeVertex, label, cast_to_vertex(input_vertex), cast_to_long_array(index))
def function[BooleanTake, parameter[input_vertex, index, label]]: constant[ A vertex that extracts a scalar at a given index :param input_vertex: the input vertex to extract from :param index: the index to extract at ] return[call[name[Boolean], parameter[call[name[context].jvm_view, parameter[]].BooleanTakeVertex, name[label], call[name[cast_to_vertex], parameter[name[input_vertex]]], call[name[cast_to_long_array], parameter[name[index]]]]]]
keyword[def] identifier[BooleanTake] ( identifier[input_vertex] : identifier[vertex_constructor_param_types] , identifier[index] : identifier[Collection] [ identifier[int] ], identifier[label] : identifier[Optional] [ identifier[str] ]= keyword[None] )-> identifier[Vertex] : literal[string] keyword[return] identifier[Boolean] ( identifier[context] . identifier[jvm_view] (). identifier[BooleanTakeVertex] , identifier[label] , identifier[cast_to_vertex] ( identifier[input_vertex] ), identifier[cast_to_long_array] ( identifier[index] ))
def BooleanTake(input_vertex: vertex_constructor_param_types, index: Collection[int], label: Optional[str]=None) -> Vertex: """ A vertex that extracts a scalar at a given index :param input_vertex: the input vertex to extract from :param index: the index to extract at :param label: optional label for the new vertex """ return Boolean(context.jvm_view().BooleanTakeVertex, label, cast_to_vertex(input_vertex), cast_to_long_array(index))
def _index_document(self, document, force=False): """ Adds partition document to the index. """ from ambry.util import int_maybe time_coverage = document.pop('time_coverage', []) from_year = None to_year = None if time_coverage: from_year = int_maybe(time_coverage[0]) to_year = int_maybe(time_coverage[-1]) query = text(""" INSERT INTO partition_index(vid, dataset_vid, title, keywords, doc, from_year, to_year) VALUES(:vid, :dataset_vid, :title, :keywords, :doc, :from_year, :to_year); """) self.backend.library.database.connection.execute( query, from_year=from_year, to_year=to_year, **document)
def function[_index_document, parameter[self, document, force]]: constant[ Adds parition document to the index. ] from relative_module[ambry.util] import module[int_maybe] variable[time_coverage] assign[=] call[name[document].pop, parameter[constant[time_coverage], list[[]]]] variable[from_year] assign[=] constant[None] variable[to_year] assign[=] constant[None] if name[time_coverage] begin[:] variable[from_year] assign[=] call[name[int_maybe], parameter[call[name[time_coverage]][constant[0]]]] variable[to_year] assign[=] call[name[int_maybe], parameter[call[name[time_coverage]][<ast.UnaryOp object at 0x7da18eb57f40>]]] variable[query] assign[=] call[name[text], parameter[constant[ INSERT INTO partition_index(vid, dataset_vid, title, keywords, doc, from_year, to_year) VALUES(:vid, :dataset_vid, :title, :keywords, :doc, :from_year, :to_year); ]]] call[name[self].backend.library.database.connection.execute, parameter[name[query]]]
keyword[def] identifier[_index_document] ( identifier[self] , identifier[document] , identifier[force] = keyword[False] ): literal[string] keyword[from] identifier[ambry] . identifier[util] keyword[import] identifier[int_maybe] identifier[time_coverage] = identifier[document] . identifier[pop] ( literal[string] ,[]) identifier[from_year] = keyword[None] identifier[to_year] = keyword[None] keyword[if] identifier[time_coverage] : identifier[from_year] = identifier[int_maybe] ( identifier[time_coverage] [ literal[int] ]) identifier[to_year] = identifier[int_maybe] ( identifier[time_coverage] [- literal[int] ]) identifier[query] = identifier[text] ( literal[string] ) identifier[self] . identifier[backend] . identifier[library] . identifier[database] . identifier[connection] . identifier[execute] ( identifier[query] , identifier[from_year] = identifier[from_year] , identifier[to_year] = identifier[to_year] ,** identifier[document] )
def _index_document(self, document, force=False): """ Adds partition document to the index. """ from ambry.util import int_maybe time_coverage = document.pop('time_coverage', []) from_year = None to_year = None if time_coverage: from_year = int_maybe(time_coverage[0]) to_year = int_maybe(time_coverage[-1]) # depends on [control=['if'], data=[]] query = text('\n INSERT INTO partition_index(vid, dataset_vid, title, keywords, doc, from_year, to_year)\n VALUES(:vid, :dataset_vid, :title, :keywords, :doc, :from_year, :to_year); ') self.backend.library.database.connection.execute(query, from_year=from_year, to_year=to_year, **document)
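A hedged sketch of the same parameterized-insert pattern against an in-memory SQLite engine instead of the Ambry library database; only a few of the columns are kept, and the table DDL is invented for the demo:

from sqlalchemy import create_engine, text

engine = create_engine("sqlite://")
with engine.begin() as conn:
    conn.execute(text("CREATE TABLE partition_index (vid TEXT, from_year INT, to_year INT)"))
    # Named :params are bound from the dict, as in the method above.
    conn.execute(
        text("INSERT INTO partition_index (vid, from_year, to_year) "
             "VALUES (:vid, :from_year, :to_year)"),
        {"vid": "p001", "from_year": 2000, "to_year": 2010},
    )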
def _parse_dnamasq(filename): ''' Generic function for parsing dnsmasq files including includes. ''' fileopts = {} if not os.path.isfile(filename): raise CommandExecutionError( 'Error: No such file \'{0}\''.format(filename) ) with salt.utils.files.fopen(filename, 'r') as fp_: for line in fp_: line = salt.utils.stringutils.to_unicode(line) if not line.strip(): continue if line.startswith('#'): continue if '=' in line: comps = line.split('=') if comps[0] in fileopts: if isinstance(fileopts[comps[0]], six.string_types): temp = fileopts[comps[0]] fileopts[comps[0]] = [temp] fileopts[comps[0]].append(comps[1].strip()) else: fileopts[comps[0]] = comps[1].strip() else: if 'unparsed' not in fileopts: fileopts['unparsed'] = [] fileopts['unparsed'].append(line) return fileopts
def function[_parse_dnamasq, parameter[filename]]: constant[ Generic function for parsing dnsmasq files including includes. ] variable[fileopts] assign[=] dictionary[[], []] if <ast.UnaryOp object at 0x7da1b1f76020> begin[:] <ast.Raise object at 0x7da1b1f75db0> with call[name[salt].utils.files.fopen, parameter[name[filename], constant[r]]] begin[:] for taget[name[line]] in starred[name[fp_]] begin[:] variable[line] assign[=] call[name[salt].utils.stringutils.to_unicode, parameter[name[line]]] if <ast.UnaryOp object at 0x7da1b1f76e60> begin[:] continue if call[name[line].startswith, parameter[constant[#]]] begin[:] continue if compare[constant[=] in name[line]] begin[:] variable[comps] assign[=] call[name[line].split, parameter[constant[=]]] if compare[call[name[comps]][constant[0]] in name[fileopts]] begin[:] if call[name[isinstance], parameter[call[name[fileopts]][call[name[comps]][constant[0]]], name[six].string_types]] begin[:] variable[temp] assign[=] call[name[fileopts]][call[name[comps]][constant[0]]] call[name[fileopts]][call[name[comps]][constant[0]]] assign[=] list[[<ast.Name object at 0x7da1b1f75000>]] call[call[name[fileopts]][call[name[comps]][constant[0]]].append, parameter[call[call[name[comps]][constant[1]].strip, parameter[]]]] return[name[fileopts]]
keyword[def] identifier[_parse_dnamasq] ( identifier[filename] ): literal[string] identifier[fileopts] ={} keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[isfile] ( identifier[filename] ): keyword[raise] identifier[CommandExecutionError] ( literal[string] . identifier[format] ( identifier[filename] ) ) keyword[with] identifier[salt] . identifier[utils] . identifier[files] . identifier[fopen] ( identifier[filename] , literal[string] ) keyword[as] identifier[fp_] : keyword[for] identifier[line] keyword[in] identifier[fp_] : identifier[line] = identifier[salt] . identifier[utils] . identifier[stringutils] . identifier[to_unicode] ( identifier[line] ) keyword[if] keyword[not] identifier[line] . identifier[strip] (): keyword[continue] keyword[if] identifier[line] . identifier[startswith] ( literal[string] ): keyword[continue] keyword[if] literal[string] keyword[in] identifier[line] : identifier[comps] = identifier[line] . identifier[split] ( literal[string] ) keyword[if] identifier[comps] [ literal[int] ] keyword[in] identifier[fileopts] : keyword[if] identifier[isinstance] ( identifier[fileopts] [ identifier[comps] [ literal[int] ]], identifier[six] . identifier[string_types] ): identifier[temp] = identifier[fileopts] [ identifier[comps] [ literal[int] ]] identifier[fileopts] [ identifier[comps] [ literal[int] ]]=[ identifier[temp] ] identifier[fileopts] [ identifier[comps] [ literal[int] ]]. identifier[append] ( identifier[comps] [ literal[int] ]. identifier[strip] ()) keyword[else] : identifier[fileopts] [ identifier[comps] [ literal[int] ]]= identifier[comps] [ literal[int] ]. identifier[strip] () keyword[else] : keyword[if] literal[string] keyword[not] keyword[in] identifier[fileopts] : identifier[fileopts] [ literal[string] ]=[] identifier[fileopts] [ literal[string] ]. identifier[append] ( identifier[line] ) keyword[return] identifier[fileopts]
def _parse_dnamasq(filename): """ Generic function for parsing dnsmasq files including includes. """ fileopts = {} if not os.path.isfile(filename): raise CommandExecutionError("Error: No such file '{0}'".format(filename)) # depends on [control=['if'], data=[]] with salt.utils.files.fopen(filename, 'r') as fp_: for line in fp_: line = salt.utils.stringutils.to_unicode(line) if not line.strip(): continue # depends on [control=['if'], data=[]] if line.startswith('#'): continue # depends on [control=['if'], data=[]] if '=' in line: comps = line.split('=') if comps[0] in fileopts: if isinstance(fileopts[comps[0]], six.string_types): temp = fileopts[comps[0]] fileopts[comps[0]] = [temp] # depends on [control=['if'], data=[]] fileopts[comps[0]].append(comps[1].strip()) # depends on [control=['if'], data=['fileopts']] else: fileopts[comps[0]] = comps[1].strip() # depends on [control=['if'], data=['line']] else: if 'unparsed' not in fileopts: fileopts['unparsed'] = [] # depends on [control=['if'], data=['fileopts']] fileopts['unparsed'].append(line) # depends on [control=['for'], data=['line']] # depends on [control=['with'], data=['fp_']] return fileopts
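The core loop condensed into a self-contained sketch over an in-memory sample instead of a file; str.partition replaces split('=') so values containing '=' survive, a slight deviation from the original:

sample = """\
# upstream resolvers
server=8.8.8.8
server=1.1.1.1
domain-needed
"""
fileopts = {}
for line in sample.splitlines():
    line = line.strip()
    if not line or line.startswith('#'):
        continue
    if '=' in line:
        key, _, val = line.partition('=')
        if key in fileopts:
            if isinstance(fileopts[key], str):
                fileopts[key] = [fileopts[key]]   # promote to list on repeat
            fileopts[key].append(val.strip())
        else:
            fileopts[key] = val.strip()
    else:
        fileopts.setdefault('unparsed', []).append(line)
print(fileopts)
# {'server': ['8.8.8.8', '1.1.1.1'], 'unparsed': ['domain-needed']}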
def extract_pixels(X): """ Extract pixels from array X :param X: Array of images to be classified. :type X: numpy array, shape = [n_images, n_pixels_y, n_pixels_x, n_bands] :return: Reshaped 2D array :rtype: numpy array, [n_samples*n_pixels_y*n_pixels_x,n_bands] :raises: ValueError if input array has wrong dimensions """ if len(X.shape) != 4: raise ValueError('Array of input images has to be a 4-dimensional array of shape ' '[n_images, n_pixels_y, n_pixels_x, n_bands]') new_shape = (X.shape[0] * X.shape[1] * X.shape[2], X.shape[3],) pixels = X.reshape(new_shape) return pixels
def function[extract_pixels, parameter[X]]: constant[ Extract pixels from array X :param X: Array of images to be classified. :type X: numpy array, shape = [n_images, n_pixels_y, n_pixels_x, n_bands] :return: Reshaped 2D array :rtype: numpy array, [n_samples*n_pixels_y*n_pixels_x,n_bands] :raises: ValueError is input array has wrong dimensions ] if compare[call[name[len], parameter[name[X].shape]] not_equal[!=] constant[4]] begin[:] <ast.Raise object at 0x7da2054a41c0> variable[new_shape] assign[=] tuple[[<ast.BinOp object at 0x7da2054a7af0>, <ast.Subscript object at 0x7da2054a4a90>]] variable[pixels] assign[=] call[name[X].reshape, parameter[name[new_shape]]] return[name[pixels]]
keyword[def] identifier[extract_pixels] ( identifier[X] ): literal[string] keyword[if] identifier[len] ( identifier[X] . identifier[shape] )!= literal[int] : keyword[raise] identifier[ValueError] ( literal[string] literal[string] ) identifier[new_shape] =( identifier[X] . identifier[shape] [ literal[int] ]* identifier[X] . identifier[shape] [ literal[int] ]* identifier[X] . identifier[shape] [ literal[int] ], identifier[X] . identifier[shape] [ literal[int] ],) identifier[pixels] = identifier[X] . identifier[reshape] ( identifier[new_shape] ) keyword[return] identifier[pixels]
def extract_pixels(X): """ Extract pixels from array X :param X: Array of images to be classified. :type X: numpy array, shape = [n_images, n_pixels_y, n_pixels_x, n_bands] :return: Reshaped 2D array :rtype: numpy array, [n_samples*n_pixels_y*n_pixels_x,n_bands] :raises: ValueError if input array has wrong dimensions """ if len(X.shape) != 4: raise ValueError('Array of input images has to be a 4-dimensional array of shape [n_images, n_pixels_y, n_pixels_x, n_bands]') # depends on [control=['if'], data=[]] new_shape = (X.shape[0] * X.shape[1] * X.shape[2], X.shape[3]) pixels = X.reshape(new_shape) return pixels
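A quick check with a dummy stack: 2 images of 4x5 pixels with 3 bands flatten into 40 pixel rows of 3 band values each:

import numpy as np

X = np.zeros((2, 4, 5, 3))      # n_images, n_pixels_y, n_pixels_x, n_bands
print(extract_pixels(X).shape)  # -> (40, 3)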
def tiles_from_geom(self, geometry, zoom): """ Return all tiles intersecting with input geometry. Parameters ---------- geometry : ``shapely.geometry`` zoom : integer zoom level Yields ------ intersecting tiles : ``BufferedTile`` """ for tile in self.tile_pyramid.tiles_from_geom(geometry, zoom): yield self.tile(*tile.id)
def function[tiles_from_geom, parameter[self, geometry, zoom]]: constant[ Return all tiles intersecting with input geometry. Parameters ---------- geometry : ``shapely.geometry`` zoom : integer zoom level Yields ------ intersecting tiles : ``BufferedTile`` ] for taget[name[tile]] in starred[call[name[self].tile_pyramid.tiles_from_geom, parameter[name[geometry], name[zoom]]]] begin[:] <ast.Yield object at 0x7da1b0142800>
keyword[def] identifier[tiles_from_geom] ( identifier[self] , identifier[geometry] , identifier[zoom] ): literal[string] keyword[for] identifier[tile] keyword[in] identifier[self] . identifier[tile_pyramid] . identifier[tiles_from_geom] ( identifier[geometry] , identifier[zoom] ): keyword[yield] identifier[self] . identifier[tile] (* identifier[tile] . identifier[id] )
def tiles_from_geom(self, geometry, zoom): """ Return all tiles intersecting with input geometry. Parameters ---------- geometry : ``shapely.geometry`` zoom : integer zoom level Yields ------ intersecting tiles : ``BufferedTile`` """ for tile in self.tile_pyramid.tiles_from_geom(geometry, zoom): yield self.tile(*tile.id) # depends on [control=['for'], data=['tile']]
def _get_expanded_active_specs(specs): """ This function removes any bundles, apps, libs, and services that aren't needed by the activated_bundles. It also expands, inside specs.apps.depends.libs, all libs that are needed indirectly by each app. """ _filter_active(constants.CONFIG_BUNDLES_KEY, specs) _filter_active('apps', specs) _expand_libs_in_apps(specs) _filter_active('libs', specs) _filter_active('services', specs) _add_active_assets(specs)
def function[_get_expanded_active_specs, parameter[specs]]: constant[ This function removes any unnecessary bundles, apps, libs, and services that aren't needed by the activated_bundles. It also expands inside specs.apps.depends.libs all libs that are needed indirectly by each app ] call[name[_filter_active], parameter[name[constants].CONFIG_BUNDLES_KEY, name[specs]]] call[name[_filter_active], parameter[constant[apps], name[specs]]] call[name[_expand_libs_in_apps], parameter[name[specs]]] call[name[_filter_active], parameter[constant[libs], name[specs]]] call[name[_filter_active], parameter[constant[services], name[specs]]] call[name[_add_active_assets], parameter[name[specs]]]
keyword[def] identifier[_get_expanded_active_specs] ( identifier[specs] ): literal[string] identifier[_filter_active] ( identifier[constants] . identifier[CONFIG_BUNDLES_KEY] , identifier[specs] ) identifier[_filter_active] ( literal[string] , identifier[specs] ) identifier[_expand_libs_in_apps] ( identifier[specs] ) identifier[_filter_active] ( literal[string] , identifier[specs] ) identifier[_filter_active] ( literal[string] , identifier[specs] ) identifier[_add_active_assets] ( identifier[specs] )
def _get_expanded_active_specs(specs): """ This function removes any bundles, apps, libs, and services that aren't needed by the activated_bundles. It also expands, inside specs.apps.depends.libs, all libs that are needed indirectly by each app. """ _filter_active(constants.CONFIG_BUNDLES_KEY, specs) _filter_active('apps', specs) _expand_libs_in_apps(specs) _filter_active('libs', specs) _filter_active('services', specs) _add_active_assets(specs)
def quote_completions(self, completions, cword_prequote, first_colon_pos): ''' If the word under the cursor started with a quote (as indicated by a nonempty ``cword_prequote``), escapes occurrences of that quote character in the completions, and adds the quote to the beginning of each completion. Otherwise, escapes all characters that bash splits words on (``COMP_WORDBREAKS``), and removes portions of completions before the first colon. If there is only one completion, and it doesn't end with a **continuation character** (``/``, ``:``, or ``=``), adds a space after the completion. This method is exposed for overriding in subclasses; there is no need to use it directly. ''' comp_wordbreaks = os.environ.get('_ARGCOMPLETE_COMP_WORDBREAKS', os.environ.get('COMP_WORDBREAKS', self.wordbreaks)) if USING_PYTHON2: comp_wordbreaks = comp_wordbreaks.decode(sys_encoding) punctuation_chars = '();<>|&!`' for char in punctuation_chars: if char not in comp_wordbreaks: comp_wordbreaks += char # If the word under the cursor was quoted, escape the quote char and add the leading quote back in. # Otherwise, escape all COMP_WORDBREAKS chars. if cword_prequote == '': # Bash mangles completions which contain colons. # This workaround has the same effect as __ltrim_colon_completions in bash_completion. if first_colon_pos: completions = [c[first_colon_pos+1:] for c in completions] for wordbreak_char in comp_wordbreaks: completions = [c.replace(wordbreak_char, '\\'+wordbreak_char) for c in completions] else: if cword_prequote == '"': for char in '`$!': completions = [c.replace(char, '\\'+char) for c in completions] completions = [cword_prequote+c.replace(cword_prequote, '\\'+cword_prequote) for c in completions] # Note: similar functionality in bash is turned off by supplying the "-o nospace" option to complete. # We can't use that functionality because bash is not smart enough to recognize continuation characters (/) for # which no space should be added. continuation_chars = '=/:' if len(completions) == 1 and completions[0][-1] not in continuation_chars: if cword_prequote == '' and not completions[0].endswith(' '): completions[0] += ' ' return completions
def function[quote_completions, parameter[self, completions, cword_prequote, first_colon_pos]]: constant[ If the word under the cursor started with a quote (as indicated by a nonempty ``cword_prequote``), escapes occurrences of that quote character in the completions, and adds the quote to the beginning of each completion. Otherwise, escapes all characters that bash splits words on (``COMP_WORDBREAKS``), and removes portions of completions before the first colon. If there is only one completion, and it doesn't end with a **continuation character** (``/``, ``:``, or ``=``), adds a space after the completion. This method is exposed for overriding in subclasses; there is no need to use it directly. ] variable[comp_wordbreaks] assign[=] call[name[os].environ.get, parameter[constant[_ARGCOMPLETE_COMP_WORDBREAKS], call[name[os].environ.get, parameter[constant[COMP_WORDBREAKS], name[self].wordbreaks]]]] if name[USING_PYTHON2] begin[:] variable[comp_wordbreaks] assign[=] call[name[comp_wordbreaks].decode, parameter[name[sys_encoding]]] variable[punctuation_chars] assign[=] constant[();<>|&!`] for taget[name[char]] in starred[name[punctuation_chars]] begin[:] if compare[name[char] <ast.NotIn object at 0x7da2590d7190> name[comp_wordbreaks]] begin[:] <ast.AugAssign object at 0x7da1b17e2530> if compare[name[cword_prequote] equal[==] constant[]] begin[:] if name[first_colon_pos] begin[:] variable[completions] assign[=] <ast.ListComp object at 0x7da1b17e0d90> for taget[name[wordbreak_char]] in starred[name[comp_wordbreaks]] begin[:] variable[completions] assign[=] <ast.ListComp object at 0x7da1b17e2c80> variable[continuation_chars] assign[=] constant[=/:] if <ast.BoolOp object at 0x7da1b18813f0> begin[:] if <ast.BoolOp object at 0x7da1b1881900> begin[:] <ast.AugAssign object at 0x7da1b1883a90> return[name[completions]]
keyword[def] identifier[quote_completions] ( identifier[self] , identifier[completions] , identifier[cword_prequote] , identifier[first_colon_pos] ): literal[string] identifier[comp_wordbreaks] = identifier[os] . identifier[environ] . identifier[get] ( literal[string] , identifier[os] . identifier[environ] . identifier[get] ( literal[string] , identifier[self] . identifier[wordbreaks] )) keyword[if] identifier[USING_PYTHON2] : identifier[comp_wordbreaks] = identifier[comp_wordbreaks] . identifier[decode] ( identifier[sys_encoding] ) identifier[punctuation_chars] = literal[string] keyword[for] identifier[char] keyword[in] identifier[punctuation_chars] : keyword[if] identifier[char] keyword[not] keyword[in] identifier[comp_wordbreaks] : identifier[comp_wordbreaks] += identifier[char] keyword[if] identifier[cword_prequote] == literal[string] : keyword[if] identifier[first_colon_pos] : identifier[completions] =[ identifier[c] [ identifier[first_colon_pos] + literal[int] :] keyword[for] identifier[c] keyword[in] identifier[completions] ] keyword[for] identifier[wordbreak_char] keyword[in] identifier[comp_wordbreaks] : identifier[completions] =[ identifier[c] . identifier[replace] ( identifier[wordbreak_char] , literal[string] + identifier[wordbreak_char] ) keyword[for] identifier[c] keyword[in] identifier[completions] ] keyword[else] : keyword[if] identifier[cword_prequote] == literal[string] : keyword[for] identifier[char] keyword[in] literal[string] : identifier[completions] =[ identifier[c] . identifier[replace] ( identifier[char] , literal[string] + identifier[char] ) keyword[for] identifier[c] keyword[in] identifier[completions] ] identifier[completions] =[ identifier[cword_prequote] + identifier[c] . identifier[replace] ( identifier[cword_prequote] , literal[string] + identifier[cword_prequote] ) keyword[for] identifier[c] keyword[in] identifier[completions] ] identifier[continuation_chars] = literal[string] keyword[if] identifier[len] ( identifier[completions] )== literal[int] keyword[and] identifier[completions] [ literal[int] ][- literal[int] ] keyword[not] keyword[in] identifier[continuation_chars] : keyword[if] identifier[cword_prequote] == literal[string] keyword[and] keyword[not] identifier[completions] [ literal[int] ]. identifier[endswith] ( literal[string] ): identifier[completions] [ literal[int] ]+= literal[string] keyword[return] identifier[completions]
def quote_completions(self, completions, cword_prequote, first_colon_pos): """ If the word under the cursor started with a quote (as indicated by a nonempty ``cword_prequote``), escapes occurrences of that quote character in the completions, and adds the quote to the beginning of each completion. Otherwise, escapes all characters that bash splits words on (``COMP_WORDBREAKS``), and removes portions of completions before the first colon. If there is only one completion, and it doesn't end with a **continuation character** (``/``, ``:``, or ``=``), adds a space after the completion. This method is exposed for overriding in subclasses; there is no need to use it directly. """ comp_wordbreaks = os.environ.get('_ARGCOMPLETE_COMP_WORDBREAKS', os.environ.get('COMP_WORDBREAKS', self.wordbreaks)) if USING_PYTHON2: comp_wordbreaks = comp_wordbreaks.decode(sys_encoding) # depends on [control=['if'], data=[]] punctuation_chars = '();<>|&!`' for char in punctuation_chars: if char not in comp_wordbreaks: comp_wordbreaks += char # depends on [control=['if'], data=['char', 'comp_wordbreaks']] # depends on [control=['for'], data=['char']] # If the word under the cursor was quoted, escape the quote char and add the leading quote back in. # Otherwise, escape all COMP_WORDBREAKS chars. if cword_prequote == '': # Bash mangles completions which contain colons. # This workaround has the same effect as __ltrim_colon_completions in bash_completion. if first_colon_pos: completions = [c[first_colon_pos + 1:] for c in completions] # depends on [control=['if'], data=[]] for wordbreak_char in comp_wordbreaks: completions = [c.replace(wordbreak_char, '\\' + wordbreak_char) for c in completions] # depends on [control=['for'], data=['wordbreak_char']] # depends on [control=['if'], data=[]] else: if cword_prequote == '"': for char in '`$!': completions = [c.replace(char, '\\' + char) for c in completions] # depends on [control=['for'], data=['char']] # depends on [control=['if'], data=[]] completions = [cword_prequote + c.replace(cword_prequote, '\\' + cword_prequote) for c in completions] # Note: similar functionality in bash is turned off by supplying the "-o nospace" option to complete. # We can't use that functionality because bash is not smart enough to recognize continuation characters (/) for # which no space should be added. continuation_chars = '=/:' if len(completions) == 1 and completions[0][-1] not in continuation_chars: if cword_prequote == '' and (not completions[0].endswith(' ')): completions[0] += ' ' # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] return completions
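The unquoted-word escaping is the easiest branch to show standalone; the COMP_WORDBREAKS value below is a typical bash default, assumed here rather than read from the environment:

comp_wordbreaks = " \t\n\"'><=;|&(:"   # assumed default, normally from $COMP_WORDBREAKS
completions = ["file name.txt", "a&b"]
for ch in comp_wordbreaks:
    completions = [c.replace(ch, "\\" + ch) for c in completions]
print(completions)  # ['file\\ name.txt', 'a\\&b']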
def schema_term(self): """Return the Table term for this resource, which is referenced either by the `table` property or the `schema` property""" if not self.name: raise MetapackError("Resource for url '{}' does not have a name".format(self.url)) t = self.doc.find_first('Root.Table', value=self.get_value('name')) frm = 'name' if not t: t = self.doc.find_first('Root.Table', value=self.get_value('schema')) frm = 'schema' if not t: frm = None return t
def function[schema_term, parameter[self]]: constant[Return the Table term for this resource, which is referenced either by the `table` property or the `schema` property] if <ast.UnaryOp object at 0x7da1b196d8a0> begin[:] <ast.Raise object at 0x7da1b196fd90> variable[t] assign[=] call[name[self].doc.find_first, parameter[constant[Root.Table]]] variable[frm] assign[=] constant[name] if <ast.UnaryOp object at 0x7da1b19ad5a0> begin[:] variable[t] assign[=] call[name[self].doc.find_first, parameter[constant[Root.Table]]] variable[frm] assign[=] constant[schema] if <ast.UnaryOp object at 0x7da1b19af6d0> begin[:] variable[frm] assign[=] constant[None] return[name[t]]
keyword[def] identifier[schema_term] ( identifier[self] ): literal[string] keyword[if] keyword[not] identifier[self] . identifier[name] : keyword[raise] identifier[MetapackError] ( literal[string] . identifier[format] ( identifier[self] . identifier[url] )) identifier[t] = identifier[self] . identifier[doc] . identifier[find_first] ( literal[string] , identifier[value] = identifier[self] . identifier[get_value] ( literal[string] )) identifier[frm] = literal[string] keyword[if] keyword[not] identifier[t] : identifier[t] = identifier[self] . identifier[doc] . identifier[find_first] ( literal[string] , identifier[value] = identifier[self] . identifier[get_value] ( literal[string] )) identifier[frm] = literal[string] keyword[if] keyword[not] identifier[t] : identifier[frm] = keyword[None] keyword[return] identifier[t]
def schema_term(self): """Return the Table term for this resource, which is referenced either by the `table` property or the `schema` property""" if not self.name: raise MetapackError("Resource for url '{}' does not have a name".format(self.url)) # depends on [control=['if'], data=[]] t = self.doc.find_first('Root.Table', value=self.get_value('name')) frm = 'name' if not t: t = self.doc.find_first('Root.Table', value=self.get_value('schema')) frm = 'schema' # depends on [control=['if'], data=[]] if not t: frm = None # depends on [control=['if'], data=[]] return t
def process_unknown_arguments(unknowns): """Process arguments unknown to the parser""" result = argparse.Namespace() result.extra_control = {} # It would be interesting to use argparse internal # machinery for this for unknown in unknowns: # Check prefixes prefix = '--parameter-' if unknown.startswith(prefix): # process '=' values = unknown.split('=') if len(values) == 2: key = values[0][len(prefix):] val = values[1] if key: result.extra_control[key] = val return result
def function[process_unknown_arguments, parameter[unknowns]]: constant[Process arguments unknown to the parser] variable[result] assign[=] call[name[argparse].Namespace, parameter[]] name[result].extra_control assign[=] dictionary[[], []] for taget[name[unknown]] in starred[name[unknowns]] begin[:] variable[prefix] assign[=] constant[--parameter-] if call[name[unknown].startswith, parameter[name[prefix]]] begin[:] variable[values] assign[=] call[name[unknown].split, parameter[constant[=]]] if compare[call[name[len], parameter[name[values]]] equal[==] constant[2]] begin[:] variable[key] assign[=] call[call[name[values]][constant[0]]][<ast.Slice object at 0x7da20e74ae30>] variable[val] assign[=] call[name[values]][constant[1]] if name[key] begin[:] call[name[result].extra_control][name[key]] assign[=] name[val] return[name[result]]
keyword[def] identifier[process_unknown_arguments] ( identifier[unknowns] ): literal[string] identifier[result] = identifier[argparse] . identifier[Namespace] () identifier[result] . identifier[extra_control] ={} keyword[for] identifier[unknown] keyword[in] identifier[unknowns] : identifier[prefix] = literal[string] keyword[if] identifier[unknown] . identifier[startswith] ( identifier[prefix] ): identifier[values] = identifier[unknown] . identifier[split] ( literal[string] ) keyword[if] identifier[len] ( identifier[values] )== literal[int] : identifier[key] = identifier[values] [ literal[int] ][ identifier[len] ( identifier[prefix] ):] identifier[val] = identifier[values] [ literal[int] ] keyword[if] identifier[key] : identifier[result] . identifier[extra_control] [ identifier[key] ]= identifier[val] keyword[return] identifier[result]
def process_unknown_arguments(unknowns): """Process arguments unknown to the parser""" result = argparse.Namespace() result.extra_control = {} # It would be interesting to use argparse internal # machinery for this for unknown in unknowns: # Check prefixes prefix = '--parameter-' if unknown.startswith(prefix): # process '=' values = unknown.split('=') if len(values) == 2: key = values[0][len(prefix):] val = values[1] if key: result.extra_control[key] = val # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['unknown']] return result
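Usage sketch: parse_known_args() supplies the unknowns list that the helper above folds into extra_control; the parameter names are made up:

import argparse

parser = argparse.ArgumentParser()
_, unknowns = parser.parse_known_args(["--parameter-exposure=30", "--parameter-gain=2"])
ns = process_unknown_arguments(unknowns)
print(ns.extra_control)  # -> {'exposure': '30', 'gain': '2'}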
def detect(self, text): """Decide which language is used to write the text. The method tries first to detect the language with high reliability. If that is not possible, the method switches to a best-effort strategy. Args: text (string): A snippet of text; the longer it is, the more reliably we can detect the language used to write the text. """ t = text.encode("utf-8") reliable, index, top_3_choices = cld2.detect(t, bestEffort=False) if not reliable: self.reliable = False reliable, index, top_3_choices = cld2.detect(t, bestEffort=True) if not self.quiet: if not reliable: raise UnknownLanguage("Try passing a longer snippet of text") else: logger.warning("Detector is not able to detect the language reliably.") self.languages = [Language(x) for x in top_3_choices] self.language = self.languages[0] return self.language
def function[detect, parameter[self, text]]: constant[Decide which language is used to write the text. The method tries first to detect the language with high reliability. If that is not possible, the method switches to best effort strategy. Args: text (string): A snippet of text, the longer it is the more reliable we can detect the language used to write the text. ] variable[t] assign[=] call[name[text].encode, parameter[constant[utf-8]]] <ast.Tuple object at 0x7da20cabc700> assign[=] call[name[cld2].detect, parameter[name[t]]] if <ast.UnaryOp object at 0x7da20cabf2b0> begin[:] name[self].reliable assign[=] constant[False] <ast.Tuple object at 0x7da20cabdb10> assign[=] call[name[cld2].detect, parameter[name[t]]] if <ast.UnaryOp object at 0x7da20cabebf0> begin[:] if <ast.UnaryOp object at 0x7da20cabd480> begin[:] <ast.Raise object at 0x7da20cabed70> name[self].languages assign[=] <ast.ListComp object at 0x7da20cabfa00> name[self].language assign[=] call[name[self].languages][constant[0]] return[name[self].language]
keyword[def] identifier[detect] ( identifier[self] , identifier[text] ): literal[string] identifier[t] = identifier[text] . identifier[encode] ( literal[string] ) identifier[reliable] , identifier[index] , identifier[top_3_choices] = identifier[cld2] . identifier[detect] ( identifier[t] , identifier[bestEffort] = keyword[False] ) keyword[if] keyword[not] identifier[reliable] : identifier[self] . identifier[reliable] = keyword[False] identifier[reliable] , identifier[index] , identifier[top_3_choices] = identifier[cld2] . identifier[detect] ( identifier[t] , identifier[bestEffort] = keyword[True] ) keyword[if] keyword[not] identifier[self] . identifier[quiet] : keyword[if] keyword[not] identifier[reliable] : keyword[raise] identifier[UnknownLanguage] ( literal[string] ) keyword[else] : identifier[logger] . identifier[warning] ( literal[string] ) identifier[self] . identifier[languages] =[ identifier[Language] ( identifier[x] ) keyword[for] identifier[x] keyword[in] identifier[top_3_choices] ] identifier[self] . identifier[language] = identifier[self] . identifier[languages] [ literal[int] ] keyword[return] identifier[self] . identifier[language]
def detect(self, text): """Decide which language is used to write the text. The method tries first to detect the language with high reliability. If that is not possible, the method switches to a best-effort strategy. Args: text (string): A snippet of text; the longer it is, the more reliably we can detect the language used to write the text. """ t = text.encode('utf-8') (reliable, index, top_3_choices) = cld2.detect(t, bestEffort=False) if not reliable: self.reliable = False (reliable, index, top_3_choices) = cld2.detect(t, bestEffort=True) if not self.quiet: if not reliable: raise UnknownLanguage('Try passing a longer snippet of text') # depends on [control=['if'], data=[]] else: logger.warning('Detector is not able to detect the language reliably.') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] self.languages = [Language(x) for x in top_3_choices] self.language = self.languages[0] return self.language
def message_options(self): """ Convert config namespace to emails.Message namespace """ o = {} options = self.options for key in self._default_message_options: if key in options: o[key] = options[key] return o
def function[message_options, parameter[self]]: constant[ Convert config namespace to emails.Message namespace ] variable[o] assign[=] dictionary[[], []] variable[options] assign[=] name[self].options for taget[name[key]] in starred[name[self]._default_message_options] begin[:] if compare[name[key] in name[options]] begin[:] call[name[o]][name[key]] assign[=] call[name[options]][name[key]] return[name[o]]
keyword[def] identifier[message_options] ( identifier[self] ): literal[string] identifier[o] ={} identifier[options] = identifier[self] . identifier[options] keyword[for] identifier[key] keyword[in] identifier[self] . identifier[_default_message_options] : keyword[if] identifier[key] keyword[in] identifier[options] : identifier[o] [ identifier[key] ]= identifier[options] [ identifier[key] ] keyword[return] identifier[o]
def message_options(self): """ Convert config namespace to emails.Message namespace """ o = {} options = self.options for key in self._default_message_options: if key in options: o[key] = options[key] # depends on [control=['if'], data=['key', 'options']] # depends on [control=['for'], data=['key']] return o
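The same filter as a one-liner, shown on made-up data; a dict comprehension reads more directly than the explicit loop:

defaults = ("subject", "mail_from", "mail_to")            # hypothetical keys
options = {"subject": "Hi", "charset": "utf-8"}
print({k: options[k] for k in defaults if k in options})  # -> {'subject': 'Hi'}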
def get_all_response_headers(self): ''' Gets back all response headers. ''' bstr_headers = c_void_p() _WinHttpRequest._GetAllResponseHeaders(self, byref(bstr_headers)) bstr_headers = ctypes.cast(bstr_headers, c_wchar_p) headers = bstr_headers.value _SysFreeString(bstr_headers) return headers
def function[get_all_response_headers, parameter[self]]: constant[ Gets back all response headers. ] variable[bstr_headers] assign[=] call[name[c_void_p], parameter[]] call[name[_WinHttpRequest]._GetAllResponseHeaders, parameter[name[self], call[name[byref], parameter[name[bstr_headers]]]]] variable[bstr_headers] assign[=] call[name[ctypes].cast, parameter[name[bstr_headers], name[c_wchar_p]]] variable[headers] assign[=] name[bstr_headers].value call[name[_SysFreeString], parameter[name[bstr_headers]]] return[name[headers]]
keyword[def] identifier[get_all_response_headers] ( identifier[self] ): literal[string] identifier[bstr_headers] = identifier[c_void_p] () identifier[_WinHttpRequest] . identifier[_GetAllResponseHeaders] ( identifier[self] , identifier[byref] ( identifier[bstr_headers] )) identifier[bstr_headers] = identifier[ctypes] . identifier[cast] ( identifier[bstr_headers] , identifier[c_wchar_p] ) identifier[headers] = identifier[bstr_headers] . identifier[value] identifier[_SysFreeString] ( identifier[bstr_headers] ) keyword[return] identifier[headers]
def get_all_response_headers(self): """ Gets back all response headers. """ bstr_headers = c_void_p() _WinHttpRequest._GetAllResponseHeaders(self, byref(bstr_headers)) bstr_headers = ctypes.cast(bstr_headers, c_wchar_p) headers = bstr_headers.value _SysFreeString(bstr_headers) return headers
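The same BSTR round-trip in isolation, for readers unfamiliar with the pattern. This is a Windows-only sketch against the real oleaut32 APIs; the request class above is not needed for it.

import ctypes

oleaut32 = ctypes.windll.oleaut32
oleaut32.SysAllocString.restype = ctypes.c_void_p    # keep the full 64-bit pointer
oleaut32.SysFreeString.argtypes = [ctypes.c_void_p]

bstr = ctypes.c_void_p(oleaut32.SysAllocString("hello"))
print(ctypes.cast(bstr, ctypes.c_wchar_p).value)     # hello
oleaut32.SysFreeString(bstr)                         # free it, as the method does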
def prt_gene_aart_details(self, geneids, prt=sys.stdout): """For each gene, print ASCII art which represents its associated GO IDs.""" _go2nt = self.sortobj.grprobj.go2nt patgene = self.datobj.kws["fmtgene2"] patgo = self.datobj.kws["fmtgo2"] itemid2name = self.datobj.kws.get("itemid2name") chr2i = self.datobj.get_chr2idx() for geneid in geneids: gos_gene = self.gene2gos[geneid] symbol = "" if itemid2name is None else itemid2name.get(geneid, "") prt.write("\n") prt.write(patgene.format(AART=self.gene2aart[geneid], ID=geneid, NAME=symbol)) go2nt = {go:(_go2nt[go], "".join(self.go2chrs[go])) for go in gos_gene} for ntgo, abc in sorted(go2nt.values(), key=lambda t: [chr2i[t[1][:1]], t[0].NS, -1*t[0].dcnt]): prt.write("{ABC} ".format(ABC=abc)) prt.write(patgo.format(**ntgo._asdict()))
def function[prt_gene_aart_details, parameter[self, geneids, prt]]: constant[For each gene, print ASCII art which represents its associated GO IDs.] variable[_go2nt] assign[=] name[self].sortobj.grprobj.go2nt variable[patgene] assign[=] call[name[self].datobj.kws][constant[fmtgene2]] variable[patgo] assign[=] call[name[self].datobj.kws][constant[fmtgo2]] variable[itemid2name] assign[=] call[name[self].datobj.kws.get, parameter[constant[itemid2name]]] variable[chr2i] assign[=] call[name[self].datobj.get_chr2idx, parameter[]] for taget[name[geneid]] in starred[name[geneids]] begin[:] variable[gos_gene] assign[=] call[name[self].gene2gos][name[geneid]] variable[symbol] assign[=] <ast.IfExp object at 0x7da18f811750> call[name[prt].write, parameter[constant[ ]]] call[name[prt].write, parameter[call[name[patgene].format, parameter[]]]] variable[go2nt] assign[=] <ast.DictComp object at 0x7da18f813700> for taget[tuple[[<ast.Name object at 0x7da18f810a90>, <ast.Name object at 0x7da18f811300>]]] in starred[call[name[sorted], parameter[call[name[go2nt].values, parameter[]]]]] begin[:] call[name[prt].write, parameter[call[constant[{ABC} ].format, parameter[]]]] call[name[prt].write, parameter[call[name[patgo].format, parameter[]]]]
keyword[def] identifier[prt_gene_aart_details] ( identifier[self] , identifier[geneids] , identifier[prt] = identifier[sys] . identifier[stdout] ): literal[string] identifier[_go2nt] = identifier[self] . identifier[sortobj] . identifier[grprobj] . identifier[go2nt] identifier[patgene] = identifier[self] . identifier[datobj] . identifier[kws] [ literal[string] ] identifier[patgo] = identifier[self] . identifier[datobj] . identifier[kws] [ literal[string] ] identifier[itemid2name] = identifier[self] . identifier[datobj] . identifier[kws] . identifier[get] ( literal[string] ) identifier[chr2i] = identifier[self] . identifier[datobj] . identifier[get_chr2idx] () keyword[for] identifier[geneid] keyword[in] identifier[geneids] : identifier[gos_gene] = identifier[self] . identifier[gene2gos] [ identifier[geneid] ] identifier[symbol] = literal[string] keyword[if] identifier[itemid2name] keyword[is] keyword[None] keyword[else] identifier[itemid2name] . identifier[get] ( identifier[geneid] , literal[string] ) identifier[prt] . identifier[write] ( literal[string] ) identifier[prt] . identifier[write] ( identifier[patgene] . identifier[format] ( identifier[AART] = identifier[self] . identifier[gene2aart] [ identifier[geneid] ], identifier[ID] = identifier[geneid] , identifier[NAME] = identifier[symbol] )) identifier[go2nt] ={ identifier[go] :( identifier[_go2nt] [ identifier[go] ], literal[string] . identifier[join] ( identifier[self] . identifier[go2chrs] [ identifier[go] ])) keyword[for] identifier[go] keyword[in] identifier[gos_gene] } keyword[for] identifier[ntgo] , identifier[abc] keyword[in] identifier[sorted] ( identifier[go2nt] . identifier[values] (), identifier[key] = keyword[lambda] identifier[t] :[ identifier[chr2i] [ identifier[t] [ literal[int] ][: literal[int] ]], identifier[t] [ literal[int] ]. identifier[NS] ,- literal[int] * identifier[t] [ literal[int] ]. identifier[dcnt] ]): identifier[prt] . identifier[write] ( literal[string] . identifier[format] ( identifier[ABC] = identifier[abc] )) identifier[prt] . identifier[write] ( identifier[patgo] . identifier[format] (** identifier[ntgo] . identifier[_asdict] ()))
def prt_gene_aart_details(self, geneids, prt=sys.stdout): """For each gene, print ASCII art which represents its associated GO IDs.""" _go2nt = self.sortobj.grprobj.go2nt patgene = self.datobj.kws['fmtgene2'] patgo = self.datobj.kws['fmtgo2'] itemid2name = self.datobj.kws.get('itemid2name') chr2i = self.datobj.get_chr2idx() for geneid in geneids: gos_gene = self.gene2gos[geneid] symbol = '' if itemid2name is None else itemid2name.get(geneid, '') prt.write('\n') prt.write(patgene.format(AART=self.gene2aart[geneid], ID=geneid, NAME=symbol)) go2nt = {go: (_go2nt[go], ''.join(self.go2chrs[go])) for go in gos_gene} for (ntgo, abc) in sorted(go2nt.values(), key=lambda t: [chr2i[t[1][:1]], t[0].NS, -1 * t[0].dcnt]): prt.write('{ABC} '.format(ABC=abc)) prt.write(patgo.format(**ntgo._asdict())) # depends on [control=['for'], data=[]] # depends on [control=['for'], data=['geneid']]
def init_proxy(self, config): """Initialize and start proxy mode. If proxy configuration entry is not contained in the config this is a no op. Causes handler to become an instance of WSGIProxMiddleware. :param dict config: The configuration object used to configure this instance of FrontEndApp """ proxy_config = config.get('proxy') if not proxy_config: return if isinstance(proxy_config, str): proxy_coll = proxy_config proxy_config = {} else: proxy_coll = proxy_config['coll'] if '/' in proxy_coll: raise Exception('Proxy collection can not contain "/"') proxy_config['ca_name'] = proxy_config.get('ca_name', self.PROXY_CA_NAME) proxy_config['ca_file_cache'] = proxy_config.get('ca_file_cache', self.PROXY_CA_PATH) if proxy_config.get('recording'): logging.info('Proxy recording into collection "{0}"'.format(proxy_coll)) if proxy_coll in self.warcserver.list_fixed_routes(): raise Exception('Can not record into fixed collection') proxy_coll += self.RECORD_ROUTE if not config.get('recorder'): config['recorder'] = 'live' else: logging.info('Proxy enabled for collection "{0}"'.format(proxy_coll)) if proxy_config.get('enable_content_rewrite', True): self.proxy_prefix = '/{0}/bn_/'.format(proxy_coll) else: self.proxy_prefix = '/{0}/id_/'.format(proxy_coll) self.proxy_default_timestamp = proxy_config.get('default_timestamp') if self.proxy_default_timestamp: if not self.ALL_DIGITS.match(self.proxy_default_timestamp): try: self.proxy_default_timestamp = iso_date_to_timestamp(self.proxy_default_timestamp) except: raise Exception('Invalid Proxy Timestamp: Must Be All-Digit Timestamp or ISO Date Format') self.proxy_coll = proxy_coll self.handler = WSGIProxMiddleware(self.handle_request, self.proxy_route_request, proxy_host=proxy_config.get('host', 'pywb.proxy'), proxy_options=proxy_config)
def function[init_proxy, parameter[self, config]]: constant[Initialize and start proxy mode. If proxy configuration entry is not contained in the config this is a no op. Causes handler to become an instance of WSGIProxMiddleware. :param dict config: The configuration object used to configure this instance of FrontEndApp ] variable[proxy_config] assign[=] call[name[config].get, parameter[constant[proxy]]] if <ast.UnaryOp object at 0x7da18f58fa00> begin[:] return[None] if call[name[isinstance], parameter[name[proxy_config], name[str]]] begin[:] variable[proxy_coll] assign[=] name[proxy_config] variable[proxy_config] assign[=] dictionary[[], []] if compare[constant[/] in name[proxy_coll]] begin[:] <ast.Raise object at 0x7da18f58ef20> call[name[proxy_config]][constant[ca_name]] assign[=] call[name[proxy_config].get, parameter[constant[ca_name], name[self].PROXY_CA_NAME]] call[name[proxy_config]][constant[ca_file_cache]] assign[=] call[name[proxy_config].get, parameter[constant[ca_file_cache], name[self].PROXY_CA_PATH]] if call[name[proxy_config].get, parameter[constant[recording]]] begin[:] call[name[logging].info, parameter[call[constant[Proxy recording into collection "{0}"].format, parameter[name[proxy_coll]]]]] if compare[name[proxy_coll] in call[name[self].warcserver.list_fixed_routes, parameter[]]] begin[:] <ast.Raise object at 0x7da20c7c9cf0> <ast.AugAssign object at 0x7da20c7ca380> if <ast.UnaryOp object at 0x7da20c7c9ed0> begin[:] call[name[config]][constant[recorder]] assign[=] constant[live] if call[name[proxy_config].get, parameter[constant[enable_content_rewrite], constant[True]]] begin[:] name[self].proxy_prefix assign[=] call[constant[/{0}/bn_/].format, parameter[name[proxy_coll]]] name[self].proxy_default_timestamp assign[=] call[name[proxy_config].get, parameter[constant[default_timestamp]]] if name[self].proxy_default_timestamp begin[:] if <ast.UnaryOp object at 0x7da1b1e98f70> begin[:] <ast.Try object at 0x7da1b1e9bc10> name[self].proxy_coll assign[=] name[proxy_coll] name[self].handler assign[=] call[name[WSGIProxMiddleware], parameter[name[self].handle_request, name[self].proxy_route_request]]
keyword[def] identifier[init_proxy] ( identifier[self] , identifier[config] ): literal[string] identifier[proxy_config] = identifier[config] . identifier[get] ( literal[string] ) keyword[if] keyword[not] identifier[proxy_config] : keyword[return] keyword[if] identifier[isinstance] ( identifier[proxy_config] , identifier[str] ): identifier[proxy_coll] = identifier[proxy_config] identifier[proxy_config] ={} keyword[else] : identifier[proxy_coll] = identifier[proxy_config] [ literal[string] ] keyword[if] literal[string] keyword[in] identifier[proxy_coll] : keyword[raise] identifier[Exception] ( literal[string] ) identifier[proxy_config] [ literal[string] ]= identifier[proxy_config] . identifier[get] ( literal[string] , identifier[self] . identifier[PROXY_CA_NAME] ) identifier[proxy_config] [ literal[string] ]= identifier[proxy_config] . identifier[get] ( literal[string] , identifier[self] . identifier[PROXY_CA_PATH] ) keyword[if] identifier[proxy_config] . identifier[get] ( literal[string] ): identifier[logging] . identifier[info] ( literal[string] . identifier[format] ( identifier[proxy_coll] )) keyword[if] identifier[proxy_coll] keyword[in] identifier[self] . identifier[warcserver] . identifier[list_fixed_routes] (): keyword[raise] identifier[Exception] ( literal[string] ) identifier[proxy_coll] += identifier[self] . identifier[RECORD_ROUTE] keyword[if] keyword[not] identifier[config] . identifier[get] ( literal[string] ): identifier[config] [ literal[string] ]= literal[string] keyword[else] : identifier[logging] . identifier[info] ( literal[string] . identifier[format] ( identifier[proxy_coll] )) keyword[if] identifier[proxy_config] . identifier[get] ( literal[string] , keyword[True] ): identifier[self] . identifier[proxy_prefix] = literal[string] . identifier[format] ( identifier[proxy_coll] ) keyword[else] : identifier[self] . identifier[proxy_prefix] = literal[string] . identifier[format] ( identifier[proxy_coll] ) identifier[self] . identifier[proxy_default_timestamp] = identifier[proxy_config] . identifier[get] ( literal[string] ) keyword[if] identifier[self] . identifier[proxy_default_timestamp] : keyword[if] keyword[not] identifier[self] . identifier[ALL_DIGITS] . identifier[match] ( identifier[self] . identifier[proxy_default_timestamp] ): keyword[try] : identifier[self] . identifier[proxy_default_timestamp] = identifier[iso_date_to_timestamp] ( identifier[self] . identifier[proxy_default_timestamp] ) keyword[except] : keyword[raise] identifier[Exception] ( literal[string] ) identifier[self] . identifier[proxy_coll] = identifier[proxy_coll] identifier[self] . identifier[handler] = identifier[WSGIProxMiddleware] ( identifier[self] . identifier[handle_request] , identifier[self] . identifier[proxy_route_request] , identifier[proxy_host] = identifier[proxy_config] . identifier[get] ( literal[string] , literal[string] ), identifier[proxy_options] = identifier[proxy_config] )
def init_proxy(self, config): """Initialize and start proxy mode. If proxy configuration entry is not contained in the config this is a no op. Causes handler to become an instance of WSGIProxMiddleware. :param dict config: The configuration object used to configure this instance of FrontEndApp """ proxy_config = config.get('proxy') if not proxy_config: return # depends on [control=['if'], data=[]] if isinstance(proxy_config, str): proxy_coll = proxy_config proxy_config = {} # depends on [control=['if'], data=[]] else: proxy_coll = proxy_config['coll'] if '/' in proxy_coll: raise Exception('Proxy collection can not contain "/"') # depends on [control=['if'], data=[]] proxy_config['ca_name'] = proxy_config.get('ca_name', self.PROXY_CA_NAME) proxy_config['ca_file_cache'] = proxy_config.get('ca_file_cache', self.PROXY_CA_PATH) if proxy_config.get('recording'): logging.info('Proxy recording into collection "{0}"'.format(proxy_coll)) if proxy_coll in self.warcserver.list_fixed_routes(): raise Exception('Can not record into fixed collection') # depends on [control=['if'], data=[]] proxy_coll += self.RECORD_ROUTE if not config.get('recorder'): config['recorder'] = 'live' # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] else: logging.info('Proxy enabled for collection "{0}"'.format(proxy_coll)) if proxy_config.get('enable_content_rewrite', True): self.proxy_prefix = '/{0}/bn_/'.format(proxy_coll) # depends on [control=['if'], data=[]] else: self.proxy_prefix = '/{0}/id_/'.format(proxy_coll) self.proxy_default_timestamp = proxy_config.get('default_timestamp') if self.proxy_default_timestamp: if not self.ALL_DIGITS.match(self.proxy_default_timestamp): try: self.proxy_default_timestamp = iso_date_to_timestamp(self.proxy_default_timestamp) # depends on [control=['try'], data=[]] except: raise Exception('Invalid Proxy Timestamp: Must Be All-Digit Timestamp or ISO Date Format') # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] self.proxy_coll = proxy_coll self.handler = WSGIProxMiddleware(self.handle_request, self.proxy_route_request, proxy_host=proxy_config.get('host', 'pywb.proxy'), proxy_options=proxy_config)
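An illustrative config dict of the shape init_proxy() inspects; the key names are taken from the method above, the values are made up.

config = {
    'proxy': {
        'coll': 'my-archive',                # collection to proxy into
        'recording': True,                   # route into the record collection
        'default_timestamp': '2020-01-01',   # ISO date, converted to a timestamp
        'host': 'pywb.proxy',
        'enable_content_rewrite': True,
    },
}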
def search(self, category = None, cuisine = None, location = (None, None), radius = None, tl_coord = (None, None), \
                   br_coord = (None, None), name = None, country = None, locality = None, \
                   region = None, postal_code = None, street_address = None,\
                   website_url = None, has_menu = None, open_at = None):
        '''
        Locu Venue Search API Call Wrapper
        Args:
        *Note that none of the arguments are required
        category : List of category types that need to be filtered by:
                 ['restaurant', 'spa', 'beauty salon', 'gym', 'laundry', 'hair care', 'other']
                 type : [string]
        cuisine : List of cuisine types that need to be filtered by:
                 ['american', 'italian', ...]
                 type : [string]
        location : Tuple that consists of (latitude, longitude) coordinates
                 type : tuple(float, float)
        radius : Radius around the given lat, long
                 type : float
        tl_coord : Tuple that consists of (latitude, longitude) for bounding box top left coordinates
                 type : tuple(float, float)
        br_coord : Tuple that consists of (latitude, longitude) for bounding box bottom right coordinates
                 type : tuple(float, float)
        name : Name of the venue
                 type : string
        country : Country where venue is located
                 type : string
        locality : Locality. Ex 'San Francisco'
                 type : string
        region : Region/state. Ex. 'CA'
                 type : string
        postal_code : Postal code
                 type : string
        street_address : Address
                 type : string
        open_at : Search for venues open at the specified time
                 type : datetime
        website_url : Filter by a website url
                 type : string
        has_menu : Filter venues that have menus in them
                 type : boolean
        Returns:
        A dictionary with the data returned by the server
        Raises:
        HttpException with the error message from the server
        '''
        params = self._get_params(category = category, cuisine = cuisine, location = location, radius = radius, tl_coord = tl_coord, \
                        br_coord = br_coord, name = name, country = country, locality = locality, \
                        region = region, postal_code = postal_code, street_address = street_address, \
                        website_url = website_url, has_menu = has_menu, open_at = open_at)
        return self._create_query('search', params)
def function[search, parameter[self, category, cuisine, location, radius, tl_coord, br_coord, name, country, locality, region, postal_code, street_address, website_url, has_menu, open_at]]: constant[
        Locu Venue Search API Call Wrapper
        Args:
        *Note that none of the arguments are required
        category : List of category types that need to be filtered by:
                 ['restaurant', 'spa', 'beauty salon', 'gym', 'laundry', 'hair care', 'other']
                 type : [string]
        cuisine : List of cuisine types that need to be filtered by:
                 ['american', 'italian', ...]
                 type : [string]
        location : Tuple that consists of (latitude, longitude) coordinates
                 type : tuple(float, float)
        radius : Radius around the given lat, long
                 type : float
        tl_coord : Tuple that consists of (latitude, longitude) for bounding box top left coordinates
                 type : tuple(float, float)
        br_coord : Tuple that consists of (latitude, longitude) for bounding box bottom right coordinates
                 type : tuple(float, float)
        name : Name of the venue
                 type : string
        country : Country where venue is located
                 type : string
        locality : Locality. Ex 'San Francisco'
                 type : string
        region : Region/state. Ex. 'CA'
                 type : string
        postal_code : Postal code
                 type : string
        street_address : Address
                 type : string
        open_at : Search for venues open at the specified time
                 type : datetime
        website_url : Filter by a website url
                 type : string
        has_menu : Filter venues that have menus in them
                 type : boolean
        Returns:
        A dictionary with the data returned by the server
        Raises:
        HttpException with the error message from the server
        ] variable[params] assign[=] call[name[self]._get_params, parameter[]] return[call[name[self]._create_query, parameter[constant[search], name[params]]]]
keyword[def] identifier[search] ( identifier[self] , identifier[category] = keyword[None] , identifier[cuisine] = keyword[None] , identifier[location] =( keyword[None] , keyword[None] ), identifier[radius] = keyword[None] , identifier[tl_coord] =( keyword[None] , keyword[None] ), identifier[br_coord] =( keyword[None] , keyword[None] ), identifier[name] = keyword[None] , identifier[country] = keyword[None] , identifier[locality] = keyword[None] , identifier[region] = keyword[None] , identifier[postal_code] = keyword[None] , identifier[street_address] = keyword[None] , identifier[website_url] = keyword[None] , identifier[has_menu] = keyword[None] , identifier[open_at] = keyword[None] ): literal[string] identifier[params] = identifier[self] . identifier[_get_params] ( identifier[category] = identifier[category] , identifier[cuisine] = identifier[cuisine] , identifier[location] = identifier[location] , identifier[radius] = identifier[radius] , identifier[tl_coord] = identifier[tl_coord] , identifier[br_coord] = identifier[br_coord] , identifier[name] = identifier[name] , identifier[country] = identifier[country] , identifier[locality] = identifier[locality] , identifier[region] = identifier[region] , identifier[postal_code] = identifier[postal_code] , identifier[street_address] = identifier[street_address] , identifier[website_url] = identifier[website_url] , identifier[has_menu] = identifier[has_menu] , identifier[open_at] = identifier[open_at] ) keyword[return] identifier[self] . identifier[_create_query] ( literal[string] , identifier[params] )
def search(self, category=None, cuisine=None, location=(None, None), radius=None, tl_coord=(None, None), br_coord=(None, None), name=None, country=None, locality=None, region=None, postal_code=None, street_address=None, website_url=None, has_menu=None, open_at=None):
    """
        Locu Venue Search API Call Wrapper
        Args:
        *Note that none of the arguments are required
        category : List of category types that need to be filtered by:
                 ['restaurant', 'spa', 'beauty salon', 'gym', 'laundry', 'hair care', 'other']
                 type : [string]
        cuisine : List of cuisine types that need to be filtered by:
                 ['american', 'italian', ...]
                 type : [string]
        location : Tuple that consists of (latitude, longitude) coordinates
                 type : tuple(float, float)
        radius : Radius around the given lat, long
                 type : float
        tl_coord : Tuple that consists of (latitude, longitude) for bounding box top left coordinates
                 type : tuple(float, float)
        br_coord : Tuple that consists of (latitude, longitude) for bounding box bottom right coordinates
                 type : tuple(float, float)
        name : Name of the venue
                 type : string
        country : Country where venue is located
                 type : string
        locality : Locality. Ex 'San Francisco'
                 type : string
        region : Region/state. Ex. 'CA'
                 type : string
        postal_code : Postal code
                 type : string
        street_address : Address
                 type : string
        open_at : Search for venues open at the specified time
                 type : datetime
        website_url : Filter by a website url
                 type : string
        has_menu : Filter venues that have menus in them
                 type : boolean
        Returns:
        A dictionary with the data returned by the server
        Raises:
        HttpException with the error message from the server
        """
    params = self._get_params(category=category, cuisine=cuisine, location=location, radius=radius, tl_coord=tl_coord, br_coord=br_coord, name=name, country=country, locality=locality, region=region, postal_code=postal_code, street_address=street_address, website_url=website_url, has_menu=has_menu, open_at=open_at)
    return self._create_query('search', params)
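A hypothetical call sketch; `venue_api` stands for whatever object exposes this search() method, and the filter values are illustrative.

results = venue_api.search(
    category=['restaurant'],
    cuisine=['italian'],
    location=(37.7749, -122.4194),   # (lat, lng)
    radius=2.5,
    locality='San Francisco',
    region='CA',
    has_menu=True,
)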
def describe_arguments(func): """ Analyze a function's signature and return a data structure suitable for passing in as arguments to an argparse parser's add_argument() method.""" argspec = inspect.getargspec(func) # we should probably raise an exception somewhere if func includes **kwargs if argspec.defaults: positional_args = argspec.args[:-len(argspec.defaults)] keyword_names = argspec.args[-len(argspec.defaults):] for arg, default in zip(keyword_names, argspec.defaults): yield ('--{}'.format(arg),), {'default': default} else: positional_args = argspec.args for arg in positional_args: yield (arg,), {} if argspec.varargs: yield (argspec.varargs,), {'nargs': '*'}
def function[describe_arguments, parameter[func]]: constant[ Analyze a function's signature and return a data structure suitable for passing in as arguments to an argparse parser's add_argument() method.] variable[argspec] assign[=] call[name[inspect].getargspec, parameter[name[func]]] if name[argspec].defaults begin[:] variable[positional_args] assign[=] call[name[argspec].args][<ast.Slice object at 0x7da18dc983d0>] variable[keyword_names] assign[=] call[name[argspec].args][<ast.Slice object at 0x7da18dc98760>] for taget[tuple[[<ast.Name object at 0x7da18dc9bf10>, <ast.Name object at 0x7da18dc9a4d0>]]] in starred[call[name[zip], parameter[name[keyword_names], name[argspec].defaults]]] begin[:] <ast.Yield object at 0x7da18dc99240> for taget[name[arg]] in starred[name[positional_args]] begin[:] <ast.Yield object at 0x7da18dc987c0> if name[argspec].varargs begin[:] <ast.Yield object at 0x7da18dc981f0>
keyword[def] identifier[describe_arguments] ( identifier[func] ): literal[string] identifier[argspec] = identifier[inspect] . identifier[getargspec] ( identifier[func] ) keyword[if] identifier[argspec] . identifier[defaults] : identifier[positional_args] = identifier[argspec] . identifier[args] [:- identifier[len] ( identifier[argspec] . identifier[defaults] )] identifier[keyword_names] = identifier[argspec] . identifier[args] [- identifier[len] ( identifier[argspec] . identifier[defaults] ):] keyword[for] identifier[arg] , identifier[default] keyword[in] identifier[zip] ( identifier[keyword_names] , identifier[argspec] . identifier[defaults] ): keyword[yield] ( literal[string] . identifier[format] ( identifier[arg] ),),{ literal[string] : identifier[default] } keyword[else] : identifier[positional_args] = identifier[argspec] . identifier[args] keyword[for] identifier[arg] keyword[in] identifier[positional_args] : keyword[yield] ( identifier[arg] ,),{} keyword[if] identifier[argspec] . identifier[varargs] : keyword[yield] ( identifier[argspec] . identifier[varargs] ,),{ literal[string] : literal[string] }
def describe_arguments(func): """ Analyze a function's signature and return a data structure suitable for passing in as arguments to an argparse parser's add_argument() method.""" argspec = inspect.getargspec(func) # we should probably raise an exception somewhere if func includes **kwargs if argspec.defaults: positional_args = argspec.args[:-len(argspec.defaults)] keyword_names = argspec.args[-len(argspec.defaults):] for (arg, default) in zip(keyword_names, argspec.defaults): yield (('--{}'.format(arg),), {'default': default}) # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]] else: positional_args = argspec.args for arg in positional_args: yield ((arg,), {}) # depends on [control=['for'], data=['arg']] if argspec.varargs: yield ((argspec.varargs,), {'nargs': '*'}) # depends on [control=['if'], data=[]]
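A runnable demonstration that wires the generated argument specs into argparse. It assumes describe_arguments from above is in scope and a Python version where inspect.getargspec still exists (it was removed in 3.11). Note that defaulted parameters come out as plain --options whose parsed values stay strings.

import argparse

def copy(src, dest, verbose=False, *extra):
    pass

parser = argparse.ArgumentParser()
for names, options in describe_arguments(copy):
    parser.add_argument(*names, **options)

ns = parser.parse_args(['a.txt', 'b.txt', '--verbose', 'yes'])
print(ns.src, ns.dest, ns.verbose, ns.extra)   # a.txt b.txt yes []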
def on_site(self): """ Return entries published on current site. """ return super(EntryPublishedManager, self).get_queryset().filter( sites=Site.objects.get_current())
def function[on_site, parameter[self]]: constant[ Return entries published on current site. ] return[call[call[call[name[super], parameter[name[EntryPublishedManager], name[self]]].get_queryset, parameter[]].filter, parameter[]]]
keyword[def] identifier[on_site] ( identifier[self] ): literal[string] keyword[return] identifier[super] ( identifier[EntryPublishedManager] , identifier[self] ). identifier[get_queryset] (). identifier[filter] ( identifier[sites] = identifier[Site] . identifier[objects] . identifier[get_current] ())
def on_site(self): """ Return entries published on current site. """ return super(EntryPublishedManager, self).get_queryset().filter(sites=Site.objects.get_current())
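Hypothetical usage, assuming a zinnia-style Entry model that installs this manager as `published`, in a project with django.contrib.sites enabled:

entries_here = Entry.published.on_site()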
def get_top_segmentations(table, n): """ Parameters ---------- table : matrix of probabilities Each cell (i, j) of `table` gives the probability that i and j are in the same symbol. n : int Number of best segmentations which get returned """ stroke_count = list(range(len(table))) topf = TopFinder(n) for curr_segmentation in all_segmentations(stroke_count): curr_seg_score = score_segmentation(curr_segmentation, table) topf.push(curr_segmentation, curr_seg_score) for el, score in topf: yield [normalize_segmentation(el), score]
def function[get_top_segmentations, parameter[table, n]]: constant[ Parameters ---------- table : matrix of probabilities Each cell (i, j) of `table` gives the probability that i and j are in the same symbol. n : int Number of best segmentations which get returned ] variable[stroke_count] assign[=] call[name[list], parameter[call[name[range], parameter[call[name[len], parameter[name[table]]]]]]] variable[topf] assign[=] call[name[TopFinder], parameter[name[n]]] for taget[name[curr_segmentation]] in starred[call[name[all_segmentations], parameter[name[stroke_count]]]] begin[:] variable[curr_seg_score] assign[=] call[name[score_segmentation], parameter[name[curr_segmentation], name[table]]] call[name[topf].push, parameter[name[curr_segmentation], name[curr_seg_score]]] for taget[tuple[[<ast.Name object at 0x7da1b281b580>, <ast.Name object at 0x7da1b2819f00>]]] in starred[name[topf]] begin[:] <ast.Yield object at 0x7da1b2818670>
keyword[def] identifier[get_top_segmentations] ( identifier[table] , identifier[n] ): literal[string] identifier[stroke_count] = identifier[list] ( identifier[range] ( identifier[len] ( identifier[table] ))) identifier[topf] = identifier[TopFinder] ( identifier[n] ) keyword[for] identifier[curr_segmentation] keyword[in] identifier[all_segmentations] ( identifier[stroke_count] ): identifier[curr_seg_score] = identifier[score_segmentation] ( identifier[curr_segmentation] , identifier[table] ) identifier[topf] . identifier[push] ( identifier[curr_segmentation] , identifier[curr_seg_score] ) keyword[for] identifier[el] , identifier[score] keyword[in] identifier[topf] : keyword[yield] [ identifier[normalize_segmentation] ( identifier[el] ), identifier[score] ]
def get_top_segmentations(table, n): """ Parameters ---------- table : matrix of probabilities Each cell (i, j) of `table` gives the probability that i and j are in the same symbol. n : int Number of best segmentations which get returned """ stroke_count = list(range(len(table))) topf = TopFinder(n) for curr_segmentation in all_segmentations(stroke_count): curr_seg_score = score_segmentation(curr_segmentation, table) topf.push(curr_segmentation, curr_seg_score) # depends on [control=['for'], data=['curr_segmentation']] for (el, score) in topf: yield [normalize_segmentation(el), score] # depends on [control=['for'], data=[]]
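An illustrative call, assuming the helper functions used above (TopFinder, all_segmentations, score_segmentation, normalize_segmentation) are importable from the surrounding module. Cell (i, j) holds the probability that strokes i and j belong to the same symbol, so strokes 0 and 1 below almost certainly form one symbol.

table = [[1.0, 0.9, 0.1],
         [0.9, 1.0, 0.2],
         [0.1, 0.2, 1.0]]

for segmentation, score in get_top_segmentations(table, 2):
    print(segmentation, score)   # expect [[0, 1], [2]] to rank first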
def start(self, *args, **kw):
        """
        Start the daemon
        """
        # Check for a pidfile to see if the daemon already runs
        try:
            pidfile = open(self.pidfile, 'r')
            pid = int(pidfile.read().strip())
            pidfile.close()
        except IOError:
            pid = None

        if pid:
            message = "pidfile %s already exists. Daemon already running?\n"
            sys.stderr.write(message % self.pidfile)
            sys.exit(1)

        # Start the daemon
        self.daemonize()
        self.run(*args, **kw)
def function[start, parameter[self]]: constant[
        Start the daemon
        ] <ast.Try object at 0x7da18f58ff70> if name[pid] begin[:] variable[message] assign[=] constant[pidfile %s already exists. Daemon already running?
] call[name[sys].stderr.write, parameter[binary_operation[name[message] <ast.Mod object at 0x7da2590d6920> name[self].pidfile]]] call[name[sys].exit, parameter[constant[1]]] call[name[self].daemonize, parameter[]] call[name[self].run, parameter[<ast.Starred object at 0x7da1b254d0c0>]]
keyword[def] identifier[start] ( identifier[self] ,* identifier[args] ,** identifier[kw] ): literal[string] keyword[try] : identifier[pidfile] = identifier[open] ( identifier[self] . identifier[pidfile] , literal[string] ) identifier[pid] = identifier[int] ( identifier[pidfile] . identifier[read] (). identifier[strip] ()) identifier[pidfile] . identifier[close] () keyword[except] identifier[IOError] : identifier[pid] = keyword[None] keyword[if] identifier[pid] : identifier[message] = literal[string] identifier[sys] . identifier[stderr] . identifier[write] ( identifier[message] % identifier[self] . identifier[pidfile] ) identifier[sys] . identifier[exit] ( literal[int] ) identifier[self] . identifier[daemonize] () identifier[self] . identifier[run] (* identifier[args] ,** identifier[kw] )
def start(self, *args, **kw): """ Start the daemon """ # Check for a pidfile to see if the daemon already runs try: pidfile = open(self.pidfile, 'r') pid = int(pidfile.read().strip()) pidfile.close() # depends on [control=['try'], data=[]] except IOError: pid = None # depends on [control=['except'], data=[]] if pid: message = 'pidfile %s already exists. Daemon already running?\n' sys.stderr.write(message % self.pidfile) sys.exit(1) # depends on [control=['if'], data=[]] # Start the daemon self.daemonize() self.run(*args, **kw)
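Hypothetical usage, assuming this method belongs to the classic daemon-recipe base class (here called Daemon) whose constructor takes the pidfile path and whose subclasses override run():

import time

class SleepyDaemon(Daemon):
    def run(self, *args, **kw):
        while True:
            time.sleep(60)

SleepyDaemon('/tmp/sleepy.pid').start()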
def download_file(filename, session): """ Downloads a file """ print('Downloading file %s' % filename) infilesource = os.path.join('sftp://' + ADDRESS + WORKING_DIR, filename) infiletarget = os.path.join(os.getcwd(), filename) incoming = saga.filesystem.File(infilesource, session=session, flags=OVERWRITE) incoming.copy(infiletarget) print('Transfer of `%s` to `%s` successful' % (filename, infiletarget))
def function[download_file, parameter[filename, session]]: constant[ Downloads a file ] call[name[print], parameter[binary_operation[constant[Downloading file %s] <ast.Mod object at 0x7da2590d6920> name[filename]]]] variable[infilesource] assign[=] call[name[os].path.join, parameter[binary_operation[binary_operation[constant[sftp://] + name[ADDRESS]] + name[WORKING_DIR]], name[filename]]] variable[infiletarget] assign[=] call[name[os].path.join, parameter[call[name[os].getcwd, parameter[]], name[filename]]] variable[incoming] assign[=] call[name[saga].filesystem.File, parameter[name[infilesource]]] call[name[incoming].copy, parameter[name[infiletarget]]] call[name[print], parameter[binary_operation[constant[Transfer of `%s` to `%s` successful] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b0349ba0>, <ast.Name object at 0x7da1b034a650>]]]]]
keyword[def] identifier[download_file] ( identifier[filename] , identifier[session] ): literal[string] identifier[print] ( literal[string] % identifier[filename] ) identifier[infilesource] = identifier[os] . identifier[path] . identifier[join] ( literal[string] + identifier[ADDRESS] + identifier[WORKING_DIR] , identifier[filename] ) identifier[infiletarget] = identifier[os] . identifier[path] . identifier[join] ( identifier[os] . identifier[getcwd] (), identifier[filename] ) identifier[incoming] = identifier[saga] . identifier[filesystem] . identifier[File] ( identifier[infilesource] , identifier[session] = identifier[session] , identifier[flags] = identifier[OVERWRITE] ) identifier[incoming] . identifier[copy] ( identifier[infiletarget] ) identifier[print] ( literal[string] %( identifier[filename] , identifier[infiletarget] ))
def download_file(filename, session): """ Downloads a file """ print('Downloading file %s' % filename) infilesource = os.path.join('sftp://' + ADDRESS + WORKING_DIR, filename) infiletarget = os.path.join(os.getcwd(), filename) incoming = saga.filesystem.File(infilesource, session=session, flags=OVERWRITE) incoming.copy(infiletarget) print('Transfer of `%s` to `%s` successful' % (filename, infiletarget))
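A hypothetical driver, assuming radical-saga is installed and the module-level ADDRESS, WORKING_DIR and OVERWRITE constants point at a reachable SFTP host:

import saga

session = saga.Session()
download_file('results.csv', session)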
def _setup_catalog(portal, catalog_id, catalog_definition): """ Given a catalog definition it updates the indexes, columns and content_type definitions of the catalog. :portal: the Plone site object :catalog_id: a string as the catalog id :catalog_definition: a dictionary like { 'types': ['ContentType', ...], 'indexes': { 'UID': 'FieldIndex', ... }, 'columns': [ 'Title', ... ] } """ reindex = False catalog = getToolByName(portal, catalog_id, None) if catalog is None: logger.warning('Could not find the %s tool.' % (catalog_id)) return False # Indexes indexes_ids = catalog_definition.get('indexes', {}).keys() # Indexing for idx in indexes_ids: # The function returns if the index needs to be reindexed indexed = _addIndex(catalog, idx, catalog_definition['indexes'][idx]) reindex = True if indexed else reindex # Removing indexes in_catalog_idxs = catalog.indexes() to_remove = list(set(in_catalog_idxs)-set(indexes_ids)) for idx in to_remove: # The function returns if the index has been deleted desindexed = _delIndex(catalog, idx) reindex = True if desindexed else reindex # Columns columns_ids = catalog_definition.get('columns', []) for col in columns_ids: created = _addColumn(catalog, col) reindex = True if created else reindex # Removing columns in_catalog_cols = catalog.schema() to_remove = list(set(in_catalog_cols)-set(columns_ids)) for col in to_remove: # The function returns if the index has been deleted desindexed = _delColumn(catalog, col) reindex = True if desindexed else reindex return reindex
def function[_setup_catalog, parameter[portal, catalog_id, catalog_definition]]: constant[ Given a catalog definition it updates the indexes, columns and content_type definitions of the catalog. :portal: the Plone site object :catalog_id: a string as the catalog id :catalog_definition: a dictionary like { 'types': ['ContentType', ...], 'indexes': { 'UID': 'FieldIndex', ... }, 'columns': [ 'Title', ... ] } ] variable[reindex] assign[=] constant[False] variable[catalog] assign[=] call[name[getToolByName], parameter[name[portal], name[catalog_id], constant[None]]] if compare[name[catalog] is constant[None]] begin[:] call[name[logger].warning, parameter[binary_operation[constant[Could not find the %s tool.] <ast.Mod object at 0x7da2590d6920> name[catalog_id]]]] return[constant[False]] variable[indexes_ids] assign[=] call[call[name[catalog_definition].get, parameter[constant[indexes], dictionary[[], []]]].keys, parameter[]] for taget[name[idx]] in starred[name[indexes_ids]] begin[:] variable[indexed] assign[=] call[name[_addIndex], parameter[name[catalog], name[idx], call[call[name[catalog_definition]][constant[indexes]]][name[idx]]]] variable[reindex] assign[=] <ast.IfExp object at 0x7da207f981f0> variable[in_catalog_idxs] assign[=] call[name[catalog].indexes, parameter[]] variable[to_remove] assign[=] call[name[list], parameter[binary_operation[call[name[set], parameter[name[in_catalog_idxs]]] - call[name[set], parameter[name[indexes_ids]]]]]] for taget[name[idx]] in starred[name[to_remove]] begin[:] variable[desindexed] assign[=] call[name[_delIndex], parameter[name[catalog], name[idx]]] variable[reindex] assign[=] <ast.IfExp object at 0x7da207f9ba30> variable[columns_ids] assign[=] call[name[catalog_definition].get, parameter[constant[columns], list[[]]]] for taget[name[col]] in starred[name[columns_ids]] begin[:] variable[created] assign[=] call[name[_addColumn], parameter[name[catalog], name[col]]] variable[reindex] assign[=] <ast.IfExp object at 0x7da207f992d0> variable[in_catalog_cols] assign[=] call[name[catalog].schema, parameter[]] variable[to_remove] assign[=] call[name[list], parameter[binary_operation[call[name[set], parameter[name[in_catalog_cols]]] - call[name[set], parameter[name[columns_ids]]]]]] for taget[name[col]] in starred[name[to_remove]] begin[:] variable[desindexed] assign[=] call[name[_delColumn], parameter[name[catalog], name[col]]] variable[reindex] assign[=] <ast.IfExp object at 0x7da18eb56320> return[name[reindex]]
keyword[def] identifier[_setup_catalog] ( identifier[portal] , identifier[catalog_id] , identifier[catalog_definition] ): literal[string] identifier[reindex] = keyword[False] identifier[catalog] = identifier[getToolByName] ( identifier[portal] , identifier[catalog_id] , keyword[None] ) keyword[if] identifier[catalog] keyword[is] keyword[None] : identifier[logger] . identifier[warning] ( literal[string] %( identifier[catalog_id] )) keyword[return] keyword[False] identifier[indexes_ids] = identifier[catalog_definition] . identifier[get] ( literal[string] ,{}). identifier[keys] () keyword[for] identifier[idx] keyword[in] identifier[indexes_ids] : identifier[indexed] = identifier[_addIndex] ( identifier[catalog] , identifier[idx] , identifier[catalog_definition] [ literal[string] ][ identifier[idx] ]) identifier[reindex] = keyword[True] keyword[if] identifier[indexed] keyword[else] identifier[reindex] identifier[in_catalog_idxs] = identifier[catalog] . identifier[indexes] () identifier[to_remove] = identifier[list] ( identifier[set] ( identifier[in_catalog_idxs] )- identifier[set] ( identifier[indexes_ids] )) keyword[for] identifier[idx] keyword[in] identifier[to_remove] : identifier[desindexed] = identifier[_delIndex] ( identifier[catalog] , identifier[idx] ) identifier[reindex] = keyword[True] keyword[if] identifier[desindexed] keyword[else] identifier[reindex] identifier[columns_ids] = identifier[catalog_definition] . identifier[get] ( literal[string] ,[]) keyword[for] identifier[col] keyword[in] identifier[columns_ids] : identifier[created] = identifier[_addColumn] ( identifier[catalog] , identifier[col] ) identifier[reindex] = keyword[True] keyword[if] identifier[created] keyword[else] identifier[reindex] identifier[in_catalog_cols] = identifier[catalog] . identifier[schema] () identifier[to_remove] = identifier[list] ( identifier[set] ( identifier[in_catalog_cols] )- identifier[set] ( identifier[columns_ids] )) keyword[for] identifier[col] keyword[in] identifier[to_remove] : identifier[desindexed] = identifier[_delColumn] ( identifier[catalog] , identifier[col] ) identifier[reindex] = keyword[True] keyword[if] identifier[desindexed] keyword[else] identifier[reindex] keyword[return] identifier[reindex]
def _setup_catalog(portal, catalog_id, catalog_definition): """ Given a catalog definition it updates the indexes, columns and content_type definitions of the catalog. :portal: the Plone site object :catalog_id: a string as the catalog id :catalog_definition: a dictionary like { 'types': ['ContentType', ...], 'indexes': { 'UID': 'FieldIndex', ... }, 'columns': [ 'Title', ... ] } """ reindex = False catalog = getToolByName(portal, catalog_id, None) if catalog is None: logger.warning('Could not find the %s tool.' % catalog_id) return False # depends on [control=['if'], data=[]] # Indexes indexes_ids = catalog_definition.get('indexes', {}).keys() # Indexing for idx in indexes_ids: # The function returns if the index needs to be reindexed indexed = _addIndex(catalog, idx, catalog_definition['indexes'][idx]) reindex = True if indexed else reindex # depends on [control=['for'], data=['idx']] # Removing indexes in_catalog_idxs = catalog.indexes() to_remove = list(set(in_catalog_idxs) - set(indexes_ids)) for idx in to_remove: # The function returns if the index has been deleted desindexed = _delIndex(catalog, idx) reindex = True if desindexed else reindex # depends on [control=['for'], data=['idx']] # Columns columns_ids = catalog_definition.get('columns', []) for col in columns_ids: created = _addColumn(catalog, col) reindex = True if created else reindex # depends on [control=['for'], data=['col']] # Removing columns in_catalog_cols = catalog.schema() to_remove = list(set(in_catalog_cols) - set(columns_ids)) for col in to_remove: # The function returns if the index has been deleted desindexed = _delColumn(catalog, col) reindex = True if desindexed else reindex # depends on [control=['for'], data=['col']] return reindex
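An illustrative catalog_definition of the shape this function consumes, taken from the docstring above; `portal` is whatever Plone site object the caller holds.

definition = {
    'types': ['Document'],
    'indexes': {'UID': 'FieldIndex', 'created': 'DateIndex'},
    'columns': ['Title', 'UID'],
}
needs_reindex = _setup_catalog(portal, 'portal_catalog', definition)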
def validate(self, nonce): """Does the nonce exist and is it valid for the request?""" if self.debug: print("Checking nonce " + str(nonce),file=sys.stderr) try: opaque, ip, expiretime = self.get(nonce) #pylint: disable=unused-variable if expiretime < time.time(): if self.debug: print("Nonce expired",file=sys.stderr) self.remove(nonce) return False elif ip != flask.request.remote_addr: if self.debug: print("Nonce IP mismatch",file=sys.stderr) self.remove(nonce) return False else: return True except KeyError: if self.debug: print("Nonce " + nonce + " does not exist",file=sys.stderr) return False
def function[validate, parameter[self, nonce]]: constant[Does the nonce exist and is it valid for the request?] if name[self].debug begin[:] call[name[print], parameter[binary_operation[constant[Checking nonce ] + call[name[str], parameter[name[nonce]]]]]] <ast.Try object at 0x7da18c4ceb30>
keyword[def] identifier[validate] ( identifier[self] , identifier[nonce] ): literal[string] keyword[if] identifier[self] . identifier[debug] : identifier[print] ( literal[string] + identifier[str] ( identifier[nonce] ), identifier[file] = identifier[sys] . identifier[stderr] ) keyword[try] : identifier[opaque] , identifier[ip] , identifier[expiretime] = identifier[self] . identifier[get] ( identifier[nonce] ) keyword[if] identifier[expiretime] < identifier[time] . identifier[time] (): keyword[if] identifier[self] . identifier[debug] : identifier[print] ( literal[string] , identifier[file] = identifier[sys] . identifier[stderr] ) identifier[self] . identifier[remove] ( identifier[nonce] ) keyword[return] keyword[False] keyword[elif] identifier[ip] != identifier[flask] . identifier[request] . identifier[remote_addr] : keyword[if] identifier[self] . identifier[debug] : identifier[print] ( literal[string] , identifier[file] = identifier[sys] . identifier[stderr] ) identifier[self] . identifier[remove] ( identifier[nonce] ) keyword[return] keyword[False] keyword[else] : keyword[return] keyword[True] keyword[except] identifier[KeyError] : keyword[if] identifier[self] . identifier[debug] : identifier[print] ( literal[string] + identifier[nonce] + literal[string] , identifier[file] = identifier[sys] . identifier[stderr] ) keyword[return] keyword[False]
def validate(self, nonce): """Does the nonce exist and is it valid for the request?""" if self.debug: print('Checking nonce ' + str(nonce), file=sys.stderr) # depends on [control=['if'], data=[]] try: (opaque, ip, expiretime) = self.get(nonce) #pylint: disable=unused-variable if expiretime < time.time(): if self.debug: print('Nonce expired', file=sys.stderr) # depends on [control=['if'], data=[]] self.remove(nonce) return False # depends on [control=['if'], data=[]] elif ip != flask.request.remote_addr: if self.debug: print('Nonce IP mismatch', file=sys.stderr) # depends on [control=['if'], data=[]] self.remove(nonce) return False # depends on [control=['if'], data=[]] else: return True # depends on [control=['try'], data=[]] except KeyError: if self.debug: print('Nonce ' + nonce + ' does not exist', file=sys.stderr) # depends on [control=['if'], data=[]] return False # depends on [control=['except'], data=[]]
def _From(self, t): """ Handle "from xyz import foo, bar as baz". """ # fixme: Are From and ImportFrom handled differently? self._fill("from ") self._write(t.modname) self._write(" import ") for i, (name,asname) in enumerate(t.names): if i != 0: self._write(", ") self._write(name) if asname is not None: self._write(" as "+asname)
def function[_From, parameter[self, t]]: constant[ Handle "from xyz import foo, bar as baz". ] call[name[self]._fill, parameter[constant[from ]]] call[name[self]._write, parameter[name[t].modname]] call[name[self]._write, parameter[constant[ import ]]] for taget[tuple[[<ast.Name object at 0x7da1b12d9b40>, <ast.Tuple object at 0x7da1b12d9a50>]]] in starred[call[name[enumerate], parameter[name[t].names]]] begin[:] if compare[name[i] not_equal[!=] constant[0]] begin[:] call[name[self]._write, parameter[constant[, ]]] call[name[self]._write, parameter[name[name]]] if compare[name[asname] is_not constant[None]] begin[:] call[name[self]._write, parameter[binary_operation[constant[ as ] + name[asname]]]]
keyword[def] identifier[_From] ( identifier[self] , identifier[t] ): literal[string] identifier[self] . identifier[_fill] ( literal[string] ) identifier[self] . identifier[_write] ( identifier[t] . identifier[modname] ) identifier[self] . identifier[_write] ( literal[string] ) keyword[for] identifier[i] ,( identifier[name] , identifier[asname] ) keyword[in] identifier[enumerate] ( identifier[t] . identifier[names] ): keyword[if] identifier[i] != literal[int] : identifier[self] . identifier[_write] ( literal[string] ) identifier[self] . identifier[_write] ( identifier[name] ) keyword[if] identifier[asname] keyword[is] keyword[not] keyword[None] : identifier[self] . identifier[_write] ( literal[string] + identifier[asname] )
def _From(self, t): """ Handle "from xyz import foo, bar as baz". """ # fixme: Are From and ImportFrom handled differently? self._fill('from ') self._write(t.modname) self._write(' import ') for (i, (name, asname)) in enumerate(t.names): if i != 0: self._write(', ') # depends on [control=['if'], data=[]] self._write(name) if asname is not None: self._write(' as ' + asname) # depends on [control=['if'], data=['asname']] # depends on [control=['for'], data=[]]
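For reference, the target output of this unparser fragment can be checked against the modern ast module (Python 3.9+), which renders the same statement shape:

import ast

print(ast.unparse(ast.parse('from os.path import join, split as sp')))
# from os.path import join, split as sp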
def make(game, state=State.DEFAULT, inttype=retro.data.Integrations.DEFAULT, **kwargs): """ Create a Gym environment for the specified game """ try: retro.data.get_romfile_path(game, inttype) except FileNotFoundError: if not retro.data.get_file_path(game, "rom.sha", inttype): raise else: raise FileNotFoundError('Game not found: %s. Did you make sure to import the ROM?' % game) return RetroEnv(game, state, inttype=inttype, **kwargs)
def function[make, parameter[game, state, inttype]]: constant[ Create a Gym environment for the specified game ] <ast.Try object at 0x7da18c4cd6f0> return[call[name[RetroEnv], parameter[name[game], name[state]]]]
keyword[def] identifier[make] ( identifier[game] , identifier[state] = identifier[State] . identifier[DEFAULT] , identifier[inttype] = identifier[retro] . identifier[data] . identifier[Integrations] . identifier[DEFAULT] ,** identifier[kwargs] ): literal[string] keyword[try] : identifier[retro] . identifier[data] . identifier[get_romfile_path] ( identifier[game] , identifier[inttype] ) keyword[except] identifier[FileNotFoundError] : keyword[if] keyword[not] identifier[retro] . identifier[data] . identifier[get_file_path] ( identifier[game] , literal[string] , identifier[inttype] ): keyword[raise] keyword[else] : keyword[raise] identifier[FileNotFoundError] ( literal[string] % identifier[game] ) keyword[return] identifier[RetroEnv] ( identifier[game] , identifier[state] , identifier[inttype] = identifier[inttype] ,** identifier[kwargs] )
def make(game, state=State.DEFAULT, inttype=retro.data.Integrations.DEFAULT, **kwargs): """ Create a Gym environment for the specified game """ try: retro.data.get_romfile_path(game, inttype) # depends on [control=['try'], data=[]] except FileNotFoundError: if not retro.data.get_file_path(game, 'rom.sha', inttype): raise # depends on [control=['if'], data=[]] else: raise FileNotFoundError('Game not found: %s. Did you make sure to import the ROM?' % game) # depends on [control=['except'], data=[]] return RetroEnv(game, state, inttype=inttype, **kwargs)
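Standard usage, matching the gym-retro README (the Airstriker-Genesis ROM ships with the package, so no ROM import step is needed):

import retro

env = retro.make(game='Airstriker-Genesis')
obs = env.reset()
env.close()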
async def post_data(self, path, data=None, headers=None, timeout=None): """Perform a POST request.""" url = self.base_url + path _LOGGER.debug('POST URL: %s', url) self._log_data(data, False) resp = None try: resp = await self._session.post( url, headers=headers, data=data, timeout=DEFAULT_TIMEOUT if timeout is None else timeout) if resp.content_length is not None: resp_data = await resp.read() else: resp_data = None self._log_data(resp_data, True) return resp_data, resp.status except Exception as ex: if resp is not None: resp.close() raise ex finally: if resp is not None: await resp.release()
<ast.AsyncFunctionDef object at 0x7da18f720dc0>
keyword[async] keyword[def] identifier[post_data] ( identifier[self] , identifier[path] , identifier[data] = keyword[None] , identifier[headers] = keyword[None] , identifier[timeout] = keyword[None] ): literal[string] identifier[url] = identifier[self] . identifier[base_url] + identifier[path] identifier[_LOGGER] . identifier[debug] ( literal[string] , identifier[url] ) identifier[self] . identifier[_log_data] ( identifier[data] , keyword[False] ) identifier[resp] = keyword[None] keyword[try] : identifier[resp] = keyword[await] identifier[self] . identifier[_session] . identifier[post] ( identifier[url] , identifier[headers] = identifier[headers] , identifier[data] = identifier[data] , identifier[timeout] = identifier[DEFAULT_TIMEOUT] keyword[if] identifier[timeout] keyword[is] keyword[None] keyword[else] identifier[timeout] ) keyword[if] identifier[resp] . identifier[content_length] keyword[is] keyword[not] keyword[None] : identifier[resp_data] = keyword[await] identifier[resp] . identifier[read] () keyword[else] : identifier[resp_data] = keyword[None] identifier[self] . identifier[_log_data] ( identifier[resp_data] , keyword[True] ) keyword[return] identifier[resp_data] , identifier[resp] . identifier[status] keyword[except] identifier[Exception] keyword[as] identifier[ex] : keyword[if] identifier[resp] keyword[is] keyword[not] keyword[None] : identifier[resp] . identifier[close] () keyword[raise] identifier[ex] keyword[finally] : keyword[if] identifier[resp] keyword[is] keyword[not] keyword[None] : keyword[await] identifier[resp] . identifier[release] ()
async def post_data(self, path, data=None, headers=None, timeout=None): """Perform a POST request.""" url = self.base_url + path _LOGGER.debug('POST URL: %s', url) self._log_data(data, False) resp = None try: resp = await self._session.post(url, headers=headers, data=data, timeout=DEFAULT_TIMEOUT if timeout is None else timeout) if resp.content_length is not None: resp_data = await resp.read() # depends on [control=['if'], data=[]] else: resp_data = None self._log_data(resp_data, True) return (resp_data, resp.status) # depends on [control=['try'], data=[]] except Exception as ex: if resp is not None: resp.close() # depends on [control=['if'], data=['resp']] raise ex # depends on [control=['except'], data=['ex']] finally: if resp is not None: await resp.release() # depends on [control=['if'], data=['resp']]
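A hypothetical driver for the coroutine, assuming `client` is an instance of the owning class with an aiohttp.ClientSession and base_url already wired up:

import asyncio

async def demo(client):
    body, status = await client.post_data('/ping', data=b'{}',
                                          headers={'Content-Type': 'application/json'})
    print(status, body)

# asyncio.run(demo(client))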
def search_point(self, lat, lng, filters=None, startDate=None, endDate=None, types=None, type=None): ''' Perform a catalog search over a specific point, specified by lat,lng Args: lat: latitude lng: longitude filters: Array of filters. Optional. Example: [ "(sensorPlatformName = 'WORLDVIEW01' OR sensorPlatformName ='QUICKBIRD02')", "cloudCover < 10", "offNadirAngle < 10" ] startDate: string. Optional. Example: "2004-01-01T00:00:00.000Z" endDate: string. Optional. Example: "2004-01-01T00:00:00.000Z" types: Array of types to search for. Optional. Example (and default): ["Acquisition"] Returns: catalog search resultset ''' searchAreaWkt = "POLYGON ((%s %s, %s %s, %s %s, %s %s, %s %s))" % (lng, lat,lng,lat,lng,lat,lng,lat,lng,lat) return self.search(searchAreaWkt=searchAreaWkt, filters=filters, startDate=startDate, endDate=endDate, types=types)
def function[search_point, parameter[self, lat, lng, filters, startDate, endDate, types, type]]: constant[ Perform a catalog search over a specific point, specified by lat,lng Args: lat: latitude lng: longitude filters: Array of filters. Optional. Example: [ "(sensorPlatformName = 'WORLDVIEW01' OR sensorPlatformName ='QUICKBIRD02')", "cloudCover < 10", "offNadirAngle < 10" ] startDate: string. Optional. Example: "2004-01-01T00:00:00.000Z" endDate: string. Optional. Example: "2004-01-01T00:00:00.000Z" types: Array of types to search for. Optional. Example (and default): ["Acquisition"] Returns: catalog search resultset ] variable[searchAreaWkt] assign[=] binary_operation[constant[POLYGON ((%s %s, %s %s, %s %s, %s %s, %s %s))] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b01411e0>, <ast.Name object at 0x7da1b01404f0>, <ast.Name object at 0x7da1b0142560>, <ast.Name object at 0x7da1b0142710>, <ast.Name object at 0x7da1b0140910>, <ast.Name object at 0x7da1b0142dd0>, <ast.Name object at 0x7da1b01405b0>, <ast.Name object at 0x7da1b0142d70>, <ast.Name object at 0x7da1b0142aa0>, <ast.Name object at 0x7da1b0142b60>]]] return[call[name[self].search, parameter[]]]
keyword[def] identifier[search_point] ( identifier[self] , identifier[lat] , identifier[lng] , identifier[filters] = keyword[None] , identifier[startDate] = keyword[None] , identifier[endDate] = keyword[None] , identifier[types] = keyword[None] , identifier[type] = keyword[None] ): literal[string] identifier[searchAreaWkt] = literal[string] %( identifier[lng] , identifier[lat] , identifier[lng] , identifier[lat] , identifier[lng] , identifier[lat] , identifier[lng] , identifier[lat] , identifier[lng] , identifier[lat] ) keyword[return] identifier[self] . identifier[search] ( identifier[searchAreaWkt] = identifier[searchAreaWkt] , identifier[filters] = identifier[filters] , identifier[startDate] = identifier[startDate] , identifier[endDate] = identifier[endDate] , identifier[types] = identifier[types] )
def search_point(self, lat, lng, filters=None, startDate=None, endDate=None, types=None, type=None): """ Perform a catalog search over a specific point, specified by lat,lng Args: lat: latitude lng: longitude filters: Array of filters. Optional. Example: [ "(sensorPlatformName = 'WORLDVIEW01' OR sensorPlatformName ='QUICKBIRD02')", "cloudCover < 10", "offNadirAngle < 10" ] startDate: string. Optional. Example: "2004-01-01T00:00:00.000Z" endDate: string. Optional. Example: "2004-01-01T00:00:00.000Z" types: Array of types to search for. Optional. Example (and default): ["Acquisition"] Returns: catalog search resultset """ searchAreaWkt = 'POLYGON ((%s %s, %s %s, %s %s, %s %s, %s %s))' % (lng, lat, lng, lat, lng, lat, lng, lat, lng, lat) return self.search(searchAreaWkt=searchAreaWkt, filters=filters, startDate=startDate, endDate=endDate, types=types)
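The method only wraps the point in a degenerate five-vertex polygon; the WKT it builds can be reproduced directly (illustrative coordinates):

lat, lng = 37.7749, -122.4194
print("POLYGON ((%s %s, %s %s, %s %s, %s %s, %s %s))" % ((lng, lat) * 5))
# POLYGON ((-122.4194 37.7749, -122.4194 37.7749, ... five identical vertices))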
def _raw_sentences(paracrawl_file): """Generates Unicode strings, one for each <seg> in a ParaCrawl data file. Also decodes some of the most common HTML entities found in ParaCrawl data. Args: paracrawl_file: A ParaCrawl V3.0 en-.. data file. Yields: One Unicode string for each <seg> element in the ParaCrawl data file. """ for line_utf8 in paracrawl_file: line_uni = line_utf8.decode('UTF-8') text_match = re.match(r' +<seg>(.*)</seg>$', line_uni) if text_match: txt = text_match.group(1) txt = re.sub(r'&amp;', r'&', txt) txt = re.sub(r'& ?amp;', r'&', txt) txt = re.sub(r'& ?apos;', r"'", txt) txt = re.sub(r'& ?quot;', r'"', txt) txt = re.sub(r'& ?lt;', r'<', txt) txt = re.sub(r'& ?gt;', r'>', txt) yield txt
def function[_raw_sentences, parameter[paracrawl_file]]: constant[Generates Unicode strings, one for each <seg> in a ParaCrawl data file. Also decodes some of the most common HTML entities found in ParaCrawl data. Args: paracrawl_file: A ParaCrawl V3.0 en-.. data file. Yields: One Unicode string for each <seg> element in the ParaCrawl data file. ] for taget[name[line_utf8]] in starred[name[paracrawl_file]] begin[:] variable[line_uni] assign[=] call[name[line_utf8].decode, parameter[constant[UTF-8]]] variable[text_match] assign[=] call[name[re].match, parameter[constant[ +<seg>(.*)</seg>$], name[line_uni]]] if name[text_match] begin[:] variable[txt] assign[=] call[name[text_match].group, parameter[constant[1]]] variable[txt] assign[=] call[name[re].sub, parameter[constant[&amp;], constant[&], name[txt]]] variable[txt] assign[=] call[name[re].sub, parameter[constant[& ?amp;], constant[&], name[txt]]] variable[txt] assign[=] call[name[re].sub, parameter[constant[& ?apos;], constant['], name[txt]]] variable[txt] assign[=] call[name[re].sub, parameter[constant[& ?quot;], constant["], name[txt]]] variable[txt] assign[=] call[name[re].sub, parameter[constant[& ?lt;], constant[<], name[txt]]] variable[txt] assign[=] call[name[re].sub, parameter[constant[& ?gt;], constant[>], name[txt]]] <ast.Yield object at 0x7da1b208a4d0>
keyword[def] identifier[_raw_sentences] ( identifier[paracrawl_file] ): literal[string] keyword[for] identifier[line_utf8] keyword[in] identifier[paracrawl_file] : identifier[line_uni] = identifier[line_utf8] . identifier[decode] ( literal[string] ) identifier[text_match] = identifier[re] . identifier[match] ( literal[string] , identifier[line_uni] ) keyword[if] identifier[text_match] : identifier[txt] = identifier[text_match] . identifier[group] ( literal[int] ) identifier[txt] = identifier[re] . identifier[sub] ( literal[string] , literal[string] , identifier[txt] ) identifier[txt] = identifier[re] . identifier[sub] ( literal[string] , literal[string] , identifier[txt] ) identifier[txt] = identifier[re] . identifier[sub] ( literal[string] , literal[string] , identifier[txt] ) identifier[txt] = identifier[re] . identifier[sub] ( literal[string] , literal[string] , identifier[txt] ) identifier[txt] = identifier[re] . identifier[sub] ( literal[string] , literal[string] , identifier[txt] ) identifier[txt] = identifier[re] . identifier[sub] ( literal[string] , literal[string] , identifier[txt] ) keyword[yield] identifier[txt]
def _raw_sentences(paracrawl_file): """Generates Unicode strings, one for each <seg> in a ParaCrawl data file. Also decodes some of the most common HTML entities found in ParaCrawl data. Args: paracrawl_file: A ParaCrawl V3.0 en-.. data file. Yields: One Unicode string for each <seg> element in the ParaCrawl data file. """ for line_utf8 in paracrawl_file: line_uni = line_utf8.decode('UTF-8') text_match = re.match(' +<seg>(.*)</seg>$', line_uni) if text_match: txt = text_match.group(1) txt = re.sub('&amp;', '&', txt) txt = re.sub('& ?amp;', '&', txt) txt = re.sub('& ?apos;', "'", txt) txt = re.sub('& ?quot;', '"', txt) txt = re.sub('& ?lt;', '<', txt) txt = re.sub('& ?gt;', '>', txt) yield txt # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['line_utf8']]
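A small demo of the <seg> extraction and entity decoding performed by _raw_sentences, assuming the function above (and its re import) is in scope; io.BytesIO stands in for a real ParaCrawl data file, and the two byte lines imitate its TMX-style formatting.

import io

sample = io.BytesIO(
    b"   <seg>Tom &amp; Jerry</seg>\n"
    b"   <seg>She said &quot;hello&quot; &lt;twice&gt;</seg>\n")
for sentence in _raw_sentences(sample):
    print(sentence)
# Tom & Jerry
# She said "hello" <twice>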
def add(self, files, items):
    """ Add a list of files with a reference to a list of objects.
    """
    if isinstance(files, (str, bytes)):
        files = iter([files])

    for pathname in files:
        try:
            values = self._filemap[pathname]
        except KeyError:
            # Copy here: the original stored `items` by reference, so the
            # same list object could end up shared (and later mutated)
            # across several pathnames.
            self._filemap[pathname] = list(items)
        else:
            values.extend(items)
def function[add, parameter[self, files, items]]: constant[ Add a list of files with a reference to a list of objects. ] if call[name[isinstance], parameter[name[files], tuple[[<ast.Name object at 0x7da18eb55870>, <ast.Name object at 0x7da18eb57760>]]]] begin[:] variable[files] assign[=] call[name[iter], parameter[list[[<ast.Name object at 0x7da18eb55ba0>]]]] for taget[name[pathname]] in starred[name[files]] begin[:] <ast.Try object at 0x7da18eb56ce0>
keyword[def] identifier[add] ( identifier[self] , identifier[files] , identifier[items] ): literal[string] keyword[if] identifier[isinstance] ( identifier[files] ,( identifier[str] , identifier[bytes] )): identifier[files] = identifier[iter] ([ identifier[files] ]) keyword[for] identifier[pathname] keyword[in] identifier[files] : keyword[try] : identifier[values] = identifier[self] . identifier[_filemap] [ identifier[pathname] ] keyword[except] identifier[KeyError] : identifier[self] . identifier[_filemap] [ identifier[pathname] ]= identifier[items] keyword[else] : identifier[values] . identifier[extend] ( identifier[items] )
def add(self, files, items): """ Add a list of files with a reference to a list of objects. """ if isinstance(files, (str, bytes)): files = iter([files]) # depends on [control=['if'], data=[]] for pathname in files: try: values = self._filemap[pathname] # depends on [control=['try'], data=[]] except KeyError: self._filemap[pathname] = items # depends on [control=['except'], data=[]] else: values.extend(items) # depends on [control=['for'], data=['pathname']]
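A minimal usage sketch for add above; Registry is a hypothetical host class supplying the _filemap attribute the method expects.

class Registry:
    def __init__(self):
        self._filemap = {}
    add = add  # reuse the module-level function above as a method

r = Registry()
r.add("a.txt", ["obj1"])
r.add(["a.txt", "b.txt"], ["obj2"])
print(r._filemap)
# {'a.txt': ['obj1', 'obj2'], 'b.txt': ['obj2']}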
def scatter_drag( x_points: 'Array', y_points: 'Array', *, fig=None, show_eqn=True, options={} ): """ Generates an interactive scatter plot with the best fit line plotted over the points. The points can be dragged by the user and the line will automatically update. Args: x_points (Array Number): x-values of points to plot y_points (Array Number): y-values of points to plot Kwargs: show_eqn (bool): If True (default), displays the best fit line's equation above the scatterplot. {options} Returns: VBox with two children: the equation widget and the figure. >>> xs = np.arange(10) >>> ys = np.arange(10) + np.random.rand(10) >>> scatter_drag(xs, ys) VBox(...) """ params = { 'marks': [{ 'x': x_points, 'y': y_points, 'enable_move': True, }, { 'colors': [GOLDENROD], }] } fig = options.get('_fig', False) or _create_fig(options=options) [scat, lin] = _create_marks( fig=fig, marks=[bq.Scatter, bq.Lines], options=options, params=params ) _add_marks(fig, [scat, lin]) equation = widgets.Label() # create line fit to data and display equation def update_line(change=None): x_sc = scat.scales['x'] lin.x = [ x_sc.min if x_sc.min is not None else np.min(scat.x), x_sc.max if x_sc.max is not None else np.max(scat.x), ] poly = np.polyfit(scat.x, scat.y, deg=1) lin.y = np.polyval(poly, lin.x) if show_eqn: equation.value = 'y = {:.2f}x + {:.2f}'.format(poly[0], poly[1]) update_line() scat.observe(update_line, names=['x', 'y']) return widgets.VBox([equation, fig])
def function[scatter_drag, parameter[x_points, y_points]]: constant[ Generates an interactive scatter plot with the best fit line plotted over the points. The points can be dragged by the user and the line will automatically update. Args: x_points (Array Number): x-values of points to plot y_points (Array Number): y-values of points to plot Kwargs: show_eqn (bool): If True (default), displays the best fit line's equation above the scatterplot. {options} Returns: VBox with two children: the equation widget and the figure. >>> xs = np.arange(10) >>> ys = np.arange(10) + np.random.rand(10) >>> scatter_drag(xs, ys) VBox(...) ] variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da1b19c1990>], [<ast.List object at 0x7da1b19c1d80>]] variable[fig] assign[=] <ast.BoolOp object at 0x7da1b19c0790> <ast.List object at 0x7da1b19c0070> assign[=] call[name[_create_marks], parameter[]] call[name[_add_marks], parameter[name[fig], list[[<ast.Name object at 0x7da1b1951a50>, <ast.Name object at 0x7da1b1951ed0>]]]] variable[equation] assign[=] call[name[widgets].Label, parameter[]] def function[update_line, parameter[change]]: variable[x_sc] assign[=] call[name[scat].scales][constant[x]] name[lin].x assign[=] list[[<ast.IfExp object at 0x7da1b19ccdc0>, <ast.IfExp object at 0x7da1b19ce6e0>]] variable[poly] assign[=] call[name[np].polyfit, parameter[name[scat].x, name[scat].y]] name[lin].y assign[=] call[name[np].polyval, parameter[name[poly], name[lin].x]] if name[show_eqn] begin[:] name[equation].value assign[=] call[constant[y = {:.2f}x + {:.2f}].format, parameter[call[name[poly]][constant[0]], call[name[poly]][constant[1]]]] call[name[update_line], parameter[]] call[name[scat].observe, parameter[name[update_line]]] return[call[name[widgets].VBox, parameter[list[[<ast.Name object at 0x7da1b19ccfd0>, <ast.Name object at 0x7da1b19cd360>]]]]]
keyword[def] identifier[scatter_drag] ( identifier[x_points] : literal[string] , identifier[y_points] : literal[string] , *, identifier[fig] = keyword[None] , identifier[show_eqn] = keyword[True] , identifier[options] ={} ): literal[string] identifier[params] ={ literal[string] :[{ literal[string] : identifier[x_points] , literal[string] : identifier[y_points] , literal[string] : keyword[True] , },{ literal[string] :[ identifier[GOLDENROD] ], }] } identifier[fig] = identifier[options] . identifier[get] ( literal[string] , keyword[False] ) keyword[or] identifier[_create_fig] ( identifier[options] = identifier[options] ) [ identifier[scat] , identifier[lin] ]= identifier[_create_marks] ( identifier[fig] = identifier[fig] , identifier[marks] =[ identifier[bq] . identifier[Scatter] , identifier[bq] . identifier[Lines] ], identifier[options] = identifier[options] , identifier[params] = identifier[params] ) identifier[_add_marks] ( identifier[fig] ,[ identifier[scat] , identifier[lin] ]) identifier[equation] = identifier[widgets] . identifier[Label] () keyword[def] identifier[update_line] ( identifier[change] = keyword[None] ): identifier[x_sc] = identifier[scat] . identifier[scales] [ literal[string] ] identifier[lin] . identifier[x] =[ identifier[x_sc] . identifier[min] keyword[if] identifier[x_sc] . identifier[min] keyword[is] keyword[not] keyword[None] keyword[else] identifier[np] . identifier[min] ( identifier[scat] . identifier[x] ), identifier[x_sc] . identifier[max] keyword[if] identifier[x_sc] . identifier[max] keyword[is] keyword[not] keyword[None] keyword[else] identifier[np] . identifier[max] ( identifier[scat] . identifier[x] ), ] identifier[poly] = identifier[np] . identifier[polyfit] ( identifier[scat] . identifier[x] , identifier[scat] . identifier[y] , identifier[deg] = literal[int] ) identifier[lin] . identifier[y] = identifier[np] . identifier[polyval] ( identifier[poly] , identifier[lin] . identifier[x] ) keyword[if] identifier[show_eqn] : identifier[equation] . identifier[value] = literal[string] . identifier[format] ( identifier[poly] [ literal[int] ], identifier[poly] [ literal[int] ]) identifier[update_line] () identifier[scat] . identifier[observe] ( identifier[update_line] , identifier[names] =[ literal[string] , literal[string] ]) keyword[return] identifier[widgets] . identifier[VBox] ([ identifier[equation] , identifier[fig] ])
def scatter_drag(x_points: 'Array', y_points: 'Array', *, fig=None, show_eqn=True, options={}): """ Generates an interactive scatter plot with the best fit line plotted over the points. The points can be dragged by the user and the line will automatically update. Args: x_points (Array Number): x-values of points to plot y_points (Array Number): y-values of points to plot Kwargs: show_eqn (bool): If True (default), displays the best fit line's equation above the scatterplot. {options} Returns: VBox with two children: the equation widget and the figure. >>> xs = np.arange(10) >>> ys = np.arange(10) + np.random.rand(10) >>> scatter_drag(xs, ys) VBox(...) """ params = {'marks': [{'x': x_points, 'y': y_points, 'enable_move': True}, {'colors': [GOLDENROD]}]} fig = options.get('_fig', False) or _create_fig(options=options) [scat, lin] = _create_marks(fig=fig, marks=[bq.Scatter, bq.Lines], options=options, params=params) _add_marks(fig, [scat, lin]) equation = widgets.Label() # create line fit to data and display equation def update_line(change=None): x_sc = scat.scales['x'] lin.x = [x_sc.min if x_sc.min is not None else np.min(scat.x), x_sc.max if x_sc.max is not None else np.max(scat.x)] poly = np.polyfit(scat.x, scat.y, deg=1) lin.y = np.polyval(poly, lin.x) if show_eqn: equation.value = 'y = {:.2f}x + {:.2f}'.format(poly[0], poly[1]) # depends on [control=['if'], data=[]] update_line() scat.observe(update_line, names=['x', 'y']) return widgets.VBox([equation, fig])
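A standalone sketch of the fit that update_line recomputes on every drag: a degree-1 polyfit over the scatter points, evaluated at the x extremes to draw the best-fit line.

import numpy as np

xs = np.arange(10)
ys = np.arange(10) + np.random.rand(10)
slope, intercept = np.polyfit(xs, ys, deg=1)
line_x = [xs.min(), xs.max()]
line_y = np.polyval([slope, intercept], line_x)
print('y = {:.2f}x + {:.2f}'.format(slope, intercept))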
def transaction_manager(fn): """ Decorator which wraps whole function into ``with transaction.manager:``. """ @wraps(fn) def transaction_manager_decorator(*args, **kwargs): with transaction.manager: return fn(*args, **kwargs) return transaction_manager_decorator
def function[transaction_manager, parameter[fn]]: constant[ Decorator which wraps whole function into ``with transaction.manager:``. ] def function[transaction_manager_decorator, parameter[]]: with name[transaction].manager begin[:] return[call[name[fn], parameter[<ast.Starred object at 0x7da2041d9300>]]] return[name[transaction_manager_decorator]]
keyword[def] identifier[transaction_manager] ( identifier[fn] ): literal[string] @ identifier[wraps] ( identifier[fn] ) keyword[def] identifier[transaction_manager_decorator] (* identifier[args] ,** identifier[kwargs] ): keyword[with] identifier[transaction] . identifier[manager] : keyword[return] identifier[fn] (* identifier[args] ,** identifier[kwargs] ) keyword[return] identifier[transaction_manager_decorator]
def transaction_manager(fn): """ Decorator which wraps whole function into ``with transaction.manager:``. """ @wraps(fn) def transaction_manager_decorator(*args, **kwargs): with transaction.manager: return fn(*args, **kwargs) # depends on [control=['with'], data=[]] return transaction_manager_decorator
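A usage sketch for the decorator above, assuming the Zope transaction package (import transaction) and from functools import wraps are available, which the decorator itself relies on. With no data managers joined, the commit on return is a no-op, so this runs as-is; the context manager commits on normal return and aborts on exception.

@transaction_manager
def do_work():
    print("runs inside a transaction; commits on return, aborts on exception")

do_work()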
def deal_list_query(self, code="", trd_env=TrdEnv.REAL, acc_id=0, acc_index=0): """for querying deal list""" ret, msg = self._check_trd_env(trd_env) if ret != RET_OK: return ret, msg ret, msg, acc_id = self._check_acc_id_and_acc_index(trd_env, acc_id, acc_index) if ret != RET_OK: return ret, msg ret, msg, stock_code = self._check_stock_code(code) if ret != RET_OK: return ret, msg query_processor = self._get_sync_query_processor( DealListQuery.pack_req, DealListQuery.unpack_rsp) kargs = { 'code': stock_code, 'trd_mkt': self.__trd_mkt, 'trd_env': trd_env, 'acc_id': acc_id, 'conn_id': self.get_sync_conn_id() } ret_code, msg, deal_list = query_processor(**kargs) if ret_code != RET_OK: return RET_ERROR, msg col_list = [ "code", "stock_name", "deal_id", "order_id", "qty", "price", "trd_side", "create_time", "counter_broker_id", "counter_broker_name" ] deal_list_table = pd.DataFrame(deal_list, columns=col_list) return RET_OK, deal_list_table
def function[deal_list_query, parameter[self, code, trd_env, acc_id, acc_index]]: constant[for querying deal list] <ast.Tuple object at 0x7da20c991bd0> assign[=] call[name[self]._check_trd_env, parameter[name[trd_env]]] if compare[name[ret] not_equal[!=] name[RET_OK]] begin[:] return[tuple[[<ast.Name object at 0x7da20c990b80>, <ast.Name object at 0x7da20c993640>]]] <ast.Tuple object at 0x7da20c990b20> assign[=] call[name[self]._check_acc_id_and_acc_index, parameter[name[trd_env], name[acc_id], name[acc_index]]] if compare[name[ret] not_equal[!=] name[RET_OK]] begin[:] return[tuple[[<ast.Name object at 0x7da20c992ce0>, <ast.Name object at 0x7da20c991c00>]]] <ast.Tuple object at 0x7da20c990e50> assign[=] call[name[self]._check_stock_code, parameter[name[code]]] if compare[name[ret] not_equal[!=] name[RET_OK]] begin[:] return[tuple[[<ast.Name object at 0x7da20c9936d0>, <ast.Name object at 0x7da20c991240>]]] variable[query_processor] assign[=] call[name[self]._get_sync_query_processor, parameter[name[DealListQuery].pack_req, name[DealListQuery].unpack_rsp]] variable[kargs] assign[=] dictionary[[<ast.Constant object at 0x7da20c993670>, <ast.Constant object at 0x7da20c9903a0>, <ast.Constant object at 0x7da20c990430>, <ast.Constant object at 0x7da20c990130>, <ast.Constant object at 0x7da20c990b50>], [<ast.Name object at 0x7da20c992b00>, <ast.Attribute object at 0x7da20c991ff0>, <ast.Name object at 0x7da20c992200>, <ast.Name object at 0x7da20c990310>, <ast.Call object at 0x7da20c990760>]] <ast.Tuple object at 0x7da20c993520> assign[=] call[name[query_processor], parameter[]] if compare[name[ret_code] not_equal[!=] name[RET_OK]] begin[:] return[tuple[[<ast.Name object at 0x7da20c990040>, <ast.Name object at 0x7da20c993ca0>]]] variable[col_list] assign[=] list[[<ast.Constant object at 0x7da20c9922f0>, <ast.Constant object at 0x7da20c993c10>, <ast.Constant object at 0x7da20c992ad0>, <ast.Constant object at 0x7da20c990100>, <ast.Constant object at 0x7da20c993160>, <ast.Constant object at 0x7da20c992680>, <ast.Constant object at 0x7da20c9923e0>, <ast.Constant object at 0x7da20c992320>, <ast.Constant object at 0x7da20c9909d0>, <ast.Constant object at 0x7da20c993760>]] variable[deal_list_table] assign[=] call[name[pd].DataFrame, parameter[name[deal_list]]] return[tuple[[<ast.Name object at 0x7da20c992a40>, <ast.Name object at 0x7da20c990e80>]]]
keyword[def] identifier[deal_list_query] ( identifier[self] , identifier[code] = literal[string] , identifier[trd_env] = identifier[TrdEnv] . identifier[REAL] , identifier[acc_id] = literal[int] , identifier[acc_index] = literal[int] ): literal[string] identifier[ret] , identifier[msg] = identifier[self] . identifier[_check_trd_env] ( identifier[trd_env] ) keyword[if] identifier[ret] != identifier[RET_OK] : keyword[return] identifier[ret] , identifier[msg] identifier[ret] , identifier[msg] , identifier[acc_id] = identifier[self] . identifier[_check_acc_id_and_acc_index] ( identifier[trd_env] , identifier[acc_id] , identifier[acc_index] ) keyword[if] identifier[ret] != identifier[RET_OK] : keyword[return] identifier[ret] , identifier[msg] identifier[ret] , identifier[msg] , identifier[stock_code] = identifier[self] . identifier[_check_stock_code] ( identifier[code] ) keyword[if] identifier[ret] != identifier[RET_OK] : keyword[return] identifier[ret] , identifier[msg] identifier[query_processor] = identifier[self] . identifier[_get_sync_query_processor] ( identifier[DealListQuery] . identifier[pack_req] , identifier[DealListQuery] . identifier[unpack_rsp] ) identifier[kargs] ={ literal[string] : identifier[stock_code] , literal[string] : identifier[self] . identifier[__trd_mkt] , literal[string] : identifier[trd_env] , literal[string] : identifier[acc_id] , literal[string] : identifier[self] . identifier[get_sync_conn_id] () } identifier[ret_code] , identifier[msg] , identifier[deal_list] = identifier[query_processor] (** identifier[kargs] ) keyword[if] identifier[ret_code] != identifier[RET_OK] : keyword[return] identifier[RET_ERROR] , identifier[msg] identifier[col_list] =[ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ] identifier[deal_list_table] = identifier[pd] . identifier[DataFrame] ( identifier[deal_list] , identifier[columns] = identifier[col_list] ) keyword[return] identifier[RET_OK] , identifier[deal_list_table]
def deal_list_query(self, code='', trd_env=TrdEnv.REAL, acc_id=0, acc_index=0): """for querying deal list""" (ret, msg) = self._check_trd_env(trd_env) if ret != RET_OK: return (ret, msg) # depends on [control=['if'], data=['ret']] (ret, msg, acc_id) = self._check_acc_id_and_acc_index(trd_env, acc_id, acc_index) if ret != RET_OK: return (ret, msg) # depends on [control=['if'], data=['ret']] (ret, msg, stock_code) = self._check_stock_code(code) if ret != RET_OK: return (ret, msg) # depends on [control=['if'], data=['ret']] query_processor = self._get_sync_query_processor(DealListQuery.pack_req, DealListQuery.unpack_rsp) kargs = {'code': stock_code, 'trd_mkt': self.__trd_mkt, 'trd_env': trd_env, 'acc_id': acc_id, 'conn_id': self.get_sync_conn_id()} (ret_code, msg, deal_list) = query_processor(**kargs) if ret_code != RET_OK: return (RET_ERROR, msg) # depends on [control=['if'], data=[]] col_list = ['code', 'stock_name', 'deal_id', 'order_id', 'qty', 'price', 'trd_side', 'create_time', 'counter_broker_id', 'counter_broker_name'] deal_list_table = pd.DataFrame(deal_list, columns=col_list) return (RET_OK, deal_list_table)
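A standalone sketch of the final step in deal_list_query: the raw deal records returned by the query processor are normalised into a pandas DataFrame with a fixed column order. The sample row is made up for illustration.

import pandas as pd

col_list = ["code", "stock_name", "deal_id", "order_id", "qty", "price",
            "trd_side", "create_time", "counter_broker_id",
            "counter_broker_name"]
deal_list = [{"code": "HK.00700", "stock_name": "Tencent", "deal_id": "d1",
              "order_id": "o1", "qty": 100, "price": 350.0, "trd_side": "BUY",
              "create_time": "2019-01-02 10:00:00", "counter_broker_id": 0,
              "counter_broker_name": ""}]
print(pd.DataFrame(deal_list, columns=col_list))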
def density(self): """ The percent of non- ``fill_value`` points, as decimal. Examples -------- >>> s = SparseArray([0, 0, 1, 1, 1], fill_value=0) >>> s.density 0.6 """ r = float(self.sp_index.npoints) / float(self.sp_index.length) return r
def function[density, parameter[self]]: constant[ The percent of non- ``fill_value`` points, as decimal. Examples -------- >>> s = SparseArray([0, 0, 1, 1, 1], fill_value=0) >>> s.density 0.6 ] variable[r] assign[=] binary_operation[call[name[float], parameter[name[self].sp_index.npoints]] / call[name[float], parameter[name[self].sp_index.length]]] return[name[r]]
keyword[def] identifier[density] ( identifier[self] ): literal[string] identifier[r] = identifier[float] ( identifier[self] . identifier[sp_index] . identifier[npoints] )/ identifier[float] ( identifier[self] . identifier[sp_index] . identifier[length] ) keyword[return] identifier[r]
def density(self): """ The percent of non- ``fill_value`` points, as decimal. Examples -------- >>> s = SparseArray([0, 0, 1, 1, 1], fill_value=0) >>> s.density 0.6 """ r = float(self.sp_index.npoints) / float(self.sp_index.length) return r
def find_natural_neighbors(tri, grid_points):
    r"""Return the natural neighbor triangles for each given grid cell.

    These are determined by the properties of the given Delaunay triangulation.
    A triangle is a natural neighbor of a grid cell if that triangle's
    circumcenter is within the circumradius of the grid cell center.

    Parameters
    ----------
    tri: Object
        A Delaunay Triangulation.
    grid_points: (X, Y) ndarray
        Locations of grids.

    Returns
    -------
    members: dictionary
        List of simplex codes for natural neighbor triangles in 'tri' for each grid cell.
    triangle_info: dictionary
        Circumcenter and radius information for each triangle in 'tri'.

    """
    tree = cKDTree(grid_points)

    in_triangulation = tri.find_simplex(tree.data) >= 0

    triangle_info = {}

    members = {key: [] for key in range(len(tree.data))}

    for i, simplices in enumerate(tri.simplices):

        ps = tri.points[simplices]

        cc = circumcenter(*ps)
        r = circumcircle_radius(*ps)

        triangle_info[i] = {'cc': cc, 'r': r}

        qualifiers = tree.query_ball_point(cc, r)

        for qualifier in qualifiers:
            if in_triangulation[qualifier]:
                members[qualifier].append(i)

    return members, triangle_info
def function[find_natural_neighbors, parameter[tri, grid_points]]: constant[Return the natural neighbor triangles for each given grid cell. These are determined by the properties of the given delaunay triangulation. A triangle is a natural neighbor of a grid cell if that triangles circumcenter is within the circumradius of the grid cell center. Parameters ---------- tri: Object A Delaunay Triangulation. grid_points: (X, Y) ndarray Locations of grids. Returns ------- members: dictionary List of simplex codes for natural neighbor triangles in 'tri' for each grid cell. triangle_info: dictionary Circumcenter and radius information for each triangle in 'tri'. ] variable[tree] assign[=] call[name[cKDTree], parameter[name[grid_points]]] variable[in_triangulation] assign[=] compare[call[name[tri].find_simplex, parameter[name[tree].data]] greater_or_equal[>=] constant[0]] variable[triangle_info] assign[=] dictionary[[], []] variable[members] assign[=] <ast.DictComp object at 0x7da1b1d34dc0> for taget[tuple[[<ast.Name object at 0x7da1b1d36980>, <ast.Name object at 0x7da1b1d34880>]]] in starred[call[name[enumerate], parameter[name[tri].simplices]]] begin[:] variable[ps] assign[=] call[name[tri].points][name[simplices]] variable[cc] assign[=] call[name[circumcenter], parameter[<ast.Starred object at 0x7da1b1d37220>]] variable[r] assign[=] call[name[circumcircle_radius], parameter[<ast.Starred object at 0x7da1b1d35690>]] call[name[triangle_info]][name[i]] assign[=] dictionary[[<ast.Constant object at 0x7da1b1d35870>, <ast.Constant object at 0x7da1b1d37b20>], [<ast.Name object at 0x7da1b1d36bf0>, <ast.Name object at 0x7da1b1d377c0>]] variable[qualifiers] assign[=] call[name[tree].query_ball_point, parameter[name[cc], name[r]]] for taget[name[qualifier]] in starred[name[qualifiers]] begin[:] if call[name[in_triangulation]][name[qualifier]] begin[:] call[call[name[members]][name[qualifier]].append, parameter[name[i]]] return[tuple[[<ast.Name object at 0x7da1b1d36680>, <ast.Name object at 0x7da1b1d365f0>]]]
keyword[def] identifier[find_natural_neighbors] ( identifier[tri] , identifier[grid_points] ): literal[string] identifier[tree] = identifier[cKDTree] ( identifier[grid_points] ) identifier[in_triangulation] = identifier[tri] . identifier[find_simplex] ( identifier[tree] . identifier[data] )>= literal[int] identifier[triangle_info] ={} identifier[members] ={ identifier[key] :[] keyword[for] identifier[key] keyword[in] identifier[range] ( identifier[len] ( identifier[tree] . identifier[data] ))} keyword[for] identifier[i] , identifier[simplices] keyword[in] identifier[enumerate] ( identifier[tri] . identifier[simplices] ): identifier[ps] = identifier[tri] . identifier[points] [ identifier[simplices] ] identifier[cc] = identifier[circumcenter] (* identifier[ps] ) identifier[r] = identifier[circumcircle_radius] (* identifier[ps] ) identifier[triangle_info] [ identifier[i] ]={ literal[string] : identifier[cc] , literal[string] : identifier[r] } identifier[qualifiers] = identifier[tree] . identifier[query_ball_point] ( identifier[cc] , identifier[r] ) keyword[for] identifier[qualifier] keyword[in] identifier[qualifiers] : keyword[if] identifier[in_triangulation] [ identifier[qualifier] ]: identifier[members] [ identifier[qualifier] ]. identifier[append] ( identifier[i] ) keyword[return] identifier[members] , identifier[triangle_info]
def find_natural_neighbors(tri, grid_points): """Return the natural neighbor triangles for each given grid cell. These are determined by the properties of the given delaunay triangulation. A triangle is a natural neighbor of a grid cell if that triangles circumcenter is within the circumradius of the grid cell center. Parameters ---------- tri: Object A Delaunay Triangulation. grid_points: (X, Y) ndarray Locations of grids. Returns ------- members: dictionary List of simplex codes for natural neighbor triangles in 'tri' for each grid cell. triangle_info: dictionary Circumcenter and radius information for each triangle in 'tri'. """ tree = cKDTree(grid_points) in_triangulation = tri.find_simplex(tree.data) >= 0 triangle_info = {} members = {key: [] for key in range(len(tree.data))} for (i, simplices) in enumerate(tri.simplices): ps = tri.points[simplices] cc = circumcenter(*ps) r = circumcircle_radius(*ps) triangle_info[i] = {'cc': cc, 'r': r} qualifiers = tree.query_ball_point(cc, r) for qualifier in qualifiers: if in_triangulation[qualifier]: members[qualifier].append(i) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['qualifier']] # depends on [control=['for'], data=[]] return (members, triangle_info)
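A standalone sketch of the membership test at the heart of find_natural_neighbors: a grid point is a candidate for a triangle when it lies within that triangle's circumcircle, which cKDTree.query_ball_point answers in one call. The circumcenter and radius below are made-up values rather than ones computed from a real triangulation.

import numpy as np
from scipy.spatial import cKDTree

grid_points = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0], [2.0, 2.0]])
tree = cKDTree(grid_points)
cc, r = np.array([0.5, 0.5]), 1.0
print(sorted(tree.query_ball_point(cc, r)))  # [0, 1, 2]; point (2, 2) is outside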
def create_user(self, username, password):
    """
    Creates a user in the CouchDB instance with the username `username`
    and password `password`
    """
    user_id = "org.couchdb.user:" + username
    # couchdb-python's Resource.put returns a (status, headers, body) tuple
    res = self["_users"].resource.put(
        user_id,
        body=json.dumps({
            "_id": user_id,
            "name": username,
            "roles": [],
            "type": "user",
            "password": password,
            "farms": []
        })
    )
    if res[0] == 409:
        raise RuntimeError(
            'The username "{}" is already taken'.format(username)
        )
    elif res[0] != 201:
        # The original mixed tuple indexing with requests-style attribute
        # access (res.status_code, res.content), which would raise an
        # AttributeError here; index the tuple instead.
        raise RuntimeError(
            "Failed to create user ({}): {}".format(res[0], res[2])
        )
def function[create_user, parameter[self, username, password]]: constant[ Creates a user in the CouchDB instance with the username `username` and password `password` ] variable[user_id] assign[=] binary_operation[constant[org.couchdb.user:] + name[username]] variable[res] assign[=] call[call[name[self]][constant[_users]].resource.put, parameter[name[user_id]]] if compare[call[name[res]][constant[0]] equal[==] constant[409]] begin[:] <ast.Raise object at 0x7da207f01c90>
keyword[def] identifier[create_user] ( identifier[self] , identifier[username] , identifier[password] ): literal[string] identifier[user_id] = literal[string] + identifier[username] identifier[res] = identifier[self] [ literal[string] ]. identifier[resource] . identifier[put] ( identifier[user_id] , identifier[body] = identifier[json] . identifier[dumps] ({ literal[string] : identifier[user_id] , literal[string] : identifier[username] , literal[string] :[], literal[string] : literal[string] , literal[string] : identifier[password] , literal[string] :[] }) ) keyword[if] identifier[res] [ literal[int] ]== literal[int] : keyword[raise] identifier[RuntimeError] ( literal[string] . identifier[format] ( identifier[username] ) ) keyword[elif] identifier[res] [ literal[int] ]!= literal[int] : keyword[raise] identifier[RuntimeError] ( literal[string] . identifier[format] ( identifier[res] . identifier[status_code] , identifier[res] . identifier[content] ) )
def create_user(self, username, password): """ Creates a user in the CouchDB instance with the username `username` and password `password` """ user_id = 'org.couchdb.user:' + username res = self['_users'].resource.put(user_id, body=json.dumps({'_id': user_id, 'name': username, 'roles': [], 'type': 'user', 'password': password, 'farms': []})) if res[0] == 409: raise RuntimeError('The username "{}" is already taken'.format(username)) # depends on [control=['if'], data=[]] elif res[0] != 201: raise RuntimeError('Failed to create user ({}): {}'.format(res.status_code, res.content)) # depends on [control=['if'], data=[]]
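A standalone sketch of the _users document that create_user PUTs above. CouchDB derives the document id from the username and hashes the plaintext 'password' field server-side on save; the 'farms' field is application-specific rather than part of CouchDB's user schema.

import json

username, password = "alice", "s3cret"
doc = {"_id": "org.couchdb.user:" + username,
       "name": username,
       "roles": [],
       "type": "user",
       "password": password,
       "farms": []}
print(json.dumps(doc, indent=2))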
def uid(self, p_todo): """ Returns the unique text-based ID for a todo item. """ try: return self._todo_id_map[p_todo] except KeyError as ex: raise InvalidTodoException from ex
def function[uid, parameter[self, p_todo]]: constant[ Returns the unique text-based ID for a todo item. ] <ast.Try object at 0x7da20c6e4a90>
keyword[def] identifier[uid] ( identifier[self] , identifier[p_todo] ): literal[string] keyword[try] : keyword[return] identifier[self] . identifier[_todo_id_map] [ identifier[p_todo] ] keyword[except] identifier[KeyError] keyword[as] identifier[ex] : keyword[raise] identifier[InvalidTodoException] keyword[from] identifier[ex]
def uid(self, p_todo): """ Returns the unique text-based ID for a todo item. """ try: return self._todo_id_map[p_todo] # depends on [control=['try'], data=[]] except KeyError as ex: raise InvalidTodoException from ex # depends on [control=['except'], data=['ex']]
def _bg_combine(self, bgs): """Combine several background amplitude images""" out = np.ones(self.h5["raw"].shape, dtype=float) # bg is an h5py.DataSet for bg in bgs: out *= bg[:] return out
def function[_bg_combine, parameter[self, bgs]]: constant[Combine several background amplitude images] variable[out] assign[=] call[name[np].ones, parameter[call[name[self].h5][constant[raw]].shape]] for taget[name[bg]] in starred[name[bgs]] begin[:] <ast.AugAssign object at 0x7da1b10457e0> return[name[out]]
keyword[def] identifier[_bg_combine] ( identifier[self] , identifier[bgs] ): literal[string] identifier[out] = identifier[np] . identifier[ones] ( identifier[self] . identifier[h5] [ literal[string] ]. identifier[shape] , identifier[dtype] = identifier[float] ) keyword[for] identifier[bg] keyword[in] identifier[bgs] : identifier[out] *= identifier[bg] [:] keyword[return] identifier[out]
def _bg_combine(self, bgs): """Combine several background amplitude images""" out = np.ones(self.h5['raw'].shape, dtype=float) # bg is an h5py.DataSet for bg in bgs: out *= bg[:] # depends on [control=['for'], data=['bg']] return out
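A standalone numpy sketch of the combination rule in _bg_combine: background amplitude images are merged by elementwise multiplication into a float array shaped like the raw data.

import numpy as np

raw_shape = (2, 3)
bgs = [np.full(raw_shape, 2.0), np.full(raw_shape, 0.5)]
out = np.ones(raw_shape, dtype=float)
for bg in bgs:
    out *= bg[:]
print(out)  # every element is 2.0 * 0.5 = 1.0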
def filter(self, model=None, context=None): """ Perform filtering on the model. Will change model in place. :param model: object or dict :param context: object, dict or None :return: None """ if model is None: return # properties self.filter_properties(model, context=context) # entities self.filter_entities(model, context=context) # collections self.filter_collections(model, context=context)
def function[filter, parameter[self, model, context]]: constant[ Perform filtering on the model. Will change model in place. :param model: object or dict :param context: object, dict or None :return: None ] if compare[name[model] is constant[None]] begin[:] return[None] call[name[self].filter_properties, parameter[name[model]]] call[name[self].filter_entities, parameter[name[model]]] call[name[self].filter_collections, parameter[name[model]]]
keyword[def] identifier[filter] ( identifier[self] , identifier[model] = keyword[None] , identifier[context] = keyword[None] ): literal[string] keyword[if] identifier[model] keyword[is] keyword[None] : keyword[return] identifier[self] . identifier[filter_properties] ( identifier[model] , identifier[context] = identifier[context] ) identifier[self] . identifier[filter_entities] ( identifier[model] , identifier[context] = identifier[context] ) identifier[self] . identifier[filter_collections] ( identifier[model] , identifier[context] = identifier[context] )
def filter(self, model=None, context=None): """ Perform filtering on the model. Will change model in place. :param model: object or dict :param context: object, dict or None :return: None """ if model is None: return # depends on [control=['if'], data=[]] # properties self.filter_properties(model, context=context) # entities self.filter_entities(model, context=context) # collections self.filter_collections(model, context=context)
def cleanup(self):
    """Clean up, making sure the process is stopped before we pack up and go home."""
    if self.process is None:
        # Process wasn't running yet, so nothing to worry about
        return

    if self.process.poll() is None:
        log.info("Sending TERM to %d", self.process.pid)
        self.process.terminate()

        # Give the process a second to terminate; if it didn't, kill it.
        # time.monotonic() measures wall-clock time; the original
        # time.clock() measured CPU time on Unix (and was removed in
        # Python 3.8), so the timeout would never have elapsed while
        # this loop slept.
        start = time.monotonic()
        while time.monotonic() - start < 1.0:
            time.sleep(0.05)
            if self.process.poll() is not None:
                break
        else:
            log.info("Sending KILL to %d", self.process.pid)
            self.process.kill()

    assert self.process.poll() is not None
def function[cleanup, parameter[self]]: constant[Clean up, making sure the process is stopped before we pack up and go home.] if compare[name[self].process is constant[None]] begin[:] return[None] if compare[call[name[self].process.poll, parameter[]] is constant[None]] begin[:] call[name[log].info, parameter[constant[Sending TERM to %d], name[self].process.pid]] call[name[self].process.terminate, parameter[]] variable[start] assign[=] call[name[time].clock, parameter[]] while compare[binary_operation[call[name[time].clock, parameter[]] - name[start]] less[<] constant[1.0]] begin[:] call[name[time].sleep, parameter[constant[0.05]]] if compare[call[name[self].process.poll, parameter[]] is_not constant[None]] begin[:] break assert[compare[call[name[self].process.poll, parameter[]] is_not constant[None]]]
keyword[def] identifier[cleanup] ( identifier[self] ): literal[string] keyword[if] identifier[self] . identifier[process] keyword[is] keyword[None] : keyword[return] keyword[if] identifier[self] . identifier[process] . identifier[poll] () keyword[is] keyword[None] : identifier[log] . identifier[info] ( literal[string] , identifier[self] . identifier[process] . identifier[pid] ) identifier[self] . identifier[process] . identifier[terminate] () identifier[start] = identifier[time] . identifier[clock] () keyword[while] identifier[time] . identifier[clock] ()- identifier[start] < literal[int] : identifier[time] . identifier[sleep] ( literal[int] ) keyword[if] identifier[self] . identifier[process] . identifier[poll] () keyword[is] keyword[not] keyword[None] : keyword[break] keyword[else] : identifier[log] . identifier[info] ( literal[string] , identifier[self] . identifier[process] . identifier[pid] ) identifier[self] . identifier[process] . identifier[kill] () keyword[assert] identifier[self] . identifier[process] . identifier[poll] () keyword[is] keyword[not] keyword[None]
def cleanup(self): """Clean up, making sure the process is stopped before we pack up and go home.""" if self.process is None: # Process wasn't running yet, so nothing to worry about return # depends on [control=['if'], data=[]] if self.process.poll() is None: log.info('Sending TERM to %d', self.process.pid) self.process.terminate() # Give process a second to terminate, if it didn't, kill it. start = time.clock() while time.clock() - start < 1.0: time.sleep(0.05) if self.process.poll() is not None: break # depends on [control=['if'], data=[]] # depends on [control=['while'], data=[]] else: log.info('Sending KILL to %d', self.process.pid) self.process.kill() # depends on [control=['if'], data=[]] assert self.process.poll() is not None
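A self-contained demo of the terminate-then-kill pattern used by cleanup, run against a short-lived child process; it assumes a POSIX 'sleep' binary and uses time.monotonic() for the wall-clock timeout (see the note in the entry above).

import subprocess
import time

proc = subprocess.Popen(["sleep", "30"])
proc.terminate()
start = time.monotonic()
while time.monotonic() - start < 1.0:
    time.sleep(0.05)
    if proc.poll() is not None:
        break
else:
    proc.kill()
proc.wait()  # reap the child so poll() reports the exit status
assert proc.poll() is not None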
def fastaAlignmentRead(fasta, mapFn=(lambda x: x), l=None):
    """
    reads in columns of multiple alignment and returns them iteratively
    """
    # Ported from Python 2 (xrange -> range, sys.maxint -> sys.maxsize);
    # behaviour is otherwise unchanged.
    if l is None:
        l = _getMultiFastaOffsets(fasta)
    else:
        l = l[:]
    seqNo = len(l)
    # Open one handle per sequence, seeked to that sequence's offset.
    for i in range(0, seqNo):
        j = open(fasta, 'r')
        j.seek(l[i])
        l[i] = j
    column = [sys.maxsize] * seqNo  # sentinel placeholders
    if seqNo != 0:
        while True:
            for j in range(0, seqNo):
                i = l[j].read(1)
                while i == '\n':
                    i = l[j].read(1)
                column[j] = i
            if column[0] == '>' or column[0] == '':
                # End of the alignment block: every row must end together.
                for j in range(1, seqNo):
                    assert column[j] == '>' or column[j] == ''
                break
            for j in range(1, seqNo):
                assert column[j] != '>' and column[j] != ''
                column[j] = mapFn(column[j])
            yield column[:]
    for i in l:
        i.close()
def function[fastaAlignmentRead, parameter[fasta, mapFn, l]]: constant[ reads in columns of multiple alignment and returns them iteratively ] if compare[name[l] is constant[None]] begin[:] variable[l] assign[=] call[name[_getMultiFastaOffsets], parameter[name[fasta]]] variable[seqNo] assign[=] call[name[len], parameter[name[l]]] for taget[name[i]] in starred[call[name[xrange], parameter[constant[0], name[seqNo]]]] begin[:] variable[j] assign[=] call[name[open], parameter[name[fasta], constant[r]]] call[name[j].seek, parameter[call[name[l]][name[i]]]] call[name[l]][name[i]] assign[=] name[j] variable[column] assign[=] binary_operation[list[[<ast.Attribute object at 0x7da18fe90a00>]] * name[seqNo]] if compare[name[seqNo] not_equal[!=] constant[0]] begin[:] while constant[True] begin[:] for taget[name[j]] in starred[call[name[xrange], parameter[constant[0], name[seqNo]]]] begin[:] variable[i] assign[=] call[call[name[l]][name[j]].read, parameter[constant[1]]] while compare[name[i] equal[==] constant[ ]] begin[:] variable[i] assign[=] call[call[name[l]][name[j]].read, parameter[constant[1]]] call[name[column]][name[j]] assign[=] name[i] if <ast.BoolOp object at 0x7da18fe91540> begin[:] for taget[name[j]] in starred[call[name[xrange], parameter[constant[1], name[seqNo]]]] begin[:] assert[<ast.BoolOp object at 0x7da18fe90610>] break for taget[name[j]] in starred[call[name[xrange], parameter[constant[1], name[seqNo]]]] begin[:] assert[<ast.BoolOp object at 0x7da18fe930d0>] call[name[column]][name[j]] assign[=] call[name[mapFn], parameter[call[name[column]][name[j]]]] <ast.Yield object at 0x7da18fe921d0> for taget[name[i]] in starred[name[l]] begin[:] call[name[i].close, parameter[]]
keyword[def] identifier[fastaAlignmentRead] ( identifier[fasta] , identifier[mapFn] =( keyword[lambda] identifier[x] : identifier[x] ), identifier[l] = keyword[None] ): literal[string] keyword[if] identifier[l] keyword[is] keyword[None] : identifier[l] = identifier[_getMultiFastaOffsets] ( identifier[fasta] ) keyword[else] : identifier[l] = identifier[l] [:] identifier[seqNo] = identifier[len] ( identifier[l] ) keyword[for] identifier[i] keyword[in] identifier[xrange] ( literal[int] , identifier[seqNo] ): identifier[j] = identifier[open] ( identifier[fasta] , literal[string] ) identifier[j] . identifier[seek] ( identifier[l] [ identifier[i] ]) identifier[l] [ identifier[i] ]= identifier[j] identifier[column] =[ identifier[sys] . identifier[maxint] ]* identifier[seqNo] keyword[if] identifier[seqNo] != literal[int] : keyword[while] keyword[True] : keyword[for] identifier[j] keyword[in] identifier[xrange] ( literal[int] , identifier[seqNo] ): identifier[i] = identifier[l] [ identifier[j] ]. identifier[read] ( literal[int] ) keyword[while] identifier[i] == literal[string] : identifier[i] = identifier[l] [ identifier[j] ]. identifier[read] ( literal[int] ) identifier[column] [ identifier[j] ]= identifier[i] keyword[if] identifier[column] [ literal[int] ]== literal[string] keyword[or] identifier[column] [ literal[int] ]== literal[string] : keyword[for] identifier[j] keyword[in] identifier[xrange] ( literal[int] , identifier[seqNo] ): keyword[assert] identifier[column] [ identifier[j] ]== literal[string] keyword[or] identifier[column] [ identifier[j] ]== literal[string] keyword[break] keyword[for] identifier[j] keyword[in] identifier[xrange] ( literal[int] , identifier[seqNo] ): keyword[assert] identifier[column] [ identifier[j] ]!= literal[string] keyword[and] identifier[column] [ identifier[j] ]!= literal[string] identifier[column] [ identifier[j] ]= identifier[mapFn] ( identifier[column] [ identifier[j] ]) keyword[yield] identifier[column] [:] keyword[for] identifier[i] keyword[in] identifier[l] : identifier[i] . identifier[close] ()
def fastaAlignmentRead(fasta, mapFn=lambda x: x, l=None): """ reads in columns of multiple alignment and returns them iteratively """ if l is None: l = _getMultiFastaOffsets(fasta) # depends on [control=['if'], data=['l']] else: l = l[:] seqNo = len(l) for i in xrange(0, seqNo): j = open(fasta, 'r') j.seek(l[i]) l[i] = j # depends on [control=['for'], data=['i']] column = [sys.maxint] * seqNo if seqNo != 0: while True: for j in xrange(0, seqNo): i = l[j].read(1) while i == '\n': i = l[j].read(1) # depends on [control=['while'], data=['i']] column[j] = i # depends on [control=['for'], data=['j']] if column[0] == '>' or column[0] == '': for j in xrange(1, seqNo): assert column[j] == '>' or column[j] == '' # depends on [control=['for'], data=['j']] break # depends on [control=['if'], data=[]] for j in xrange(1, seqNo): assert column[j] != '>' and column[j] != '' column[j] = mapFn(column[j]) # depends on [control=['for'], data=['j']] yield column[:] # depends on [control=['while'], data=[]] # depends on [control=['if'], data=['seqNo']] for i in l: i.close() # depends on [control=['for'], data=['i']]
def get_facets(self):
    '''
    Returns a dictionary of facets::

        >>> res = solr.query('SolrClient_unittest', {
        ...     'q': 'product_name:Lorem',
        ...     'facet': True,
        ...     'facet.field': 'facet_test',
        ... })
        >>> res.get_results_count()
        4
        >>> res.get_facets()
        {'facet_test': {'ipsum': 0, 'sit': 0, 'dolor': 2, 'amet,': 1, 'Lorem': 1}}

    '''
    if not hasattr(self, 'facets'):
        self.facets = {}
        data = self.data
        if 'facet_counts' in data.keys() and type(data['facet_counts']) == dict:
            if 'facet_fields' in data['facet_counts'].keys() and type(data['facet_counts']['facet_fields']) == dict:
                for facetfield in data['facet_counts']['facet_fields']:
                    # The original wrote type(x == list), which calls type()
                    # on a boolean and is always truthy; the intended check
                    # is whether the field's value is a list.
                    if type(data['facet_counts']['facet_fields'][facetfield]) == list:
                        l = data['facet_counts']['facet_fields'][facetfield]
                        # Solr returns [value, count, value, count, ...]
                        self.facets[facetfield] = OrderedDict(zip(l[::2], l[1::2]))
                return self.facets
            else:
                raise SolrResponseError("No Facet Information in the Response")
    else:
        return self.facets
def function[get_facets, parameter[self]]: constant[ Returns a dictionary of facets:: >>> res = solr.query('SolrClient_unittest',{ 'q':'product_name:Lorem', 'facet':True, 'facet.field':'facet_test', })... ... ... ... >>> res.get_results_count() 4 >>> res.get_facets() {'facet_test': {'ipsum': 0, 'sit': 0, 'dolor': 2, 'amet,': 1, 'Lorem': 1}} ] if <ast.UnaryOp object at 0x7da18dc9b0d0> begin[:] name[self].facets assign[=] dictionary[[], []] variable[data] assign[=] name[self].data if <ast.BoolOp object at 0x7da18dc998d0> begin[:] if <ast.BoolOp object at 0x7da18dc9b160> begin[:] for taget[name[facetfield]] in starred[call[call[name[data]][constant[facet_counts]]][constant[facet_fields]]] begin[:] if call[name[type], parameter[compare[call[call[call[name[data]][constant[facet_counts]]][constant[facet_fields]]][name[facetfield]] equal[==] name[list]]]] begin[:] variable[l] assign[=] call[call[call[name[data]][constant[facet_counts]]][constant[facet_fields]]][name[facetfield]] call[name[self].facets][name[facetfield]] assign[=] call[name[OrderedDict], parameter[call[name[zip], parameter[call[name[l]][<ast.Slice object at 0x7da1b0f520e0>], call[name[l]][<ast.Slice object at 0x7da1b0f511e0>]]]]] return[name[self].facets]
keyword[def] identifier[get_facets] ( identifier[self] ): literal[string] keyword[if] keyword[not] identifier[hasattr] ( identifier[self] , literal[string] ): identifier[self] . identifier[facets] ={} identifier[data] = identifier[self] . identifier[data] keyword[if] literal[string] keyword[in] identifier[data] . identifier[keys] () keyword[and] identifier[type] ( identifier[data] [ literal[string] ])== identifier[dict] : keyword[if] literal[string] keyword[in] identifier[data] [ literal[string] ]. identifier[keys] () keyword[and] identifier[type] ( identifier[data] [ literal[string] ][ literal[string] ])== identifier[dict] : keyword[for] identifier[facetfield] keyword[in] identifier[data] [ literal[string] ][ literal[string] ]: keyword[if] identifier[type] ( identifier[data] [ literal[string] ][ literal[string] ][ identifier[facetfield] ]== identifier[list] ): identifier[l] = identifier[data] [ literal[string] ][ literal[string] ][ identifier[facetfield] ] identifier[self] . identifier[facets] [ identifier[facetfield] ]= identifier[OrderedDict] ( identifier[zip] ( identifier[l] [:: literal[int] ], identifier[l] [ literal[int] :: literal[int] ])) keyword[return] identifier[self] . identifier[facets] keyword[else] : keyword[raise] identifier[SolrResponseError] ( literal[string] ) keyword[else] : keyword[return] identifier[self] . identifier[facets]
def get_facets(self): """ Returns a dictionary of facets:: >>> res = solr.query('SolrClient_unittest',{ 'q':'product_name:Lorem', 'facet':True, 'facet.field':'facet_test', })... ... ... ... >>> res.get_results_count() 4 >>> res.get_facets() {'facet_test': {'ipsum': 0, 'sit': 0, 'dolor': 2, 'amet,': 1, 'Lorem': 1}} """ if not hasattr(self, 'facets'): self.facets = {} data = self.data if 'facet_counts' in data.keys() and type(data['facet_counts']) == dict: if 'facet_fields' in data['facet_counts'].keys() and type(data['facet_counts']['facet_fields']) == dict: for facetfield in data['facet_counts']['facet_fields']: if type(data['facet_counts']['facet_fields'][facetfield] == list): l = data['facet_counts']['facet_fields'][facetfield] self.facets[facetfield] = OrderedDict(zip(l[::2], l[1::2])) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['facetfield']] # depends on [control=['if'], data=[]] return self.facets # depends on [control=['if'], data=[]] else: raise SolrResponseError('No Facet Information in the Response') # depends on [control=['if'], data=[]] else: return self.facets
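A standalone sketch of the pairing step inside get_facets: Solr returns each facet field as a flat [value, count, value, count, ...] list, and zipping the even-index slice with the odd-index slice turns it into an ordered mapping. The sample values mirror the doctest above.

from collections import OrderedDict

l = ['dolor', 2, 'amet,', 1, 'Lorem', 1, 'ipsum', 0, 'sit', 0]
print(OrderedDict(zip(l[::2], l[1::2])))
# OrderedDict([('dolor', 2), ('amet,', 1), ('Lorem', 1), ('ipsum', 0), ('sit', 0)])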
def terminate_bits(self, payload):
    """This method adds zeros to the end of the encoded data so that the
    encoded data is of the correct length. It returns a binary string
    containing the bits to be added.
    """
    data_capacity = tables.data_capacity[self.version][self.error][0]

    if len(payload) > data_capacity:
        raise ValueError('The supplied data will not fit '
                         'within this version of a QR code.')

    # We must add up to 4 zeros to make up for any shortfall in the
    # length of the data field.
    if len(payload) == data_capacity:
        return None
    elif len(payload) <= data_capacity - 4:
        bits = self.binary_string(0, 4)
    else:
        # Make up any shortfall needed with fewer than 4 zeros.
        bits = self.binary_string(0, data_capacity - len(payload))

    return bits
def function[terminate_bits, parameter[self, payload]]: constant[This method adds zeros to the end of the encoded data so that the encoded data is of the correct length. It returns a binary string containing the bits to be added. ] variable[data_capacity] assign[=] call[call[call[name[tables].data_capacity][name[self].version]][name[self].error]][constant[0]] if compare[call[name[len], parameter[name[payload]]] greater[>] name[data_capacity]] begin[:] <ast.Raise object at 0x7da1b19cff10> if compare[call[name[len], parameter[name[payload]]] equal[==] name[data_capacity]] begin[:] return[constant[None]] return[name[bits]]
keyword[def] identifier[terminate_bits] ( identifier[self] , identifier[payload] ): literal[string] identifier[data_capacity] = identifier[tables] . identifier[data_capacity] [ identifier[self] . identifier[version] ][ identifier[self] . identifier[error] ][ literal[int] ] keyword[if] identifier[len] ( identifier[payload] )> identifier[data_capacity] : keyword[raise] identifier[ValueError] ( literal[string] literal[string] ) keyword[if] identifier[len] ( identifier[payload] )== identifier[data_capacity] : keyword[return] keyword[None] keyword[elif] identifier[len] ( identifier[payload] )<= identifier[data_capacity] - literal[int] : identifier[bits] = identifier[self] . identifier[binary_string] ( literal[int] , literal[int] ) keyword[else] : identifier[bits] = identifier[self] . identifier[binary_string] ( literal[int] , identifier[data_capacity] - identifier[len] ( identifier[payload] )) keyword[return] identifier[bits]
def terminate_bits(self, payload): """This method adds zeros to the end of the encoded data so that the encoded data is of the correct length. It returns a binary string containing the bits to be added. """ data_capacity = tables.data_capacity[self.version][self.error][0] if len(payload) > data_capacity: raise ValueError('The supplied data will not fit within this version of a QR code.') # depends on [control=['if'], data=[]] #We must add up to 4 zeros to make up for any shortfall in the #length of the data field. if len(payload) == data_capacity: return None # depends on [control=['if'], data=[]] elif len(payload) <= data_capacity - 4: bits = self.binary_string(0, 4) # depends on [control=['if'], data=[]] else: #Make up any shortfall need with less than 4 zeros bits = self.binary_string(0, data_capacity - len(payload)) return bits
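A standalone sketch of the terminator rule above, with a stand-in capacity in place of the real tables.data_capacity lookup and a local helper in place of self.binary_string; where the method returns None for a full payload, this sketch returns an empty string for printability.

def binary_string(value, length):
    # zero-padded big-endian bit string, e.g. binary_string(0, 4) -> '0000'
    return '{:0{}b}'.format(value, length)

def terminator(payload_len, data_capacity):
    if payload_len == data_capacity:
        return ''
    shortfall = data_capacity - payload_len
    return binary_string(0, min(4, shortfall))  # at most four zero bits

print(repr(terminator(128, 128)))  # ''
print(repr(terminator(126, 128)))  # '00'
print(repr(terminator(120, 128)))  # '0000'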